pytorch-day02
Image data modeling example
Prepare the dataset
- Using cifar2 as an example
- Import the necessary packages
```python
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as T
from torchvision import datasets
```
```python
# Define helper functions for preprocessing
transform_img = T.Compose([T.ToTensor()])

def transform_label(x):
    return torch.tensor([x]).float()

# Load the dataset using ImageFolder
ds_train = datasets.ImageFolder("./eat_pytorch_datasets/cifar2/train/",
    transform=transform_img, target_transform=transform_label)
ds_val = datasets.ImageFolder("./eat_pytorch_datasets/cifar2/test/",
    transform=transform_img, target_transform=transform_label)
print(ds_train.class_to_idx)

# Wrap the datasets in DataLoaders
dl_train = DataLoader(ds_train, batch_size=50, shuffle=True)
dl_val = DataLoader(ds_val, batch_size=50, shuffle=False)
```
- The default layout of a PyTorch image batch is (Batch, Channel, Height, Width).
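To double-check that layout, here is a quick sanity check (a minimal sketch, not from the original post) that pulls one batch from `dl_train` and prints the feature and label shapes:

```python
# Sketch: grab one batch and inspect the shapes (run after the loaders above)
features, labels = next(iter(dl_train))
print(features.shape)  # expected: torch.Size([50, 3, 32, 32]) -> (Batch, Channel, Height, Width)
print(labels.shape)    # expected: torch.Size([50, 1]) because of transform_label
```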
Define the model
- This is my preferred way of defining a model; worth memorizing.
```python
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        # Define the architecture
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5)
        self.dropout = nn.Dropout2d(p=0.1)
        self.adaptive_pool = nn.AdaptiveMaxPool2d((1, 1))
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(64, 32)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(32, 1)

    # Forward propagation
    def forward(self, x):
        x = self.conv1(x)
        x = self.pool(x)
        x = self.conv2(x)
        x = self.pool(x)
        x = self.dropout(x)
        x = self.adaptive_pool(x)
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.relu(x)
        x = self.linear2(x)
        return x

net = Net()
print(net)
```
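As a quick sanity check (a sketch I added, not part of the original post), feeding a dummy batch shaped like the real data through the network confirms that it produces one logit per image:

```python
# Sketch: verify the forward pass with a random batch shaped like the real data
dummy = torch.randn(50, 3, 32, 32)
out = net(dummy)
print(out.shape)  # expected: torch.Size([50, 1]) -- one logit per image
```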
Training the model
- The training-loop style used in the reference isn't my preference, so I'm skipping it; a minimal sketch is shown below.
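For completeness, here is a minimal sketch of a plain training loop for this binary-classification setup. It is not the original post's code; it assumes `nn.BCEWithLogitsLoss` on the single-logit output and an Adam optimizer, and shows only one epoch:

```python
# Sketch of a plain training loop (assumptions: BCEWithLogitsLoss, Adam, one epoch)
loss_fn = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)

net.train()
for features, labels in dl_train:
    optimizer.zero_grad()
    preds = net(features)          # shape (batch, 1), raw logits
    loss = loss_fn(preds, labels)  # labels are float tensors of shape (batch, 1)
    loss.backward()
    optimizer.step()
print(f"last batch loss: {loss.item():.4f}")
```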
Something else
- Saving the model, using it for prediction, and evaluating it follow the same procedure as Day01, so they are omitted here.
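As a reminder of that procedure, a short sketch of the state_dict approach (the file path is arbitrary and just for illustration):

```python
# Sketch: save and reload the trained weights (path is arbitrary)
torch.save(net.state_dict(), "./net_parameter.pt")

net_clone = Net()
net_clone.load_state_dict(torch.load("./net_parameter.pt"))
net_clone.eval()  # switch to eval mode before inference
```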