Minimal Example
```python
from torchvision.datasets import CIFAR10
from torch.utils.data.dataloader import DataLoader
from torch import nn
from torch.nn import functional as F
from torch import optim
import torch
from torchvision import transforms

"""Download the built-in dataset and convert it to Tensor"""
transform = transforms.Compose([transforms.ToTensor()])
dataset_train = CIFAR10(root='data', train=True, download=True, transform=transform)
dataset_test = CIFAR10(root='data', train=False, download=True, transform=transform)

"""Load the datasets into data loaders and set batch_size"""
loader_train = DataLoader(dataset_train, batch_size=16)
loader_test = DataLoader(dataset_test, batch_size=16)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=15, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=15, out_channels=30, kernel_size=3, padding=1)
        self.fc1 = nn.Linear(in_features=30 * 8 * 8, out_features=300)
        self.fc2 = nn.Linear(in_features=300, out_features=10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 30 * 8 * 8)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x


"""Network, loss function, optimizer"""
net = Net()
cross_entropy_loss = nn.CrossEntropyLoss()
optimizer = optim.SGD(params=net.parameters(), lr=1e-3, momentum=.9)

"""Training"""
for epoch in range(2):
    running_loss = 0.
    for inputs, labels in loader_train:
        # Zero the parameter gradients
        optimizer.zero_grad()
        # Forward propagation
        outputs = net(inputs)
        # Cross-entropy loss
        loss = cross_entropy_loss(outputs, labels)
        # Backpropagation
        loss.backward()
        # Update the parameters
        optimizer.step()
        # Accumulate the loss
        running_loss += loss.item()
    print('Epoch %d loss: %.2f' % (epoch + 1, running_loss))

"""Accuracy"""
correct, total = 0, 0
with torch.no_grad():  # Disable gradient computation to save memory
    for images, labels in loader_test:
        outputs = net(images)
        max_values, max_indexes = torch.max(outputs.data, dim=1)
        total += labels.size(0)
        correct += (max_indexes == labels).sum().item()
print('Accuracy on 10000 test samples: %d%%' % (100 * correct / total))
```
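The minimal example runs entirely on the CPU. If a CUDA GPU is available, the model and each batch can be moved onto it; this is a minimal sketch on top of the code above, not part of the original:

```python
import torch

# Pick the GPU if one is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = Net().to(device)  # move the model parameters onto the device

for inputs, labels in loader_train:
    # Every batch must live on the same device as the model
    inputs, labels = inputs.to(device), labels.to(device)
    outputs = net(inputs)
```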
Data Loading
Built-in Dataset Download + Data Preprocessing
```python
from torchvision.datasets import CIFAR10
from torchvision import transforms

transform = transforms.Compose([transforms.ToTensor()])  # Data preprocessing
dataset = CIFAR10(root='data', train=True, download=True, transform=transform)  # Data download
```
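ToTensor only scales pixel values into [0, 1]. A common extension, not used in this article, is to normalize each channel as well; the mean/std values below are commonly quoted CIFAR-10 statistics and should be treated as an assumption rather than values computed here:

```python
from torchvision import transforms

# Normalize each RGB channel; these CIFAR-10 statistics are commonly
# quoted values, not computed in this article (treat them as an assumption)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
                         std=(0.2470, 0.2435, 0.2616)),
])
```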
Data Loader
```python
from torch.utils.data.dataloader import DataLoader

data_loader = DataLoader(dataset, batch_size=8)
```
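A few DataLoader keyword arguments are worth knowing beyond batch_size; the options below exist in PyTorch's DataLoader, though the values are only illustrative:

```python
from torch.utils.data.dataloader import DataLoader

data_loader = DataLoader(
    dataset,
    batch_size=8,
    shuffle=True,     # reshuffle the samples every epoch (useful for training)
    num_workers=2,    # load batches in background worker processes
    drop_last=False,  # keep the final, possibly smaller, batch
)
```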
Viewing the Dataset
```python
import matplotlib.pyplot as mp
import numpy as np
from torchvision.utils import make_grid

images, labels = next(iter(data_loader))
print(images.shape)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
print(' '.join(classes[i] for i in labels))
mp.imshow(np.transpose(make_grid(images).numpy(), (1, 2, 0)))
mp.show()
```
```
torch.Size([8, 3, 32, 32])
frog truck truck deer car car bird horse
```
Building the Neural Network
Models inherit from the base class nn.Module, so some object-oriented programming basics are required.
```python
from torch import nn


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()

    def forward(self, x):
        return x
```
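Any layer assigned as an attribute inside __init__ is registered as a sub-module automatically, which is how the optimizer later finds the weights via net.parameters(). A quick sketch with a hypothetical Tiny module, not part of the article:

```python
from torch import nn


class Tiny(nn.Module):  # hypothetical illustration, not the article's Net
    def __init__(self):
        super(Tiny, self).__init__()
        self.fc = nn.Linear(4, 2)  # registered as a sub-module automatically

    def forward(self, x):
        return self.fc(x)


tiny = Tiny()
print(sum(p.numel() for p in tiny.parameters()))  # 4 * 2 weights + 2 biases = 10
```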
Override the forward propagation; there are three ways to write it:
- Method 1
```python
from torch.nn import functional as F  # needed for F.relu


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=15, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=15, out_channels=30, kernel_size=3, padding=1)
        self.fc1 = nn.Linear(in_features=30 * 8 * 8, out_features=300)
        self.fc2 = nn.Linear(in_features=300, out_features=10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 30 * 8 * 8)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
```
- Method 2
```python
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        conv = nn.Sequential()
        conv.add_module('c1', nn.Conv2d(in_channels=3, out_channels=15, kernel_size=3, padding=1))
        conv.add_module('r1', nn.ReLU(inplace=True))
        conv.add_module('p1', nn.MaxPool2d(kernel_size=2, stride=2))
        conv.add_module('c2', nn.Conv2d(in_channels=15, out_channels=30, kernel_size=3, padding=1))
        conv.add_module('r2', nn.ReLU(inplace=True))
        conv.add_module('p2', nn.MaxPool2d(kernel_size=2, stride=2))
        self.conv = conv
        linear = nn.Sequential()
        linear.add_module('l1', nn.Linear(in_features=30 * 8 * 8, out_features=300))
        linear.add_module('e3', nn.ReLU(inplace=True))
        linear.add_module('l2', nn.Linear(in_features=300, out_features=10))
        self.linear = linear

    def forward(self, x):
        x = self.conv(x)
        x = x.view(-1, 30 * 8 * 8)
        x = self.linear(x)
        return x
```
- Method 3
```python
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        conv = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=15, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=15, out_channels=30, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.conv = conv
        linear = nn.Sequential(
            nn.Linear(in_features=30 * 8 * 8, out_features=300),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=300, out_features=10),
        )
        self.linear = linear

    def forward(self, x):
        x = self.conv(x)
        x = x.view(-1, 30 * 8 * 8)
        x = self.linear(x)
        return x
```
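Whichever of the three styles is used, a quick sanity check is to push a dummy batch through the network and confirm the output shape; a minimal sketch:

```python
import torch

net = Net()
dummy = torch.randn(8, 3, 32, 32)  # a fake batch of 8 CIFAR-10-sized images
out = net(dummy)
print(out.shape)                   # expected: torch.Size([8, 10])
```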
Loss Function and Optimizer
```python
from torch import optim

net = Net()  # Create the model object
cross_entropy_loss = nn.CrossEntropyLoss()  # Cross-entropy loss
optimizer = optim.SGD(params=net.parameters(), lr=1e-3, momentum=.9)  # Stochastic gradient descent with momentum
```
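SGD with momentum is the article's choice, but not the only one; Adam is a common drop-in replacement (a sketch with an illustrative learning rate, not the article's setting):

```python
from torch import optim

# Adam adapts a per-parameter learning rate; 1e-3 is its usual default
optimizer = optim.Adam(params=net.parameters(), lr=1e-3)
```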
Training
```python
for epoch in range(3):
    loss_value = 0.
    for inputs, labels in loader_train:
        # Zero the parameter gradients
        optimizer.zero_grad()
        # Forward propagation
        outputs = net(inputs)
        # Cross-entropy loss
        loss = cross_entropy_loss(outputs, labels)
        # Backpropagation
        loss.backward()
        # Update the parameters
        optimizer.step()
        # Accumulate the loss
        loss_value += loss.item()
    print('Epoch %d loss: %.2f' % (epoch + 1, loss_value))
```

```
Epoch 1 loss: 6134.84
Epoch 2 loss: 4787.88
Epoch 3 loss: 4185.97
```
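Once training finishes, the learned weights can be saved and restored; a minimal sketch using PyTorch's state_dict mechanism (the file name is arbitrary):

```python
import torch

# Save only the learned parameters (the recommended PyTorch pattern)
torch.save(net.state_dict(), 'cifar10_net.pth')

# Later: rebuild the architecture and load the weights back in
net2 = Net()
net2.load_state_dict(torch.load('cifar10_net.pth'))
net2.eval()  # switch to evaluation mode before inference
```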
Evaluation
```python
correct, total = 0, 0
with torch.no_grad():  # Disable gradient computation to save memory
    for images, labels in loader_test:
        outputs = net(images)
        max_values, max_indexes = torch.max(outputs.data, dim=1)
        total += labels.size(0)
        correct += (max_indexes == labels).sum().item()
print('Accuracy on 10000 test samples: %d%%' % (100 * correct / total))
```

```
Accuracy on 10000 test samples: 54%
```
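The single overall number hides differences between classes; a per-class breakdown is a natural extension (a sketch, not part of the original article):

```python
import torch

classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')
class_correct = [0] * 10
class_total = [0] * 10

with torch.no_grad():
    for images, labels in loader_test:
        outputs = net(images)
        _, predicted = torch.max(outputs, dim=1)
        for label, pred in zip(labels, predicted):
            label, pred = label.item(), pred.item()
            class_total[label] += 1
            class_correct[label] += int(label == pred)

for i, name in enumerate(classes):
    print('%-6s accuracy: %d%%' % (name, 100 * class_correct[i] / class_total[i]))
```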