(PyTorch Deep Learning Series) Classifying the Fashion-MNIST Dataset with Softmax Regression - Study Notes
阿新 • Published: 2020-10-14
Using softmax regression to classify the Fashion-MNIST dataset. First, import the required packages (torchvision is needed for the dataset and transforms below):

import torch
import torchvision
import torchvision.transforms as transforms
from torch import nn
from torch.nn import init
import numpy as np
import sys
Read the dataset:

mnist_train = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=False, download=True, transform=transforms.ToTensor())

batch_size = 256
if sys.platform.startswith('win'):
    num_workers = 0  # 0 means no extra worker processes are used to speed up data loading
else:
    num_workers = 4
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
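As a quick sanity check (a sketch added here, not part of the original notes), one batch can be pulled from train_iter and its shape inspected; get_fashion_mnist_labels below is a small helper written for illustration, not a torchvision function:

# Hypothetical helper: map Fashion-MNIST's numeric labels to their text names
def get_fashion_mnist_labels(labels):
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]

X, y = next(iter(train_iter))
print(X.shape)                           # torch.Size([256, 1, 28, 28])
print(get_fashion_mnist_labels(y[:5]))   # text names of the first five labels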
Initialize the model:

num_inputs = 784
num_outputs = 10

class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)
    def forward(self, x):  # x shape: (batch, 1, 28, 28)
        y = self.linear(x.view(x.shape[0], -1))  # flatten each image to a 784-dim vector
        return y

net = LinearNet(num_inputs, num_outputs)

# Initialize the parameters of the linear model
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)

# Define the loss function; it includes both the softmax operation and the cross-entropy loss computation
loss = nn.CrossEntropyLoss()

# Define the optimization algorithm
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
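A small verification sketch (added here, not in the original notes) of the comment above: nn.CrossEntropyLoss is equivalent to applying log-softmax followed by negative log-likelihood, which is why the model's forward returns raw scores without a softmax layer:

import torch.nn.functional as F

y_hat_demo = torch.tensor([[0.1, 2.0, -1.0], [1.5, 0.2, 0.3]])  # raw scores (logits) for 2 samples, 3 classes
y_demo = torch.tensor([1, 0])                                    # true class indices

ce = nn.CrossEntropyLoss()(y_hat_demo, y_demo)
nll = F.nll_loss(F.log_softmax(y_hat_demo, dim=1), y_demo)
print(torch.allclose(ce, nll))  # True: the two computations agree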
Train the model:

num_epochs = 5

def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n

def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()  # .sum() is a no-op here: nn.CrossEntropyLoss already returns a scalar
            # Zero the gradients
            if optimizer is not None:
                optimizer.zero_grad()  # an optimizer is in use, so clear gradients through it directly
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)  # hand-written sgd from the from-scratch version; not reached here, since an optimizer is passed in
            else:
                optimizer.step()  # the optimizer branch used in this note
            # nn.CrossEntropyLoss averages over the batch, so scale back to a sum before accumulating
            train_l_sum += l.item() * y.shape[0]
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))

train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
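Once training finishes, a short prediction pass (a sketch added here, not in the original, reusing the get_fashion_mnist_labels helper from the sanity check above) compares predicted and true labels on one test batch:

# Predict on one test batch and compare predicted vs. true text labels
X, y = next(iter(test_iter))
with torch.no_grad():                 # no gradients needed for inference
    preds = net(X).argmax(dim=1)

true_labels = get_fashion_mnist_labels(y[:9])
pred_labels = get_fashion_mnist_labels(preds[:9])
for t, p in zip(true_labels, pred_labels):
    print('true: %-10s pred: %s' % (t, p))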