1. 程式人生 > 實用技巧 >第一個深度學習網路(別人的)

第一個深度學習網路(別人的)

import numpy as np
import torch
from torchvision.datasets import mnist
import  torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
from torch import  nn
from matplotlib import pyplot as plt

#定義引數
# --- Hyperparameters ---
train_batch_size = 64
test_batch_size = 128
num_epoches = 3        # number of training epochs
lr = 0.01              # initial learning rate
momentum = 0.5         # SGD momentum

# --- Preprocessing ---
# Compose chains the transforms in order: ToTensor converts the PIL image to a
# CHW float tensor in [0, 1]; Normalize shifts/scales the single channel with
# mean 0.5 and std 0.5.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])

# Download MNIST (train split downloads on first run) and apply the transform.
train_dataset = mnist.MNIST('./data', train=True, transform=transform, download=True)
test_dataset = mnist.MNIST('./data', train=False, transform=transform)

# DataLoaders are iterables yielding (images, labels) batches.
train_loader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=False)

# Preview a few test samples.
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
fig = plt.figure()
for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
# plt.show()


class Net(nn.Module):
    """Fully connected MNIST classifier: two hidden layers with BatchNorm,
    ReLU activations, and a linear output layer producing raw logits.

    Args:
        in_dim: flattened input size (28*28 for MNIST).
        n_hidden_1: width of the first hidden layer.
        n_hidden_2: width of the second hidden layer.
        out_dim: number of classes.
    """

    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Net, self).__init__()
        # nn.Sequential groups a linear layer with its batch norm.
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2))
        # BUG FIX: the original assigned this output layer to self.layer1,
        # silently overwriting the first hidden layer (and forward() then
        # called layer1 twice). It must be a distinct third layer.
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))

    def forward(self, x):
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        # No ReLU on the output: CrossEntropyLoss expects raw logits.
        x = self.layer3(x)
        return x


# Use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = Net(28 * 28, 10, 784, 10)
model.to(device)

# Loss and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)

# --- Training ---
losses = []       # per-epoch mean training loss
acces = []        # per-epoch mean training accuracy
eval_losses = []  # per-epoch mean test loss
eval_acces = []   # per-epoch mean test accuracy
for epoch in range(num_epoches):
    train_loss = 0
    train_acc = 0
    model.train()
    # Decay the learning rate by 10x every 5 epochs.
    # BUG FIX: the original condition also fired at epoch 0, shrinking the
    # initial learning rate before any training happened.
    if epoch % 5 == 0 and epoch != 0:
        optimizer.param_groups[0]['lr'] *= 0.1
    for img, label in train_loader:
        img = img.to(device)
        label = label.to(device)
        img = img.view(img.size(0), -1)  # flatten 28x28 image to a 784 vector
        # Forward pass
        out = model(img)
        loss = criterion(out, label)
        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Accumulate loss
        train_loss += loss.item()
        # Accumulate accuracy: fraction of correct argmax predictions
        _, pred = out.max(1)
        num_correct = (pred == label).sum().item()
        acc = num_correct / img.shape[0]
        train_acc += acc
    losses.append(train_loss / len(train_loader))
    acces.append(train_acc / len(train_loader))

    # --- Evaluation on the test set ---
    eval_loss = 0
    eval_acc = 0
    model.eval()
    # no_grad avoids building the autograd graph during evaluation.
    with torch.no_grad():
        for img, label in test_loader:
            img = img.to(device)
            label = label.to(device)
            img = img.view(img.size(0), -1)
            out = model(img)
            loss = criterion(out, label)
            # Accumulate loss
            eval_loss += loss.item()
            # Accumulate accuracy
            _, pred = out.max(1)
            num_correct = (pred == label).sum().item()
            acc = num_correct / img.shape[0]
            eval_acc += acc
    eval_losses.append(eval_loss / len(test_loader))
    eval_acces.append(eval_acc / len(test_loader))
    print('epoch:{},Train Loss:{:.4f},Train Acc:{:.4f},Test Loss:{:.4f},Test Acc:{:.4f}'
          .format(epoch, train_loss / len(train_loader), train_acc / len(train_loader),
                  eval_loss / len(test_loader), eval_acc / len(test_loader)))

體驗自己開發的深度學習樂趣