
PyTorch: building your own neural network, with examples of different optimizers

import torch
import torchvision
import torchvision.transforms as transform
import torch.utils.data as Data
import matplotlib.pyplot as plt
from torch.utils.data import Dataset,DataLoader
import pandas as pd
import numpy as np
from torch.autograd import Variable

# load the data set
train=pd.read_csv('Thirdtest.csv')
# column 0 is the label
train_label=train.iloc[:,[0]] # read only this one column
#train_label=train.iloc[:,0:3]
# columns 1~16 are the features
train_data=train.iloc[:,1:]
# convert to numpy arrays
train_label_np=train_label.values
train_data_np=train_data.values

# convert to tensors; CrossEntropyLoss expects LongTensor labels and FloatTensor inputs
train_label_ts=torch.from_numpy(train_label_np)
train_data_ts=torch.from_numpy(train_data_np)

train_label_ts=train_label_ts.type(torch.LongTensor)
train_data_ts=train_data_ts.type(torch.FloatTensor)



print(train_label_ts.shape)
print(type(train_label_ts))

train_dataset=Data.TensorDataset(train_data_ts,train_label_ts)
train_loader=DataLoader(dataset=train_dataset,batch_size=64,shuffle=True)
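
# Quick sanity check on the loader: a minimal sketch, assuming Thirdtest.csv really has
# 16 feature columns after the label column; check_x/check_y are just illustrative names.
for check_x, check_y in train_loader:
    print(check_x.shape)   # expected: torch.Size([64, 16]) for full batches
    print(check_y.shape)   # expected: torch.Size([64, 1]) before squeezing
    break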

# build the network

import torch.nn.functional as F     # activation functions live here

class Net(torch.nn.Module):     # inherit from torch.nn.Module
    def __init__(self):
        super(Net, self).__init__()     # initialise the parent Module
        self.hidden1 = torch.nn.Linear(16, 30)  # hidden layer: 16 inputs -> 30 units
        self.out = torch.nn.Linear(30, 3)       # output layer: 30 units -> 3 classes

    def forward(self, x):
        # forward pass: the network maps the input to an output
        x = F.relu(self.hidden1(x))     # activation on the hidden layer's linear output
        x = self.out(x)                 # raw logits; class predictions still have to be derived from these
        return x
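
# As noted above, forward() only returns raw logits. A minimal sketch of turning them into
# class predictions (the demo_* names are just for illustration; uses one batch from the loader):
demo_net = Net()
demo_x, demo_y = next(iter(train_loader))
demo_logits = demo_net(demo_x)               # shape [batch, 3], raw logits
demo_pred = torch.max(demo_logits, 1)[1]     # index of the largest logit = predicted class
print(demo_pred[:10])                        # same as demo_logits.argmax(dim=1)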



# net=Net()
# optimizer = torch.optim.SGD(net.parameters(), lr=0.0001,momentum=0.001)
# loss_func = torch.nn.CrossEntropyLoss()  # the target labels are NOT one-hot encoded

# loss_list=[]
# for epoch in range(500):
#     for step ,(b_x,b_y) in enumerate (train_loader):
#         b_x,b_y=Variable(b_x),Variable(b_y)
#         b_y=b_y.squeeze(1)
#         output=net(b_x)
#         loss=loss_func(output,b_y)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
#         if epoch%1==0:
#             loss_list.append(float(loss))
#         print( "Epoch: ", epoch, "Step ", step, "loss: ", float(loss))


# create one net per optimizer
net_SGD         = Net()
net_Momentum    = Net()
net_RMSprop     = Net()
net_Adam        = Net()
nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

# define an optimizer for each net
LR=0.0001
opt_SGD         = torch.optim.SGD(net_SGD.parameters(), lr=LR,momentum=0.001)
opt_Momentum    = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_RMSprop     = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam        = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]

loss_func = torch.nn.CrossEntropyLoss()
losses_his = [[], [], [], []]

for net, opt, l_his in zip(nets, optimizers, losses_his):
    for epoch in range(500):
        for step, (b_x, b_y) in enumerate(train_loader):
            b_x, b_y = Variable(b_x), Variable(b_y)
            b_y = b_y.squeeze(1)           # labels must be a 1-D tensor of class indices, not one-hot
            # each optimizer trains its own network
            output = net(b_x)              # get output for every net
            loss = loss_func(output, b_y)  # compute loss for every net
            opt.zero_grad()                # clear gradients for next train
            loss.backward()                # backpropagation, compute gradients
            opt.step()                     # apply gradients
            if epoch%1==0:
                l_his.append(loss.data.numpy())     # loss recorder
                print("optimizers: ",opt,"Epoch: ",epoch,"Step ",step,"loss: ",float(loss))

labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
for i, l_his in enumerate(losses_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.xlim((0,1000))
plt.ylim((0,4))
plt.show()
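
# The curves above only show training loss. A rough sketch for comparing training-set accuracy
# of the four nets (evaluated on the same data they were trained on, so take it as indicative only):
with torch.no_grad():
    for name, net in zip(labels, nets):
        pred = net(train_data_ts).argmax(dim=1)
        acc = (pred == train_label_ts.squeeze(1)).float().mean()
        print(name, "training accuracy:", float(acc))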








#
# # quick 5-epoch sanity check with a single net/optimizer (zero_grad must come before backward)
# for epoch in range(5):
#     for step ,(b_x,b_y) in enumerate (train_loader):
#         b_x,b_y=Variable(b_x),Variable(b_y)
#         b_y=b_y.squeeze(1)
#         output=net(b_x)
#         loss=loss_func(output,b_y)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
#         print(loss)