PyTorch Notes 01: First Experience
阿新 • Published: 2018-11-10
Below is a simple example that uses a network with a single hidden layer to fit a quadratic function.
import torch
from torch.autograd import Variable
import torch.nn.functional as func
import matplotlib.pyplot as plt
# generate the simulated data
# unsqueeze adds a dimension: shape (100,) becomes (100, 1)
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x.pow(2) + .3 * torch.rand(x.shape)  # quadratic curve plus uniform noise
x, y = Variable(x), Variable(y)
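# Note (my addition, a version assumption not in the original post): since
# PyTorch 0.4 the Variable wrapper has been merged into Tensor, so the line
# above is optional on newer versions and the plain tensors x, y work directly.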
class NetWork(torch.nn.Module):
    def __init__(self, n_input, n_hidden, n_out):
        super(NetWork, self).__init__()  # customary first call to the parent constructor
        self.hidden = torch.nn.Linear(n_input, n_hidden)
        self.out = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        x = func.relu(self.hidden(x))
        y = self.out(x)
        return y
net = NetWork(1, 32, 1)
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
loss_func = torch.nn.MSELoss()
epoch = 100
plt.ion()  # turn on interactive (non-blocking) plotting mode
for i in range(epoch):
    prediction = net(x)
    optimizer.zero_grad()                 # clear gradients accumulated in the previous step
    loss_func(prediction, y).backward()   # compute gradients of the MSE loss
    optimizer.step()                      # update the weights
    if i % 5 == 0:
        plt.cla()  # clear the current axes
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy())
        plt.pause(0.1)
plt.ioff()  # turn off interactive mode
plt.show()  # remember to call show(), otherwise the window closes automatically
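For comparison, here is a minimal alternative sketch (my own addition, not part of the original post) that builds the same one-hidden-layer regressor with torch.nn.Sequential instead of a custom Module subclass; the layer sizes 1, 32, 1 mirror NetWork(1, 32, 1) above:

import torch

# same architecture as NetWork(1, 32, 1), expressed declaratively
net2 = torch.nn.Sequential(
    torch.nn.Linear(1, 32),  # hidden layer
    torch.nn.ReLU(),         # same nonlinearity as func.relu in forward()
    torch.nn.Linear(32, 1),  # output layer
)
# net2(x) can be trained with the identical SGD / MSELoss loop shown above.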