
PyTorch Beginner Notes (Part 1)

Tensors

Import PyTorch and generate a random 5x3 tensor:

>>> from __future__ import print_function
>>> import torch
>>> x = torch.rand(5, 3)
>>> print(x)
tensor([[0.5555, 0.7301, 0.5655],
        [0.9998, 0.1754, 0.7808],
        [0.5512, 0.8162, 0.6148],
        [0.8618, 0.3293, 0.6236],
        [0.2787, 0.0943, 0.2074]])

Declare a 5x3 tensor with all elements initialized to zero (here with dtype torch.long):

>>> x = torch.zeros(5, 3, dtype=torch.long)

Construct a tensor directly from data, typically a Python list:

>>> x = torch.tensor([5.5, 3])
>>> print(x)
tensor([5.5000, 3.0000])
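
Nested lists work the same way and produce multi-dimensional tensors; the dtype is inferred from the data:

>>> a = torch.tensor([[1, 2], [3, 4]])  # a nested list gives a 2x2 tensor
>>> a.dtype                             # the dtype is inferred (integers here)
torch.int64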

Create a new tensor from an existing one: the new_* methods (such as new_ones) reuse the source tensor's properties like dtype unless they are overridden, and the *_like methods (such as rand_like) also take over its shape.

>>> x = x.new_ones(5, 3, dtype=torch.double) 
>>> print(x)
tensor([[1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.]], dtype=torch.float64)
>>> y = torch.rand_like(x, dtype=torch.float)
>>> print(y)
tensor([[0.6934, 0.9637, 0.0594],
        [0.0863, 0.6638, 0.4728],
        [0.3416, 0.0892, 0.1761],
        [0.6831, 0.6404, 0.8307],
        [0.6254, 0.4180, 0.2174]])

Get a tensor's dimensions with size(); the NumPy equivalent is shape:

>>> print(x.size())
torch.Size([5, 3])
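
torch.Size is in fact a tuple, so it supports all tuple operations, such as unpacking:

>>> rows, cols = x.size()
>>> print(rows, cols)
5 3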

Tensor operations

Adding tensors. The four forms below are equivalent: the + operator, torch.add, torch.add with an out argument, and the in-place method add_:

>>> x = torch.rand(5, 3)
>>> y = torch.zeros(5, 3)
>>> print(x + y)
tensor([[0.8991, 0.9222, 0.2050],
        [0.2478, 0.7688, 0.4156],
        [0.4055, 0.9526, 0.2559],
        [0.9481, 0.8576, 0.4816],
        [0.0767, 0.3346, 0.0922]])

>>> print(torch.add(x, y))
tensor([[0.8991, 0.9222, 0.2050],
        [0.2478, 0.7688, 0.4156],
        [0.4055, 0.9526, 0.2559],
        [0.9481, 0.8576, 0.4816],
        [0.0767, 0.3346, 0.0922]])

>>> result = torch.empty(5, 3)
>>> torch.add(x, y, out=result)
tensor([[0.8991, 0.9222, 0.2050],
        [0.2478, 0.7688, 0.4156],
        [0.4055, 0.9526, 0.2559],
        [0.9481, 0.8576, 0.4816],
        [0.0767, 0.3346, 0.0922]])

>>> y.add_(x)
tensor([[0.8991, 0.9222, 0.2050],
        [0.2478, 0.7688, 0.4156],
        [0.4055, 0.9526, 0.2559],
        [0.9481, 0.8576, 0.4816],
        [0.0767, 0.3346, 0.0922]])
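
Note that any operation that mutates a tensor in place is post-fixed with an underscore: for example, x.copy_(y) and x.t_() will change x.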

Tensor elements are accessed exactly as in NumPy; for example, print all elements of y whose index along the second dimension is 1:

>>> print(y[:, 1])
tensor([0.9222, 0.7688, 0.9526, 0.8576, 0.3346])
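
Other NumPy-style slicing works the same way:

>>> print(y[0])        # the first row
>>> print(y[1:3, :2])  # rows 1 and 2, first two columns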

The view function changes a tensor's shape, similar to NumPy's reshape:

>>> x = torch.randn(4, 4)
>>> y = x.view(16)  # flatten to a 1-D tensor with 16 elements
>>> z = x.view(-1, 8)  # second dimension is 8, first is inferred; the result is 2x8
>>> print(x.size(), y.size(), z.size())
torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
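
A view shares the underlying data with the original tensor, so modifying one modifies the other:

>>> y[0] = 0        # y is a view of x
>>> print(x[0, 0])  # the change is visible through x
tensor(0.)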

For a tensor with a single element, use item() to get the value as a Python number:

>>> x = torch.randn(1)
>>> print(x)
tensor([0.8542])
>>> print(x.item())
0.8541867136955261

Converting to a NumPy array:

>>> x = torch.rand(5, 3)
>>> x.numpy()
array([[0.9320856 , 0.473859  , 0.6787642 ],
       [0.14365482, 0.1112923 , 0.8280207 ],
       [0.4609589 , 0.51031697, 0.15313298],
       [0.18854082, 0.4548    , 0.49709243],
       [0.8351501 , 0.6160053 , 0.61391556]], dtype=float32)
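
The array returned by numpy() shares its memory with the tensor (on the CPU), so an in-place change to the tensor is visible through the array:

>>> a = x.numpy()
>>> x.add_(1)   # in-place add on the tensor
>>> print(a)    # a reflects the change: every value is now >= 1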

All CPU tensors except CharTensor support converting to NumPy and back. torch.from_numpy shares memory in the same way, so modifying the array in place also changes the tensor:

import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)  # b shares memory with a
np.add(a, 1, out=a)      # modify a in place
print(a)                 # [2. 2. 2. 2. 2.]
print(b)                 # tensor([2., 2., 2., 2., 2.], dtype=torch.float64)

Moving tensors between the CPU and the GPU:

if torch.cuda.is_available():
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # create the tensor directly on the GPU
    x = x.to(device)                       # or just use strings ``.to("cuda")``
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # ``.to`` can also change dtype together!
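
A common device-agnostic pattern is to pick the device once and move everything to it; a minimal sketch:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.rand(5, 3).to(device)  # runs unchanged on CPU-only and GPU machines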

Building a network and a loss function

A loss function measures how far the network's output is from the target.
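
As a standalone illustration before building the network, nn.MSELoss computes the mean of the squared differences between two tensors:

import torch
import torch.nn as nn

criterion = nn.MSELoss()
pred = torch.tensor([1.0, 2.0, 3.0])
target = torch.tensor([1.0, 2.0, 5.0])
print(criterion(pred, target))  # (0**2 + 0**2 + 2**2) / 3 = tensor(1.3333)

Now define the network itself: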

from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    ## Define the network structure
    def __init__(self):
        super(Net, self).__init__()
        ## input: 1 channel; output: 6 channels; 3x3 convolutional kernel
        self.conv1 = nn.Conv2d(1, 6, 3) 
        self.conv2 = nn.Conv2d(6, 16, 3)
        # an affine operation: y = Wx + b; 6*6 is the spatial size
        # remaining after the two conv+pool stages on a 32x32 input
        self.fc1 = nn.Linear(16*6*6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
    
    ## Forward pass; the method must be named forward
    def forward(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can only specify a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
    
    def num_flat_features(self, x):
        size = x.size()[1:] # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
    
## Create a Net instance
net = Net() 
print(net)  
params = list(net.parameters())
print(len(params))       # 10: a weight and a bias tensor for each of the 5 layers
print(params[0].size())  # conv1's .weight: torch.Size([6, 1, 3, 3])

# Declare a 1x1x32x32 4-D tensor (batch, channels, height, width) as the network input
input = torch.randn(1, 1, 32, 32) 
output = net(input) 

target = torch.randn(10)     # a dummy target, for example
target = target.view(1, -1)  # reshape it to the same shape as the output
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)

print(loss.grad_fn)  # MSELoss
print(loss.grad_fn.next_functions[0][0])  # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU
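
The whole autograd graph can be walked recursively through next_functions; a small sketch (print_graph is just an illustrative helper, not a torch API):

def print_graph(fn, depth=0):
    # Recursively print the autograd graph rooted at fn
    if fn is None:
        return
    print('  ' * depth + type(fn).__name__)
    for next_fn, _ in fn.next_functions:
        print_graph(next_fn, depth + 1)

print_graph(loss.grad_fn)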

To backpropagate the error, all that is needed is a call to loss.backward(). Existing gradients must be cleared first; otherwise each backward pass accumulates gradients on top of the old ones.

Call loss.backward() and compare conv1's bias gradients before and after the call:

net.zero_grad()     # zeroes the gradient buffers of all parameters

print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)

loss.backward()

print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)

Updating the weights with SGD

Formula: weight = weight - learning_rate * gradient

This can be implemented with the following torch code:

learning_rate = 0.01
for f in net.parameters():
    f.data.sub_(f.grad.data * learning_rate)
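
In current PyTorch the same update is usually written inside torch.no_grad() rather than going through .data:

# Equivalent update without touching .data
with torch.no_grad():
    for f in net.parameters():
        f -= f.grad * learning_rate  # in-place update, not tracked by autograd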

However, torch.optim already implements various update rules such as SGD, Nesterov-SGD, Adam, and RMSProp, which can be used directly:

import torch.optim as optim

# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)

# in your training loop:
optimizer.zero_grad()   # zero the gradient buffers
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()    # Does the update
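
In a real script these steps repeat for every batch; a minimal loop sketch reusing the dummy input and target from above:

for step in range(100):
    optimizer.zero_grad()             # clear accumulated gradients
    output = net(input)               # forward pass
    loss = criterion(output, target)  # compute the loss
    loss.backward()                   # backpropagate
    optimizer.step()                  # apply the SGD update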