Linear Regression Model (learning the PyTorch framework from Tang Yudi's Bilibili course)
阿新 • Published: 2021-10-30
Reproducing the code from the video for easy reference later.
import numpy as np
import torch
import torch.nn as nn

# Build the input data x and its corresponding labels y
x_values = [i for i in range(11)]
x_train = np.array(x_values, dtype=np.float32)
x_train = x_train.reshape(-1, 1)

y_values = [2 * i + 1 for i in x_values]
y_train = np.array(y_values, dtype=np.float32)
y_train = y_train.reshape(-1, 1)

# Linear regression is really just a fully connected layer without an activation function
class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    # Override the forward pass inherited from nn.Module
    def forward(self, x):
        out = self.linear(x)
        return out

input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)

# Train on a GPU when available, otherwise fall back to the CPU
# (the video used "cuda:1", i.e. a second GPU; "cuda:0" is the safer default)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

# Number of training iterations
epochs = 1000
# Learning rate
learning_rate = 0.01
# Use stochastic gradient descent as the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Use mean squared error (MSE) as the loss function
criterion = nn.MSELoss()

for epoch in range(1, epochs + 1):
    # Convert the numpy arrays to tensors on the chosen device
    inputs = torch.from_numpy(x_train).to(device)
    labels = torch.from_numpy(y_train).to(device)

    # Zero the gradients on every iteration so they do not accumulate
    optimizer.zero_grad()
    # Forward pass
    outputs = model(inputs)
    # Compute the loss
    loss = criterion(outputs, labels)
    # Backward pass
    loss.backward()
    # Update the weights
    optimizer.step()

    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
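Once training finishes, it is worth checking the fit against the ground truth y = 2x + 1. The following is a minimal sketch, not part of the video code above (the file name model.pkl is an arbitrary choice), that runs inference with the trained model and then saves and reloads its weights:

# Switch to evaluation mode and disable gradient tracking for inference
model.eval()
with torch.no_grad():
    predicted = model(torch.from_numpy(x_train).to(device)).cpu().numpy()
print(predicted)  # each row should be close to 2*x + 1

# Persist the learned weights and load them back (file name is illustrative)
torch.save(model.state_dict(), 'model.pkl')
model.load_state_dict(torch.load('model.pkl'))

Since the model is a single nn.Linear(1, 1), model.linear.weight should converge to roughly 2 and model.linear.bias to roughly 1 after the 1000 training iterations.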