
Logistic Regression with torch.nn

1 Import the packages needed for the experiment

import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader,TensorDataset
import matplotlib.pyplot as plt
import numpy as np
import os
os.environ["KMP_DUPLICATE_LIB_OK"]  =  "TRUE"

2 Construct an artificial dataset

  Generate 100 samples for each of the two classes, each with 2 features, and perform binary classification with labels 0 and 1.

# Fake data
n_data = torch.ones(100, 2)        # base shape of the data
x0 = torch.normal(2 * n_data, 1)   # class 0 x data (tensor), shape=(100, 2); passing a tensor mean sets both the means and the output size
y0 = torch.zeros(100)              # class 0 y data (tensor), shape=(100,)
x1 = torch.normal(-2 * n_data, 1)  # class 1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)               # class 1 y data (tensor), shape=(100,)
# Note: x and y must be concatenated into single tensors as below (torch.cat merges the data)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # FloatTensor = 32-bit floating point
y = torch.cat((y0, y1), 0).type(torch.FloatTensor)  # labels kept as floats because BCELoss expects float targets

Plot the two classes with differently colored markers, using the two features of each sample as the coordinate axes.

plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.show()


3 Define the model

# class LogisticRegression(nn.Module):
#     def __init__(self):
#         super(LogisticRegression, self).__init__()
#         self.linear = nn.Linear(2, 1)
#         self.sm = nn.Sigmoid()

#     def forward(self, x):
#         x = self.linear(x)
#         x = self.sm(x)
#         return x
# The same model built with nn.Sequential: a single linear layer followed by a sigmoid
logistic_model = nn.Sequential()
logistic_model.add_module('linear', nn.Linear(2, 1))
logistic_model.add_module('sm', nn.Sigmoid())

# logistic_model = LogisticRegression()
if torch.cuda.is_available():
    logistic_model.cuda()
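
Before wiring up the loss, a quick shape sanity check can be useful. A minimal sketch (the dummy batch of 4 points is my own illustration, not part of the original post):

# Sanity-check sketch: run a dummy batch through the untrained model and inspect the output shape.
with torch.no_grad():
    dummy = torch.randn(4, 2)            # 4 made-up samples with 2 features each
    if torch.cuda.is_available():
        dummy = dummy.cuda()             # keep the dummy input on the same device as the model
    print(logistic_model(dummy).shape)   # expected: torch.Size([4, 1])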

4 Define the loss function and optimizer

# Loss: binary cross-entropy on the sigmoid outputs; optimizer: SGD with momentum
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(logistic_model.parameters(), lr=1e-3, momentum=0.9)
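
A side note: nn.BCELoss expects probabilities in (0, 1), which is why the model ends with nn.Sigmoid and the labels are float tensors. A common, more numerically stable alternative (a minimal sketch of my own, not part of the original post; the names logit_model / logit_criterion are illustrative) is to drop the Sigmoid layer and apply nn.BCEWithLogitsLoss to the raw linear outputs:

# Alternative sketch: raw logits + BCEWithLogitsLoss instead of Sigmoid + BCELoss.
logit_model = nn.Sequential()
logit_model.add_module('linear', nn.Linear(2, 1))
logit_criterion = nn.BCEWithLogitsLoss()  # applies the sigmoid inside the loss, numerically more stable
logit_optimizer = torch.optim.SGD(logit_model.parameters(), lr=1e-3, momentum=0.9)
# At prediction time the sigmoid is applied explicitly: probs = torch.sigmoid(logit_model(inputs))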

5 Shuffle the data

  Shuffling arguably changes little here, since every epoch below trains on the full dataset at once; it is kept for completeness. (For genuine mini-batch shuffling, see the DataLoader sketch after the code block below.)

# batch_size = 32
# data_iter = load_data(X,Y,batch_size)
def set_data(X, Y):
    # shuffle the rows and move the tensors to the GPU when one is available
    index_slice = list(range(X.shape[0]))
    np.random.shuffle(index_slice)
    x = X[index_slice]
    y = Y[index_slice]
    # Variable is a no-op wrapper in modern PyTorch; plain tensors would work the same
    if torch.cuda.is_available():
        x_data = Variable(x).cuda()
        y_data = Variable(y).cuda()
    else:
        x_data = Variable(x)
        y_data = Variable(y)
    return x_data, y_data
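
The TensorDataset and DataLoader imported at the top are never actually used. If one wanted genuine mini-batch training with shuffling, a minimal sketch could look like this (the batch size and loop structure are my own assumptions, not from the original post):

# Mini-batch sketch using the TensorDataset / DataLoader imports from the top of the post.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dataset = TensorDataset(x, y)
data_iter = DataLoader(dataset, batch_size=32, shuffle=True)  # reshuffles the samples every epoch
for batch_x, batch_y in data_iter:
    batch_x, batch_y = batch_x.to(device), batch_y.to(device)  # keep the batch on the same device as the model
    out = logistic_model(batch_x).view(-1, 1)
    loss = criterion(out, batch_y.view(-1, 1))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()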

6 Train the model

Train_Loss_list = []
Train_acc_list = []

# Start training
for epoch in range(10000):
    x_data, y_data = set_data(x, y)
    out = logistic_model(x_data)
    out = out.view(-1, 1)
    y_data = y_data.view(-1, 1)
    loss = criterion(out, y_data)
    print_loss = loss.item()
    mask = out.ge(0.5).float()  # classify with a threshold of 0.5
    correct = (mask == y_data).sum()  # number of correctly predicted samples
    acc = correct.item() / x_data.size(0)  # accuracy
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    Train_Loss_list.append(print_loss)
    Train_acc_list.append(acc)
    # print the current loss and accuracy every 2000 epochs
    if (epoch + 1) % 2000 == 0:
        print('-' * 20)
        print('epoch {}'.format(epoch + 1))
        print('current loss {:.6f}'.format(print_loss))
        print('current accuracy {:.6f}'.format(acc))

Results:

--------------------
epoch 2000
current loss 0.019348
current accuracy 1.000000
--------------------
epoch 4000
current loss 0.012090
current accuracy 1.000000
--------------------
epoch 6000
current loss 0.009251
current accuracy 1.000000
--------------------
epoch 8000
current loss 0.007668
current accuracy 1.000000
--------------------
epoch 10000
current loss 0.006634
current accuracy 1.000000

Print the learned model parameters:

logistic_model.state_dict()
OrderedDict([('linear.weight', tensor([[-2.1929, -1.9542]], device='cuda:0')),
             ('linear.bias', tensor([-0.2197], device='cuda:0'))])
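
With the trained weights, new points can be classified directly. A minimal inference sketch (the two test points below are made-up values for illustration, not from the original post):

# Inference sketch: classify two hypothetical points with the trained model.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
new_points = torch.tensor([[2.0, 2.0], [-2.0, -2.0]]).to(device)
with torch.no_grad():                      # no gradients needed at inference time
    probs = logistic_model(new_points)     # sigmoid outputs in (0, 1)
    preds = (probs >= 0.5).float()         # same 0.5 threshold as during training
print(probs.view(-1), preds.view(-1))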

7 Plot the training curves

x11 = range(0, 10000)
y11 = Train_Loss_list
plt.xlabel('epoch')
plt.ylabel('Train loss')
plt.title('Train loss vs. epochs')
plt.plot(x11, y11, '.', c='b', label="Train_Loss")
plt.legend()
plt.show()

x11 = range(0, 10000)
y11 = Train_acc_list
plt.xlabel('epoch')
plt.ylabel('Train acc')
plt.title('Train acc vs. epochs')
plt.plot(x11, y11, '.', c='b', label="Train_acc")
plt.legend()
plt.show()

8 Plot the classification result

# Visualize the result: draw the learned decision boundary over the scatter plot
w0, w1 = logistic_model.linear.weight[0]
w0 = float(w0.item())
w1 = float(w1.item())
b = float(logistic_model.linear.bias.item())
plot_x = np.arange(-7, 7, 0.1)
# decision boundary: sigmoid(w0*x + w1*y + b) = 0.5, i.e. w0*x + w1*y + b = 0
plot_y = (-w0 * plot_x - b) / w1
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
plt.plot(plot_x, plot_y)
plt.show()
