Pytorch 實現邏輯迴歸
阿新 • 發佈:2021-10-21
1 匯入實驗需要的包
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset

# Allow duplicate OpenMP runtimes to coexist; works around the common
# matplotlib + torch "libiomp5 already initialized" abort on Windows.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
2 載入資料集
# Load the credit-screening dataset; every column except the last holds a
# feature, the last column holds the class label.
data = pd.read_csv("./dataset/credit-a.csv")
X, Y = data.iloc[:, :-1], data.iloc[:, -1]
3 資料轉換
# Map the -1 labels to 0 so the targets are {0, 1}, as nn.BCELoss expects.
# (The original also called Y.unique() purely to inspect the labels; the
# result was discarded, so that dead expression is removed here.)
Y = Y.replace(-1, 0)

# Convert the pandas data to float32 tensors. Y is reshaped to a column
# vector (N, 1) so its shape matches the model output.
X = torch.from_numpy(X.values).type(torch.float32)
Y = torch.from_numpy(Y.values.reshape(-1, 1)).type(torch.float32)
4 設定迭代器,將資料轉換在cuda下
def load_data(X, Y, batch_size):
    """Wrap the (X, Y) tensors in a shuffling DataLoader.

    The tensors are moved to the GPU first when CUDA is available.
    The original version called .cuda() unconditionally (crashing on
    CPU-only machines), printed the whole X tensor as leftover debug
    output, and wrapped the tensors in the deprecated Variable —
    autograd works on plain tensors in modern PyTorch.

    :param X: feature tensor of shape (N, num_features)
    :param Y: label tensor of shape (N, 1)
    :param batch_size: samples per mini-batch
    :return: DataLoader yielding shuffled (x, y) mini-batches
    """
    if torch.cuda.is_available():
        X = X.cuda()
        Y = Y.cuda()
    dataset = TensorDataset(X, Y)
    return DataLoader(dataset, batch_size, shuffle=True)
5 引數初始化及讀取資料
# Mini-batch size for training. (Name kept as-is — callers below use it.)
batchs_size = 16
data_iter = load_data(X, Y, batchs_size)

# Keep whole-dataset copies on the GPU (when present) for the final
# full-dataset evaluation at the end of the script. The original moved
# them with the deprecated Variable(...).cuda() unconditionally, which
# crashes on CPU-only machines.
if torch.cuda.is_available():
    X = X.cuda()
    Y = Y.cuda()
6 設定模型
# Logistic-regression model: one linear layer (15 features -> 1 logit)
# followed by a sigmoid that squashes the logit into a probability.
named_layers = [
    ("mylinear1", nn.Linear(15, 1)),
    ("mysigmoid", nn.Sigmoid()),
]
model = nn.Sequential()
for layer_name, layer in named_layers:
    model.add_module(layer_name, layer)
if torch.cuda.is_available():
    model = model.cuda()
7 設定損失函式
# Binary cross-entropy loss; expects probabilities in (0, 1), which the
# model's sigmoid output layer provides, and {0, 1} targets.
loss_fn = nn.BCELoss()
8 設定優化器
# Adam optimizer with a small learning rate for stable BCE training.
# (A commented-out SGD alternative was removed — dead code.)
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
9 訓練模型
epochs = 150
for epoch in range(epochs):
    correct = 0
    for x, y in data_iter:
        y_hat = model(x)
        loss = loss_fn(y_hat, y)
        # Threshold the sigmoid output at 0.5 to get hard 0/1 predictions.
        out = y_hat.ge(0.5).float()
        # .item() turns the 0-dim count tensor into a plain Python number,
        # so `correct` (and the accuracy below) stay ordinary floats.
        correct += (out == y).sum().item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Per-epoch progress. loss.item() extracts the scalar — formatting a
    # tensor directly with %f is deprecated. The loss shown is that of the
    # last mini-batch; the accuracy uses predictions made before each step.
    print("epoch %d ,current acc = %.4f" % (epoch + 1, correct / Y.shape[0]))
    print("epoch %d ,current loss = %.4f" % (epoch + 1, loss.item()))
10 其他
# Final whole-dataset accuracy: threshold the predictions at 0.5 and
# compare with the labels. Tensors are moved back to the CPU before the
# numpy conversion because the data may live on the GPU.
# (A commented-out CPU-only duplicate of this line was removed.)
# NOTE(review): these are bare expressions — they only display a value in
# a notebook/REPL; in a plain script the results are discarded.
((model(X).data.cpu().numpy() > 0.5).astype('int') == Y.cpu().numpy()).mean()
# Predicted probabilities for every sample.
model(X).data.cpu()
因上求緣,果上努力~~~~ 作者:希望每天漲粉,轉載請註明原文連結:https://www.cnblogs.com/BlairGrowing/p/15432982.html