Machine Learning Algorithms (4): Logistic Regression
阿新 · Published: 2019-02-17
Based on the idea of Logistic regression, we use gradient ascent to solve for the regression coefficients, then train on the horse colic dataset and predict outcomes.
The examples come from Machine Learning in Action by Peter Harrington.
Gradient Ascent
Loading the Dataset
The dataset contains 100 data points belonging to two classes.
""" 載入資料集 """
def loadDataSet():
dataMat = []; labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0 , float(lineArr[0]), float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat,labelMat
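loadDataSet expects each line of testSet.txt to hold two feature values followed by an integer class label (0 or 1), separated by whitespace; the 1.0 prepended in code is the intercept term x0. Purely illustrative lines (not the actual file contents) would look like:

0.217534	3.845601	1
-1.482634	7.291052	0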
The sigmoid Function
The sigmoid function squashes any real input into the interval (0, 1), so its output can be read as a class probability and thresholded at 0.5 for classification.
""" sigmoid函式 """
def sigmoid(inX):
return 1.0/(1+exp(-inX))
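One practical caveat the book's code does not handle: for a large negative inX, exp(-inX) overflows float64 and NumPy emits a RuntimeWarning. A numerically stable variant (a sketch of a standard trick, not part of the original post, assuming array-like inputs) never passes a positive argument to exp:

import numpy as np

def sigmoidStable(inX):
    x = np.asarray(inX, dtype=float)
    pos = 1.0 / (1.0 + np.exp(-np.maximum(x, 0)))   # branch used where x >= 0; exp argument is <= 0
    ex = np.exp(np.minimum(x, 0))                   # branch used where x < 0; exp argument is <= 0
    neg = ex / (1.0 + ex)                           # algebraically equal to 1/(1+exp(-x)) for x < 0
    return np.where(x >= 0, pos, neg)

np.where evaluates both branches, which is why each branch clamps its exp argument to be non-positive.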
Gradient Ascent for the Weight Vector
""" 梯度上升 """
def gradAscent(dataMatIn, classLabels):
dataMatrix = mat(dataMatIn) #轉換為 NumPy 矩陣
labelMat = mat(classLabels).transpose() #轉換為 NumPy 矩陣,求轉置 (行向量-->列向量)
m,n = shape(dataMatrix) #獲取矩陣的大小
alpha = 0.001 #步長
maxCycles = 500 #迭代代數
weights = ones((n,1)) #權重向量
for k in range(maxCycles):
h = sigmoid(dataMatrix*weights)
error = (labelMat - h) # 懲罰度
weights = weights + alpha * dataMatrix.transpose()* error
return weights
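As a quick justification of the update rule (a standard derivation, not spelled out in the original post): gradient ascent climbs the log-likelihood of the labels,

\[
\ell(w) = \sum_{i=1}^{m} \left[\, y_i \log h_i + (1 - y_i)\log(1 - h_i) \,\right], \qquad h_i = \sigma(x_i^{\top} w),
\]

whose gradient collapses to the matrix form

\[
\nabla_w\, \ell(w) = \sum_{i=1}^{m} (y_i - h_i)\, x_i = X^{\top}(y - h),
\]

so weights = weights + alpha * dataMatrix.transpose() * error is exactly one ascent step with step size alpha.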
Test
import logRegres   # the functions above are assumed to be saved in logRegres.py

def testGradAscent():
    dataArr, labelMat = logRegres.loadDataSet()
    weights = logRegres.gradAscent(dataArr, labelMat)
    print(weights)
Result
[[ 4.12414349]
[ 0.48007329]
[-0.6168482 ]]
Visualizing the Result
""" 繪製擬合後的直線 """
def plotBestFit(weights):
import matplotlib.pyplot as plt
dataMat,labelMat=loadDataSet()
dataArr = array(dataMat)
n = shape(dataArr)[0] # 資料點的個數
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
for i in range(n): # 根據資料點的型別進行分類
if int(labelMat[i])== 1:
xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
ax.scatter(xcord2, ycord2, s=30, c='green')
x = arange(-3.0, 3.0, 0.1)
y = (-weights[0]-weights[1]*x)/weights[2]
ax.plot(x, y)
plt.xlabel('X1'); plt.ylabel('X2');
plt.show()
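The plotted line is the decision boundary: predictions flip at sigmoid = 0.5, which happens exactly where the weighted sum is zero. With the intercept feature x0 fixed at 1.0,

\[
0 = w_0 + w_1 x_1 + w_2 x_2 \quad\Longrightarrow\quad x_2 = \frac{-w_0 - w_1 x_1}{w_2},
\]

which is the expression computed for y in plotBestFit.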
Test
Note that the NumPy matrix returned by gradAscent must be converted to a plain NumPy array before plotting.
"""Test plotting the fitted line"""
def testPlotBestFit():
    dataArr, labelMat = logRegres.loadDataSet()
    weights = logRegres.gradAscent(dataArr, labelMat)
    logRegres.plotBestFit(weights.getA())   # getA(): matrix --> array
Result
[figure: the two classes and the fitted boundary line drawn by plotBestFit]
Stochastic Gradient Ascent
The batch gradient computation above multiplies the full data matrix on every iteration, which becomes expensive on large datasets. Stochastic gradient ascent instead updates the weights with a single sample at a time.
Stochastic Gradient Ascent, Version 0
""" 隨機梯度上升0 """
def stocGradAscent0(dataMatrix, classLabels):
m,n = shape(dataMatrix)
alpha = 0.01
weights = ones(n)
for i in range(m):
h = sigmoid(sum(dataMatrix[i]*weights)) # 每次只選取一個特徵點進行訓練
error = classLabels[i] - h
weights = weights + alpha * error * dataMatrix[i]
return weights
Test
"""Test stochastic gradient ascent, version 0"""
def teststocGradAscent0():
    dataArr, labelMat = logRegres.loadDataSet()
    weights = logRegres.stocGradAscent0(array(dataArr), labelMat)
    logRegres.plotBestFit(weights)
Result
Because the number of updates is small (a single pass over the data), the resulting decision boundary is not ideal.
Stochastic Gradient Ascent, Version 1
Why version 0 falls short:
1. The step size is fixed, so the weights oscillate periodically as they approach convergence instead of settling.
2. The training samples are visited in a fixed order, so the weights are swayed by periodic patterns in the data.
Two corresponding fixes:
1. Shrink the step size alpha as the iterations progress.
2. Pick the training sample at random on each update.
""" 隨機梯度上升1 """
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
m,n = shape(dataMatrix)
weights = ones(n)
for j in range(numIter):
dataIndex = list(range(m)) # rang 物件無法迭代
for i in range(m):
alpha = 4/(1.0+j+i)+0.0001 # 步長會隨著迭代進行而減少,但不會為0。防止波動和停止不前
randIndex = int(random.uniform(0,len(dataIndex))) # 隨機選取迭代值,防止週期波動
h = sigmoid(sum(dataMatrix[randIndex]*weights))
error = classLabels[randIndex] - h
weights = weights + alpha * error * dataMatrix[randIndex]
del(dataIndex[randIndex])
return weights
Test
"""Test stochastic gradient ascent, version 1"""
def teststocGradAscent1():
    dataArr, labelMat = logRegres.loadDataSet()
    weights = logRegres.stocGradAscent1(array(dataArr), labelMat)
    logRegres.plotBestFit(weights)
Result
This time the decision boundary separates the classes well.
Application
Using Logistic regression to predict the mortality of horses with colic.
Training
""" 利用迴歸係數和特徵量計算類別"""
def classifyVector(inX, weights):
prob = sigmoid(sum(inX*weights))
if prob > 0.5: return 1.0
else: return 0.0
""" 載入資料 訓練 測試"""
def colicTest():
# 訓練迴歸係數
frTrain = open('horseColicTraining.txt'); frTest = open('horseColicTest.txt')
trainingSet = []; trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr =[]
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
# 測試分類效果
errorCount = 0; numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr =[]
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr), trainWeights))!= int(currLine[21]):
errorCount += 1
errorRate = (float(errorCount)/numTestVec)
print ("the error rate of this test is: %f" % errorRate)
return errorRate
Test
"""Predict the mortality of sick horses"""
def multiTest():
    numTests = 10; errorSum = 0.0
    for k in range(numTests):
        errorSum += logRegres.colicTest()
    print("after %d iterations the average error rate is: %f" % (numTests, errorSum/float(numTests)))
Result
the error rate of this test is: 0.432836
the error rate of this test is: 0.268657
the error rate of this test is: 0.417910
the error rate of this test is: 0.313433
the error rate of this test is: 0.298507
the error rate of this test is: 0.358209
the error rate of this test is: 0.298507
the error rate of this test is: 0.283582
the error rate of this test is: 0.388060
the error rate of this test is: 0.402985
after 10 iterations the average error rate is: 0.346269
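For context, the book notes that roughly 30% of the feature values in the horse colic dataset are missing and are filled in with 0 before training; with logistic regression a zero-valued feature leaves its weight unchanged during an update, so this is a reasonable imputation, and an average error rate around 35% is respectable. Increasing numIter or adjusting the alpha schedule in stocGradAscent1 can reduce it further.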