《機器學習實戰》第六章----支援向量機
阿新 • • 發佈:2018-12-06
支援向量機
SVM(Support Vector Machine)實際上是應用於二分類的一個分類器,其基本模型定義為特徵空間上的間隔最大的線性分類器,其學習策略便是間隔最大化,最終可轉化為一個凸二次規劃問題的求解。這裡不對整個過程進行推導,因為看了很多部落格,有幾篇大佬的部落格寫得非常清晰,這裡只貼出程式碼和自己寫的程式碼註釋。
程式碼實現
(1)簡化版本的SMO演算法,隨機選取內層迴圈變數:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : //
# @Author : FC
# @Site : [email protected]
# @license : BSD
from numpy import *
def loadDataSet(fileName):
    """Load a tab-separated data file.

    Each line holds two feature values followed by a class label.

    Parameters:
        fileName: path of the text file to read.

    Returns:
        (dataMat, labelMat): list of [x1, x2] feature rows and the
        matching list of float labels.
    """
    dataMat = []
    labelMat = []
    # Context manager closes the file even on error (the original
    # opened it and never closed it).
    with open(fileName) as fr:
        for line in fr:
            lineArr = line.strip().split('\t')
            dataMat.append([float(lineArr[0]), float(lineArr[1])])
            labelMat.append(float(lineArr[2]))
    return dataMat, labelMat
#隨機選擇i外的j
def selectJrand(i, m):
    """Return a random index in [0, m) that differs from i.

    Used to pick the second alpha of an SMO working pair at random.
    """
    while True:
        j = int(random.uniform(0, m))
        if j != i:
            return j
#對拉格朗日乘子alpha進行裁剪
def clipAlpha(aj, H, L):
    """Clamp the Lagrange multiplier aj into the box [L, H]."""
    # Same effect as the usual two comparisons: cap at H, then floor at L.
    return max(L, min(aj, H))
# 簡化版的smo演算法
# INPUT:dataMatIn:輸入訓練資料,classLabels:分類標籤,C:alpha閾值,即0<alpha<C,maxIter:最大迭代次數
# toler:容錯率,因為Ei是函式g(x)對xi的預測值和真實輸出值yi之差(見<統計學習方法>Page127)
# OUTPUT:alhpas,b
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    """Simplified SMO: optimize the SVM dual with randomly paired alphas.

    Parameters:
        dataMatIn: training samples, one row per sample.
        classLabels: class labels in {-1, +1}.
        C: upper bound on every alpha (soft-margin penalty).
        toler: tolerance used when testing the KKT conditions.
        maxIter: number of consecutive sweeps with no alpha change
            required before the loop terminates.

    Returns:
        (b, alphas): the bias term and the m x 1 matrix of multipliers.
    """
    dataMatrix = mat(dataMatIn)
    labelMat = mat(classLabels).transpose()
    b = 0
    m, n = shape(dataMatrix)
    alphas = mat(zeros((m, 1)))
    # Consecutive sweeps without any pair change; renamed from `iter`,
    # which shadowed the builtin.
    passes = 0
    while passes < maxIter:
        alphaPairsChanged = 0
        for i in range(m):
            # g(x_i): current decision value for sample i.
            fXi = float(multiply(alphas, labelMat).T *
                        (dataMatrix * dataMatrix[i, :].T)) + b
            Ei = fXi - float(labelMat[i])  # prediction error E_i
            # Optimize only when sample i violates the KKT conditions
            # by more than toler.
            if ((labelMat[i] * Ei < -toler) and (alphas[i] < C)) or \
                    ((labelMat[i] * Ei > toler) and (alphas[i] > 0)):
                j = selectJrand(i, m)  # pick alpha_2 at random
                fXj = float(multiply(alphas, labelMat).T *
                            (dataMatrix * dataMatrix[j, :].T)) + b
                Ej = fXj - float(labelMat[j])
                alphaIold = alphas[i].copy()
                alphaJold = alphas[j].copy()
                # Clip boundaries [L, H] for the new alpha_2.
                if labelMat[i] != labelMat[j]:
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                if L == H:
                    print("L==H")
                    continue
                # eta = 2*K12 - K11 - K22; must be negative for a valid
                # maximum along the constraint line.
                eta = 2.0 * dataMatrix[i, :] * dataMatrix[j, :].T - \
                    dataMatrix[i, :] * dataMatrix[i, :].T - \
                    dataMatrix[j, :] * dataMatrix[j, :].T
                if eta >= 0:
                    print("eta>=0")
                    continue
                # Subtraction here because eta carries the opposite sign
                # from the textbook derivation.
                alphas[j] -= labelMat[j] * (Ei - Ej) / eta
                alphas[j] = clipAlpha(alphas[j], H, L)
                if abs(alphas[j] - alphaJold) < 0.00001:
                    print("j not moving enough")
                    continue
                # Move alpha_1 by the same amount in the opposite direction.
                alphas[i] += labelMat[j] * labelMat[i] * (alphaJold - alphas[j])
                b1 = b - Ei - labelMat[i] * (alphas[i] - alphaIold) * \
                    dataMatrix[i, :] * dataMatrix[i, :].T - \
                    labelMat[j] * (alphas[j] - alphaJold) * \
                    dataMatrix[i, :] * dataMatrix[j, :].T
                b2 = b - Ej - labelMat[i] * (alphas[i] - alphaIold) * \
                    dataMatrix[i, :] * dataMatrix[j, :].T - \
                    labelMat[j] * (alphas[j] - alphaJold) * \
                    dataMatrix[j, :] * dataMatrix[j, :].T
                # b is exact for whichever alpha ends strictly inside (0, C).
                if (0 < alphas[i]) and (C > alphas[i]):
                    b = b1
                elif (0 < alphas[j]) and (C > alphas[j]):
                    b = b2
                else:
                    b = (b1 + b2) / 2.0
                alphaPairsChanged += 1
                print("iter:%d i:%d,paris changed %d" % (passes, i, alphaPairsChanged))
        if alphaPairsChanged == 0:
            passes += 1
        else:
            passes = 0
        print("iteration number:%d" % passes)
    return b, alphas
if __name__ == '__main__':
    # The original `if '__main__':` is always true (non-empty string),
    # so the demo would also run on import; guard it properly.
    dataArr, labelArr = loadDataSet('testSet.txt')
    b, alphas = smoSimple(dataArr, labelArr, 0.6, 0.001, 40)
(2)Platt的完整版的SMO演算法,由於用例樣本較小,外層變數的選取並沒有檢測KKT條件
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : //
# @Author : FC
# @Site : [email protected]
# @license : BSD
from numpy import *
import SMO
#內層:alphaJ
#外層:alphaI
#定義資料結構體:
# dataMatIn:訓練資料
# classLabels:資料標籤
# C:alpha閾值
# toler:容錯率
# kTup:核函式型別和引數,'rbf'是高斯徑向基函式(radial basis function) kTup=('rbf',sigma)
# eCache:誤差E的快取區
class optStruct:
    """Bundle of all state shared by the full Platt SMO routines.

    Attributes:
        X: training samples, one row per sample.
        labelMat: column vector of class labels in {-1, +1}.
        C: soft-margin upper bound on every alpha.
        tol: KKT tolerance.
        m: number of training samples.
        alphas: m x 1 matrix of Lagrange multipliers, initially zero.
        b: bias term, initially zero.
        eCache: m x 2 error cache; column 0 is a validity flag,
            column 1 the cached error value.
        K: precomputed m x m kernel matrix.
    """

    def __init__(self, dataMatIn, classLabels, C, toler, kTup):
        self.X = dataMatIn
        self.labelMat = classLabels
        self.C = C
        self.tol = toler
        self.m = shape(dataMatIn)[0]
        self.alphas = mat(zeros((self.m, 1)))
        self.b = 0
        self.eCache = mat(zeros((self.m, 2)))
        # Fill the kernel matrix one column at a time: K[:, col] holds
        # k(x_row, x_col) for every training row.
        self.K = mat(zeros((self.m, self.m)))
        for col in range(self.m):
            self.K[:, col] = kernelTrans(self.X, self.X[col, :], kTup)
def calcEk(oS, k):
    """Return E_k = g(x_k) - y_k, the prediction error for sample k.

    g(x_k) = sum_i alpha_i * y_i * K(x_i, x_k) + b, evaluated from the
    precomputed kernel column oS.K[:, k].
    (Removed the dead commented-out non-kernel variant.)
    """
    fXk = float(multiply(oS.alphas, oS.labelMat).T * oS.K[:, k] + oS.b)
    return fXk - float(oS.labelMat[k])
#alphaJ的選擇:第一次迴圈隨機抽取,後面的迴圈滿足條件max|Ei-Ej|選取
#eCache:快取誤差E的值
def selectJ(i, oS, Ei):
    """Choose the second working index j (and its error Ej) for alpha i.

    Once the error cache has more than one valid entry, j is chosen to
    maximize |Ei - Ej| (Platt's second-choice heuristic); before that,
    j is drawn at random.
    """
    oS.eCache[i] = [1, Ei]  # mark E_i as valid in the cache
    # Row indices whose cached error has the validity flag set.
    validIdx = nonzero(oS.eCache[:, 0].A)[0]
    if len(validIdx) > 1:
        bestK, bestDelta, Ej = -1, 0, 0
        for k in validIdx:
            if k == i:
                continue
            Ek = calcEk(oS, k)
            delta = abs(Ei - Ek)
            if delta > bestDelta:
                bestK, bestDelta, Ej = k, delta, Ek
        return bestK, Ej
    # Cold cache (first pass): fall back to a random partner.
    j = SMO.selectJrand(i, oS.m)
    return j, calcEk(oS, j)
#更新誤差快取區eCache,一旦更新了設定標誌位為1
def updateEk(oS, k):
    """Recompute E_k and store it in the cache with its valid flag set."""
    oS.eCache[k] = [1, calcEk(oS, k)]
#內迴圈
def innerL(i,oS):
    """Attempt one SMO optimization step for the alpha pair anchored at i.

    Mutates oS.alphas, oS.b and the error cache in place.
    Returns 1 if the pair (alpha_i, alpha_j) changed, 0 otherwise.
    """
    Ei=calcEk(oS,i)
    # Optimize only when sample i violates the KKT conditions by more than tol.
    if((oS.labelMat[i]*Ei<-oS.tol) and (oS.alphas[i]<oS.C)) or \
            ((oS.labelMat[i]*Ei>oS.tol) and (oS.alphas[i]>0)):
        j,Ej = selectJ(i,oS,Ei)  # pick alpha_j by the max |Ei-Ej| heuristic
        alphaIold = oS.alphas[i].copy();alphaJold = oS.alphas[j].copy()
        # Clip boundaries [L, H] for the new alpha_j.
        if(oS.labelMat[i] != oS.labelMat[j]):
            L = max(0,oS.alphas[j]-oS.alphas[i])
            H = min(oS.C,oS.C+oS.alphas[j]-oS.alphas[i])
        else:
            L=max(0,oS.alphas[j]+oS.alphas[i]-oS.C)
            H=min(oS.C,oS.alphas[j]+oS.alphas[i])
        if L==H:print("L==H");return 0
        # eta = 2*K_ij - K_ii - K_jj; must be negative for a valid step.
        eta = 2.0*oS.K[i,j]-oS.K[i,i]-oS.K[j,j]
        if eta >=0:print("eta>=0");return 0
        oS.alphas[j] -= oS.labelMat[j]*(Ei-Ej)/eta
        oS.alphas[j] = SMO.clipAlpha(oS.alphas[j],H,L)  # clamp alpha_j into [L,H]
        updateEk(oS,j)
        if(abs(oS.alphas[j]-alphaJold)<0.00001):
            print("j not moving enough!");return 0
        oS.alphas[i] += oS.labelMat[j]*oS.labelMat[i]*(alphaJold-oS.alphas[j])  # update alpha_i
        updateEk(oS,i)
        b1 = oS.b-Ei-oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.K[i,i]-oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.K[i,j]
        b2 = oS.b-Ej-oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.K[i,j]-oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.K[j,j]
        # b is exact for whichever alpha ends strictly inside (0, C);
        # otherwise average the two candidates.
        if(0<oS.alphas[i]) and (oS.C>oS.alphas[i]):oS.b=b1
        elif (0<oS.alphas[j]) and (oS.C>oS.alphas[j]):oS.b=b2
        else:oS.b=(b1+b2)/2.0
        return 1
    else:return 0
def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup=('lin', 0)):
    """Full Platt SMO outer loop.

    Alternates between sweeps over the whole training set and sweeps over
    the non-bound alphas (0 < alpha < C) until no pair changes, or until
    maxIter sweeps have run.

    Parameters:
        dataMatIn: training samples, one row per sample.
        classLabels: class labels in {-1, +1}.
        C: soft-margin upper bound on every alpha.
        toler: KKT tolerance.
        maxIter: maximum number of sweeps.
        kTup: kernel spec, e.g. ('lin', 0) or ('rbf', sigma).

    Returns:
        (b, alphas): bias term and m x 1 matrix of multipliers.
    """
    oS = optStruct(mat(dataMatIn), mat(classLabels).transpose(), C, toler, kTup)
    # Sweep counter; renamed from `iter`, which shadowed the builtin.
    sweep = 0
    entireSet = True
    alphaPairsChanged = 0
    while (sweep < maxIter) and ((alphaPairsChanged > 0) or entireSet):
        alphaPairsChanged = 0
        if entireSet:
            # Full pass over every sample (small data set: no KKT screening
            # of the outer variable).
            for i in range(oS.m):
                alphaPairsChanged += innerL(i, oS)
                print("fullSet,iter: %d i:%d,pairs changed %d" % (sweep, i, alphaPairsChanged))
            sweep += 1
        else:
            # Pass only over the non-bound alphas, 0 < alpha < C.
            nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i, oS)
                print("non-bound,iter:%d i:%d,pairs changed %d" % (sweep, i, alphaPairsChanged))
            sweep += 1
        # Alternate: after a full pass switch to non-bound passes; return to
        # a full pass once the non-bound pass stops changing any pair.
        if entireSet:
            entireSet = False
        elif alphaPairsChanged == 0:
            entireSet = True
        print("iteration number:%d" % sweep)
    return oS.b, oS.alphas
def calcWs(alphas, dataArr, classLabels):
    """Recover the primal weight vector w = sum_i alpha_i * y_i * x_i.

    Only meaningful for the linear kernel; rows with alpha == 0
    contribute nothing (non-support vectors).
    """
    X = mat(dataArr)
    labelMat = mat(classLabels).transpose()
    m, n = shape(X)
    w = zeros((n, 1))
    for row in range(m):
        # Each sample adds alpha_row * y_row times its feature column.
        w += multiply(alphas[row] * labelMat[row], X[row, :].T)
    return w
def kernelTrans(X, A, kTup):
    """Compute the kernel values k(x_row, A) for every row of X.

    kTup selects the kernel: ('lin', 0) is the plain inner product,
    ('rbf', sigma) the Gaussian radial basis function. Any other name
    raises NameError.
    """
    m, n = shape(X)
    K = mat(zeros((m, 1)))
    kind = kTup[0]
    if kind == 'lin':
        K = X * A.T
    elif kind == 'rbf':
        # Squared Euclidean distance of each row to A, then the Gaussian map.
        for row in range(m):
            diff = X[row, :] - A
            K[row] = diff * diff.T
        K = exp(K / (-1 * kTup[1] ** 2))
    else:
        raise NameError('HOUSTON We Have a Problem -- That Kernel is not recognized')
    return K
def _rbfErrorRate(dataMat, labelArr, sVs, labelSV, svAlphas, b, k1):
    """Return the misclassification rate of the RBF model on one data set."""
    m = shape(dataMat)[0]
    errorCount = 0
    for i in range(m):
        # Decision value uses only the support vectors.
        kernelEval = kernelTrans(sVs, dataMat[i, :], ('rbf', k1))
        predict = kernelEval.T * multiply(labelSV, svAlphas) + b
        if sign(predict) != sign(labelArr[i]):
            errorCount += 1
    return float(errorCount) / m


def testRbf(k1=1.3):
    """Train an RBF-kernel SVM on testSetRBF.txt and print the error
    rate on the training set and on testSetRBF2.txt.

    The duplicated evaluation loop of the original is factored into
    _rbfErrorRate. k1 is the Gaussian kernel width sigma.
    """
    dataArr, labelArr = SMO.loadDataSet('testSetRBF.txt')
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('rbf', k1))
    dataMat = mat(dataArr)
    labelMat = mat(labelArr).transpose()
    # Rows with alpha > 0 are the support vectors; all others drop out
    # of the decision function.
    svInd = nonzero(alphas.A > 0)[0]
    sVs = dataMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % shape(sVs)[0])
    print("the training error rate is: %f" %
          _rbfErrorRate(dataMat, labelArr, sVs, labelSV, alphas[svInd], b, k1))
    dataArr, labelArr = SMO.loadDataSet('testSetRBF2.txt')
    print("the test error rate is: %f" %
          _rbfErrorRate(mat(dataArr), labelArr, sVs, labelSV, alphas[svInd], b, k1))
if __name__ == '__main__':
    # Proper guard; the original `if '__main__':` was always true, so the
    # demo also ran on import.
    testRbf()
(3)handwriting示例
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : //
# @Author : FC
# @Site : [email protected]
# @license : BSD
from os import listdir
from numpy import *
import Platt_SMO
######
# 將一個檔案的資料變成1x1024的array
# 輸入:要讀的檔名
# 輸出:處理後的資料
######
def img2vector(filename):
    """Read a 32x32 text image of '0'/'1' characters into a 1x1024 array.

    Parameters:
        filename: path of the digit file, 32 lines of 32 characters each.

    Returns:
        numpy array of shape (1, 1024) holding the digits as floats.
    """
    returnVect = zeros((1, 1024))
    # Context manager closes the file even if a line is malformed
    # (the original never closed it).
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect
def loadImages(dirName):
    """Load every digit file in dirName as a binary classification set.

    Files are named '<digit>_<n>.txt'; the label is -1 when the digit is
    9 and +1 otherwise (a 9-vs-rest problem).

    Returns:
        (trainingMat, hwLabels): an (m, 1024) feature matrix and the
        matching label list.
    """
    fileList = listdir(dirName)
    count = len(fileList)
    trainingMat = zeros((count, 1024))
    hwLabels = []
    for idx, nameStr in enumerate(fileList):
        # The digit before '_' in names like '9_45.txt' is the class.
        digit = int(nameStr.split('.')[0].split('_')[0])
        hwLabels.append(-1 if digit == 9 else 1)
        trainingMat[idx, :] = img2vector('%s/%s' % (dirName, nameStr))
    return trainingMat, hwLabels
def _digitsErrorRate(dataMat, labelArr, sVs, labelSV, svAlphas, b, kTup):
    """Return the misclassification rate of the trained model on one set."""
    m = shape(dataMat)[0]
    errorCount = 0
    for i in range(m):
        # Decision value uses only the support vectors.
        kernelEval = Platt_SMO.kernelTrans(sVs, dataMat[i, :], kTup)
        predict = kernelEval.T * multiply(labelSV, svAlphas) + b
        if sign(predict) != sign(labelArr[i]):
            errorCount += 1
    return float(errorCount) / m


def testDigits(kTup=('rbf', 10)):
    """Train a 9-vs-rest SVM on digits/trainingDigits and print the error
    rate on the training digits and on digits/testDigits.

    The duplicated evaluation loop of the original is factored into
    _digitsErrorRate. kTup selects the kernel, e.g. ('rbf', 10).
    """
    dataArr, labelArr = loadImages('digits/trainingDigits')
    b, alphas = Platt_SMO.smoP(dataArr, labelArr, 200, 0.001, 10000, kTup)
    dataMat = mat(dataArr)
    labelMat = mat(labelArr).transpose()
    svInd = nonzero(alphas.A > 0)[0]  # support vectors: alpha > 0
    sVs = dataMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % shape(sVs)[0])
    print("the training error rate is: %f" %
          _digitsErrorRate(dataMat, labelArr, sVs, labelSV, alphas[svInd], b, kTup))
    dataArr, labelArr = loadImages('digits/testDigits')
    print("the test error rate is: %f" %
          _digitsErrorRate(mat(dataArr), labelArr, sVs, labelSV, alphas[svInd], b, kTup))
if __name__ == '__main__':
    # Proper guard; the original `if '__main__':` was always true, so the
    # demo also ran on import.
    testDigits()
(4)執行結果:
fullSet,iter: 2 i:401,pairs changed 0
iteration number:3
there are 125 Support Vectors
the training error rate is: 0.000000
the test error rate is: 0.010753
完整程式碼和資料集
完整程式碼地址:SVM