Decision Trees in Practice: From Theory to Implementation
Contents:
1. Introduction to decision trees
2. Algorithms for building decision trees
3. Implementing a decision tree in Python
4. Extensions
5. Code appendix
6. References
A decision tree is a basic method for classification and regression. Learning a decision tree involves three steps: feature selection, tree generation, and tree pruning. This article discusses the principles of decision trees for classification and a Python implementation of the ID3 algorithm.
1. Decision trees
A decision tree model is a tree-structured description of how instances are classified. A decision tree consists of nodes and directed edges. Nodes are either internal nodes, each representing a feature, or leaf nodes, each representing a class (label). A directed edge represents one possible value of the corresponding feature. The figure shows a decision tree for judging whether a person is able to repay a debt.
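In the Python implementation later in this article, such a tree is stored as nested dictionaries: each internal node is a dict mapping a feature name to a dict of {feature value: subtree or class label}. A minimal sketch (this particular tree is the one the code in section 3 produces):

sampleTree = {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
# 'no surfacing' is the root feature; value 0 leads directly to the leaf 'no',
# while value 1 leads to a subtree that tests the 'flippers' feature.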
2. How to build a decision tree from historical data
To build a decision tree step by step, the key is to find the right nodes and edges. The first problem to solve is: which feature of the current data set plays the decisive role when partitioning the data? To find the decisive feature and produce the best split, every feature must be evaluated. Information gain is an effective evaluation criterion.
2.1 Information gain
Information gain measures how much knowing the value of a feature X reduces the uncertainty about the class Y:
g(D,A) = H(D) - H(D|A)
where H(D) is the empirical entropy of the data set D and H(D|A) is the empirical conditional entropy of D given the feature A. This quantity is also known as the mutual information; in decision tree learning, the information gain equals the mutual information between the classes and the features in the training data.
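For reference, the empirical definitions behind this formula (following the notation of Statistical Learning Methods, where C_k is the set of samples of class k and D_i is the subset of D on which feature A takes its i-th value) are:
H(D) = -Σ_k (|C_k|/|D|) log2(|C_k|/|D|)
H(D|A) = Σ_i (|D_i|/|D|) H(D_i)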
Feature selection by the information gain criterion works as follows: for the training set D, compute the information gain of every feature and select the feature with the largest information gain.
The information gain algorithm:
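In outline, given a training set D and a feature A:
1. Compute the empirical entropy H(D) of the data set D.
2. Compute the empirical conditional entropy H(D|A) of D given the feature A.
3. Compute the information gain g(D,A) = H(D) - H(D|A).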
2.2 Generating the decision tree
Algorithm flow:
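The flow used here is the standard ID3 recursion; a sketch that matches the createTree code in the next section:
1. If all instances in the current data set belong to the same class, return that class as a leaf node.
2. If no features remain to split on, return the majority class as a leaf node.
3. Otherwise, select the feature with the largest information gain, make it the current internal node, partition the data set by that feature's values, and recurse on each subset with the chosen feature removed.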
3. Decision trees in practice
First, get the data:
def createDataSet():
    #dataSet=pd.read_csv(datafile)
    #label=[]
    dataSet = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels
Recursively generate the tree from the data:
def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):
        return classList[0]            # if every instance in this subset has the same class, return that class
    if len(dataSet[0]) == 1:           # no features left to split on
        return majorityCnt(classList)  # return the majority class as the leaf value
    bestFeat = chooseBestFeatureToSplit(dataSet)  # select the best feature
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}       # the tree structure is stored as nested dictionaries
    del(labels[bestFeat])
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]          # copy so the recursion does not mutate the caller's label list
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)  # recursive call
    return myTree
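Running this on the toy data set produces the nested-dictionary tree:

myDat, labels = createDataSet()
myTree = createTree(myDat, labels)
print(myTree)
# {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}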
The helper functions used in the computation above:
Compute the Shannon entropy of a given data set:
from math import log

def calcShannonEnt(dataSet):
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]
        # count the frequency of each class label, building a dict over all possible classes
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    # compute the Shannon entropy
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt
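As a quick check, the toy data set has two 'yes' and three 'no' labels, so its entropy is -(2/5)log2(2/5) - (3/5)log2(3/5) ≈ 0.971:

myDat, labels = createDataSet()
print(calcShannonEnt(myDat))   # 0.9709505944546686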
Split the data set:
def splitDataSet(dataSet, axis, value):
    # return the subset of dataSet whose feature at index `axis` equals `value`,
    # with that feature column removed
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis+1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet
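For example, splitting the toy data on the first feature:

myDat, labels = createDataSet()
print(splitDataSet(myDat, 0, 1))   # [[1, 'yes'], [1, 'yes'], [0, 'no']]
print(splitDataSet(myDat, 0, 0))   # [[1, 'no'], [1, 'no']]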
Choose the best feature to split the data:
def chooseBestFeatureToSplit(dataSet):
    numFeatures = len(dataSet[0]) - 1   # the last column is the class label
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        newEntropy = 0.0
        # compute the conditional entropy of this candidate split
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        # information gain
        infoGain = baseEntropy - newEntropy
        # keep the feature with the largest information gain
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
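On the toy data set, feature 0 ('no surfacing') gives an information gain of about 0.420 versus about 0.171 for 'flippers', so the function returns 0:

myDat, labels = createDataSet()
print(chooseBestFeatureToSplit(myDat))   # 0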
import operator

# return the label that occurs most frequently in the data set
def majorityCnt(classList):
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(),
                              key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
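A quick check:

print(majorityCnt(['yes', 'no', 'no']))   # no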
5. Appendix: code
5.1 trees.py
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 03 09:13:58 2015
@author: beta
"""
from math import log
import operator
import treePlotter
def calcShannonEnt(dataSet):
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]
        # count the frequency of each class label
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    # compute the Shannon entropy
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt
def createDataSet():
    #dataSet=pd.read_csv(datafile)
    #label=[]
    dataSet = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels

def splitDataSet(dataSet, axis, value):
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis+1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet
def chooseBestFeatureToSplit(dataSet):
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        newEntropy = 0.0
        # compute the conditional entropy of this candidate split
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        # information gain
        infoGain = baseEntropy - newEntropy
        # keep the feature with the largest information gain
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
def majorityCnt(classList):
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(),
                              key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del(labels[bestFeat])
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree

if __name__ == '__main__':
    myDat, labels = createDataSet()
    myTree = createTree(myDat, labels)
    treePlotter.createPlot(myTree)
5.2 treePlotter.py:
'''
Created on Oct 14, 2010
@author: Peter Harrington
'''
import matplotlib.pyplot as plt
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def getNumLeafs(myTree):
    numLeafs = 0
    firstStr = list(myTree.keys())[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':  # a dictionary is a subtree; anything else is a leaf node
            numLeafs += getNumLeafs(secondDict[key])
        else:
            numLeafs += 1
    return numLeafs
def getTreeDepth(myTree):
    maxDepth = 0
    firstStr = list(myTree.keys())[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':  # a dictionary is a subtree; anything else is a leaf node
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        if thisDepth > maxDepth:
            maxDepth = thisDepth
    return maxDepth
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
                            xytext=centerPt, textcoords='axes fraction',
                            va="center", ha="center", bbox=nodeType, arrowprops=arrow_args)

def plotMidText(cntrPt, parentPt, txtString):
    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)
def plotTree(myTree, parentPt, nodeTxt):  # the first key tells you what feature was split on
    numLeafs = getNumLeafs(myTree)  # this determines the x width of this tree
    depth = getTreeDepth(myTree)
    firstStr = list(myTree.keys())[0]  # the text label for this node
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':  # a dictionary is a subtree: recurse
            plotTree(secondDict[key], cntrPt, str(key))
        else:  # a leaf node: plot it
            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD
def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)  # no ticks
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5 / plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()
def retrieveTree(i):
    listOfTrees = [{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
                   {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}]
    return listOfTrees[i]
6. References:
1. Li Hang, Statistical Learning Methods (《統計學習方法》).
2. Peter Harrington, Machine Learning in Action (《機器學習實戰》).