
Experiment 4: The Decision Tree Algorithm and Its Applications


Class blog: Machine Learning, Class of 2018
Assignment: https://edu.cnblogs.com/campus/ahgc/machinelearning/homework/12086
Student ID: 3180701315

Objectives
1. Understand the principles of the decision tree algorithm and master the overall algorithm framework;
2. Understand feature selection, tree generation, and tree pruning in decision tree learning;
3. Be able to choose an appropriate decision tree algorithm for different types of data;
4. Be able to apply decision tree algorithms to solve practical problems for a given application scenario and dataset.

Tasks
1. Design and implement entropy, empirical conditional entropy, and information gain;
2. Implement the ID3 algorithm;
3. Get familiar with the decision tree algorithms in the sklearn library;
4. Apply sklearn's decision tree algorithm to class prediction on the iris dataset;
5. Apply the hand-written decision tree algorithm to class prediction on the iris dataset (a sketch of this step is given at the end of this report).

Report requirements
1. Describe the experimental procedure, the algorithms, and the test results for each task above;
2. Follow coding conventions: naming rules and comments;
3. Analyze the complexity of the core algorithms;
4. Consult the literature and discuss application scenarios of the ID3 and C4.5 algorithms.

Tasks and results
In [1]:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from collections import Counter
import math
from math import log
import pprint

In [2]:

# Problem 5.1 from the textbook (the loan-application data, Table 5.1 in Li Hang's Statistical Learning Methods)
def create_data():
    datasets = [['青年', '否', '否', '一般', '否'],
                ['青年', '否', '否', '好', '否'],
                ['青年', '是', '否', '好', '是'],
                ['青年', '是', '是', '一般', '是'],
                ['青年', '否', '否', '一般', '否'],
                ['中年', '否', '否', '一般', '否'],
                ['中年', '否', '否', '好', '否'],
                ['中年', '是', '是', '好', '是'],
                ['中年', '否', '是', '非常好', '是'],
                ['中年', '否', '是', '非常好', '是'],
                ['老年', '否', '是', '非常好', '是'],
                ['老年', '否', '是', '好', '是'],
                ['老年', '是', '否', '好', '是'],
                ['老年', '是', '否', '非常好', '是'],
                ['老年', '否', '否', '一般', '否'],
                ]
    labels = [u'年齡', u'有工作', u'有自己的房子', u'信貸情況', u'類別']
    # columns: age, has a job, owns a house, credit rating, class
    # return the dataset and the column names
    return datasets, labels

In [3]:

datasets, labels = create_data()

In [4]:

train_data = pd.DataFrame(datasets, columns=labels)

In [5]:

train_data

In [6]:

# Entropy of the label column
def calc_ent(datasets):
    data_length = len(datasets)
    label_count = {}
    for i in range(data_length):
        label = datasets[i][-1]
        if label not in label_count:
            label_count[label] = 0
        label_count[label] += 1
    ent = -sum([(p / data_length) * log(p / data_length, 2)
            for p in label_count.values()])
    return ent
# def entropy(y):
#     """Entropy of a label sequence."""
#     hist = np.bincount(y)
#     ps = hist / np.sum(hist)
#     return -np.sum([p * np.log2(p) for p in ps if p > 0])

# Empirical conditional entropy
def cond_ent(datasets, axis=0):
    data_length = len(datasets)
    feature_sets = {}
    for i in range(data_length):
        feature = datasets[i][axis]
        if feature not in feature_sets:
            feature_sets[feature] = []
        feature_sets[feature].append(datasets[i])
    cond_ent = sum(
        [(len(p) / data_length) * calc_ent(p) for p in feature_sets.values()])
    return cond_ent
# Information gain
def info_gain(ent, cond_ent):
    return ent - cond_ent

def info_gain_train(datasets):
    count = len(datasets[0]) - 1
    ent = calc_ent(datasets)
    # ent = entropy(datasets)
    best_feature = []
    for c in range(count):
        c_info_gain = info_gain(ent, cond_ent(datasets, axis=c))
        best_feature.append((c, c_info_gain))
        print('feature({}) - info_gain - {:.3f}'.format(labels[c], c_info_gain))
    # pick the feature with the largest information gain
    best_ = max(best_feature, key=lambda x: x[-1])
    return 'feature({}) has the largest information gain; choose it as the root feature'.format(labels[best_[0]])
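
For reference, the three quantities implemented above are, for a dataset $D$ with class counts $|C_k|$ and a feature $A$ that partitions $D$ into subsets $D_1, \dots, D_n$:

$$H(D) = -\sum_{k=1}^{K} \frac{|C_k|}{|D|} \log_2 \frac{|C_k|}{|D|}, \qquad H(D \mid A) = \sum_{i=1}^{n} \frac{|D_i|}{|D|} H(D_i), \qquad g(D, A) = H(D) - H(D \mid A)$$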

In [7]:

info_gain_train(np.array(datasets))
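
With the textbook data above, this cell should print information gains of roughly 0.083 for 年齡, 0.324 for 有工作, 0.420 for 有自己的房子, and 0.363 for 信貸情況 (the values of Example 5.2 in Li Hang's Statistical Learning Methods), so 有自己的房子 is selected as the root feature.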

In [8]:

# Node class; the tree is multiway, with one child per value of the split feature
class Node:
    def __init__(self, root=True, label=None, feature_name=None, feature=None):
        self.root = root
        self.label = label
        self.feature_name = feature_name
        self.feature = feature
        self.tree = {}
        self.result = {
            'label': self.label,
            'feature': self.feature,
            'tree': self.tree
        }
    def __repr__(self):
        return '{}'.format(self.result)
    def add_node(self, val, node):
        self.tree[val] = node
    def predict(self, features):
        if self.root is True:
            return self.label
        # follow the branch matching this sample's value of the split feature
        return self.tree[features[self.feature]].predict(features)
class DTree:
    def __init__(self, epsilon=0.1):
        self.epsilon = epsilon
        self._tree = {}
    # Entropy
    @staticmethod
    def calc_ent(datasets):
        data_length = len(datasets)
        label_count = {}
        for i in range(data_length):
            label = datasets[i][-1]
            if label not in label_count:
                label_count[label] = 0
            label_count[label] += 1
        ent = -sum([(p / data_length) * log(p / data_length, 2)
                    for p in label_count.values()])
        return ent 
    # Empirical conditional entropy
    def cond_ent(self, datasets, axis=0):
        data_length = len(datasets)
        feature_sets = {}
        for i in range(data_length):
            feature = datasets[i][axis]
            if feature not in feature_sets:
                feature_sets[feature] = []
            feature_sets[feature].append(datasets[i])
        cond_ent = sum([(len(p) / data_length) * self.calc_ent(p)
                    for p in feature_sets.values()])
        return cond_ent
    
    # Information gain
    @staticmethod
    def info_gain(ent, cond_ent):
        return ent - cond_ent
    
    def info_gain_train(self, datasets):
        count = len(datasets[0]) - 1
        ent = self.calc_ent(datasets)
        best_feature = []
        for c in range(count):
            c_info_gain = self.info_gain(ent, self.cond_ent(datasets, axis=c))
            best_feature.append((c, c_info_gain))
        # pick the feature with the largest information gain
        best_ = max(best_feature, key=lambda x: x[-1])
        return best_
    
    def train(self, train_data):
        """
        input:資料集D(DataFrame格式),特徵集A,閾值eta
        output:決策樹T
        """
        _, y_train, features = train_data.iloc[:, :
                                                -1], train_data.iloc[:,
                                                                    -1], train_data.columns[:
                                                                                           -1]
        # 1. If every instance in D belongs to the same class Ck, T is a single-node tree labelled Ck; return T
        if len(y_train.value_counts()) == 1:
            return Node(root=True, label=y_train.iloc[0])
        # 2. If A is empty, T is a single-node tree labelled with the majority class Ck in D; return T
        if len(features) == 0:
            return Node(
                root=True,
                label=y_train.value_counts().sort_values(
                    ascending=False).index[0])

        # 3. Compute the information gains as in 5.1 and let Ag be the feature with the largest gain
        max_feature, max_info_gain = self.info_gain_train(np.array(train_data))
        max_feature_name = features[max_feature]

        # 4. If the information gain of Ag is below the threshold eta, T is a single-node tree labelled with the majority class Ck in D; return T
        if max_info_gain < self.epsilon:
            return Node(
                root=True,
                label=y_train.value_counts().sort_values(
                    ascending=False).index[0])
        # 5. Build the subsets of D for each value of Ag
        node_tree = Node(
            root=False, feature_name=max_feature_name, feature=max_feature)

        feature_list = train_data[max_feature_name].value_counts().index
        for f in feature_list:
            sub_train_df = train_data.loc[train_data[max_feature_name] == f].drop(
                [max_feature_name], axis=1)

            # 6. Recursively generate the subtree for this feature value
            sub_tree = self.train(sub_train_df)
            node_tree.add_node(f, sub_tree)
        # pprint.pprint(node_tree.tree)
        return node_tree

    def fit(self, train_data):
        self._tree = self.train(train_data)
        return self._tree
    def predict(self, X_test):
        return self._tree.predict(X_test)

In [9]:

data_df = pd.DataFrame(datasets, columns=labels)
dt = DTree()
tree = dt.fit(data_df)

In [10]:

tree


In [11]:

dt.predict(['老年', '否', '否', '一般'])
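
Tracing this sample through the fitted tree: the root tests 有自己的房子 (here '否'), and that branch then tests 有工作 (also '否'), so the prediction should be '否', i.e. the loan is not approved.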

scikit-learn example
In [12]:

# data: binary subset of iris (classes 0 and 1), first two features only
def create_data():
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    df['label'] = iris.target
    df.columns = [
        'sepal length', 'sepal width', 'petal length', 'petal width', 'label'
    ]
    # keep the first 100 rows (classes 0 and 1) and columns 0, 1 plus the label
    data = np.array(df.iloc[:100, [0, 1, -1]])
    # print(data)
    return data[:, :2], data[:, -1]
X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
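
As a quick sanity check (our own addition, not part of the original notebook), the slice above keeps only the first 100 iris samples, i.e. classes 0 and 1, so this is a binary problem on two features:

print(X.shape, np.unique(y, return_counts=True))  # (100, 2) and 50 samples per class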

In [13]:

from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
import graphviz

In [14]:

clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)

Out[14]:

DecisionTreeClassifier()
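
Note that DecisionTreeClassifier splits on Gini impurity by default (CART-style). To stay closer to the information-gain criterion implemented above, entropy can be requested explicitly; a minimal variant:

# use entropy (information gain) instead of the default Gini criterion
clf_entropy = DecisionTreeClassifier(criterion='entropy')
clf_entropy.fit(X_train, y_train)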

In [15]:

clf.score(X_test, y_test)

Out[15]:

0.9666666666666667
In [16]:

# export the fitted tree in Graphviz dot format (the output is dot source, not a PDF)
export_graphviz(clf, out_file="mytree.dot")
with open('mytree.dot') as f:
    dot_graph = f.read()
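
As a side note, export_graphviz also returns the dot source directly when called with out_file=None, which avoids the intermediate file:

# alternative: obtain the dot source as a string, no temporary file needed
dot_graph = export_graphviz(clf, out_file=None)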

In [19]:

graphviz.Source(dot_graph)

Out[19]:

(graphviz rendering of the fitted decision tree)

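Task 5, running the hand-written decision tree on iris, is sketched below. This is a minimal sketch of our own (it was not part of the original run): DTree only splits on categorical values, so each continuous feature is first discretized into three bins with pd.cut, and the bin labels 'low'/'mid'/'high' are names we chose arbitrarily.

# a minimal sketch, assuming the DTree class and the X_train/y_train split above
iris_train = pd.DataFrame(X_train, columns=['sepal length', 'sepal width'])
for col in ['sepal length', 'sepal width']:
    # .astype(str) turns the Categorical bins into plain strings so that
    # value_counts() inside DTree.train() only sees values that actually occur
    iris_train[col] = pd.cut(iris_train[col], bins=3,
                             labels=['low', 'mid', 'high']).astype(str)
iris_train['label'] = y_train
iris_dt = DTree(epsilon=0.05)
iris_tree = iris_dt.fit(iris_train)
print(iris_tree)  # inspect the learned splits

Note that Node.predict looks features up by their position in the original column order, so predictions from this toy implementation are only reliable while the recursion does not permute the feature indices; for serious use, sklearn's implementation is the better choice.
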
Summary
A decision tree algorithm first preprocesses the data, then uses an inductive procedure to generate readable rules organized as a tree, and finally applies that tree to classify new samples. Its main advantages are good classification accuracy and simple, interpretable models. Regarding the complexity of the core algorithm (report requirement 3): at every node, the ID3 implementation above computes the information gain of each remaining feature over the current subset, so one split selection costs on the order of O(n * m) for n samples and m features, and the total training cost additionally grows with the depth and branching factor of the tree.