
Naive Bayes Algorithm: News Classification (Implemented with Sklearn)

1、Steps for Implementing News Classification with Naive Bayes

(1) Provide the text files, i.e., download the dataset

(2) Prepare the data

         Split the dataset into a training set and a test set; use the jieba module for word segmentation, word-frequency counting, and stop-word filtering; extract text features and vectorize the text data.

(3) Analyze the data: use the matplotlib module for analysis

(4) Train the algorithm: use MultinomialNB from sklearn.naive_bayes for training

         scikit-learn provides three naive Bayes classifier classes: GaussianNB, MultinomialNB, and BernoulliNB.

        GaussianNB assumes the features follow a Gaussian distribution, MultinomialNB assumes a multinomial distribution, and BernoulliNB assumes a Bernoulli distribution (a short sketch follows this list).

(5) Test the algorithm: evaluate the Bayes classifier on the test set
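
As a minimal, illustrative sketch of the difference between the three classes, each variant below is fit on the kind of features it expects. The toy arrays are made up purely for this example and are unrelated to the news corpus used later:

import numpy as np
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB

X_counts = np.array([[2, 0, 1], [0, 3, 0], [1, 1, 4], [0, 0, 2]])  # toy word-count features
X_binary = (X_counts > 0).astype(int)                              # toy word presence/absence features
X_real = np.array([[0.1, -1.2, 0.3], [1.5, 0.2, -0.7],
                   [0.0, 0.9, 1.1], [-0.4, 0.5, 0.8]])             # toy continuous features
y = np.array([0, 1, 0, 1])                                         # toy class labels

print(MultinomialNB().fit(X_counts, y).predict(X_counts))  # multinomial likelihood: count features
print(BernoulliNB().fit(X_binary, y).predict(X_binary))    # Bernoulli likelihood: binary features
print(GaussianNB().fit(X_real, y).predict(X_real))         # Gaussian likelihood: continuous features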

2、Code Implementation

# -*- coding: UTF-8 -*-
import os
import random
import jieba
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt

"""
函式說明:中文文字處理
Parameters:
    folder_path - 文字存放的路徑
    test_size - 測試集佔比,預設佔所有資料集的百分之20
Returns:
    all_words_list - 按詞頻降序排序的訓練集列表
    train_data_list - 訓練集列表
    test_data_list - 測試集列表
    train_class_list - 訓練集標籤列表
    test_class_list - 測試集標籤列表
"""
def TextProcessing(folder_path, test_size=0.2):
    folder_list = os.listdir(folder_path)  # list the entries under folder_path
    data_list = []  # dataset samples
    class_list = []  # dataset labels
    # iterate over each subfolder
    for folder in folder_list:
        new_folder_path = os.path.join(folder_path, folder)  # build the path of the subfolder
        files = os.listdir(new_folder_path)  # list of txt files in the subfolder
        j = 1
        # iterate over each txt file
        for file in files:
            if j > 100:  # at most 100 txt samples per class
                break
            with open(os.path.join(new_folder_path, file), 'r', encoding='utf-8') as f:  # open the txt file
                raw = f.read()

            word_cut = jieba.cut(raw, cut_all=False)  # precise mode; returns an iterable generator
            word_list = list(word_cut)  # convert the generator to a list

            data_list.append(word_list)  # add the sample's word list
            class_list.append(folder)  # add the sample's label
            j += 1
    data_class_list = list(zip(data_list, class_list))  # zip the samples and labels together
    random.shuffle(data_class_list)  # shuffle data_class_list
    index = int(len(data_class_list) * test_size) + 1  # index at which to split training and test sets
    train_list = data_class_list[index:]  # training set
    test_list = data_class_list[:index]  # test set
    train_data_list, train_class_list = zip(*train_list)  # unzip the training set
    test_data_list, test_class_list = zip(*test_list)  # unzip the test set

    all_words_dict = {}  # word-frequency statistics over the training set
    for word_list in train_data_list:
        for word in word_list:
            if word in all_words_dict.keys():
                all_words_dict[word] += 1
            else:
                all_words_dict[word] = 1

    # sort by frequency (the dict values) in descending order
    all_words_tuple_list = sorted(all_words_dict.items(), key=lambda f: f[1], reverse=True)
    all_words_list, all_words_nums = zip(*all_words_tuple_list)  # unzip
    all_words_list = list(all_words_list)  # convert to a list
    return all_words_list, train_data_list, test_data_list, train_class_list, test_class_list


"""
函式說明:讀取檔案裡的內容,並去重
Parameters:
    words_file - 檔案路徑
Returns:
    words_set - 讀取的內容的set集合
"""
def MakeWordsSet(words_file):
    words_set = set()  # create a set
    with open(words_file, 'r', encoding='utf-8') as f:  # open the file
        for line in f.readlines():  # read line by line
            word = line.strip()  # strip the newline
            if len(word) > 0:  # non-empty, so add it to words_set
                words_set.add(word)
    return words_set  # return the result


"""
函式說明:文字特徵選取
Parameters:
    all_words_list - 訓練集所有文字列表
    deleteN - 刪除詞頻最高的deleteN個詞
    stopwords_set - 指定的結束語
Returns:
    feature_words - 特徵集
"""
def words_dict(all_words_list, deleteN, stopwords_set=set()):
    feature_words = []  # feature word list
    n = 1
    for t in range(deleteN, len(all_words_list), 1):
        if n > 1000:  # cap feature_words at 1000 dimensions
            break
        # a word can serve as a feature word if it is not a number, is not a stop word, and its length is greater than 1 and less than 5
        if not all_words_list[t].isdigit() and all_words_list[t] not in stopwords_set and 1 < len(all_words_list[t]) < 5:
            feature_words.append(all_words_list[t])
        n += 1
    return feature_words


"""
函式說明:根據feature_words將文字向量化
Parameters:
    train_data_list - 訓練集
    test_data_list - 測試集
    feature_words - 特徵集
Returns:
    train_feature_list - 訓練集向量化列表
    test_feature_list - 測試集向量化列表
"""
def TextFeatures(train_data_list, test_data_list, feature_words):
    def text_features(text, feature_words):  # mark 1 if the word appears in the feature set
        text_words = set(text)
        features = [1 if word in text_words else 0 for word in feature_words]
        return features

    train_feature_list = [text_features(text, feature_words) for text in train_data_list]
    test_feature_list = [text_features(text, feature_words) for text in test_data_list]
    return train_feature_list, test_feature_list  # return the results


"""
函式說明:新聞分類器
Parameters:
    train_feature_list - 訓練集向量化的特徵文字
    test_feature_list - 測試集向量化的特徵文字
    train_class_list - 訓練集分類標籤
    test_class_list - 測試集分類標籤
Returns:
    test_accuracy - 分類器精度
"""
def TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list):
    classifier = MultinomialNB().fit(train_feature_list, train_class_list)
    test_accuracy = classifier.score(test_feature_list, test_class_list)
    return test_accuracy


if __name__ == '__main__':
    # text preprocessing
    folder_path = './SogouC/Sample'  # path where the training corpus is stored
    all_words_list, train_data_list, test_data_list, train_class_list, test_class_list = TextProcessing(folder_path, test_size=0.2)
    # build stopwords_set
    stopwords_file = './stopwords_cn.txt'
    stopwords_set = MakeWordsSet(stopwords_file)

    test_accuracy_list = []
    """
    deleteNs = range(0, 1000, 20)  # 0 20 40 60 ... 980
    for deleteN in deleteNs:
        feature_words = words_dict(all_words_list, deleteN, stopwords_set)
        train_feature_list, test_feature_list = TextFeatures(train_data_list, test_data_list, feature_words)
        test_accuracy = TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list)
        test_accuracy_list.append(test_accuracy)
    plt.figure()
    plt.plot(deleteNs, test_accuracy_list)
    plt.title('Relationship of deleteNs and test_accuracy')
    plt.xlabel('deleteNs')
    plt.ylabel('test_accuracy')
    plt.show()
    """
    feature_words = words_dict(all_words_list, 450, stopwords_set)
    train_feature_list, test_feature_list = TextFeatures(train_data_list, test_data_list, feature_words)
    test_accuracy = TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list)
    test_accuracy_list.append(test_accuracy)
    ave = lambda c: sum(c) / len(c)
    print(ave(test_accuracy_list))

Result: the script prints the average classification accuracy of the classifier on the test set.
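
As a side note, the hand-rolled TextFeatures vectorization above could also be replaced by scikit-learn's CountVectorizer. The sketch below only illustrates the idea and assumes train_data_list, test_data_list, train_class_list, and test_class_list are the token lists and labels produced by TextProcessing; the identity analyzer and the max_features value are illustrative choices, not part of the original script (it also skips the stop-word and digit filtering done in words_dict):

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

# Build the vocabulary and count matrices directly from the jieba token lists.
vectorizer = CountVectorizer(analyzer=lambda tokens: tokens, max_features=1000)
train_X = vectorizer.fit_transform(train_data_list)
test_X = vectorizer.transform(test_data_list)
clf = MultinomialNB().fit(train_X, train_class_list)
print(clf.score(test_X, test_class_list))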

Note:

Common part-of-speech tags used by jieba's POS tagging; a short illustration follows.
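
As a minimal illustration of jieba's POS tagging (the sample sentence is made up for this example), jieba.posseg segments a sentence and attaches a tag such as n (noun) or v (verb) to each token:

import jieba.posseg as pseg

# Segment a sample sentence and print each token with its part-of-speech tag.
for word, flag in pseg.cut("朴素贝叶斯是一种常用的文本分类算法"):
    print(word, flag)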