
Spam Filtering Based on Naive Bayes

1. Text Tokenization

#A text string can be split with Python's string.split() method
mySent = 'This book is the best book on python or M.L. I have ever laid eyes upon'
words = mySent.split(' ')
#Python also has built-in string methods to convert to all lowercase (.lower()) or all uppercase (.upper())
[a.lower() for a in words]
>>['this','book','is','the','best','book','on','python','or','m.l.','i','have','ever','laid','eyes','upon']

#In the output above, punctuation was treated as part of the tokens. A regular expression can split the sentence instead, with the delimiter being any run of characters other than letters, digits, and underscores
import re
words = re.split(r'\W+',mySent)   #\W+ rather than \W*: a pattern that can match the empty string raises ValueError in re.split on Python 3.7+
[a.lower() for a in words if len(a)>0 ]#only keep strings of length greater than 0
>>['this','book','is','the','best','book','on','python','or','m','l','i','have','ever','laid','eyes','upon']
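The two steps above (regex split, then lowercasing and filtering) come up often enough to be worth wrapping in one helper. A minimal sketch; tokenize is an illustrative name, not part of the original code:

import re

def tokenize(text):
    #split on runs of non-word characters, lowercase, and drop empty tokens
    return [tok.lower() for tok in re.split(r'\W+', text) if tok]

tokenize('This book is the best book on python or M.L. I have ever laid eyes upon')
#-> same token list as above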

2. Bayes-Related Functions

#build the vocabulary list: every unique word that appears in the corpus
def vocabularyTable(dataSet):
    vocabSet = set([])
    for document in dataSet:
        vocabSet = vocabSet | set(document)   #set union: add this document's words
    return list(vocabSet)

#build a set-of-words vector: 1 if the vocabulary word appears in the document, 0 otherwise
def doc2vec(vocabSet,document):
    docVec = [0]*len(vocabSet)
    for word in document:
        if (word in vocabSet):
            docVec[vocabSet.index(word)] = 1
    return docVec   
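A quick sanity check of the two functions on a made-up two-document corpus (set order is not deterministic, so the position of each 1 varies between runs):

docs = [['my','dog','has','flea','problems'],
        ['stop','posting','stupid','garbage']]
vocab = vocabularyTable(docs)
print(doc2vec(vocab,['my','dog','is','stupid']))
#-> a 0/1 vector with three 1s, for 'my', 'dog' and 'stupid';
#   'is' is silently ignored because it is not in the vocabulary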

#train the naive Bayes classifier
import numpy as np
import math
def trainBayes(trainVec,classLabel):
    numData = len(trainVec)
    numWords = len(trainVec[0])
    pAbusive = sum(classLabel)/float(numData)   #prior probability of the spam class
    #Laplace smoothing: start word counts at 1 and denominators at 2,
    #so a word never seen in one class does not get probability 0
    p0num = np.ones(numWords); p1num = np.ones(numWords)
    p0sum = 2.0; p1sum = 2.0
    for i in range(numData):
        if(classLabel[i]==1):
            p1num += trainVec[i]
            p1sum += sum(trainVec[i])
        else:
            p0num += trainVec[i]
            p0sum += sum(trainVec[i])
    #log of the smoothed conditional probabilities P(word|class); logs avoid
    #underflow when many small probabilities are combined at classification time
    p1Vect = np.log(p1num/p1sum)
    p0Vect = np.log(p0num/p0sum)
    return pAbusive,p1Vect,p0Vect
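To see why the counts start at ones and twos: without smoothing, a word absent from every spam training document would get P(word|spam) = 0, and a single occurrence of it at classification time would drive the whole spam score to negative infinity in log space. A toy run with made-up vectors:

trainVec = [[1,1,0],[1,0,1],[0,1,1]]
labels = [1,1,0]   #the first two documents are spam
pSpam,p1V,p0V = trainBayes(trainVec,labels)
print(pSpam)         #-> 0.666..., two thirds of the training documents are spam
print(np.exp(p1V))   #-> [0.5 0.333... 0.333...], smoothed P(word|spam), all strictly positive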

#naive Bayes classification function: compare the log posteriors of the two classes
def classify(docVec,pClass1,p1Vect,p0Vect):
    #log P(class|doc) is, up to a constant, log P(class) plus the sum of log P(word|class) over the words present
    p1 = sum(docVec * p1Vect) + math.log(pClass1)
    p0 = sum(docVec * p0Vect) + math.log(1-pClass1)
    if(p1>p0):
        return 1
    else:
        return 0
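Continuing the toy example from above: because the conditional probabilities are stored as logs, the product P(class)·∏P(wordᵢ|class) from Bayes' rule becomes the sum computed in classify, and the class with the larger log posterior wins:

print(classify(np.array([1,1,0]),pSpam,p1V,p0V))   #-> 1, the spam score is larger
print(classify(np.array([0,1,1]),pSpam,p1V,p0V))   #-> 0, the ham score is larger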

3. File Parsing

def textParse(email):
    import re
    words = re.split(r'\W+',email)   #\W+ for the same reason as in section 1
    return [a.lower() for a in words if(len(a)>2)]   #drop tokens of two characters or fewer
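For example (the email text here is invented; the output assumes the \W+ fix above):

textParse('Peter, <a href="http://x.com">click here</a> NOW!!!')
#-> ['peter', 'href', 'http', 'com', 'click', 'here', 'now']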

4. Spam Filter Test

def spamTest():
    docList = []; classList = []
    #load the 25 spam and 25 ham example emails (if a file contains non-UTF-8 bytes, pass encoding='latin-1' to open())
    for i in range(1,26):
        wordList = textParse(open('email/spam/%d.txt'%i).read())
        docList.append(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt'%i).read())
        docList.append(wordList)
        classList.append(0)
    vocabList = vocabularyTable(docList)
    
    #randomly hold out 10 of the 50 emails for testing
    trainingSet = list(range(50)); testSet = []
    for i in range(10):
        #use len(trainingSet) here, not 50: after an element is deleted only 49 remain,
        #so the largest valid index is 48; a draw of 49 on the next pass would be out of range
        randIndex = int(np.random.uniform(0,len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []; trainClass = []
    for index in trainingSet:
        trainMat.append(doc2vec(vocabList,docList[index]))
        trainClass.append(classList[index])

    #training
    pSpam,p1Vect,p0Vect = trainBayes(trainMat,trainClass)

    #testing
    errorCount = 0
    for index in testSet:
        wordVect = doc2vec(vocabList,docList[index])
        predicted = classify(np.array(wordVect),pSpam,p1Vect,p0Vect)   #classify once and reuse the result
        if(predicted != classList[index]):
            errorCount += 1
            print('the real class is %d,the predict class is %d'%(classList[index],predicted))
            print(docList[index])
    print('the error rate is %.2f'%(float(errorCount)/len(testSet)))

spamTest()
>>the error rate is 0.00
spamTest()
>>the real class is 1,the predict class is 0
['home', 'based', 'business', 'opportunity', 'knocking', 'your', 'door', 'don抰', 'rude', 'and', 'let', 'this', 'chance', 'you', 'can', 'earn', 'great', 'income', 'and', 'find', 'your', 'financial', 'life', 'transformed', 'learn', 'more', 'here', 'your', 'success', 'work', 'from', 'home', 'finder', 'experts']
the error rate is 0.10

Because the test emails are selected at random, the output may differ from run to run. In the two runs above, the first error rate was 0 and the second was 0.10, i.e. one of the ten test emails was misclassified. Running the test several more times shows that most of the errors are spam messages misjudged as ham. That is the preferable failure mode: letting the occasional spam message through is better than sending legitimate mail to the spam folder.
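Since a single 10-email holdout gives a noisy estimate, a steadier number comes from averaging repeated random splits (hold-out cross-validation). A minimal sketch, assuming spamTest is modified to return float(errorCount)/len(testSet) instead of only printing it:

def averageErrorRate(numTrials=10):
    total = 0.0
    for _ in range(numTrials):
        total += spamTest()   #assumes the modified spamTest returns its error rate
    print('average error rate over %d trials: %.2f'%(numTrials,total/numTrials))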