Python抓取電視劇《天盛長歌》豆瓣短評,並製作成詞雲。
阿新 • 發佈:2018-12-09
最近在看《天盛長歌》,才看了30多集,感覺裡邊的劇情還是很有深度,每個反派都是智商線上,劇情也是環環相扣,以至於每個鏡頭給了哪些特寫我都要細細斟酌一番。不過可能劇情是根據小說改編,所以部分劇情有些老套,而且因為節奏有點慢,劇情過多,光是大皇子領盒飯就用了20集。目前來說不喜歡韶寧公主有關的劇情,不知道她後邊的劇情怎麼發展,配角選的也是十分用心了,喜歡珠茵姐姐,可惜十幾集就領盒飯了,而且還有點不值,蘭香院的姑娘們顏值也是線上的!和別的劇比起來,真真是美的各有千秋。
一、抓取資料 首先要抓取豆瓣影評,豆瓣比較奇怪,即使登陸了也只能抓取480條短評,不登陸可以抓200條短評,所以我就抓取了不登陸的200條(還不會用登陸抓取)。 上程式碼:
#coding=utf-8
import requests
from lxml import etree
import random
import pymysql
# 獲取網頁內容
# Fetch one page through a random proxy and return its parsed HTML tree.
def geturl(url, IP_pools):
    """Download *url* via a random proxy taken from *IP_pools*.

    Returns an ``lxml`` element tree on HTTP 200, or ``None`` when the
    request fails, times out, or returns any other status code.
    """
    USER_AGENTS = [
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
    ]
    headers = {
        "User-Agent": random.choice(USER_AGENTS),
        "Host": "movie.douban.com",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    }
    try:
        # Pick a random verified IP out of the proxy pool.
        ip_one = random.choice(IP_pools)
        print(ip_one)
        proxies1 = {'http': "http://" + ip_one}
        print(url)
        r = requests.get(url=url, headers=headers, proxies=proxies1, timeout=5)
        print(r.status_code)
        # Explicit status check instead of `assert` -- asserts disappear
        # when Python runs with -O, silently disabling the validation.
        if r.status_code != 200:
            raise requests.HTTPError("unexpected status %s" % r.status_code)
        return etree.HTML(r.content)
    except (requests.RequestException, IndexError) as exc:
        # IndexError covers random.choice() on an empty proxy pool;
        # everything else request-related is a RequestException subclass.
        print("**" * 20 + "錯誤!" + "**" * 20)
        print(exc)
        return None
# 從資料庫的代理池取出全部驗證成功的IP,並存入列表中
# Read every verified proxy ("score = 'T'") out of the ip_pool table.
def get_IP():
    """Return a list of ``"ip:port"`` strings for all verified proxies.

    Returns ``None`` when the connection or cursor cannot be opened.
    Unlike the original, the cursor and connection are always closed --
    the old code returned from inside the ``if`` without closing either.
    """
    con = pymysql.connect(host='192.111.111.111', user='root', passwd='111111',
                          db='databace', port=3306, charset='utf8')
    # NOTE(review): pymysql.connect normally raises on failure rather than
    # returning a falsy object; the guard is kept to preserve the original
    # control flow -- confirm whether it is reachable.
    if not con:
        print("資料庫開啟失敗!")
        return None
    print("ok")
    try:
        cur = con.cursor()
        if not cur:
            print("開啟遊標失敗!")
            return None
        try:
            sql_read = "select IP,port from ip_pool where score = %s "
            cur.execute(sql_read, "T")
            con.commit()
            # Each row is (IP, port); join them into "ip:port" strings.
            return [ip + ":" + port for ip, port in cur.fetchall()]
        finally:
            cur.close()
    finally:
        con.close()
# Load the verified proxy list once at module level; shared by the crawler.
IP_pools = get_IP()

def TSCG():
    """Crawl the first ~220 Douban short comments (20 per page) and append
    them, one per line, to output/tianshengchangge_200.txt.

    Skips a page when ``geturl`` fails -- the original dereferenced the
    ``None`` it returns on error and crashed with AttributeError.
    """
    # Open the output once, not once per comment as the original did.
    with open("output/tianshengchangge_200.txt", "a", encoding="utf-8") as fw:
        for start in range(0, 220, 20):
            url = "https://movie.douban.com/subject/26761328/comments?start=" + str(start)
            page = geturl(url, IP_pools)
            if page is None:
                # Bad proxy or non-200 response: move on to the next page.
                continue
            # Grab the short-comment text nodes directly from the page.
            for comment in page.xpath('//span[@class="short"]/text()'):
                fw.write(comment.strip().replace("\n", "") + "\n")

# Crawl the short comments.
TSCG()
二、處理資料
1、抓到資料後要先製作停止詞;停止詞的txt檔案在下一篇文章裡
# 製作停止詞
# Build the stop-word set.
def make_stopdict(path="stopwords.txt"):
    """Load one stop word per line from *path* and return them as a set.

    The filename was hard-coded in the original; it is now a parameter
    with the same default, so existing callers are unaffected.  Each line
    is stripped of surrounding whitespace (including the newline).
    A leftover debug ``print(type(...))`` was removed.
    """
    with open(path, "r", encoding="utf-8") as fr:
        return {line.strip() for line in fr}
2、去掉停止詞,使用了 jieba(停止詞就是類似於“的、啊、是”等這型別的詞。詳情請百度)
import jieba.analyse
import re
def removeStopwords():
    """Tokenize the crawled comments with jieba, keep only tokens made of
    CJK ideographs or ASCII letters, drop stop words, and append the
    surviving words (one per line) to output/douban1.txt.

    Returns the output file path, as the original did.
    """
    stopdict = make_stopdict()
    # Accept only tokens consisting purely of Chinese characters or letters.
    zhongwen_pat = re.compile(r'^[\u4e00-\u9fa5a-zA-Z]+$')
    all_content = []
    # Read the crawled short comments back in, one comment per line.
    with open("output/tianshengchangge_200.txt", "r", encoding="utf-8") as fr:
        review = fr.readlines()
    # Open the output once -- the original reopened it for every word.
    with open("output/douban1.txt", "a", encoding="utf-8") as fw:
        for line in review:
            # De-duplicate within the comment, then subtract the stop words.
            cut_set = {c for c in jieba.cut(line) if zhongwen_pat.search(c)}
            res_list = list(cut_set - stopdict)
            all_content.extend(res_list)
            for word in res_list:
                fw.write(word.strip().replace("\n", "") + "\n")
    return "output/douban1.txt"
3、使用jieba分詞分析短評中各個詞的出現頻率
def get_top_keywords():
    """Score the cleaned comment words with jieba's TextRank and return a
    dict mapping each of the 100 highest-ranked words to its weight.
    """
    file = "output/douban1.txt"
    with open(file, 'r', encoding="utf-8") as f:
        texts = f.read()  # the whole file as one string
    # Keep the 100 top-ranked words together with their weights.
    # (The original also declared an unused `top_word_lists`; removed.)
    result = jieba.analyse.textrank(texts, topK=100, withWeight=True)
    keywords = {word: weight for word, weight in result}
    print(keywords)
    return keywords
4、製作詞雲
from wordcloud import WordCloud
import matplotlib.pyplot as plt
# 製作詞雲
# Build the word-cloud image.
def makeWordcloud():
    """Render the top-100 keyword weights as a word cloud shaped by
    output/mark2.jpg, save it to output/Wc_tscg4.png, and display it.
    """
    keywords = get_top_keywords()
    # BUG FIX: the original called a bare `imread`, which is never imported
    # anywhere in this file and raised NameError.  matplotlib's imread is
    # already in scope via the `plt` import, so use it for the mask image.
    color_mask = plt.imread("output/mark2.jpg")
    cloud = WordCloud(
        # Font file -- without one, CJK glyphs render as empty boxes.
        # The font *path* must not contain non-ASCII characters.
        font_path="C:/Windows/Fonts/simfang.ttf",
        # Background colour; the default is black.
        background_color='white',
        # Shape mask for the cloud.
        mask=color_mask,
        # Draw at most this many words.
        max_words=100,
        # Largest font size; defaults to the image height when unset.
        max_font_size=80,
        # Canvas size; ignored when a mask is set.
        width=600, height=400, margin=2,
        # Fraction of words laid horizontally (the rest are vertical).
        prefer_horizontal=0.4)
    wc = cloud.generate_from_frequencies(keywords)  # build the cloud
    wc.to_file("output/Wc_tscg4.png")  # save the image
    # Show the result on screen, without axes.
    plt.imshow(wc)
    plt.axis('off')
    plt.show()

# Build the word cloud.
makeWordcloud()
5、最終效果