Python web scraping: scrape the Douban Movie Top 250 and load it into MongoDB
I have recently been learning about web scraping. Since I am just getting started, I am still a beginner, so if anything here is wrong, please point it out.
from multiprocessing import Pool
from urllib.request import Request, urlopen
import re
import pymongo

index = 0  # running count of parsed movies (note: each pool worker keeps its own copy)

class DouBanSpider(object):
    # Keep the MongoDB connection as a class attribute: a process pool cannot
    # serialize pymongo objects because they contain thread locks, so they must
    # not live on the instance that gets pickled and sent to the workers.
    client = pymongo.MongoClient('localhost')
    db = client['dbmovie']

    def __init__(self):
        self.headers = {
            'User-Agent': 'add your own browser User-Agent here',
            # Douban only serves these pages to a logged-in session,
            # so add your own Cookie as well.
            'Cookie': 'add your own Cookie here',
        }
    def get_list_html(self, page_num):
        # Each list page holds 25 movies; `start` is the zero-based offset.
        offset = (page_num - 1) * 25
        list_url = 'https://movie.douban.com/top250?start={}'.format(offset)
        request = Request(list_url, headers=self.headers)
        try:
            response = urlopen(request)
        except Exception as e:
            print('Request failed: url {}, reason {}'.format(list_url, e))
            return None
        else:
            html = response.read().decode()
            return html
    def parse_list_html(self, html):
        if html:
            # Pull the detail-page URL of every movie on the list page.
            pattern = re.compile(r'<div class="hd">.*?<a href="(.*?)" class.*?>', re.S)
            detail_urls = re.findall(pattern, html)
            return detail_urls
        else:
            print('html source is None')
            return None
    def get_detail_html(self, detail_url):
        request = Request(detail_url, headers=self.headers)
        try:
            response = urlopen(request)
        except Exception as e:
            print('Request failed: url {}, reason {}'.format(detail_url, e))
            return None
        else:
            detail_html = response.read().decode()
            return detail_html
    def parse_detail_html(self, detail_html):
        # One large regex that captures the ten fields of the info block in order;
        # it only matches when every field is present (see the note on movie #164 below).
        data = re.findall(re.compile(
            r'<h1>.*?<span property="v:itemreviewed">(.*?)</span>.*?<div id="info">.*?<span .*?><a href=.*?>(.*?)</a></span>.*?<span class=.*?><a href=.*?>(.*?)</a>.*?<span class="actor">.*?<span class=.*?><a href=.*?>(.*?)</a></span></span><br/>.*?<span class=.*?>.*?<span property=.*?>(.*?)</span><br/>.*?<span class=.*?>.*?</span> (.*?)<br/>.*?<span class=.*?>.*?</span>(.*?)<br/>.*?</span> <span .*?>(.*?)</span><br/>.*?<span class=.*?>.*?</span> <span property=.*? content=.*?>(.*?)</span>.*?<br/>.*?<span class="pl">.*?</span>(.*?)<br/>.*?', re.S), detail_html)[0]
        global index
        index = index + 1
        print(index, data)
        print('Title:', data[0])
        print('Director:', data[1])
        print('Screenwriter:', data[2])
        print('Starring:', data[3])
        print('Genre:', data[4])
        print('Country/region:', data[5])
        print('Language:', data[6])
        print('Release date:', data[7])
        print('Runtime:', data[8])
        print('Also known as:', data[9])
        dic = {
            'title': data[0],
            'director': data[1],
            'screenwriter': data[2],
            'starring': data[3],
            'genre': data[4],
            'country': data[5],
            'language': data[6],
            'release_date': data[7],
            'runtime': data[8],
            'aka': data[9],
        }
        self.db['movie'].insert_one(dic)
    def start_spider(self, num):
        print('Requesting page {}'.format(num))
        list_html = self.get_list_html(num)
        if list_html:
            detail_urls = self.parse_list_html(list_html)
            if detail_urls:
                for i, detail_url in enumerate(detail_urls, start=1):
                    # Movie #164 overall (二十二) has no "also known as" line,
                    # which makes the big regex fail, so skip it manually. The
                    # counter restarts on every page, so convert it to the
                    # movie's overall position first.
                    if (num - 1) * 25 + i == 164:
                        continue
                    detail_html = self.get_detail_html(detail_url)
                    if detail_html:
                        self.parse_detail_html(detail_html)
if __name__ == '__main__':
    obj = DouBanSpider()
    pool = Pool(1)
    # The Top 250 spans 10 list pages of 25 movies each.
    pool.map(obj.start_spider, list(range(1, 11)))
    pool.close()
    pool.join()
Sample run: the script prints the running index followed by each movie's ten fields as it inserts the record into MongoDB.
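To double-check what actually landed in the database, here is a minimal query sketch, assuming the same localhost connection, the dbmovie/movie names, and the field names used in parse_detail_html above:

import pymongo

client = pymongo.MongoClient('localhost')
collection = client['dbmovie']['movie']

# How many movies made it into the collection?
print('documents stored:', collection.count_documents({}))

# Peek at the first few documents.
for doc in collection.find().limit(3):
    print(doc['title'], '-', doc['director'])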
Writing scrapers takes patience. When nothing comes back, the cause is very often a mistake in the regular expression, so check it carefully.
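One way to soften that all-or-nothing failure mode (a sketch of an alternative, not what the code above does) is to extract each field with its own small pattern and fall back to a default whenever a line, such as the "also known as" entry missing from 二十二, is not there. The patterns in the comments are illustrative and not verified against Douban's current markup:

import re

def extract_field(detail_html, pattern, default='N/A'):
    # Return the first capture group, or the default when the field is absent,
    # so one missing line no longer sinks the whole record.
    match = re.search(pattern, detail_html, re.S)
    return match.group(1).strip() if match else default

# Hypothetical usage against a downloaded detail page:
# title = extract_field(detail_html, r'<span property="v:itemreviewed">(.*?)</span>')
# aka = extract_field(detail_html, r'又名:</span>(.*?)<br')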