Implementing a simple web crawler with Tornado
阿新 · Published 2018-12-30
The code comes from the example programs in the official Tornado documentation, but it can be hard for a Tornado newcomer to read, so I added comments to make it easier to follow. The code is as follows:
#!/usr/bin/env python
# coding=utf-8
import time
from datetime import timedelta

try:
    # Python 2
    from HTMLParser import HTMLParser
    from urlparse import urljoin, urldefrag
except ImportError:
    # Python 3
    from html.parser import HTMLParser
    from urllib.parse import urljoin, urldefrag

from tornado import httpclient, gen, ioloop, queues
# The URL to start crawling from
base_url = 'http://www.baidu.com'
# Number of concurrent workers
concurrency = 10
# This coroutine collects every URL linked from the given page
@gen.coroutine
def get_links_from_url(url):
    try:
        # Send an asynchronous request to the URL
        response = yield httpclient.AsyncHTTPClient().fetch(url)
        print('fetched %s' % url)
        # Decode the response body if it arrived as bytes
        html = response.body if isinstance(response.body, str) \
            else response.body.decode(errors='ignore')
        # Build the list of absolute URLs found on the page
        urls = [urljoin(url, remove_fragment(new_url))
                for new_url in get_links(html)]
    except Exception as e:
        print('Exception: %s %s' % (e, url))
        # Return an empty list on error
        raise gen.Return([])
    # Return the list of URLs
    raise gen.Return(urls)
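# Note: raise gen.Return(value) is how an old-style @gen.coroutine returns a
# value; on Python 3.3+ a plain `return value` inside the coroutine works too.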
def remove_fragment(url):
    # Strip the fragment (anchor) from a URL
    pure_url, frag = urldefrag(url)
    return pure_url
def get_links(html):
    # Extract URLs from an HTML page
    class URLSeeker(HTMLParser):
        def __init__(self):
            HTMLParser.__init__(self)
            self.urls = []

        def handle_starttag(self, tag, attrs):
            href = dict(attrs).get('href')
            if href and tag == 'a':
                self.urls.append(href)

    url_seeker = URLSeeker()
    url_seeker.feed(html)
    return url_seeker.urls
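# Note: URLSeeker collects the raw href values (often relative paths);
# get_links_from_url() later turns them into absolute URLs with urljoin()
# and strips anchors with remove_fragment().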
@gen.coroutine
def main():
    # Create the work queue
    q = queues.Queue()
    # Record the start time
    start = time.time()
    # Two sets: URLs currently being fetched and URLs already fetched
    fetching, fetched = set(), set()

    @gen.coroutine
    def fetch_url():
        # Take a URL off the queue
        current_url = yield q.get()
        try:
            # Skip URLs that are already being fetched
            if current_url in fetching:
                return

            print('fetching %s' % current_url)
            # Mark the URL as being fetched
            fetching.add(current_url)
            # Collect the links on the newly fetched page
            urls = yield get_links_from_url(current_url)
            # Record the URL as fetched
            fetched.add(current_url)

            for new_url in urls:
                # Only follow links beneath the base URL
                if new_url.startswith(base_url):
                    yield q.put(new_url)
        finally:
            # Mark this queue item as done
            q.task_done()

    @gen.coroutine
    def worker():
        while True:
            # Keep pulling URLs from the queue forever
            yield fetch_url()

    # Put the first URL on the queue
    q.put(base_url)

    # Start workers, then wait for the work queue to be empty.
    for _ in range(concurrency):
        worker()
    # Wait until every queued URL has been processed (5-minute timeout)
    yield q.join(timeout=timedelta(seconds=300))
    # When crawling is complete the two sets must be identical
    assert fetching == fetched
    # Print the elapsed time and the number of URLs fetched
    print('Done in %d seconds, fetched %s URLs.' % (
        time.time() - start, len(fetched)))
if __name__ == '__main__':
    io_loop = ioloop.IOLoop.current()
    io_loop.run_sync(main)
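The example above uses the legacy @gen.coroutine / yield style. On Python 3.5+ with a recent Tornado (5.0 or later), the same fetch logic can also be written with native async / await coroutines. The following is only a minimal sketch of that variant, not part of the original example; it assumes the get_links() helper from the listing above is available:

from urllib.parse import urljoin, urldefrag

from tornado import httpclient, ioloop


async def get_links_from_url(url):
    try:
        # await works directly on the Future returned by fetch()
        response = await httpclient.AsyncHTTPClient().fetch(url)
        print('fetched %s' % url)
        html = response.body.decode(errors='ignore')
        # Native coroutines return values with a plain return statement
        return [urljoin(url, urldefrag(new_url)[0])
                for new_url in get_links(html)]  # get_links() as defined above
    except Exception as e:
        print('Exception: %s %s' % (e, url))
        return []


if __name__ == '__main__':
    # Quick standalone check of the coroutine
    links = ioloop.IOLoop.current().run_sync(
        lambda: get_links_from_url('http://www.baidu.com'))
    print('%d links found' % len(links))

The queue-and-worker structure in main() translates the same way: each yield becomes await and raise gen.Return(...) becomes a plain return, with no changes needed to queues.Queue or IOLoop.run_sync.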