Pyspider 爬蟲使用說明
一、 pyspider安裝
pip install pyspider
啟動之後如下:
Windows系統
Linux系統
二、pyspider例子
html bodydiv#container.ss-list div.main div.box div.content div.siteList ul li h3 a
將路徑加上>即可,變成形如
html>body>div#container.ss-list>div.main>div.box>div.content>div.siteList>ul>li>h3>a
注意:有時候可能需要修改某些內容,如去掉(tbody)
FireFox瀏覽器抓取結果
html>body>div.wrapper.bgf>div.school>article.schoolintro>dl.clearfix>dd>table>tbody>tr:last-child>td:last-child
實際使用時候為:
table>tr:nth-child(6)>td:nth-child(2)
備註:
tbody可能是頁面自動生成的,可以通過開啟原始碼檢視,確定是否需要去掉!
原始碼沒有<tbody>這個元素,而瀏覽器開啟時是有<tbody>這個元素的
原始碼如下:
瀏覽器檢視結果,含有<tbody>這個元素。
程式碼如下:
爬取豆瓣分類資料
from pyspider.libs.base_handler import *
class Handler(BaseHandler):
    """Crawl Douban movie tag pages and persist per-movie details.

    Flow: on_start -> index_page (tag links) -> list_page (movie links +
    pagination) -> detail_page (extracted fields returned to pyspider).
    """

    crawl_config = {
    }

    @every(minutes=24 * 60)
    def on_start(self):
        # Entry point, re-triggered once per day: fetch the tag index page.
        self.crawl('http://movie.douban.com/tag/', callback=self.index_page)

    @config(age=10 * 24 * 60 * 60)
    def index_page(self, response):
        # Collect every tag link from the index table.
        # NOTE: the original was a syntax error ("for each inresponse.doc");
        # the missing space between "in" and "response" is restored here.
        for each in response.doc('#content>div>div.article>table:nth-child(9)>tbody>tr>td>a').items():
            # Tag links point at the www subdomain; rewrite to the movie
            # subdomain so the list pages resolve correctly.
            a = each.attr.href.replace('www', 'movie')
            self.crawl(a, callback=self.list_page)

    def list_page(self, response):
        # Queue every movie-detail link for detail_page.
        for each in response.doc('td > .pl2 > a').items():
            self.crawl(each.attr.href, callback=self.detail_page)
        # Follow the "next page" link and keep paging through list_page.
        for each in response.doc('.next > a').items():
            self.crawl(each.attr.href, callback=self.list_page)

    def detail_page(self, response):
        # Return the record pyspider stores for one movie.
        # The "導演" key means "director" and is kept verbatim because it is
        # part of the persisted result schema.
        return {
            "url": response.url,
            "title": response.doc('* > * > div#wrapper >div#content > h1 > span').text(),
            "rate": response.doc('.rating_num').text(),
            "導演": response.doc('#info> span:nth-child(1) > span.attrs > a').text()
        }
例2、爬取dir001網址的分類資訊
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2016-09-08 09:22:15
# Project: dir003
from pyspider.libs.base_handler import *
class Handler(BaseHandler):
    """Crawl dir001.com directory categories and extract per-site details.

    Flow: on_start -> index_page (category links) -> list_page (site links +
    "last page" pagination) -> detail_page (extracted fields).
    """

    crawl_config = {
    }

    @every(minutes=24 * 60)
    def on_start(self):
        # Entry point, re-triggered once per day: fetch the category index.
        self.crawl('http://www.dir001.com/category', callback=self.index_page)

    @config(age=100 * 24 * 60 * 60)
    def index_page(self, response):
        # Collect every category link from the catalog box.
        # NOTE: the original was a syntax error ("for each inresponse.doc");
        # the missing space between "in" and "response" is restored here.
        for each in response.doc('html>body>div#container.ss-category>div.main>div.box.catalogbox>div.content>dl>dd>ul>li>a').items():
            self.crawl(each.attr.href, callback=self.list_page)

    @config(priority=2)
    def list_page(self, response):
        # Queue every listed site for detail_page.
        for each in response.doc('html>body>div#container.ss-list>div.main>div.box>div.content>div.siteList>ul>li>h3>a').items():
            self.crawl(each.attr.href, callback=self.detail_page)
        # The last <li> of the pager is the "next" link; keep paging.
        for each in response.doc('html>body>div#container.ss-list>div.main>div.box>div.content>div.pagelink>form#pageForm>ul.yiiPager>li:last-child>a').items():
            self.crawl(each.attr.href, callback=self.list_page)

    def detail_page(self, response):
        # Return the record pyspider stores for one directory entry.
        return {
            "url": response.doc('html>body>div#container.site-content>div.box.contenInfo>div.content>div.contentMain>div.siteInfo>div.movieDetail>p>span.site-domain').text(),  # site URL/domain
            "mulu": response.doc('html>body>div#container.site-content>div.box.contenInfo>div.content>div.contentMain>div.siteInfo>div.movieDetail>p:nth-child(3)').text(),  # directory/category
            "location": response.doc('html>body>div#container.site-content>div.box.contenInfo>div.content>div.contentMain>div.siteInfo>div.movieDetail>p:nth-child(4)').text(),  # address
            "Rank": response.doc('html>body>div#container.site-content>div.box.contenInfo>div.content>div.contentMain>div.siteInfo>div.movieDetail>p>b.alexarank').text(),  # Alexa rank
            "on_clicks": response.doc('html>body>div#container.site-content>div.box.contenInfo>div.content>div.contentMain>div.siteInfo>div.movieDetail>p>span.c_red').text(),  # click count
            "description": response.doc("html>body>div#container.site-content>div.box.contenInfo>div.content>div.contentMain>div.siteInfo>div.movieDetail>p>span.site-description").text()
        }
參考文獻