
Scraping image galleries with Scrapy


Use Scrapy to crawl the image data of an entire site, launching the crawl with CrawlerProcess.

# -*- coding: utf-8 -*-
import scrapy
import requests

from bs4 import BeautifulSoup

from meinr.items import MeinrItem


class Meinr1Spider(scrapy.Spider):
    name = 'meinr1'
    # allowed_domains = ['www.baidu.com']
    # start_urls = ['http://m.tupianzj.com/meinv/xiezhen/']
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
    }

    def num(self, url, headers):
        # get the page count and pagination URL pattern of one category
        html = requests.get(url=url, headers=headers)
        if html.status_code != 200:
            return '', ''
        soup = BeautifulSoup(html.text, 'html.parser')
        nums = soup.select('#pageNum li')[3]
        nums = nums.select('a')[0].attrs.get('href')
        num = str(nums[:-5]).split('_')[-1]    # last segment is the page count
        papa = str(nums[:-5]).split('_')[:-1]  # the rest is the URL prefix
        papa = '_'.join(papa) + '_'
        return int(num), papa

    def start_requests(self):
        # all the categories on the site
        urls = ['http://m.tupianzj.com/meinv/xiezhen/',
                'http://m.tupianzj.com/meinv/xinggan/',
                'http://m.tupianzj.com/meinv/guzhuang/',
                'http://m.tupianzj.com/meinv/siwa/',
                'http://m.tupianzj.com/meinv/chemo/',
                'http://m.tupianzj.com/meinv/qipao/',
                'http://m.tupianzj.com/meinv/mm/']
        for url in urls:
            num, papa = self.num(url, self.headers)
            if not num:  # category page could not be fetched
                continue
            for i in range(1, num):
                if i != 1:
                    urlzz = url + papa + str(i) + '.html'  # build each list-page URL
                else:
                    urlzz = url
                yield scrapy.Request(url=urlzz, headers=self.headers, callback=self.parse)

    def parse(self, response):
        # get the gallery URL and title from each list page
        htmllist = response.xpath('//div[@class="IndexList"]/ul[@class="IndexListult"]/li')
        for html in htmllist:
            url = html.xpath('./a/@href').extract()
            title = html.xpath('./a/span[1]/text()').extract()
            yield scrapy.Request(url=url[0],
                                 meta={'url': url[0],
                                       'title': title[0]},
                                 headers=self.headers,
                                 callback=self.page)

    def page(self, response):
        is_it = response.xpath('//div[@class="m-article"]/h1/text()').extract()
        if is_it:
            is_it = is_it[0].strip()
            num = int(is_it[-4])  # total page count, read out of the gallery title
            a = 0
            for i in range(1, num):
                a += 1
                # build the URL of each page inside the gallery
                url = str(response.url)[:-5] + '_' + str(i) + '.html'
                # dont_filter=True disables the dupe filter: the first page is
                # requested again after being used to read the total page count
                yield scrapy.Request(url=url,
                                     headers=self.headers,
                                     callback=self.download,
                                     dont_filter=True,
                                     meta={'url': response.meta.get('url'),
                                           'title': response.meta.get('title'),
                                           'num': a})

    def download(self, response):
        img = response.xpath('//img[@id="bigImg"]/@src').extract()  # the big image on this page
        if img:
            item = MeinrItem()
            item['img'] = img[0]
            item['title'] = response.meta.get('title')
            item['num'] = response.meta.get('num')
            yield item

The above is the spider file.
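The string slicing in num() is the least obvious part, so here is a minimal standalone walk-through. The href value is a hypothetical example of what the site's pagination link might look like; the real pattern may differ:

# sketch of the slicing done in num(), using a made-up pagination href
href = 'list_371_15.html'

stem = href[:-5]                              # drop '.html' -> 'list_371_15'
num = stem.split('_')[-1]                     # last segment is the page count -> '15'
papa = '_'.join(stem.split('_')[:-1]) + '_'   # URL prefix -> 'list_371_'

print(int(num), papa)                         # 15 list_371_
# list pages 2..14 are then rebuilt as url + papa + str(i) + '.html'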

import scrapy


class MeinrItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()  # gallery title (used as the folder name)
    img = scrapy.Field()    # URL of a single image
    num = scrapy.Field()    # page number of the image within its gallery

The above is the items file.
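Scrapy Items behave like dicts with a fixed set of keys, which is why only title, img and num can be set on MeinrItem; assigning any other key raises a KeyError. A quick illustration (not part of the project code):

from meinr.items import MeinrItem

item = MeinrItem()
item['title'] = 'example-gallery'  # declared field: fine
item['num'] = 3

try:
    item['author'] = 'someone'     # not declared on MeinrItem
except KeyError as e:
    print('rejected:', e)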

import os

import requests


class MeinrPipeline(object):
    def open_spider(self, spider):
        # runs once when the spider opens: build the download directory path
        self.path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + os.sep + 'download'

    def process_item(self, item, spider):
        title = item['title']
        img = item['img']
        num = item['num']
        path = self.path + os.sep + title  # the gallery title becomes the folder name
        if not os.path.exists(path):       # create the folder if it does not exist
            os.makedirs(path)
        html = requests.get(url=img, headers=spider.headers).content
        path = path + os.sep + str(num) + '.jpg'  # num is the image's page number in the gallery
        with open(path, 'wb') as f:
            f.write(html)
        return item

The above is the pipeline file.
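For this pipeline to actually run, it also has to be enabled in settings.py. Assuming the standard project layout implied by the imports above (project package meinr, pipeline in meinr/pipelines.py), the registration would look roughly like this; 300 is just a conventional priority value:

# settings.py
ITEM_PIPELINES = {
    'meinr.pipelines.MeinrPipeline': 300,  # lower numbers run earlier
}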

import datetime, os

time = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')  # timestamp for the log file name
LOG_FILE = 'logs' + os.sep + time + '_' + 'meinr.log'
LOG_LEVEL = 'INFO'

These lines go in settings.py: they set the log file name and location, and the minimum message level to record.
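One caveat: as far as I know, Scrapy creates the log file but not its parent directory, so the logs folder should exist before the crawl starts. A defensive version of the same snippet:

import datetime, os

os.makedirs('logs', exist_ok=True)  # make sure the log directory exists

time = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
LOG_FILE = 'logs' + os.sep + time + '_' + 'meinr.log'
LOG_LEVEL = 'INFO'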

# -*- coding: utf-8 -*-
from meinr.spiders.meinr1 import Meinr1Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings


process = CrawlerProcess(get_project_settings())  # load the project's settings.py
process.crawl(Meinr1Spider)  # schedule Meinr1Spider
process.start()  # start the crawl; blocks until finished

This is the launch script for the spider.
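A side benefit of CrawlerProcess over the scrapy crawl command is that several spiders can be scheduled in one process. A sketch, where SecondSpider stands for a hypothetical second spider in the same project:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from meinr.spiders.meinr1 import Meinr1Spider
# from meinr.spiders.second import SecondSpider  # hypothetical second spider

process = CrawlerProcess(get_project_settings())
process.crawl(Meinr1Spider)
# process.crawl(SecondSpider)  # each crawl() call schedules one spider
process.start()               # runs all scheduled spiders, blocks until done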


The downloaded files end up laid out as shown below.
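Reading the layout off MeinrPipeline: one folder per gallery title under download/, with each image named after its page number (the gallery titles here are made up):

download/
├── gallery-title-A/
│   ├── 1.jpg
│   ├── 2.jpg
│   └── 3.jpg
└── gallery-title-B/
    ├── 1.jpg
    └── 2.jpg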

Git repository: https://github.com/18370652038/meinr.git
