程式人生 > Python 爬蟲 七夕福利

Python 爬蟲 七夕福利

mkdir ins print image -- max src wow input

祝大家七夕愉快

妹子圖

 1 import requests
 2 from lxml import etree
 3 import os
 4 def headers(referer):#圖片的下載可能和頭部的referer有關,所以將referer設為變換值,以躲避反扒
 5     headers = {
 6         User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36,
 7         Referer
: {}.format(referer)} 8 return headers 9 def Tuji(pag):#找圖集 10 fullurl = http://www.mzitu.com/page/{}/.format(pag) 11 shouye_html = requests.get(fullurl) 12 shouye_html_text = shouye_html.text 13 shouye_ele = etree.HTML(shouye_html_text) 14 tj_list = shouye_ele.xpath(//*[@id="pins"]/li/a/@href
)#找每頁的圖集url 15 Tuji_url_list = [] 16 for tj_url in tj_list: 17 Tuji_url_list.append(tj_url) 18 return Tuji_url_list 19 def gettuji_info(tj_url_list):#圖集的url列表 收集圖集的相關信息 20 for tj_url_1 in tj_url_list: #tj_url_1 --- > http://www.mzitu.com/146823 21 tj_html = requests.get(tj_url_1, headers=headers(tj_url_1))
22 tj_html_text = tj_html.text 23 tj_ele = etree.HTML(tj_html_text) 24 img_title = tj_ele.xpath(//h2[@class="main-title"]/text())[0] # 圖集名稱 25 max_pag_list = int(tj_ele.xpath(/html/body/div[2]/div[1]/div[4]/a[5]/span/text())[0]) # 找最大頁數 26 if os.path.exists(img_title) == True: 27 print(!) 28 else: 29 os.mkdir(img_title) 30 for i in range(1, int(max_pag_list + 1)): 31 tj_url_2 = tj_url_1 + /+str(i) #tj_url_2 ---> http://www.mzitu.com/146823 + pag 32 tj_html = requests.get(tj_url_2, headers=headers(tj_url_1)) 33 tj_html_text = tj_html.text 34 tj_ele = etree.HTML(tj_html_text) 35 img_url = tj_ele.xpath(//div[@class="main-image"]/p/a/img/@src)[0] # 從不同的tj_url_2中找圖片的url 36 print(正在下載+img_title++str(i)+) 37 with open(img_title+/+str(i)+.jpg, "wb+") as jpg: 38 jpg.write(requests.get(img_url, headers=headers(tj_url_2)).content) 39 40 if __name__ == __main__: 41 pags = int(input(你想搞幾頁的嘿嘿?)) 42 for pag in range(1,pags+1): 43 gettuji_info(Tuji(pag))

Python 爬蟲 七夕福利