Python爬蟲爬取新聞資訊案例詳解
前言
本文的文字及圖片來源於網路,僅供學習、交流使用,不具有任何商業用途,版權歸原作者所有,如有問題請及時聯絡我們以作處理。
一個簡單的Python資訊採集案例,列表頁到詳情頁,到資料儲存,儲存為txt文件,網站網頁結構算是比較規整,簡單清晰明瞭,資訊新聞內容的採集和儲存!
應用到的庫
requests,time,re,UserAgent,etree
import requests,time,re
from fake_useragent import UserAgent
from lxml import etree
列表頁面
列表頁,連結xpath解析
# Collect detail-page hrefs from every anchor inside the news list <ul>.
href_list=req.xpath('//ul[@class="news-list"]/li/a/@href')
詳情頁
內容xpath解析
# Article title: first <h2> text node inside the title box.
h2=req.xpath('//div[@class="title-box"]/h2/text()')[0]
# Source/author line displayed next to the title.
author=req.xpath('//div[@class="title-box"]/span[@class="news-from"]/text()')[0]
# Text of every <p> paragraph in the article body (list of strings).
details=req.xpath('//div[@class="content-l detail"]/p/text()')
內容格式化處理
# Join the body paragraphs into one newline-separated string.
detail='\n'.join(details)
標題格式化處理,替換非法字元
# Characters that are illegal in Windows filenames: / \ : * ? " < > |
pattern = r"[\/\\\:\*\?\"\<\>\|]"
new_title = re.sub(pattern,"_",title) # replace each illegal char with an underscore
儲存資料,儲存為txt文字
def save(self, h2, author, detail):
    """Persist one article as '<title>.txt': title, body, then source line.

    Args:
        h2: sanitized article title (also used as the file name stem).
        author: the article's source/author line.
        detail: the newline-joined article body.
    """
    with open(f'{h2}.txt', 'w', encoding='utf-8') as f:
        # Original used '%s%s%s%s%s' with only four values (TypeError) and
        # glued print() onto this line; supply the missing separator instead.
        f.write('%s%s%s%s%s' % (h2, '\n', detail, '\n', author))
    print(f"儲存{h2}.txt文字成功!")
遍歷資料採集,yield處理
def get_tasks(self):
    """Drive the crawl: delegate every item scraped from the list page."""
    yield from self.parse_home_list(self.url)
程式執行效果
程式採集效果
附原始碼參考:
# -*- coding: UTF-8 -*-
"""Crawl a news list page, fetch each linked article, and save it as a txt file."""
import re
import time  # original omitted this but calls time.sleep()

import requests
from fake_useragent import UserAgent
from lxml import etree


class RandomHeaders(object):
    """Mixin supplying a fresh random User-Agent header per request."""
    ua = UserAgent()

    @property
    def random_headers(self):
        return {'User-Agent': self.ua.random}


class Spider(RandomHeaders):
    """Scrape article links from one list page and save each article to disk."""

    def __init__(self, url):
        self.url = url

    def parse_home_list(self, url):
        """Yield (title, detail) for every article linked from the list page."""
        response = requests.get(url, headers=self.random_headers).content.decode('utf-8')
        req = etree.HTML(response)
        href_list = req.xpath('//ul[@class="news-list"]/li/a/@href')
        print(href_list)
        for href in href_list:
            item = self.parse_detail(f'https://yz.chsi.com.cn{href}')
            yield item

    def parse_detail(self, url):
        """Fetch one article page, save it, and return (title, detail)."""
        print(f">>正在爬取{url}")
        try:
            response = requests.get(url, headers=self.random_headers).content.decode('utf-8')
            time.sleep(2)  # throttle between requests
        except Exception as e:
            print(e.args)
            # Original dropped the retry's result; propagate it to the caller.
            return self.parse_detail(url)
        else:
            req = etree.HTML(response)
            try:
                h2 = req.xpath('//div[@class="title-box"]/h2/text()')[0]
                h2 = self.validate_title(h2)
                author = req.xpath('//div[@class="title-box"]/span[@class="news-from"]/text()')[0]
                details = req.xpath('//div[@class="content-l detail"]/p/text()')
                detail = '\n'.join(details)
                print(h2, detail)
                self.save(h2, detail)
                return h2, detail
            except IndexError:
                # Page came back without the expected nodes; wait and retry.
                print(">>>採集出錯需延時,5s後重試..")
                time.sleep(5)
                return self.parse_detail(url)

    @staticmethod
    def validate_title(title):
        """Replace characters illegal in Windows filenames with underscores."""
        pattern = r"[\/\\\:\*\?\"\<\>\|]"
        # Original called re.sub(pattern, title) — missing the "_" replacement.
        new_title = re.sub(pattern, "_", title)
        return new_title

    def save(self, h2, detail):
        """Write one article to '<title>.txt' (title, then body).

        Original opened the file read-only and referenced undefined names;
        the signature now matches the self.save(h2, detail) call site.
        """
        with open(f'{h2}.txt', 'w', encoding='utf-8') as f:
            f.write('%s%s%s' % (h2, '\n', detail))
        print(f"儲存{h2}.txt文字成功!")

    def get_tasks(self):
        """Lazily yield every scraped (title, detail) pair."""
        data_list = self.parse_home_list(self.url)
        for item in data_list:
            yield item


if __name__ == "__main__":
    url = "https://yz.chsi.com.cn/kyzx/jyxd/"
    spider = Spider(url)
    for data in spider.get_tasks():
        print(data)
以上就是本文的全部內容,希望對大家的學習有所幫助,也希望大家多多支援我們。