Python Crawler: Scraping the Content of a Weibo Topic
阿新 • Published: 2020-08-18
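The script below queries the mobile Weibo search API (m.weibo.cn/api/container/getIndex) for a given keyword, pages through the returned result cards, extracts the id and plain text of each post, and appends each row to a CSV file.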
# -*- coding: utf-8 -*-
# @Time    : 2020/8/18 15:39
# @Author  : Chunfang
# @Email   : [email protected]
# @File    : Weibo_content.py
# @Software: PyCharm

from urllib.parse import urlencode
import requests
from pyquery import PyQuery as pq
import time
import os
import csv
import json

base_url = 'https://m.weibo.cn/api/container/getIndex?'

headers = {
    'Host': 'm.weibo.cn',
    'Referer': 'https://m.weibo.cn/u/2830678474',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest',
}

class SaveCSV(object):

    def save(self, keyword_list, path, item):
        """
        Save one record to a CSV file.
        :param keyword_list: field names used as the CSV header
        :param path: path and name of the output file
        :param item: the dict object to write
        :return:
        """
        try:
            # On the first open, write the header row
            if not os.path.exists(path):
                with open(path, "w", newline='', encoding='utf-8-sig') as csvfile:  # newline='' avoids blank lines
                    writer = csv.DictWriter(csvfile, fieldnames=keyword_list)  # write dicts by field name
                    writer.writeheader()

            # Then append the data rows
            with open(path, "a", newline='', encoding='utf-8-sig') as csvfile:  # newline='' is required, otherwise blank lines appear between rows
                writer = csv.DictWriter(csvfile, fieldnames=keyword_list)
                writer.writerow(item)  # write one row
                print("^_^ write success")

        except Exception as e:
            print("write error==>", e)
            # log the failing record
            with open("error.txt", "w") as f:
                f.write(json.dumps(item) + ",\n")

def get_page(page, title):
    # Build the page request; params mirror the Query String parameters of the site's own search request
    params = {
        'containerid': '100103type=1&q=' + title,
        'page': page,  # page is the current page number; changing it is how pagination works
        'type': 'all',
        'queryVal': title,
        'featurecode': '20000320',
        'luicode': '10000011',
        'lfid': '106003type=1',
        'title': title
    }
    url = base_url + urlencode(params)
    print(url)
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            print(page)
            return response.json()
    except requests.ConnectionError as e:
        print('Error', e.args)

# Parse the JSON returned by the API
def parse_page(json_data, label):  # renamed from `json` to avoid shadowing the json module
    res = []
    if json_data:
        items = json_data.get('data', {}).get('cards')  # default to {} so a missing 'data' key cannot raise AttributeError
        for i in items:
            if i is None:
                continue
            item = i.get('mblog')
            if item is None:
                continue
            weibo = {}
            weibo['id'] = item.get('id')
            weibo['label'] = label
            weibo['text'] = pq(item.get('text')).text().replace(" ", "").replace("\n", "")
            res.append(weibo)
    return res

if __name__ == '__main__':

    title = input("Enter a search keyword: ")
    path = "article.csv"
    item_list = ['id', 'text', 'label']
    s = SaveCSV()
    for page in range(10, 20):  # loop over pages
        try:
            time.sleep(1)  # sleep between requests to avoid getting the account blocked
            json_data = get_page(page, title)
            results = parse_page(json_data, title)  # was `if requests == None` in the original, which checked the module, not the results
            if results is None:
                continue
            for result in results:
                if result is None:
                    continue
                print(result)
                s.save(item_list, path, result)
        except TypeError:
            print("Done")
            continue
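When run directly, the script prompts for a keyword, fetches pages 10 through 19 of the search results, and appends each post to article.csv with the columns id, text, and label; records that fail to write are dumped to error.txt.

For reference, here is a minimal sketch of the response shape that parse_page expects, inferred purely from the field accesses in the code above (data → cards → mblog → id/text). The values are made-up placeholders, not real API output, and the snippet assumes parse_page is defined in the same session:

# Hypothetical, hand-built response fragment; a real API response carries many more fields.
sample = {
    'data': {
        'cards': [
            {'mblog': {'id': '4537271234567890',
                       'text': '<span>#keyword# example post</span>'}},
            {'card_type': 11},  # cards without an 'mblog' key are skipped
        ]
    }
}
print(parse_page(sample, 'keyword'))
# [{'id': '4537271234567890', 'label': 'keyword', 'text': '#keyword#examplepost'}]

Note that parse_page strips all spaces and newlines from the post text after pyquery removes the HTML tags, which is why the example output runs the words together.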