程式人生 > python爬微博

python爬微博

標籤:Python 2.x、request、設置代理、post、create、爬取內容

# -*- coding: utf-8 -*-

import urllib.request
import json

# Numeric Weibo user ID of the big-V account to crawl.
# NOTE(review): `id` shadows the builtin; kept for compatibility with the
# original script's module-level name.
id = 3924739974

# HTTP proxy (host:port) used for every request.
proxy_addr = "122.241.72.191:808"


def use_proxy(url, proxy_addr):
    """Fetch *url* through the given HTTP proxy and return the decoded body.

    Installs a global urllib opener, so subsequent urllib calls in this
    process also go through the proxy. Decoding errors are ignored because
    the m.weibo.cn API occasionally returns stray bytes.
    """
    req = urllib.request.Request(url)
    # The original scrape had a newline spliced into this header value;
    # it must be a single line or add_header raises/sends a broken header.
    req.add_header(
        "User-Agent",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 "
        "SE 2.X MetaSr 1.0",
    )
    proxy = urllib.request.ProxyHandler({"http": proxy_addr})
    opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)
    urllib.request.install_opener(opener)
    return urllib.request.urlopen(req).read().decode("utf-8", "ignore")


def get_containerid(url):
    """Return the containerid of the user's weibo tab.

    The containerid is required by the getIndex API to list posts.
    Returns None when no weibo tab is present (original code raised
    UnboundLocalError in that case).
    """
    data = use_proxy(url, proxy_addr)
    content = json.loads(data).get("data")
    containerid = None
    for tab in content.get("tabsInfo").get("tabs"):
        if tab.get("tab_type") == "weibo":
            containerid = tab.get("containerid")
    return containerid


def get_userInfo(id):
    """Print the user's basic profile: nickname, URLs, follower counts, etc."""
    # str(id): the module-level id is an int; naked `+ id` raised TypeError.
    url = "https://m.weibo.cn/api/container/getIndex?type=uid&value=" + str(id)
    data = use_proxy(url, proxy_addr)
    content = json.loads(data).get("data")
    user = content.get("userInfo")
    profile_image_url = user.get("profile_image_url")
    description = user.get("description")
    profile_url = user.get("profile_url")
    verified = user.get("verified")
    guanzhu = user.get("follow_count")
    name = user.get("screen_name")
    fensi = user.get("followers_count")
    gender = user.get("gender")
    urank = user.get("urank")
    print("微博昵稱:"+name+"\n"+"微博主頁地址:"+profile_url+"\n"+"微博頭像地址:"+profile_image_url+"\n"+"是否認證:"+str(verified)+"\n"+"微博說明:"+description+"\n"+"關註人數:"+str(guanzhu)+"\n"+"粉絲數:"+str(fensi)+"\n"+"性別:"+gender+"\n"+"微博等級:"+str(urank)+"\n")


def get_weibo(id, file):
    """Crawl every page of the user's weibo posts and append them to *file*.

    For each post (card_type == 9) writes the detail URL, publish time,
    text, like/comment/repost counts. Stops when a page returns no cards
    or when a request fails.
    """
    i = 1
    while True:
        url = "https://m.weibo.cn/api/container/getIndex?type=uid&value=" + str(id)
        weibo_url = (url + "&containerid=" + get_containerid(url)
                     + "&page=" + str(i))
        try:
            data = use_proxy(weibo_url, proxy_addr)
            content = json.loads(data).get("data")
            cards = content.get("cards")
            if len(cards) > 0:
                for j in range(len(cards)):
                    print("-----正在爬取第"+str(i)+"頁,第"+str(j)+"條微博------")
                    card_type = cards[j].get("card_type")
                    # card_type 9 is an ordinary post; other types are
                    # search bars / recommendations and are skipped.
                    if card_type == 9:
                        mblog = cards[j].get("mblog")
                        attitudes_count = mblog.get("attitudes_count")
                        comments_count = mblog.get("comments_count")
                        created_at = mblog.get("created_at")
                        reposts_count = mblog.get("reposts_count")
                        scheme = cards[j].get("scheme")
                        text = mblog.get("text")
                        with open(file, "a", encoding="utf-8") as fh:
                            fh.write("----第"+str(i)+"頁,第"+str(j)+"條微博----"+"\n")
                            fh.write("微博地址:"+str(scheme)+"\n"+"發布時間:"+str(created_at)+"\n"+"微博內容:"+text+"\n"+"點贊數:"+str(attitudes_count)+"\n"+"評論數:"+str(comments_count)+"\n"+"轉發數:"+str(reposts_count)+"\n")
                i += 1
            else:
                break
        except Exception as e:
            # Original code did `print(e); pass`, which spins forever on a
            # persistent failure (dead proxy, rate limit). Report and stop.
            print(e)
            break


if __name__ == "__main__":
    # str(id): the original `id + ".txt"` was int + str -> TypeError.
    file = str(id) + ".txt"
    get_userInfo(id)
    get_weibo(id, file)

python爬微博