Building a proxy IP pool for Python crawlers
阿新 · Published 2020-09-01
If you write crawlers, you have surely run into IP bans, and free proxy IPs are getting hard to find online. So let's use Python's requests library to scrape proxy IPs from a free-proxy list (XiciDaili.com in the code below) and build our own proxy pool for later use.
The code covers scraping the IPs, checking whether each one works, and saving the good ones; the function get_proxies() then returns a random proxy dict such as {'https': '106.12.7.54:8118'}.
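That dict plugs straight into the proxies parameter of requests. A minimal sketch (the test URL is just a placeholder of my choosing; get_proxies() is defined in the code below):

import requests

proxy = get_proxies()  # e.g. {'https': '106.12.7.54:8118'}
# requests applies the proxy whose key matches the scheme of the URL being fetched
res = requests.get('https://httpbin.org/ip', proxies=proxy, timeout=5)
print(res.text)  # the origin IP reported should be the proxy's, not yours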
Here is the full source, with detailed comments:
import random
import time

import requests
from lxml import etree
from requests.packages import urllib3

urllib3.disable_warnings()


def spider(pages, max_change_proxies_times=300):
    """Scrape protocol type, proxy IP and port from XiciDaili.com.

    Each scraped entry is passed straight to check_proxies(), which appends
    working proxies to ips_pool.csv.

    :param pages: number of listing pages to scrape
    :return: None (or -1 if the maximum number of proxy changes is exceeded)
    """
    s = requests.Session()
    s.trust_env = False
    s.verify = False
    urls = 'https://www.xicidaili.com/nn/{}'  # XiciDaili high-anonymity list pages
    proxies = {}
    try_times = 0
    for i in range(pages):
        url = urls.format(i + 1)
        s.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Referer': urls.format(i if i > 0 else ''),
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36'}
        while True:
            content = s.get(url, headers=s.headers, proxies=proxies)
            time.sleep(random.uniform(1.5, 4))  # pause between page loads, or the site bans us
            if content.status_code == 503:  # 503 means our IP is banned: switch to a proxy from the pool
                proxies = get_proxies()
                try_times += 1
                print(f'Proxy change #{try_times:03d}, now using {proxies}')
                if try_times > max_change_proxies_times:
                    print('Exceeded the maximum number of proxy changes, giving up!')
                    return -1
                continue
            else:
                break  # status 200: leave the while loop and parse the page
        print(f'Scraping page {i + 1} of {pages}')
        tree = etree.HTML(content.text)  # parse the page once, not once per row
        for j in range(2, 102):  # rows 2-101 of the table; extract type, host and port with simple XPath
            http = tree.xpath(f'//table[@id="ip_list"]/tr[{j}]/td[6]/text()')[0]
            host = tree.xpath(f'//table[@id="ip_list"]/tr[{j}]/td[2]/text()')[0]
            port = tree.xpath(f'//table[@id="ip_list"]/tr[{j}]/td[3]/text()')[0]
            check_proxies(http, host, port)  # test the scraped proxy before saving it


def check_proxies(http, host, port, test_url='http://www.baidu.com'):
    """Test whether a single proxy works.

    Builds a proxies dict from http, host and port and tries to reach
    test_url through it; if the request succeeds, the proxy is appended
    to ips_pool.csv.

    :param http: protocol type ('HTTP' or 'HTTPS' as listed on the site)
    :param host: proxy host
    :param port: proxy port
    :param test_url: URL used for the connectivity test
    :return: None
    """
    # requests matches proxy keys case-sensitively against the (lowercase)
    # URL scheme, so normalise the key or the proxy is silently ignored
    proxies = {http.lower(): host + ':' + port}
    try:
        res = requests.get(test_url, proxies=proxies, timeout=2)
        if res.status_code == 200:
            print(f'{proxies} passed the check')
            with open('ips_pool.csv', 'a+') as f:
                f.write(','.join([http.lower(), host, port]) + '\n')
    except Exception as e:  # failed proxies are simply not saved; don't let the error kill the run
        print(e)


def check_local_ip(fn, test_url):
    """Re-check the proxies already stored in a local pool file.

    Reads fn line by line, tests each proxy against test_url, and appends
    the ones that still work to ips_pool.csv.

    :param fn: filename of the file holding the saved proxies
    :param test_url: URL used for the connectivity test
    :return: None
    """
    with open(fn, 'r') as f:
        datas = f.readlines()
    ip_pools = []
    for data in datas:
        # time.sleep(1)
        http, host, port = data.strip().split(',')
        proxies = {http.lower(): host + ':' + port}
        try:
            res = requests.get(test_url, proxies=proxies, timeout=2)
            if res.status_code == 200:
                ip_pools.append(data)
                print(f'{proxies} passed the check')
                with open('ips_pool.csv', 'a+') as f:
                    f.write(','.join([http.lower(), host, port]) + '\n')
        except Exception as e:
            print(e)
            continue


def get_proxies(ip_pool_name='ips_pool.csv'):
    """Pick a random proxy from the ip pool.

    :param ip_pool_name: str, filename of the ip pool
    :return: a proxies dict such as {'https': '106.12.7.54:8118'}
    """
    with open(ip_pool_name, 'r') as f:
        datas = f.readlines()
    ran_num = random.choice(datas)
    ip = ran_num.strip().split(',')
    proxies = {ip[0].lower(): ip[1] + ':' + ip[2]}  # lowercase the key for older pool files too
    return proxies


if __name__ == '__main__':
    t1 = time.time()
    spider(pages=3400)
    t2 = time.time()
    print('Scraping finished, elapsed:', t2 - t1)
    # check_local_ip('raw_ips.csv', 'http://www.baidu.com')
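In actual crawling code you will usually wrap the pool in a small retry helper that draws a fresh proxy whenever one dies. A minimal sketch built on the get_proxies() above (the helper name fetch_with_pool, the retry count and the usage URL are my own illustrative choices, not part of the original code):

import requests

def fetch_with_pool(url, retries=5):
    """Try up to `retries` random proxies from ips_pool.csv; return the first 200 response, else None."""
    for _ in range(retries):
        proxy = get_proxies()  # e.g. {'https': '106.12.7.54:8118'}
        try:
            res = requests.get(url, proxies=proxy, timeout=2)
            if res.status_code == 200:
                return res
        except Exception:
            continue  # dead or slow proxy: draw another one
    return None

# usage: res = fetch_with_pool('https://httpbin.org/ip')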