Python 爬蟲 -- 同花順（使用代理）
1. 在 http://www.goubanjia.com/ 上獲取使用 HTTP 協議的公網 IP 和端口。
   參考：https://blog.csdn.net/qq_23934063/article/details/79063937
2. 關鍵代碼如下：
#python3
# coding: utf-8
from bs4 import BeautifulSoup
import requests
import json
import time
import random
codelist = []
for zxcode000 in range(100,999):
code000 = ‘000‘ + str(zxcode000)
codelist.append(code000)
for zhongxiaocode in range(10,100):
code0000 = ‘0000‘ + str(zhongxiaocode)
codelist.append(code0000)
for szzhubancode in range(1,10):
code00000 = ‘00000‘ + str(szzhubancode)
codelist.append(code00000)
for stocknum00 in range(2001,2999):
stocknum00 = ‘00‘ +str(stocknum00)
codelist.append(stocknum00)
for cybcode in codelist:
proxys = [‘http://101.4.136.34:81‘, ‘http://50.224.173.189:8080‘, ‘http://119.28.195.93:8888‘,
‘http://58.240.170.108:8080‘,
‘http://140.143.96.216:80‘]
myproxy = random.choice(proxys)
try:
code600 = str(cybcode)
time.sleep(3)
url6000 = ‘http://basic.10jqka.com.cn/‘+code600+‘/finance.html‘
session = requests.Session()
session.headers = {
‘User-Agent‘: ‘Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36‘
}
session.get(‘http://basic.10jqka.com.cn‘)
session.proxies = myproxy
r1 = session.get(url6000)
rawtext1 = r1.text
rawdata = r1.content
obj = BeautifulSoup(rawdata, ‘html5lib‘)
pid = obj.findAll("p", id="main")
textPid = pid[0]
jsonData = textPid.get_text()
dictData = json.loads(jsonData)
simpleData = dictData[‘simple‘]
simpleList = simpleData[5]
rate0 = simpleList[0].split(‘%‘)[0]
rate1 = simpleList[1].split(‘%‘)[0]
rate2 = simpleList[2].split(‘%‘)[0]
growList = simpleData[7]
grow0 = growList[0].split(‘%‘)[0]
grow1 = growList[1].split(‘%‘)[0]
grow2 = growList[2].split(‘%‘)[0]
if float(rate0) > float(rate1) and float(rate1) > 35 and float(grow0) > 35 and float(grow1) > 30 and float(
grow2) > 35 and float(grow0) > float(grow1):
print(cybcode, rate0, rate1, rate2, grow0, grow1, grow2)
except:
print(myproxy,cybcode)
（以上即「Python 爬蟲 -- 同花順（使用代理）」的關鍵代碼。）