1. 程式人生 > 其它 >python爬取CVPR論文標題、作者、pdf連結並儲存到MySQL資料庫

python爬取CVPR論文標題、作者、pdf連結並儲存到MySQL資料庫

訪問外網,程式跑的時間會長一點,耐心等待!

全部原始碼:

# -*- coding = utf-8 -*-
# @Time : 2022/5/13 9:33
# @Author :王敬博
# @File : spider.py
# @Software: PyCharm
from bs4 import BeautifulSoup  #網頁解析
import re    #正則表達式文字匹配
import parsel as parsel
import urllib.request,urllib.error  #指定url,獲取網頁資料
import pymysql.cursors  #連線mysql資料庫
num = 1  # module-level default; main() uses the count returned by getData()


def main():
    """Crawl CVPR 2019 (day 2019-06-18) paper metadata and store it in MySQL."""
    baseurl = "https://openaccess.thecvf.com/CVPR2019?day=2019-06-18"
    datalist, count = getData(baseurl)   # scrape and parse all paper pages
    print("爬取完畢!")
    conn(datalist, count)                # persist rows into the database
    print("儲存到資料庫!")


def askURL(url):
    """Fetch *url* and return its HTML decoded as UTF-8.

    Returns '' when the request fails; HTTP status / failure reason are
    printed for diagnostics instead of raising.
    """
    head = {
        # Spoofed mobile-browser User-Agent so the server treats us as a normal client.
        "User-Agent": " Mozilla / 5.0(Linux;Android6.0;Nexus5 Build / MRA58N) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 99.0.4844.51Mobile Safari / 537.36",
    }
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html  # all fetched HTML, or '' on error


def getData(baseurl):
    """Scrape every paper linked from *baseurl*.

    Returns (datalist, count): datalist is a list of
    [title, author, abstract, pdflink, date] rows, count == len(datalist).
    """
    html = askURL(baseurl)
    selector = parsel.Selector(html)
    href_list = selector.xpath('//dl/dt/a/@href').getall()
    detail_urls = ['https://openaccess.thecvf.com/' + href for href in href_list]
    count = len(detail_urls)

    # Compile once, outside the loop.
    # Authors block ends with e.g. "... CVPR 2019, ..."; capture the year/venue text.
    finddate = re.compile(r'CVPR(.*?),')
    findpdflink = re.compile(r'<a href="../../(.*?)">pdf</a>')

    datalist = []
    for i, href in enumerate(detail_urls, start=1):
        print(f"--------------正在爬取第{i}條--------------")
        page = parsel.Selector(askURL(href))

        # `or ''` guards against a missing node returning None (would crash .strip()).
        title = (page.xpath('//*[@id="papertitle"]/text()').get() or '').strip()
        author = page.xpath('//*[@id="authors"]/b/i/text()').get()
        abstract = (page.xpath('//*[@id="abstract"]/text()').get() or '').strip()

        authors_html = page.xpath('//*[@id="authors"]').get() or ''
        date_match = finddate.search(authors_html)
        date = date_match.group(1).strip() if date_match else ''

        pdf_html = page.xpath('//*[@id="content"]/dl/dd/a[1]').get() or ''
        pdf_match = findpdflink.search(pdf_html)
        pdflink = 'https://openaccess.thecvf.com/' + (pdf_match.group(1) if pdf_match else '')

        datalist.append([title, author, abstract, pdflink, date])
    return datalist, count


def conn(datalist, num):
    """Insert *num* rows from *datalist* into MySQL table `paper`.

    Edit the connection credentials for your own database. Each row is
    committed individually; a failed insert is rolled back and reported.
    """
    connection = pymysql.connect(host='localhost', user='root',
                                 password='1767737316.', database='paperdata',
                                 cursorclass=pymysql.cursors.DictCursor)
    # Five placeholders match the five columns; make the VARCHAR columns generous.
    sql = 'insert into paper(title,author,abstract,pdflink,date1) values(%s,%s,%s,%s,%s)'
    try:
        with connection.cursor() as cursor:
            for i in range(num):
                print(f"--------------正在儲存第{i+1}條--------------")
                row = tuple(datalist[i])
                try:
                    cursor.execute(sql, row)
                    connection.commit()
                except Exception as e:
                    print('插入資料失敗', e)
                    connection.rollback()  # undo the failed statement
    finally:
        # Always release the connection (the original leaked it).
        connection.close()


if __name__ == "__main__":
    main()

截圖