
Scraping Douyu Live-Streaming Data in Real Time

Approach

1. Parse the URL

Page parsing

2. Use the scraping workhorses bs4 and regular expressions to extract the information we want
3. Store to the database and save a local copy

Django admin display plus a local CSV (the CSV looks too rough, so it is not shown here)

Django admin: partial data display

* To save the CSV locally, just run DySpyder().summary_data180() *

Straight to the code

# -*- coding: utf-8 -*-
import os
import re
import django
import urllib.request as ur

class DySpyder():

    def __init__(self):
        pass
    def open_url(self, url):
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
        req = ur.Request(url=url, headers=headers)  # python2: urllib2.Request()
        response = ur.urlopen(req)  # python2: urllib2.urlopen()
        return response.read().decode('utf-8')

    def tv_spyder(self):
        # the URL shows up in the page's XHR requests
        url = "https://www.douyu.com/directory/all/?page=1&isAjax=1"
        data = self.open_url(url)
        from bs4 import BeautifulSoup
        # the four class variants of the room <li> tags
        cate = ['', 'serach_lastli', 'last', 'lastserach_lastli']
        soup1 = BeautifulSoup(data, 'html.parser')
        soup = soup1.find("ul", id='live-list-contentbox')
        res = []
        for c in cate:
            tmp = soup.findAll('li', c)
            res.extend(tmp)
        return res

    def set_data(self, x):
        import datetime
        res = {}
        # the room title
        title0 = str(x.find("h3").next_element)
        spans = x.findAll(["span"])
        # basic info next to the link
        tag, dy_name, dy_num = tuple([s.next_element for s in spans][2:5])
        # the urls of the img and the gif
        pattern = r'''.*<img data-original=(.*?) height="163" src=(.*?) width="283"/>.*'''
        img, gif = re.findall(pattern, repr(x))[0]
        # the attribute pairs at the head of the link tag
        p2 = r'''.*a class="play-list-link" (.*?)=(.*?) (.*?)=(.*?) (.*?)=(.*?) (.*?)=(.*?) (.*?)=(.*?) (.*?)=(.*?) (.*?)=(.*?) (.*?)=(.*?)>.*'''
        t1 = re.findall(p2, repr(x))[0]
        for i in range(int(len(t1) / 2 - 1)):
            res.setdefault(t1[2 * i], t1[2 * i + 1])
        res.setdefault("dt", datetime.datetime.today())
        res.setdefault('tag', tag)
        res.setdefault('dy_name', dy_name)
        res.setdefault('dy_num', dy_num)
        res.setdefault('title0', title0)
        res.setdefault('img', img)
        res.setdefault('gif', gif)
        return res

    def summary_data180(self):
        l = [self.set_data(x) for x in self.tv_spyder()]
        import pandas as pd
        df_tmp = pd.DataFrame(l)
        df_tmp.to_csv("C:\\Users\\lenovo\\Desktop\\dy180.csv")
        return df_tmp  # print(summary_data180())

    def main(self):
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "minicms.settings")
        django.setup()
        from tv.models import Info
        from django.utils import timezone
        df = self.summary_data180()
        print(df.columns)
        import numpy as np
        array2 = np.array(df)
        for i in range(len(df)):
            Info.objects.create(data_rid=array2[i][0], data_rpos=array2[i][1],
                                data_sid=array2[i][2], data_sub_rt=array2[i][3],
                                data_tid=array2[i][4],
                                dt=timezone.now(),  # overwrite with the current time
                                dy_name=array2[i][6], dy_num=array2[i][7],
                                gif=array2[i][8], href=array2[i][9],
                                img=array2[i][10], tag=array2[i][11],
                                target=array2[i][12], title0=array2[i][13])
        print("Done")

dyspyder = DySpyder()
# dyspyder.main()

Version 2: without the Django part, scraping all pages

import os
import re
import urllib.request as ur

class DySpyder():

    def __init__(self, url):
        self.url = url

    def open_url(self, url):
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
        req = ur.Request(url=url, headers=headers)  # python2: urllib2.Request()
        response = ur.urlopen(req)  # python2: urllib2.urlopen()
        return response.read().decode('utf-8')

    def from_url_get_all_lis(self):
        data = self.open_url(self.url)
        from bs4 import BeautifulSoup
        soup1 = BeautifulSoup(data, 'html.parser')
        soup = soup1.findAll("li")
        return soup

    def tv_spyder(self, x):
        rid = re.findall(""".*?data-rid="(.*?)".*""", str(x))[0]
        title = re.findall(""".*?title=(.*?)>.*""", str(x))[0]
        href = re.findall(""".*?href="(.*?)".*""", str(x))[0]
        pic = re.findall('''.*?<img data-original="(.*?)".*''', str(x))[0]
        tag = re.findall('''.*<span class="tag ellipsis">(.*?)</span>.*''', str(x))[0]
        name = re.findall('''.*<span class="dy-name ellipsis fl">(.*?)</span>.*''', str(x))[0]
        see_num = re.findall('''.*<span class="dy-num fr".*?>(.*?)</span>.*''', str(x))[0]
        t = rid, pic, title, tag, name, see_num, href
        return t

def get_url(page):
    return "https://www.douyu.com/directory/all?page="+ str(page) +"&isAjax=1"

res1 = []
for i in [j + 1 for j in range(20)]:  # pages 1..20
    douyu = DySpyder(get_url(i))
    for x in douyu.from_url_get_all_lis():
        try:
            res1.append(list(douyu.tv_spyder(x)))
        except Exception:
            print(x)  # some <li> tags are not room entries; log and skip them
import pandas as pd
import numpy as np
df = pd.DataFrame(np.array(res1))
df.to_csv("demo.csv")
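
Since tv_spyder() returns a bare tuple, the DataFrame above is saved with numeric column headers. Optionally, name the columns first; the names below simply mirror the tuple order inside tv_spyder():

df.columns = ["rid", "pic", "title", "tag", "name", "see_num", "href"]
df.to_csv("demo.csv", index=False)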

Next steps

  • Keep updating over time: push to the database automatically every 10 minutes, to look for patterns in tags or viewers (a scheduling sketch follows this list)
  • Add data from Huya, Zhanqi and Longzhu
  • Store the images in the database and keep optimizing the custom real-time page; build normalized recommendations across streaming platforms
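
A minimal scheduling sketch for the 10-minute refresh, assuming the Django-backed DySpyder.main() from the first code block is importable; a cron job or a Celery beat task would be the more robust choice in production:

import time

def run_forever(interval_sec=600):
    # naive polling loop: scrape, store, then sleep 10 minutes
    while True:
        try:
            DySpyder().main()  # the Django-enabled class from the first block
        except Exception as e:
            print("scrape failed:", e)  # keep the loop alive on errors
        time.sleep(interval_sec)

# run_forever()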

Django model for storage

from django.db import models

# Create your models here.

class Info(models.Model):
    data_rid = models.CharField("Room ID", max_length=20)
    data_rpos = models.CharField("", max_length=20)
    data_sid = models.CharField("", max_length=20)
    data_sub_rt = models.CharField("", max_length=20)
    data_tid = models.CharField("", max_length=20)
    dt = models.DateTimeField("Time")
    dy_name = models.CharField("Account name", max_length=50)
    dy_num = models.CharField("Viewer count", max_length=20)
    gif = models.CharField("GIF", max_length=120)
    href = models.CharField("Room URL", max_length=20)
    img = models.CharField("IMG URL", max_length=120)
    tag = models.CharField("Tag", max_length=120)
    target = models.CharField("Target", max_length=20)
    title0 = models.CharField("Title", max_length=120)

    def __str__(self):
        return self.dy_name + "_" + self.title0

    class Meta:
        verbose_name = 'Douyu real-time info'
        verbose_name_plural = 'Douyu real-time info, 180 entries'

class ImgTools(models.Model):
    img_url = models.URLField(verbose_name="Remote path")
    dt = models.DateTimeField("Time")
    data_rid = models.CharField("Room ID", max_length=20)
    upload_to = models.URLField(verbose_name="Local path")
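
To get these models into the admin shown earlier, the usual wiring applies; the registration below is a minimal sketch (the app name tv matches the import in main() above):

# tv/admin.py
from django.contrib import admin
from tv.models import Info, ImgTools

admin.site.register(Info)
admin.site.register(ImgTools)

# then run the migrations:
#   python manage.py makemigrations tv
#   python manage.py migrate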

TXT scraping update

def find_min(nums):
    # return the first index where the numbering starts to ascend,
    # i.e. where the full chapter list begins
    for i in range(len(nums) - 1):  # -1 avoids an IndexError on the last element
        if nums[i + 1] > nums[i]:
            return i, nums[i]

def set_urls(book_id):
    # open_url is the same helper defined on DySpyder above
    url = "http://www.biqudu.com/" + book_id + "/"
    pattern = r".*<dd> <a href=(.*?)>(.*?)</a></dd>.*"
    import pandas as pd
    import numpy as np
    # this does not handle multi-volume books yet; to be improved later
    df1 = pd.DataFrame(np.array(re.findall(pattern, open_url(url))), columns=["url", "title"])
    df1["num"] = [int(list(re.findall(r".*/(.*?)\.html", x))[0]) for x in df1["url"]]
    # keep everything from the start of the full chapter list onwards
    start_index = find_min(df1["num"])[0]
    return df1[start_index: len(df1)]


# scrape a single novel chapter page; returns the txt content
def detail():
    url = "http://www.biqudu.com/21_21470/1394112.html"
    data = open_url(url)
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(data, 'html.parser')
    content = soup.findAll('div', id="content")[0]
    return content

# print(detail())
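
A sketch that glues the two pieces into one .txt file. It assumes the helpers above are in scope and that every chapter page has the same div#content structure as in detail(); the hrefs captured by set_urls() keep their surrounding quotes, hence the strip('"'):

def save_book(book_id, out_path="book.txt"):
    from bs4 import BeautifulSoup
    df = set_urls(book_id)
    with open(out_path, "w", encoding="utf-8") as f:
        for _, row in df.iterrows():
            # hrefs are relative, e.g. "/21_21470/1394112.html"
            html = open_url("http://www.biqudu.com" + row["url"].strip('"'))
            soup = BeautifulSoup(html, 'html.parser')
            content = soup.findAll('div', id="content")[0]
            f.write(row["title"] + "\n" + content.get_text() + "\n\n")

# save_book("21_21470")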


# quick Django view for eyeballing a chapter (needs: from django.shortcuts import render)
def test(request):
    content = detail()
    return render(request, "base_test.html", {"content": content})
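
To wire the view up, a minimal urls.py entry would look like this; the tv-test/ route name is made up for illustration:

# urls.py (sketch; "tv-test/" is a hypothetical route)
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^tv-test/$', views.test),
]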

* I'll be putting my energy into WeChat mini programs for a while, so the scraper is on hold. ==== END ====*