Scrapy三種模擬登陸策略

Scrapy三種模擬登陸策略

模擬登陸時,必須保證 settings.py 裡的 COOKIES_ENABLED(Cookies 中介軟體)處於開啟狀態:設定 COOKIES_ENABLED = True,或者直接把 COOKIES_ENABLED = False 這一行註釋掉(寫成 # COOKIES_ENABLED = False)。使用 Scrapy 模擬登陸簡單易懂,可以說能處理大部分的登入操作。

一、策略一:直接POST資料

只要是需要提供post資料的,就可以用這種方法
# -*- coding: utf-8 -*-
import scrapy
class Renren1Spider(scrapy.Spider):
    """Strategy 1: log in to renren.com by POSTing the credentials directly.

    Works for any site whose login endpoint simply accepts POSTed form data.
    """
    name = "renren"
    allowed_domains = ["renren.com"]

    def start_requests(self):
        """Send the login POST request instead of crawling start_urls."""
        url = 'http://www.renren.com/PLogin.do'
        # FormRequest is Scrapy's helper for sending POST requests.
        yield scrapy.FormRequest(
            url=url,
            formdata={"email": "[email protected]", "password": "xxx"},
            callback=self.parse_page,
        )

    def parse_page(self, response):
        """Save the post-login page to disk.

        BUG FIX: the original opened the file in text mode ("w"), but
        response.body is bytes, which raises TypeError under Python 3 —
        open in binary mode instead.
        """
        with open("renren.html", "wb") as filename:
            filename.write(response.body)

二、策略二:正規的模擬登陸步驟

① 首先發送登入頁面的get請求,獲取到頁面裡的登入必須的引數(比如說zhihu登陸介面的 _xsrf)② 然後和賬戶密碼一起post到伺服器,登入成功
# -*- coding: utf-8 -*-
import scrapy


class Renren2Spider(scrapy.Spider):
    """Strategy 2: the "proper" login flow.

    1. GET the login page first and extract any required hidden parameters
       (e.g. zhihu's _xsrf token).
    2. POST those parameters together with the account credentials.
    """
    name = "renren"
    allowed_domains = ["renren.com"]
    start_urls = (
        "http://www.renren.com/PLogin.do",
    )

    def parse(self, response):
        """Handle the login-page response from start_urls and submit the form."""
        # Extract login parameters here if the site requires them, e.g.:
        # _xsrf = response.xpath("//_xsrf").extract()[0]
        #
        # from_response pre-fills the page's <form> fields and merges our
        # formdata on top of them.
        # BUG FIX: the original text had a syntax error — `"_xsrf" = _xsrf}`
        # appeared outside the dict literal; any extra hidden field belongs
        # INSIDE the formdata dict (e.g. "_xsrf": _xsrf).
        yield scrapy.FormRequest.from_response(
            response,
            formdata={"email": "[email protected]", "password": "xxx"},
            callback=self.parse_page,
        )

    def parse_page(self, response):
        """Login succeeded — request a page that requires authentication."""
        url = "http://www.renren.com/422167102/profile"
        yield scrapy.Request(url, callback=self.parse_newpage)

    def parse_newpage(self, response):
        """Save the protected page.

        BUG FIX: open in binary mode ("wb") — response.body is bytes and
        text mode raises TypeError under Python 3.
        """
        with open("renren.html", "wb") as filename:
            filename.write(response.body)

三、策略三:使用儲存登陸狀態的Cookie模擬登陸

只要 Cookie 仍然有效(未過期、未被伺服器登出),這種方法模擬登入的成功率非常高

# -*- coding: utf-8 -*-
import scrapy

class RenrenSpider(scrapy.Spider):
    """Strategy 3: reuse a saved logged-in Cookie to access protected pages."""
    name = "renren"
    allowed_domains = ["renren.com"]
    start_urls = (
        'http://www.renren.com/hahaha',
        'http://www.renren.com/meinv',
        'http://www.renren.com/shuaige',
    )

    # Cookie values captured from an already-logged-in browser session.
    cookies = {
    "anonymid" : "ixrna3fysufnwv",
    "_r01_" : "1",
    "ap" : "327550029",
    "JSESSIONID" : "abciwg61A_RvtaRS3GjOv",
    "depovince" : "GW",
    "springskin" : "set",
    "jebe_key" : "f6fb270b-d06d-42e6-8b53-e67c3156aa7e%7Cc13c37f53bca9e1e7132d4b58ce00fa3%7C1484060607478%7C1%7C1486198628950",
    "t" : "691808127750a83d33704a565d8340ae9",
    "societyguester" : "691808127750a83d33704a565d8340ae9",
    "id" : "327550029",
    "xnsid" : "f42b25cf",
    "loginfrom" : "syshome"
    }

    # Override Spider.start_requests to attach the saved Cookie to each URL.
    def start_requests(self):
        for url in self.start_urls:
            # NOTE(review): no formdata is supplied, so FormRequest issues a
            # plain GET here; scrapy.Request would be the more idiomatic call.
            yield scrapy.FormRequest(url, cookies = self.cookies, callback = self.parse_page)

    # Save each fetched page to disk.
    def parse_page(self, response):
        # BUG FIX: the original used text mode ("w"); response.body is bytes,
        # which raises TypeError under Python 3 — binary mode is required.
        with open("deng.html", "wb") as filename:
            filename.write(response.body)
本文歸屬:博行天下 連結:https://www.jianshu.com/p/7b4486ba942f