
Customizing start URLs (scrapy-redis)

Spider: (there is no need to configure start_urls here; the spider reads its start URLs, possibly several, directly from Redis)
from scrapy_redis.spiders import RedisSpider

# class ChoutiSpider(scrapy.Spider):
class ChoutiSpider(RedisSpider):
    # With this name set, RedisSpider looks up the start URLs (possibly
    # several) under the matching key in Redis;
    # the key format is: self.redis_key = self.redis_key % {'name': self.name}
    name = 'baidu'
    allowed_domains = ['baidu.com']

    def parse(self, response):
        print('performing operations')
        print(response)
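
With name = 'baidu' and the default key template, the key this spider polls can be worked out directly (a quick illustration, not part of the spider):

# default template in scrapy-redis: START_URLS_KEY = '%(name)s:start_urls'
redis_key = '%(name)s:start_urls' % {'name': 'baidu'}
print(redis_key)  # -> baidu:start_urls

Start the crawl with scrapy crawl baidu; the spider then sits idle until URLs appear under that key.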



Configuration in settings:
# True means the key is a set, False means it is a list
REDIS_START_URLS_AS_SET = False  # defaults to False, i.e. data is read out in list form
If it is a list, data is fetched with lpop(key); key is the one configured below.
If it is a set, data is fetched with spop(key), e.g. spop('baidu:start_urls') returns the start URLs stored there (possibly several).
# REDIS_START_URLS_KEY = '%(name)s:start_urls'  # this is the default when unset; it is the key under which the start URLs are stored in Redis and by which they are looked up, e.g. baidu:start_urls
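
For context, a minimal sketch of the surrounding settings.py; the SCHEDULER and DUPEFILTER_CLASS lines are the standard scrapy-redis wiring, and the REDIS_HOST/REDIS_PORT values are assumptions for a local Redis:

# settings.py (sketch)
SCHEDULER = "scrapy_redis.scheduler.Scheduler"              # schedule requests through Redis
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"  # deduplicate fingerprints in Redis
REDIS_HOST = '127.0.0.1'  # assumption: local Redis instance
REDIS_PORT = 6379
REDIS_START_URLS_AS_SET = False                 # False -> list (lpop), True -> set (spop)
# REDIS_START_URLS_KEY = '%(name)s:start_urls'  # default key template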

Pushing the start URLs into Redis:
Push the start URLs from a separate .py file.
List:
import redis

conn = redis.Redis(host='127.0.0.1', port=6379)
conn.lpush('baidu:start_urls', 'http://www.baidu.com')
With REDIS_START_URLS_AS_SET = False in settings, the key is a list, so push with lpush, rpush, and similar operations.

If it is True, the data is stored as a set, using sadd and similar operations.

Set:
import redis

conn = redis.Redis(host='127.0.0.1', port=6379)
conn.sadd('baidu:start_urls', 'http://www.baidu.com')  # store the data under this key format
print(conn.smembers('baidu:start_urls'))
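
Before starting the spider, the queued URLs can be inspected with standard redis-py calls (a sketch; the key name matches the examples above):

import redis

conn = redis.Redis(host='127.0.0.1', port=6379)
# List mode (REDIS_START_URLS_AS_SET = False):
print(conn.llen('baidu:start_urls'))           # number of queued start URLs
print(conn.lrange('baidu:start_urls', 0, -1))  # peek without popping
# Set mode (REDIS_START_URLS_AS_SET = True):
print(conn.scard('baidu:start_urls'))          # size of the set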
 


Source analysis of the spider in scrapy-redis:
class RedisMixin(object):
    """Mixin class to implement reading urls from a redis queue."""
    redis_key = None
    redis_batch_size = None
    redis_encoding = None

    # Redis client placeholder.
    server = None

    def start_requests(self):
        """Returns a batch of start requests from redis."""
        return self.next_requests()

    def setup_redis(self, crawler=None):
        """Setup redis connection and idle signal.

        This should be called after the spider has set its crawler object.
        """
        if self.server is not None:
            return

        if crawler is None:
            # We allow optional crawler argument to keep backwards
            # compatibility.
            # XXX: Raise a deprecation warning.
            crawler = getattr(self, 'crawler', None)

        if crawler is None:
            raise ValueError("crawler is required")

        settings = crawler.settings

        # Fetch the start-URL key from the settings; the default is
        # START_URLS_KEY = '%(name)s:start_urls'. If nothing is configured,
        # the default template is used.
        if self.redis_key is None:
            self.redis_key = settings.get(
                'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
            )
            # You can set REDIS_START_URLS_KEY in settings to whatever format
            # you want; note: store your start URLs under that same format,
            # since it is used below as the key to look up all of the start
            # URLs.

        # Substitute the spider's name into the key; if that key exists in
        # Redis, its values are read out as start URLs. So you can add start
        # URLs under this name yourself; the key format is fixed:
        # START_URLS_KEY = '%(name)s:start_urls'. The key can hold several
        # URLs, all of which become start requests.
        self.redis_key = self.redis_key % {'name': self.name}

        if not self.redis_key.strip():
            raise ValueError("redis_key must not be empty")

        if self.redis_batch_size is None:
            # TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
            # Read the batch size from the settings, converting it to int.
            self.redis_batch_size = settings.getint(
                'REDIS_START_URLS_BATCH_SIZE',
                settings.getint('CONCURRENT_REQUESTS'),
            )

        try:
            self.redis_batch_size = int(self.redis_batch_size)
        except (TypeError, ValueError):
            raise ValueError("redis_batch_size must be an integer")

        if self.redis_encoding is None:
            self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)

        self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
                         "(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s",
                         self.__dict__)

        self.server = connection.from_settings(crawler.settings)
        # The idle signal is called when the spider has no requests left,
        # that's when we will schedule new requests from redis queue
        crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)

    def next_requests(self):
        """Returns a request to be scheduled or none."""
        use_set = self.settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET)
        fetch_one = self.server.spop if use_set else self.server.lpop
        # If REDIS_START_URLS_AS_SET=True, the key is treated as a set;
        # if REDIS_START_URLS_AS_SET=False, it is treated as a list.
        # XXX: Do we need to use a timeout here?
        found = 0
        # TODO: Use redis pipeline execution.
        # The loop below keeps popping start URLs from redis_key as long as
        # they exist in Redis; the key format is
        # REDIS_START_URLS_KEY = '%(name)s:start_urls'.
        # When the setting is False, data is read in list form, e.g. pushed by
        # conn.lpush('baidu:start_urls', 'http://www.baidu.com');
        # when True, in set form.
        while found < self.redis_batch_size:
            data = fetch_one(self.redis_key)  # spop or lpop, depending on the setting
            if not data:
                # Queue empty.
                break
            req = self.make_request_from_data(data)
            if req:
                yield req
                found += 1
            else:
                self.logger.debug("Request not made from data: %r", data)

        if found:
            self.logger.debug("Read %s requests from '%s'", found, self.redis_key)

    def make_request_from_data(self, data):
        """Returns a Request instance from data coming from Redis.

        By default, ``data`` is an encoded URL. You can override this method to
        provide your own message decoding.

        Parameters
        ----------
        data : bytes
            Message from redis.

        """
        url = bytes_to_str(data, self.redis_encoding)
        return self.make_requests_from_url(url)

    def schedule_next_requests(self):
        """Schedules a request if available"""
        # TODO: While there is capacity, schedule a batch of redis requests.
        for req in self.next_requests():
            self.crawler.engine.crawl(req, spider=self)

    def spider_idle(self):
        """Schedules a request if available, otherwise waits."""
        # XXX: Handle a sentinel to close the spider.
        self.schedule_next_requests()
        raise DontCloseSpider
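
Since make_request_from_data is the decoding hook, it is the natural place to customize when something richer than a bare URL is pushed. A sketch, assuming hypothetical JSON payloads of the form {"url": ..., "meta": ...} are pushed to the key instead of plain URLs:

import json

import scrapy
from scrapy_redis.spiders import RedisSpider


class JsonChoutiSpider(RedisSpider):
    # Hypothetical spider for illustration; not part of the source above.
    name = 'baidu'
    allowed_domains = ['baidu.com']

    def make_request_from_data(self, data):
        # data arrives as bytes from lpop/spop; decode with the configured encoding
        payload = json.loads(data.decode(self.redis_encoding))
        return scrapy.Request(payload['url'],
                              meta=payload.get('meta', {}),
                              dont_filter=True)

    def parse(self, response):
        print(response)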