Scrapy, an Open-Source Python Crawler Framework: Source Code Analysis (Part 4)


The engine is the core of Scrapy: it controls and schedules the entire crawl. The Engine's open_spider method performs some initialization, starts the scheduler, and sets up the seed (request) queue and the dedup queue. Finally it calls self._next_request to kick off a crawl cycle.

    @defer.inlineCallbacks
    def open_spider(self, spider, start_requests=(), close_if_idle=True):
        assert self.has_capacity(), "No free spider slot when opening %r" % \
            spider.name
        logger.info("Spider opened", extra={'spider': spider})
        nextcall = CallLaterOnce(self._next_request, spider)  # wrap self._next_request so it can be scheduled on the reactor
        scheduler = self.scheduler_cls.from_crawler(self.crawler)  # build the scheduler from the crawler
        start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)
        slot = Slot(start_requests, close_if_idle, nextcall, scheduler)
        self.slot = slot
        self.spider = spider
        yield scheduler.open(spider)  # start the scheduler
        yield self.scraper.open_spider(spider)
        self.crawler.stats.open_spider(spider)
        yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)
        slot.nextcall.schedule()  # schedule a call to self._next_request
        slot.heartbeat.start(5)
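
The nextcall built at the top of open_spider is a CallLaterOnce: a small helper that schedules a function on the Twisted reactor at most once at a time, no matter how many times schedule() is called before it fires. Slightly condensed, the helper in scrapy/utils/reactor.py looks roughly like this:

    from twisted.internet import reactor

    class CallLaterOnce(object):
        """Schedule a function call for the next reactor loop, collapsing
        repeated schedule() calls into a single pending invocation."""

        def __init__(self, func, *a, **kw):
            self._func, self._a, self._kw = func, a, kw
            self._call = None  # pending IDelayedCall, if any

        def schedule(self, delay=0):
            if self._call is None:  # only arm if nothing is pending
                self._call = reactor.callLater(delay, self)

        def cancel(self):
            if self._call:
                self._call.cancel()

        def __call__(self):
            self._call = None  # allow the next schedule() to re-arm
            return self._func(*self._a, **self._kw)

This is why the engine can call slot.nextcall.schedule() freely from many places without flooding the reactor with duplicate _next_request calls.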

In open_spider, the slot schedules _next_request, so let's look at _next_request next. It first uses _needs_backout(spider) to decide whether the engine should back off and return (see the sketch after the code below), and then uses self._next_request_from_scheduler(spider) to check whether there are still URLs to crawl.

    def _next_request(self, spider):
        slot = self.slot
        if not slot:
            return
        if self.paused:
            return 
        while not self._needs_backout(spider):  # should the engine back off?
            if not self._next_request_from_scheduler(spider):  # any more requests to crawl?
                break
 
        if slot.start_requests and not self._needs_backout(spider):
            try:
                request = next(slot.start_requests)
            except StopIteration:
                slot.start_requests = None
            except Exception:
                slot.start_requests = None
                logger.error('Error while obtaining start requests',
                             exc_info=True, extra={'spider': spider})
            else:
                self.crawl(request, spider)
 
        if self.spider_is_idle(spider) and slot.close_if_idle:
            self._spider_idle(spider)
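
For reference, the backout check that gates the loop is short; in the same engine source it looks roughly like this (the engine backs off when it is stopping, the slot is closing, or the downloader or scraper is saturated):

    def _needs_backout(self, spider):
        slot = self.slot
        return not self.running \
            or slot.closing \
            or self.downloader.needs_backout() \
            or self.scraper.slot.needs_backout()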

_next_request then loops over _next_request_from_scheduler(self, spider), which pulls the next request to crawl from the scheduler and hands it to the downloader to fetch the page (a toy sketch of the scheduler interface follows the code).

    def _next_request_from_scheduler(self, spider):
        slot = self.slot
        request = slot.scheduler.next_request()  # pull the next pending request from the scheduler's queue
        if not request:
            return
        d = self._download(request, spider)  # download the request with the downloader
        d.addBoth(self._handle_downloader_output, request, spider)  # handle the downloaded response
        d.addErrback(lambda f: logger.info('Error while handling downloader output',
                                           exc_info=failure_to_exc_info(f),
                                           extra={'spider': spider}))
        d.addBoth(lambda _: slot.remove_request(request))
        d.addErrback(lambda f: logger.info('Error while removing request from slot',
                                           exc_info=failure_to_exc_info(f),
                                           extra={'spider': spider}))
        d.addBoth(lambda _: slot.nextcall.schedule())
        d.addErrback(lambda f: logger.info('Error while scheduling new request',
                                           exc_info=failure_to_exc_info(f),
                                           extra={'spider': spider}))
        return d
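
The engine only talks to the scheduler through a small interface: from_crawler, open, close, enqueue_request, next_request, and has_pending_requests. As a rough illustration only (a toy, assumption-laden sketch, not Scrapy's real fingerprint-based dedup or priority queues), a minimal scheduler satisfying that contract could look like:

    class ToyScheduler(object):
        """Toy in-memory scheduler, for illustration only."""

        @classmethod
        def from_crawler(cls, crawler):
            return cls()

        def __init__(self):
            self.queue = []    # pending requests (FIFO here; Scrapy uses priority queues)
            self.seen = set()  # naive URL-based dedup set

        def open(self, spider):
            self.spider = spider  # called from Engine.open_spider

        def close(self, reason):
            pass

        def enqueue_request(self, request):
            if not request.dont_filter and request.url in self.seen:
                return False  # filtered out as a duplicate
            self.seen.add(request.url)
            self.queue.append(request)
            return True

        def next_request(self):
            # polled by _next_request_from_scheduler; None means the queue is empty
            return self.queue.pop(0) if self.queue else None

        def has_pending_requests(self):
            return len(self.queue) > 0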

Next, let's see how the engine's _download method hands the request to Scrapy's downloader.

    def _download(self, request, spider):
        slot = self.slot
        slot.add_request(request)
        def _on_success(response):
            assert isinstance(response, (Response, Request))
            if isinstance(response, Response):  # if a Response object came back, log it
                response.request = request # tie request to response received
                logkws = self.logformatter.crawled(request, response, spider)
                logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
                self.signals.send_catch_log(signal=signals.response_received, \
                    response=response, request=request, spider=spider)
            return response
 
        def _on_complete(_):
            slot.nextcall.schedule()
            return _
 
        dwld = self.downloader.fetch(request, spider)  # download the request via the downloader's fetch
        dwld.addCallbacks(_on_success)  # add the success callback
        dwld.addBoth(_on_complete)
        return dwld
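
fetch returns a Twisted Deferred, and the chain above hinges on the difference between addCallbacks (success path only) and addBoth (runs on success or failure). A minimal, self-contained illustration of that pattern (an assumed example, not Scrapy code):

    from twisted.internet import defer

    def on_success(result):
        # fires only on the success path, like _on_success above
        return result.upper()

    def on_complete(result):
        # fires on success *and* failure, like _on_complete above
        print('chain finished with:', result)
        return result

    d = defer.Deferred()
    d.addCallbacks(on_success)  # success-only callback
    d.addBoth(on_complete)      # always runs; the result (or Failure) passes through
    d.callback('response')      # simulate the downloader firing the deferred
    # prints: chain finished with: RESPONSE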


