Scrapy-splash not allowing infinite scroll to complete

早过忘川 提交于 2019-12-13 03:24:56

问题


I am scraping a used car dealer website that has some javascript on the car listing pages hence using scrapy-splash.

The car dealer webpages also have infinite scroll until all their cars are listed.

The problem I am having is that, on some occasions, the code below does not let the infinite scroll continue to the end — and I am not sure why — so I miss some of the cars.

I have reduced the concurrent requests right back to 1 in the settings file, and therefore I know that I at least start to scrape all the car dealers on the start_url page.

Any ideas what I am missing?

Settings File code:

# -*- coding: utf-8 -*-

# Scrapy settings for the autotrader1 project.
#
# Only the settings this crawl actually relies on are set here; everything
# else is left at Scrapy's defaults. Full reference:
#     http://doc.scrapy.org/en/latest/topics/settings.html

BOT_NAME = 'autotrader1'

SPIDER_MODULES = ['autotrader1.spiders']
NEWSPIDER_MODULE = 'autotrader1.spiders'

# Address of the Splash rendering service (Docker machine IP and port).
SPLASH_URL = 'http://192.168.99.100:8050/'

# Honour robots.txt rules.
ROBOTSTXT_OBEY = True

# Throttle to a single in-flight request (default is 16) so that each
# dealer listing page is fetched one at a time.
CONCURRENT_REQUESTS = 1

# scrapy-splash spider middleware: deduplicates Splash arguments so the
# (large) lua_source payload is not stored in every request fingerprint.
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}

# Standard scrapy-splash downloader middleware stack; the ordering
# (cookies 723, Splash 725, compression 810) follows the scrapy-splash
# README.
DOWNLOADER_MIDDLEWARES = {
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}

# Route scraped items through the project's CSV export pipeline.
ITEM_PIPELINES = {'autotrader1.pipelines.CSVPipeline': 300 }

# Splash-aware HTTP cache storage and duplicate filter, as recommended by
# the scrapy-splash documentation.
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'

DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'

Scrapy Code:

import scrapy
from scrapy_splash import SplashRequest

from urllib.parse import urljoin
from autotrader1.items import Autotrader1Item


script = """
function main(splash)
    local scroll_delay = 2 # i have tried to vary this number with some success
    local is_down = splash:jsfunc(
        "function() { return((window.innerHeight + window.scrollY) >= document.body.offsetHeight);}"
        )

    local scroll_to = splash:jsfunc("window.scrollTo")
    local get_body_height = splash:jsfunc(
        "function() {return document.body.scrollHeight;}"
    )
    assert(splash:go(splash.args.url))

    while not is_down() do
        scroll_to(0, get_body_height())
        splash:wait(scroll_delay)
    end        
    return splash:html()
end
"""


class Autotrader1Spider(scrapy.Spider):
    """Scrape used-car listings from AutoTrader dealer stock pages.

    Dealer pages use JavaScript infinite scroll, so each one is rendered
    through Splash with the module-level ``script`` (which scrolls until
    all cars are loaded) before being parsed.
    """

    name = "autotrader1"
    allowed_domains = ["autotrader.co.uk"]
    start_urls = ["http://www.autotrader.co.uk/car-dealers/search?channel=cars&postcode=M4+3AQ&radius=1501&forSale=on&toOrder=on&page=1"]

    def parse(self, response):
        """Extract dealer URLs from the search page and request each
        dealer's stock-listing page via Splash."""
        for href in response.xpath('//*[@class="dealerList__itemName"]/a[@class="dealerList__itemUrl tracking-standard-link"]/@href'):
            # The dealer id is the trailing segment after the last '-'.
            dealer_id = href.re('([^-]*)$')[0]
            new_url = str(href.extract()) + str('/stock?sort=price-asc&onesearchad=Used&onesearchad=Nearly%20New&dealer=') + str(dealer_id) + str('&advertising-location=at_cars&page=12&advertising-location=at_profile_cars')
            url_dealer = urljoin('https://www.autotrader.co.uk/', new_url)
            # FIX: endpoint must be 'execute' for Splash to run lua_source;
            # with 'render.html' the scroll script is silently ignored, so
            # the infinite scroll never completes and cars are missed.
            yield SplashRequest(url_dealer, self.parse_second, endpoint='execute', args={'wait': 2, 'lua_source': script})

    def parse_second(self, response):
        """Parse the fully-scrolled dealer stock page into one item per car."""
        for sel in response.xpath('//div[@class="app-root"]'):
            # Dealer-level fields, shared by every car on this page.
            name = sel.xpath('//*[@class="dealer-stock-view-header"]/span/h1/text()[4]').extract()
            cars = sel.xpath('//*[@class="dealer_stock_search"]/div/div/h2/text()[1]').extract()

            URLs = sel.xpath('//*[@class="stock-view-listing"]/../@href').extract()
            URLs = [URL.strip() for URL in URLs]
            descriptions = sel.xpath('//*[@class="stock-view-listing"]/div[@class="information-container"]/span[@class="information"]/h2/text()').extract()
            descriptions = [description.strip() for description in descriptions]
            prices = sel.xpath('//*[@class="price-container"]/h2').re(r'<h2>(.*?)</h2>')
            prices = [price.strip() for price in prices]
            # The '|'-separated spec line is split with positional regexes:
            # registration | miles | ... | transmission | fuel.
            registrations = sel.xpath('//*[@class="stock-view-listing"]/div[@class="information-container"]/span[@class="information"]/p[1]/text()').re(r'^([^|]*)')
            registrations = [registration.strip() for registration in registrations]
            miless = sel.xpath('//*[@class="stock-view-listing"]/div[@class="information-container"]/span[@class="information"]/p[1]/text()').re(r'([^|.]+)(?=miles)')
            miless = [miles.strip() for miles in miless]
            engines = sel.xpath('//*[@class="stock-view-listing"]/div[@class="information-container"]/span[@class="information"]/p[1]/text()').re(r'(?<=miles \| )(.*?)(?= )')
            engines = [engine.strip() for engine in engines]
            transmissions = sel.xpath('//*[@class="stock-view-listing"]/div[@class="information-container"]/span[@class="information"]/p[1]/text()').re(r'^(?:[^\|]*\|){4}([^\|]*)')
            transmissions = [transmission.strip() for transmission in transmissions]
            fuels = sel.xpath('//*[@class="stock-view-listing"]/div[@class="information-container"]/span[@class="information"]/p[1]/text()').re(r'^(?:[^\|]*\|){5}([^\|]*)')
            fuels = [fuel.strip() for fuel in fuels]

            result = zip(URLs, descriptions, prices, registrations, miless, engines, transmissions, fuels)
            for URL, description, price, registration, miles, engine, transmission, fuel in result:
                # FIX: build a fresh item per car. The original mutated and
                # re-yielded a single shared Autotrader1Item instance, which
                # can corrupt earlier items in asynchronous pipelines.
                item = Autotrader1Item()
                item['name'] = name
                item['page_type'] = "normal"
                item['cars'] = cars
                item['URL'] = URL
                item['description'] = description
                item['price'] = price
                item['registration'] = registration
                item['miles'] = miles
                item['engine'] = engine
                item['transmission'] = transmission
                item['fuel'] = fuel
                yield item

回答1:


You need to change the endpoint from render.html to execute — otherwise your lua_source script is never run. See the third example in the scrapy-splash documentation.

in your case

yield SplashRequest(url_dealer, self.parse_second, endpoint='execute', args={'wait':2, 'lua_source': script})            


来源:https://stackoverflow.com/questions/50091655/scrapy-splash-not-allowing-infinite-scroll-to-complete

易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!