Scrapy and Selenium: only scrape two pages

Submitted by 99封情书 on 2019-12-11 08:35:32

Question


I want to crawl a website that has more than 10 pages. Every page has 10 links; the spider collects the links in def parse(): and follows each link to scrape further data in def parse_detail():.

Please guide me on how to crawl only two pages, not all of them. Thanks. Here is my code; it only crawls one page and then the spider closes:

def __init__(self):
    self.driver = webdriver.Firefox()
    dispatcher.connect(self.spider_closed, signals.spider_closed)

def parse(self, response):
    self.driver.implicitly_wait(20)
    self.driver.get(response.url)
    sites = self.driver.find_elements_by_css_selector("")  # selector omitted
    for site in sites:
        item = CItem()
        linkiwant = site.find_element_by_css_selector(" ")  # selector omitted
        start = site.find_element_by_css_selector(" ")      # selector omitted
        item['link'] = linkiwant.get_attribute("href")
        item['start_date'] = start.text
        # follow each link and finish the item in parse_detail()
        yield Request(url=item['link'], meta={'item': item}, callback=self.parse_detail)

    #how to write to only catch 2 pages??
    i = 0
    if i < 2:
        try:
            next = self.driver.find_element_by_xpath("/li[@class='p_next'][1]")
            next_page = next.text
            if next_page == "next_page":
                next.click()
                self.driver.refresh()
                yield Request(self.driver.current_url, callback=self.parse)
                i += 1
        except:
            print "page not found"
def parse_detail(self, response):
    item = response.meta['item']
    self.driver.implicitly_wait(20)  
    self.driver.get(response.url)
    sel = Selector(response)
    sites = sel.css("")            
    for site in sites:
        item['title'] = site.css(" ").extract()[0] 
        item['titleURL'] = site.css(" ").extract()[0]
        ..
        yield item   
def spider_closed(self, spider):
    self.driver.close()

Answer 1:


Make i persistent: a local i is rebound to 0 on every call to parse, so the counter never advances past the first page. Keep the counter on the spider instance instead:

def __init__(self):
    self.page_num = 0
    self.driver = webdriver.Firefox()
    dispatcher.connect(self.spider_closed, signals.spider_closed)

and inside parse, test and bump the instance attribute:

    #how to write to only catch 2 pages??
    if self.page_num < 2:
        try:
            next = self.driver.find_element_by_xpath("/li[@class='p_next'][1]")
            next_page = next.text
            if next_page == "next_page":
                next.click()
                self.driver.refresh()
                yield Request(self.driver.current_url, callback=self.parse)
                self.page_num += 1
        except:
            print "page not found"


Source: https://stackoverflow.com/questions/25215623/scrapy-and-selenium-only-scrap-two-pages
