Question
I wrote a test that crawls taobao.com using pyppeteer. taobao.com has a verification step, a slider button, so I added some handling for it in the code, but an error occurs while the code is running. The error output looks like this:
2018-11-30 18:15:32 [websockets.protocol] DEBUG: client ! failing WebSocket connection in the OPEN state: 1006 [no reason]
2018-11-30 18:15:32 [websockets.protocol] DEBUG: client - event = connection_lost(None)
2018-11-30 18:15:32 [websockets.protocol] DEBUG: client - state = CLOSED
2018-11-30 18:15:32 [websockets.protocol] DEBUG: client x code = 1006, reason = [no reason]
2018-11-30 18:15:32 [websockets.protocol] DEBUG: client - aborted pending ping: 7ac33fd3
[I:pyppeteer.connection] connection closed
Protocol Error (Runtime.callFunctionOn): Session closed. Most likely the page has been closed. ***********************************:slide login False
Protocol Error (Runtime.callFunctionOn): Session closed. Most likely the page has been closed. ***********************************:slide login False
... and so on: the same error repeats endlessly, like a dead loop.
The error occurs after the slider button has been dragged several times (the slider usually needs several attempts), but the code should keep retrying until the action succeeds, because I set up retries in the code. I am wondering why the connection was closed.
The core code is shown below.
**#middlewares.py**
from scrapy import signals
from scrapy.http import HtmlResponse
from logging import getLogger
import asyncio
import time, os
from pyppeteer.launcher import launch
from seleniumtest.moveslider import mouse_slide, input_time_random
from seleniumtest.jsflagsetter import js1, js3, js4, js5
class SeleniumMiddleware():

    def __init__(self, username=None, password=None, timeout=None):
        self.logger = getLogger(__name__)
        self.username = username
        self.password = password
        self.timeout = timeout
        print("Init downloaderMiddleware using pyppeteer.")
        os.environ['PYPPETEER_CHROMIUM_REVISION'] = '588429'
        # pyppeteer.DEBUG = False
        print(os.environ.get('PYPPETEER_CHROMIUM_REVISION'))
        loop = asyncio.get_event_loop()
        task = asyncio.ensure_future(self.getbrowser())
        loop.run_until_complete(task)

    async def getbrowser(self):
        self.browser = await launch({
            'headless': False,
            'userDataDir': 'tmp',
            'args': ['--no-sandbox'],
            'executablePath': "C:\\Users\\Edwin\\AppData\\Local\\Google\\Chrome\\Application\\chrome.exe",
            'dumpio': True
        })
        self.page = await self.browser.newPage()
    async def usePypuppeteer(self, current_page, url):
        await asyncio.sleep(0.3)
        await self.page.setUserAgent(
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
            '(KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36')
        await self.page.setViewport({'width': 1366, 'height': 768})
        response = await self.page.goto(url, options={'timeout': self.timeout * 1000})
        if response.status != 200:
            return None
        # evaluate the flag-setting scripts
        await self.page.evaluate(js1)
        await self.page.evaluate(js3)
        await self.page.evaluate(js4)
        await self.page.evaluate(js5)
        if current_page == 1:
            try:
                login_text = await self.page.Jeval('.qrcode-login .login-title', 'node => node.textContent')
            except Exception as e:
                login_text = None
            if login_text:
                if login_text == '手机扫码,安全登录':  # QR-code login shown: switch to password login
                    switch_btn = await self.page.querySelector('.login-switch #J_Quick2Static')
                    await self.page.evaluate('(element) => element.click()', switch_btn)
                else:
                    pass
            user_edit = await self.page.querySelector('.login-text.J_UserName')
            await self.page.evaluate('(element) => element.value = ""', user_edit)
            await user_edit.type(self.username, {'delay': input_time_random()})
            await self.page.type('#J_StandardPwd #TPL_password_1', self.password, {'delay': input_time_random()})
            await asyncio.sleep(1)  # fixed: time.sleep(1) would block the event loop
            slider = await self.page.Jeval('#nocaptcha', 'node => node.style')
            if slider:
                flag = await mouse_slide(page=self.page)
                if flag:
                    try:
                        print('******************** get login button')
                        login_btn = await self.page.querySelector('#J_SubmitStatic')
                        await self.page.evaluate('(element) => element.click()', login_btn)
                        await self.page.waitForSelector('#mainsrp-itemlist .m-itemlist')
                        await self.get_cookie(self.page)
                        content = await self.page.content()
                        return content
                    except Exception as e:
                        return None
                else:
                    return None
            else:
                try:
                    await self.page.keyboard.press('Enter')  # press Enter to submit
                    await self.page.waitFor(20)  # wait 20 ms
                    await self.page.waitForSelector('#mainsrp-itemlist .m-itemlist')
                    content = await self.page.content()
                    return content
                except Exception as e:
                    return None
        else:
            try:
                page_input = await self.page.querySelector('#mainsrp-pager div.form > input')
                submit = await self.page.querySelector('#mainsrp-pager div.form > span.btn.J_Submit')
                await self.page.evaluate('(element) => element.value = ""', page_input)
                await page_input.type(str(current_page))  # fixed: type() expects a string
                await submit.click()
                await self.page.waitForSelector('#mainsrp-itemlist .m-itemlist')
                current_page_text = await self.page.Jeval('#mainsrp-pager li.item.active > span', 'node => node.textContent')
                items = await self.page.querySelector('.m-itemlist .items .item')  # fixed: Jeval requires a page function
                if current_page_text == str(current_page) and items:
                    content = await self.page.content()
                    return content
                else:
                    return None
            except Exception as e:
                return None
    def process_request(self, request, spider):
        self.logger.debug('Browser is Starting')
        current_page = request.meta.get('page', 1)
        loop = asyncio.get_event_loop()
        task = asyncio.ensure_future(self.usePypuppeteer(current_page, request.url))
        loop.run_until_complete(task)
        body = task.result()
        if body is None:  # fixed: do not build an HtmlResponse from a None body
            return None   # let Scrapy fall through to the normal download path
        return HtmlResponse(url=request.url, body=body, encoding="utf-8",
                            request=request, status=200)

    @classmethod
    def from_crawler(cls, crawler):
        s = cls(username=crawler.settings.get('USERNAME'),
                password=crawler.settings.get('PASSWORD'),
                timeout=crawler.settings.get('TIMEOUT'))
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    async def get_cookie(self, page):  # fixed: the method was missing self
        res = await page.content()
        cookies_list = await page.cookies()
        cookies = ''
        for cookie in cookies_list:
            str_cookie = '{0}={1};'
            str_cookie = str_cookie.format(cookie.get('name'), cookie.get('value'))
            cookies += str_cookie
        return cookies
    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
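One thing the middleware never does is close the browser. A minimal sketch (my own addition, not in the original post) of a spider_closed handler that shuts Chromium down when the crawl ends, assuming it is connected in from_crawler the same way spider_opened is:

    def spider_closed(self, spider):
        # hedged sketch: requires
        #   crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        # in from_crawler above
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.browser.close())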
**#moveslider.py**
# -*- coding:utf-8 -*-
from retrying import retry
import time, asyncio, random

def retry_if_result_none(result):
    return result is None

def tries(func):
    def func_wrapper(f):
        async def wrapper(*args, **kwargs):
            # retry while the check says the result is None; note that
            # exceptions are swallowed, so a permanently failing call
            # (e.g. a closed session) will loop here forever
            while True:
                try:
                    if func(await f(*args, **kwargs)):
                        continue
                    else:
                        break
                except Exception as exc:
                    pass
            return True
        return wrapper
    return func_wrapper

@tries(retry_if_result_none)
async def mouse_slide(page=None):
    try:
        await page.hover('#nc_1_n1z')  # move to the slider button
        await page.mouse.down()  # press the mouse button
        await page.mouse.move(1700, 0, {'delay': random.randint(1000, 2000)})  # drag to the target position
        await page.mouse.up()  # release the mouse button
    except Exception as e:
        print(e, '***********************************:slide login False')
        slider_move_text = await page.Jeval('.errloading .nc-lang-cnt', 'node => node.textContent')  # read the error message
        print('**********************,slider_move_text=', slider_move_text)
        if "哎呀,出错了,点击" in slider_move_text:  # "Oops, something went wrong, click ..."
            refresh_btn = await page.querySelector('.errloading .nc-lang-cnt a')
            await page.evaluate('(element) => element.click()', refresh_btn)
            await asyncio.sleep(3)
        return None
    else:
        await asyncio.sleep(3)
        slider_again = await page.Jeval('.nc-lang-cnt', 'node => node.textContent')
        if slider_again != '验证通过':  # "verification passed"
            return None
        else:
            await page.screenshot({'path': './headless-slide-result.png'})
            return 1

def input_time_random():
    return random.randint(100, 151)
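Incidentally, the tries decorator above retries forever and swallows every exception, which is exactly the dead loop in the log once the DevTools session has closed. A minimal sketch of a bounded variant (my own, not from the original post) that gives up after a fixed number of attempts:

def tries_capped(check, max_attempts=5):
    # hedged sketch: same retry-on-None idea as tries(), but capped so a
    # permanently broken session cannot spin forever
    def decorator(f):
        async def wrapper(*args, **kwargs):
            for attempt in range(max_attempts):
                try:
                    result = await f(*args, **kwargs)
                except Exception:
                    continue  # count an exception as a failed attempt
                if not check(result):  # check is e.g. retry_if_result_none
                    return result      # success: truthy, so `if flag:` still works
            return None  # give up: falsy at the call site
        return wrapper
    return decorator

Swapping @tries(retry_if_result_none) for @tries_capped(retry_if_result_none) would make mouse_slide fail cleanly after five attempts instead of looping on a closed session.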
**#taobao.py**
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request, Spider
from urllib.parse import quote
from seleniumtest.items import ProductItem
import json
class TaobaoSpider(scrapy.Spider):
    name = 'taobao'
    allowed_domains = ['www.taobao.com']
    base_url = 'https://s.taobao.com/search?q='

    def start_requests(self):
        for keyword in self.settings.get('KEYWORDS'):
            for page in range(1, self.settings.get('MAX_PAGE') + 1):
                url = self.base_url + quote(keyword)
                yield Request(url=url, callback=self.parse, meta={'page': page}, dont_filter=True)

    def parse(self, response):
        products = response.xpath('//div[@id="mainsrp-itemlist"]//div[@class="items"][1]//div[contains(@class,"item")]')
        for product in products:
            item = ProductItem()
            item['price'] = ''.join(product.xpath('.//div[contains(@class,"price")]//text()').extract()).strip()
            item['title'] = ''.join(product.xpath('.//div[contains(@class,"title")]//text()').extract()).strip()
            item['shop'] = ''.join(product.xpath('.//div[contains(@class,"shop")]//text()').extract()).strip()
            item['image'] = ''.join(product.xpath('.//div[@class="pic"]//img[contains(@class,"img")]/@data-src').extract()).strip()
            item['deal'] = product.xpath('.//div[contains(@class,"deal-cnt")]//text()').extract_first()  # fixed: was "textxt()"
            item['location'] = product.xpath('.//div[@class="location"]//text()').extract_first()
            print(item['price'], item['title'], item['shop'], item['image'], item['deal'], item['location'])
            yield item
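The middleware and spider above read several custom settings (USERNAME, PASSWORD, TIMEOUT, KEYWORDS, MAX_PAGE). For completeness, a hedged sketch of the matching settings.py entries; the values are placeholders and the middleware path is an assumption based on the file and class names shown above:

# sketch of seleniumtest/settings.py (placeholder values)
KEYWORDS = ['ipad']
MAX_PAGE = 10
USERNAME = 'your-taobao-username'
PASSWORD = 'your-taobao-password'
TIMEOUT = 10  # seconds; usePypuppeteer multiplies this by 1000 for page.goto
DOWNLOADER_MIDDLEWARES = {
    'seleniumtest.middlewares.SeleniumMiddleware': 543,  # assumed path/priority
}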
Answer 1:
For now, we have a workaround hack. The "aborted pending ping" line in the log shows a websockets keepalive ping timing out, after which the library fails the connection with code 1006; the patch below simply disables those keepalive pings:
def patch_pyppeteer():
    import pyppeteer.connection
    original_method = pyppeteer.connection.websockets.client.connect

    def new_method(*args, **kwargs):
        kwargs['ping_interval'] = None  # disable keepalive pings
        kwargs['ping_timeout'] = None   # never time out waiting for a pong
        return original_method(*args, **kwargs)

    pyppeteer.connection.websockets.client.connect = new_method

patch_pyppeteer()
Hopefully this pull request #160 will be merged soon.
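A minimal usage sketch (mine, not from the pull request): apply the patch before the first launch() so every DevTools websocket is opened without a keepalive timeout:

import asyncio
from pyppeteer import launch

patch_pyppeteer()  # defined above; must run before any connection is opened

async def main():
    browser = await launch({'headless': False})
    page = await browser.newPage()
    await page.goto('https://www.taobao.com')
    await browser.close()

asyncio.get_event_loop().run_until_complete(main())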
Source: https://stackoverflow.com/questions/53556006/the-connection-closed-when-code-running-which-use-pyppeteer-to-crawl-a-web