第一步:获取拉勾首页信息
# Step 1: fetch the Lagou homepage and parse it.
# NOTE(review): `requests` and `BeautifulSoup` appear to be imported in an
# earlier notebook cell not visible here — confirm before running standalone.

url = 'https://www.lagou.com/'
headers = {
    # Browser-like UA so the site serves the normal page instead of an
    # anti-crawler response. (Split across lines; byte-identical value.)
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/78.0.3904.70 Safari/537.36',
    'Connection': 'keep-alive',
    # NOTE(review): hard-coded session cookie — it expires quickly; refresh it
    # (or switch to requests.Session) before each run.
    'Cookie': '_ga=GA1.2.759756292.1572002144; _gid=GA1.2.1024774356.1572002144; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1572002144; user_trace_token=20191025191854-3af2cb8b-f719-11e9-a08c-525400f775ce; PRE_UTM=; PRE_HOST=cn.bing.com; PRE_SITE=https%3A%2F%2Fcn.bing.com%2F; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2F; LGUID=20191025191854-3af2ce76-f719-11e9-a08c-525400f775ce; LGSID=20191025191854-3af2ccf6-f719-11e9-a08c-525400f775ce; index_location_city=%E5%85%A8%E5%9B%BD; _gat=1; LGRID=20191025192516-1eb14395-f71a-11e9-a607-5254005c3644; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1572002526; X_HTTP_TOKEN=7f3a7ebf1bb8dfc4987200275129b8d1ec41735636',
}

response = requests.get(url, headers=headers)
# Parse with the lxml backend; later cells read `soup`.
soup = BeautifulSoup(response.text, 'lxml')
# soup
第二步:获取首页所有职位链接
# Step 2: collect every job-category link from the homepage navigation.
# `soup` is the homepage parse tree built in the previous cell.
all_info = soup.select('div.mainNavs a')
# Comprehension instead of append-in-a-loop; this also avoids the original
# loop's rebinding of `url`, which clobbered the homepage URL variable.
position_urls = [a.get('href') for a in all_info]
# position_urls
第三步:获取所对应链接的职位招聘信息
# Step 3: visit each category link and scrape its job postings.
# Builds a list of dicts, one per posting; `position_urls` and `headers`
# come from the earlier cells.
position_info = []
for link in position_urls:
    page = requests.get(link, headers=headers)
    # Use a distinct name so the homepage `soup` from Step 1 is not clobbered.
    page_soup = BeautifulSoup(page.text, 'lxml')
    # The postings live in the second ul.item_con_list on the page; skip
    # pages that don't have it (e.g. anti-crawler / redirect pages) instead
    # of raising IndexError as the original did.
    con_lists = page_soup.select('ul.item_con_list')
    if len(con_lists) < 2:
        continue
    for li in con_lists[1].select('li'):
        # div.li_b_l holds the "salary experience/degree" text; the original
        # re-ran this select/parse four times per posting — do it once.
        li_b_l = li.select('div.li_b_l')
        raw = li_b_l[0].text
        # Second non-blank line, spaces stripped, looks like "经验/学历".
        detail = raw.replace(' ', '').split('\n')[2]
        dic = {}
        dic['position_name'] = li.select('h3')[0].text
        dic['address'] = li.select('span.add')[0].text
        dic['salary&experience'] = raw.split('\n')[1]
        dic['experience'] = detail.split('/')[0]
        dic['degree'] = detail.split('/')[1]
        # Job tags joined with '/' instead of newlines.
        dic['job_kind'] = li_b_l[1].text.replace('\n', '/')
        position_info.append(dic)

print(position_info)