requests
Introduction
Official documentation: http://cn.python-requests.org/zh_CN/latest/user/quickstart.html#id4
Python's standard library provides urllib, urllib2, httplib, and similar modules for HTTP requests, but their APIs are clunky. They were built for another era, another internet, and they demand a huge amount of work, up to and including overriding various methods, just to accomplish the simplest tasks.
Requests is an Apache2-licensed HTTP library written in Python. It wraps the built-in modules at a much higher level, making network requests far more pleasant for Python developers; with Requests you can easily do just about anything a browser can.
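As a rough comparison, here is the same GET request written first with the standard library's urllib.request and then with Requests; a minimal sketch, using httpbin.org purely as a public echo service:

import urllib.request

import requests

# Standard library: build a Request, open it, and decode the bytes by hand
req = urllib.request.Request('http://httpbin.org/get')
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode('utf-8'))

# Requests: a single call; decoding and connection handling are done for you
print(requests.get('http://httpbin.org/get').text)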
1. GET requests
# 1. Without parameters

import requests

ret = requests.get('https://github.com/timeline.json')

print(ret.url)
print(ret.text)


# 2. With parameters

import requests

payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.get("http://httpbin.org/get", params=payload)

print(ret.url)
print(ret.text)
2. POST requests
# 1. Basic POST

import requests

payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.post("http://httpbin.org/post", data=payload)

print(ret.text)


# 2. Sending request headers and data

import requests
import json

url = 'https://api.github.com/some/endpoint'
payload = {'some': 'data'}
headers = {'content-type': 'application/json'}

ret = requests.post(url, data=json.dumps(payload), headers=headers)

print(ret.text)
print(ret.cookies)
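Note that current versions of Requests can do the json.dumps call and set the Content-Type header for you via the json keyword argument (documented under "4. More parameters" below); a minimal equivalent of the second example:

import requests

# json= serializes the dict and adds Content-Type: application/json automatically
ret = requests.post('https://api.github.com/some/endpoint', json={'some': 'data'})
print(ret.status_code)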
3. Other request methods
requests.get(url, params=None, **kwargs)
requests.post(url, data=None, json=None, **kwargs)
requests.put(url, data=None, **kwargs)
requests.head(url, **kwargs)
requests.delete(url, **kwargs)
requests.patch(url, data=None, **kwargs)
requests.options(url, **kwargs)
# All of the methods above are built on top of this one
requests.request(method, url, **kwargs)
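Since the shortcut helpers are thin wrappers, the two calls below build the exact same request; a small sketch against httpbin.org:

import requests

r1 = requests.get('http://httpbin.org/get', params={'k1': 'v1'})
r2 = requests.request('GET', 'http://httpbin.org/get', params={'k1': 'v1'})
print(r1.url == r2.url)  # True: both end up going through requests.request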
4. More parameters
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
        to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) whether the SSL cert will be verified. A CA_BUNDLE path can also be provided. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

        >>> import requests
        >>> req = requests.request('GET', 'http://httpbin.org/get')
        <Response [200]>
    """
import requests


def param_method_url():
    # requests.request(method='get', url='http://127.0.0.1:8000/test/')
    # requests.request(method='post', url='http://127.0.0.1:8000/test/')
    pass


def param_param():
    # Can be a dict
    # Can be a string
    # Can be bytes (ASCII-only content)

    # requests.request(method='get',
    #                  url='http://127.0.0.1:8000/test/',
    #                  params={'k1': 'v1', 'k2': '水电费'})

    # requests.request(method='get',
    #                  url='http://127.0.0.1:8000/test/',
    #                  params="k1=v1&k2=水电费&k3=v3&k3=vv3")

    # requests.request(method='get',
    #                  url='http://127.0.0.1:8000/test/',
    #                  params=bytes("k1=v1&k2=k2&k3=v3&k3=vv3", encoding='utf8'))

    # Wrong: bytes containing non-ASCII characters will fail
    # requests.request(method='get',
    #                  url='http://127.0.0.1:8000/test/',
    #                  params=bytes("k1=v1&k2=水电费&k3=v3&k3=vv3", encoding='utf8'))
    pass


def param_data():
    # Can be a dict
    # Can be a string
    # Can be bytes
    # Can be a file object

    # requests.request(method='POST',
    #                  url='http://127.0.0.1:8000/test/',
    #                  data={'k1': 'v1', 'k2': '水电费'})

    # requests.request(method='POST',
    #                  url='http://127.0.0.1:8000/test/',
    #                  data="k1=v1; k2=v2; k3=v3; k3=v4"
    #                  )

    # requests.request(method='POST',
    #                  url='http://127.0.0.1:8000/test/',
    #                  data="k1=v1;k2=v2;k3=v3;k3=v4",
    #                  headers={'Content-Type': 'application/x-www-form-urlencoded'}
    #                  )

    # requests.request(method='POST',
    #                  url='http://127.0.0.1:8000/test/',
    #                  data=open('data_file.py', mode='r', encoding='utf-8'),  # file content: k1=v1;k2=v2;k3=v3;k3=v4
    #                  headers={'Content-Type': 'application/x-www-form-urlencoded'}
    #                  )
    pass


def param_json():
    # Serializes the given data into a JSON string via json.dumps(...),
    # sends it as the request body, and sets the Content-Type header to application/json
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     json={'k1': 'v1', 'k2': '水电费'})


def param_headers():
    # Send request headers to the server
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     json={'k1': 'v1', 'k2': '水电费'},
                     headers={'Content-Type': 'application/x-www-form-urlencoded'}
                     )


def param_cookies():
    # Send cookies to the server
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     data={'k1': 'v1', 'k2': 'v2'},
                     cookies={'cook1': 'value1'},
                     )
    # A CookieJar can also be used (the dict form is a wrapper around it)
    from http.cookiejar import CookieJar
    from http.cookiejar import Cookie

    obj = CookieJar()
    obj.set_cookie(Cookie(version=0, name='c1', value='v1', port=None, domain='', path='/', secure=False, expires=None,
                          discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False,
                          port_specified=False, domain_specified=False, domain_initial_dot=False, path_specified=False)
                   )
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     data={'k1': 'v1', 'k2': 'v2'},
                     cookies=obj)


def param_files():
    # Upload a file
    # file_dict = {
    #     'f1': open('readme', 'rb')
    # }
    # requests.request(method='POST',
    #                  url='http://127.0.0.1:8000/test/',
    #                  files=file_dict)

    # Upload a file with a custom filename
    # file_dict = {
    #     'f1': ('test.txt', open('readme', 'rb'))
    # }
    # requests.request(method='POST',
    #                  url='http://127.0.0.1:8000/test/',
    #                  files=file_dict)

    # Upload with a custom filename, content given as a string
    # file_dict = {
    #     'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf")
    # }
    # requests.request(method='POST',
    #                  url='http://127.0.0.1:8000/test/',
    #                  files=file_dict)

    # Upload with a custom filename, content type, and extra headers
    # file_dict = {
    #     'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf", 'application/text', {'k1': '0'})
    # }
    # requests.request(method='POST',
    #                  url='http://127.0.0.1:8000/test/',
    #                  files=file_dict)

    pass


def param_auth():
    from requests.auth import HTTPBasicAuth, HTTPDigestAuth

    ret = requests.get('https://api.github.com/user', auth=HTTPBasicAuth('zhaogongzi', 'sdfasdfasdf'))
    print(ret.text)

    # ret = requests.get('http://192.168.1.1',
    #                    auth=HTTPBasicAuth('admin', 'admin'))
    # ret.encoding = 'gbk'
    # print(ret.text)

    # ret = requests.get('http://httpbin.org/digest-auth/auth/user/pass', auth=HTTPDigestAuth('user', 'pass'))
    # print(ret)


def param_timeout():
    # ret = requests.get('http://google.com/', timeout=1)
    # print(ret)

    # ret = requests.get('http://google.com/', timeout=(5, 1))
    # print(ret)
    pass


def param_allow_redirects():
    ret = requests.get('http://127.0.0.1:8000/test/', allow_redirects=False)
    print(ret.text)


def param_proxies():
    # proxies = {
    #     "http": "61.172.249.96:80",
    #     "https": "http://61.185.219.126:3128",
    # }

    # proxies = {'http://10.20.1.128': 'http://10.10.1.10:5323'}

    # ret = requests.get("http://www.proxy360.cn/Proxy", proxies=proxies)
    # print(ret.headers)


    # from requests.auth import HTTPProxyAuth
    #
    # proxyDict = {
    #     'http': '77.75.105.165',
    #     'https': '77.75.105.165'
    # }
    # auth = HTTPProxyAuth('username', 'mypassword')
    #
    # r = requests.get("http://www.google.com", proxies=proxyDict, auth=auth)
    # print(r.text)

    pass


def param_stream():
    ret = requests.get('http://127.0.0.1:8000/test/', stream=True)
    print(ret.content)
    ret.close()

    # from contextlib import closing
    # with closing(requests.get('http://httpbin.org/get', stream=True)) as r:
    #     # Process the response here.
    #     for i in r.iter_content():
    #         print(i)


def requests_session():
    import requests

    session = requests.Session()

    ### 1. First visit any page to obtain a cookie

    i1 = session.get(url="http://dig.chouti.com/help/service")

    ### 2. Log in carrying the previous cookie; the backend authorizes the gpsd value in it
    i2 = session.post(
        url="http://dig.chouti.com/login",
        data={
            'phone': "8615131255089",
            'password': "xxxxxx",
            'oneMonth': ""
        }
    )

    ### 3. Vote (the session carries the authorized cookie automatically)
    i3 = session.post(
        url="http://dig.chouti.com/link/vote?linksId=8589623",
    )
    print(i3.text)
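Every one of these calls returns a requests.Response object. As a quick reference, a sketch of the response attributes this article relies on (httpbin.org simply echoes the request back as JSON):

import requests

ret = requests.get('http://httpbin.org/get')
print(ret.status_code)         # numeric status code, e.g. 200
print(ret.url)                 # final URL after any redirects
print(ret.encoding)            # encoding used to turn ret.content into ret.text
print(ret.headers)             # response headers, a case-insensitive dict
print(ret.cookies.get_dict())  # cookies the server set, as a plain dict
print(ret.json())              # body parsed as JSON (raises an error if it is not JSON)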
BeautifulSoup
Introduction
BeautifulSoup is a module that takes an HTML or XML string, parses and formats it, and then lets you quickly locate specific elements with the methods it provides, making element lookup in HTML or XML straightforward.
from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
asdf
<div class="title">
<b>The Dormouse's story总共</b>
<h1>f</h1>
</div>
<div class="story">Once upon a time there were three little sisters; and their names were
<a class="sister0" id="link1">Els<span>f</span>ie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</div>
ad<br/>sf
<p class="story">...</p>
</body>
</html>
"""

soup = BeautifulSoup(html_doc, features="lxml")
# Find the first a tag
tag1 = soup.find(name='a')
# Find all a tags
tag2 = soup.find_all(name='a')
# Find the tag with id=link2
tag3 = soup.select('#link2')
Installation
pip3 install beautifulsoup4
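The examples in this article parse with features="lxml", which additionally requires the third-party lxml parser; install it alongside, or pass features="html.parser" to fall back to the pure-Python parser bundled with the standard library:

pip3 install lxml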
Usage example
from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
...
</body>
</html>
"""

soup = BeautifulSoup(html_doc, features="lxml")
1. name, the tag's name

# tag = soup.find('a')
# name = tag.name  # get
# print(name)
# tag.name = 'span'  # set
# print(soup)
2. attrs, the tag's attributes

# tag = soup.find('a')
# attrs = tag.attrs  # get
# print(attrs)
# tag.attrs = {'ik': 123}  # set (replaces all attributes)
# tag.attrs['id'] = 'iiiii'  # set a single attribute
# print(soup)
3. children, all direct child nodes

# body = soup.find('body')
# v = body.children
4. descendants, all descendant nodes (children, grandchildren, and so on)

# body = soup.find('body')
# v = body.descendants
5. clear, empty out all of a tag's children (the tag itself is kept)

# tag = soup.find('body')
# tag.clear()
# print(soup)
6. decompose, recursively remove the tag and everything inside it

# body = soup.find('body')
# body.decompose()
# print(soup)
7. extract, recursively remove the tag and return the removed tag

# body = soup.find('body')
# v = body.extract()
# print(soup)
8. decode, serialize to a string (including the current tag); decode_contents (excluding the current tag)

# body = soup.find('body')
# v = body.decode()
# v = body.decode_contents()
# print(v)
9. encode, serialize to bytes (including the current tag); encode_contents (excluding the current tag)

# body = soup.find('body')
# v = body.encode()
# v = body.encode_contents()
# print(v)
10. find, get the first matching tag

# tag = soup.find('a')
# print(tag)
# tag = soup.find(name='a', attrs={'class': 'sister'}, recursive=True, text='Lacie')
# tag = soup.find(name='a', class_='sister', recursive=True, text='Lacie')
# print(tag)
11. find_all, get all matching tags

# tags = soup.find_all('a')
# print(tags)

# tags = soup.find_all('a', limit=1)
# print(tags)

# tags = soup.find_all(name='a', attrs={'class': 'sister'}, recursive=True, text='Lacie')
# # tags = soup.find(name='a', class_='sister', recursive=True, text='Lacie')
# print(tags)


# ####### lists #######
# v = soup.find_all(name=['a', 'div'])
# print(v)

# v = soup.find_all(class_=['sister0', 'sister'])
# print(v)

# v = soup.find_all(text=['Tillie'])
# print(v, type(v[0]))


# v = soup.find_all(id=['link1', 'link2'])
# print(v)

# v = soup.find_all(href=['link1', 'link2'])
# print(v)

# ####### regular expressions #######
import re
# rep = re.compile('p')
# rep = re.compile('^p')
# v = soup.find_all(name=rep)
# print(v)

# rep = re.compile('sister.*')
# v = soup.find_all(class_=rep)
# print(v)

# rep = re.compile('http://www.oldboy.com/static/.*')
# v = soup.find_all(href=rep)
# print(v)

# ####### filtering with a function #######
# def func(tag):
#     return tag.has_attr('class') and tag.has_attr('id')
# v = soup.find_all(name=func)
# print(v)


# ## get, read a tag attribute
# tag = soup.find('a')
# v = tag.get('id')
# print(v)
12. has_attr, check whether the tag has a given attribute

# tag = soup.find('a')
# v = tag.has_attr('id')
# print(v)
13. get_text, get the text inside a tag

# tag = soup.find('a')
# v = tag.get_text()
# print(v)
14. index, get the position of a child within a tag

# tag = soup.find('body')
# v = tag.index(tag.find('div'))
# print(v)

# tag = soup.find('body')
# for i, v in enumerate(tag):
#     print(i, v)
15. is_empty_element, whether the tag is an empty (void) or self-closing element
Checks whether the tag is one of: 'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base'

# tag = soup.find('br')
# v = tag.is_empty_element
# print(v)
16. Related nodes of the current tag

# soup.next
# soup.next_element
# soup.next_elements
# soup.next_sibling
# soup.next_siblings

#
# tag.previous
# tag.previous_element
# tag.previous_elements
# tag.previous_sibling
# tag.previous_siblings

#
# tag.parent
# tag.parents
17. Searching a tag's related tags

# tag.find_next(...)
# tag.find_all_next(...)
# tag.find_next_sibling(...)
# tag.find_next_siblings(...)

# tag.find_previous(...)
# tag.find_all_previous(...)
# tag.find_previous_sibling(...)
# tag.find_previous_siblings(...)

# tag.find_parent(...)
# tag.find_parents(...)

# These take the same parameters as find_all; see the sketch below
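For illustration, a short sketch run against the full html_doc sample from the introduction (link1, link2, and link3 are sibling a tags inside the story div):

tag = soup.find(id='link1')
print(tag.find_next_sibling('a'))                          # the link2 tag
print(tag.find_parent('div').get('class'))                 # ['story']
print([t.get('id') for t in tag.find_next_siblings('a')])  # ['link2', 'link3']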
18. select, select_one: CSS selectors

soup.select("title")

soup.select("p:nth-of-type(3)")

soup.select("body a")

soup.select("html head title")

tag = soup.select("span,a")

soup.select("head > title")

soup.select("p > a")

soup.select("p > a:nth-of-type(2)")

soup.select("p > #link1")

soup.select("body > a")

soup.select("#link1 ~ .sister")

soup.select("#link1 + .sister")

soup.select(".sister")

soup.select("[class~=sister]")

soup.select("#link1")

soup.select("a#link2")

soup.select('a[href]')

soup.select('a[href="http://example.com/elsie"]')

soup.select('a[href^="http://example.com/"]')

soup.select('a[href$="tillie"]')

soup.select('a[href*=".com/el"]')


from bs4.element import Tag

def default_candidate_generator(tag):
    for child in tag.descendants:
        if not isinstance(child, Tag):
            continue
        if not child.has_attr('href'):
            continue
        yield child

tags = soup.find('body').select("a", _candidate_generator=default_candidate_generator)
print(type(tags), tags)

from bs4.element import Tag

def default_candidate_generator(tag):
    for child in tag.descendants:
        if not isinstance(child, Tag):
            continue
        if not child.has_attr('href'):
            continue
        yield child

tags = soup.find('body').select("a", _candidate_generator=default_candidate_generator, limit=1)
print(type(tags), tags)
19. Tag content

# tag = soup.find('span')
# print(tag.string)           # get
# tag.string = 'new content'  # set
# print(soup)

# tag = soup.find('body')
# print(tag.string)
# tag.string = 'xxx'
# print(soup)

# tag = soup.find('body')
# v = tag.stripped_strings  # a generator that recursively yields the stripped text of all inner strings
# print(v)
20. append, append a tag inside the current tag

# tag = soup.find('body')
# tag.append(soup.find('a'))
# print(soup)
#
# from bs4.element import Tag
# obj = Tag(name='i', attrs={'id': 'it'})
# obj.string = 'I am new here'
# tag = soup.find('body')
# tag.append(obj)
# print(soup)
21. insert, insert a tag at a given position inside the current tag

# from bs4.element import Tag
# obj = Tag(name='i', attrs={'id': 'it'})
# obj.string = 'I am new here'
# tag = soup.find('body')
# tag.insert(2, obj)
# print(soup)
22. insert_after, insert_before: insert after or before the current tag

# from bs4.element import Tag
# obj = Tag(name='i', attrs={'id': 'it'})
# obj.string = 'I am new here'
# tag = soup.find('body')
# # tag.insert_before(obj)
# tag.insert_after(obj)
# print(soup)
23. replace_with, replace the current tag with the given tag

# from bs4.element import Tag
# obj = Tag(name='i', attrs={'id': 'it'})
# obj.string = 'I am new here'
# tag = soup.find('div')
# tag.replace_with(obj)
# print(soup)
24. Establish relationships between tags

# tag = soup.find('div')
# a = soup.find('a')
# tag.setup(previous_sibling=a)
# print(tag.previous_sibling)
25. wrap, wrap the current tag in the given tag

# from bs4.element import Tag
# obj1 = Tag(name='div', attrs={'id': 'it'})
# obj1.string = 'I am new here'
#
# tag = soup.find('a')
# v = tag.wrap(obj1)
# print(soup)

# tag = soup.find('a')
# v = tag.wrap(soup.find('p'))
# print(soup)
26. unwrap, remove the current tag, keeping its contents

# tag = soup.find('a')
# v = tag.unwrap()
# print(soup)
"自动登陆"示例
Chouti (dig.chouti.com)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests


# ############## Method 1 ##############
"""
# ## 1. First visit any page to obtain a cookie
i1 = requests.get(url="http://dig.chouti.com/help/service")
i1_cookies = i1.cookies.get_dict()

# ## 2. Log in carrying the previous cookie; the backend authorizes the gpsd value in it
i2 = requests.post(
    url="http://dig.chouti.com/login",
    data={
        'phone': "8615131255089",
        'password': "xxooxxoo",
        'oneMonth': ""
    },
    cookies=i1_cookies
)

# ## 3. Vote (only the authorized gpsd cookie needs to be carried)
gpsd = i1_cookies['gpsd']
i3 = requests.post(
    url="http://dig.chouti.com/link/vote?linksId=8589523",
    cookies={'gpsd': gpsd}
)

print(i3.text)
"""


# ############## Method 2 ##############
"""
import requests

session = requests.Session()
i1 = session.get(url="http://dig.chouti.com/help/service")
i2 = session.post(
    url="http://dig.chouti.com/login",
    data={
        'phone': "8615131255089",
        'password': "xxooxxoo",
        'oneMonth': ""
    }
)
i3 = session.post(
    url="http://dig.chouti.com/link/vote?linksId=8589523"
)
print(i3.text)

"""
GitHub
#!/usr/bin/env python
# -*- coding:utf-8 -*-

import requests
from bs4 import BeautifulSoup

# ############## Method 1 ##############
#
# # 1. Visit the login page and grab authenticity_token
# i1 = requests.get('https://github.com/login')
# soup1 = BeautifulSoup(i1.text, features='lxml')
# tag = soup1.find(name='input', attrs={'name': 'authenticity_token'})
# authenticity_token = tag.get('value')
# c1 = i1.cookies.get_dict()
# i1.close()
#
# # 2. Post the username, password, and authenticity_token for authentication
# form_data = {
#     "authenticity_token": authenticity_token,
#     "utf8": "",
#     "commit": "Sign in",
#     "login": "zhaogongzi@qq.com",
#     'password': 'xxoo'
# }
#
# i2 = requests.post('https://github.com/session', data=form_data, cookies=c1)
# c2 = i2.cookies.get_dict()
# c1.update(c2)
# i3 = requests.get('https://github.com/settings/repositories', cookies=c1)
#
# soup3 = BeautifulSoup(i3.text, features='lxml')
# list_group = soup3.find(name='div', class_='listgroup')
#
# from bs4.element import Tag
#
# for child in list_group.children:
#     if isinstance(child, Tag):
#         project_tag = child.find(name='a', class_='mr-1')
#         size_tag = child.find(name='small')
#         temp = "project: %s (%s); path: %s" % (project_tag.string, size_tag.string, project_tag.get('href'), )
#         print(temp)



# ############## Method 2 ##############
# session = requests.Session()
# # 1. Visit the login page and grab authenticity_token
# i1 = session.get('https://github.com/login')
# soup1 = BeautifulSoup(i1.text, features='lxml')
# tag = soup1.find(name='input', attrs={'name': 'authenticity_token'})
# authenticity_token = tag.get('value')
# c1 = i1.cookies.get_dict()
# i1.close()
#
# # 2. Post the username, password, and authenticity_token for authentication
# form_data = {
#     "authenticity_token": authenticity_token,
#     "utf8": "",
#     "commit": "Sign in",
#     "login": "zhaogongzi@qq.com",
#     'password': 'xxoo'
# }
#
# i2 = session.post('https://github.com/session', data=form_data)
# c2 = i2.cookies.get_dict()
# c1.update(c2)
# i3 = session.get('https://github.com/settings/repositories')
#
# soup3 = BeautifulSoup(i3.text, features='lxml')
# list_group = soup3.find(name='div', class_='listgroup')
#
# from bs4.element import Tag
#
# for child in list_group.children:
#     if isinstance(child, Tag):
#         project_tag = child.find(name='a', class_='mr-1')
#         size_tag = child.find(name='small')
#         temp = "project: %s (%s); path: %s" % (project_tag.string, size_tag.string, project_tag.get('href'), )
#         print(temp)
Zhihu
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import time

import requests
from bs4 import BeautifulSoup

session = requests.Session()

i1 = session.get(
    url='https://www.zhihu.com/#signin',
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    }
)

soup1 = BeautifulSoup(i1.text, 'lxml')
xsrf_tag = soup1.find(name='input', attrs={'name': '_xsrf'})
xsrf = xsrf_tag.get('value')

current_time = time.time()
i2 = session.get(
    url='https://www.zhihu.com/captcha.gif',
    params={'r': current_time, 'type': 'login'},
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    })

with open('zhihu.gif', 'wb') as f:
    f.write(i2.content)

captcha = input('Open zhihu.gif, then type in the captcha it shows: ')
form_data = {
    "_xsrf": xsrf,
    'password': 'xxooxxoo',
    "captcha": captcha,  # send the value just typed in, not the literal string 'captcha'
    'email': '424662508@qq.com'
}
i3 = session.post(
    url='https://www.zhihu.com/login/email',
    data=form_data,
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    }
)

i4 = session.get(
    url='https://www.zhihu.com/settings/profile',
    headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
    }
)

soup4 = BeautifulSoup(i4.text, 'lxml')
tag = soup4.find(id='rename-section')
nick_name = tag.find('span', class_='name').string
print(nick_name)
Cnblogs
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import re
import json
import base64

import rsa
import requests


def js_encrypt(text):
    b64der = 'MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCp0wHYbg/NOPO3nzMD3dndwS0MccuMeXCHgVlGOoYyFwLdS24Im2e7YyhB0wrUsyYf0/nhzCzBK8ZC9eCWqd0aHbdgOQT6CuFQBMjbyGYvlVYU2ZP7kG9Ft6YV6oc9ambuO7nPZh+bvXH0zDKfi02prknrScAKC0XhadTHT3Al0QIDAQAB'
    der = base64.standard_b64decode(b64der)

    pk = rsa.PublicKey.load_pkcs1_openssl_der(der)
    v1 = rsa.encrypt(bytes(text, 'utf8'), pk)
    value = base64.encodebytes(v1).replace(b'\n', b'')
    value = value.decode('utf8')

    return value


session = requests.Session()

i1 = session.get('https://passport.cnblogs.com/user/signin')
rep = re.compile("'VerificationToken': '(.*)'")
v = re.search(rep, i1.text)
verification_token = v.group(1)

form_data = {
    'input1': js_encrypt('wptawy'),
    'input2': js_encrypt('asdfasdf'),
    'remember': False
}

i2 = session.post(url='https://passport.cnblogs.com/user/signin',
                  data=json.dumps(form_data),
                  headers={
                      'Content-Type': 'application/json; charset=UTF-8',
                      'X-Requested-With': 'XMLHttpRequest',
                      'VerificationToken': verification_token}
                  )

i3 = session.get(url='https://i.cnblogs.com/EditDiary.aspx')

print(i3.text)
Lagou
#!/usr/bin/env python
# -*- coding:utf-8 -*-

import re

import requests


# Step 1: visit the login page and grab X_Anti_Forge_Token and X_Anti_Forge_Code
# 1. Request URL: https://passport.lagou.com/login/login.html
# 2. Method: GET
# 3. Request headers:
#    User-Agent
r1 = requests.get('https://passport.lagou.com/login/login.html',
                  headers={
                      'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
                  },
                  )

X_Anti_Forge_Token = re.findall("X_Anti_Forge_Token = '(.*?)'", r1.text, re.S)[0]
X_Anti_Forge_Code = re.findall("X_Anti_Forge_Code = '(.*?)'", r1.text, re.S)[0]
print(X_Anti_Forge_Token, X_Anti_Forge_Code)
# print(r1.cookies.get_dict())
# Step 2: log in
# 1. Request URL: https://passport.lagou.com/login/login.json
# 2. Method: POST
# 3. Request headers:
#    cookie
#    User-Agent
#    Referer: https://passport.lagou.com/login/login.html
#    X-Anit-Forge-Code: 53165984
#    X-Anit-Forge-Token: 3b6a2f62-80f0-428b-8efb-ef72fc100d78
#    X-Requested-With: XMLHttpRequest
# 4. Request body:
#    isValidate: true
#    username: 15131252215
#    password: ab18d270d7126ea65915c50288c22c0d
#    request_form_verifyCode: ''
#    submit: ''
r2 = requests.post(
    'https://passport.lagou.com/login/login.json',
    headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        'Referer': 'https://passport.lagou.com/login/login.html',
        'X-Anit-Forge-Code': X_Anti_Forge_Code,
        'X-Anit-Forge-Token': X_Anti_Forge_Token,
        'X-Requested-With': 'XMLHttpRequest'
    },
    data={
        "isValidate": True,
        'username': '15131255089',
        'password': 'ab18d270d7126ea65915c50288c22c0d',
        'request_form_verifyCode': '',
        'submit': ''
    },
    cookies=r1.cookies.get_dict()
)
print(r2.text)