问题
I am trying to learn how to use the requests module so I don't need to scrape with Selenium.
This is the code I have so far; it prints a table from a webpage.
I can't figure out how I could use requests to make this code faster and more Pythonic.
#my imports
import pandas as pd
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException, NoSuchElementException,StaleElementReferenceException
from datetime import datetime
import untangle,xmltodict,glob, os, csv, time
from openpyxl import load_workbook
# Filesystem locations referenced later in the script.
path = r"G:\Meu Drive\Balanços\\"
pathxml = r"C:\Users\GuilhermeMachado\Documents\XML\\"

# Widen pandas' display limits so the scraped table prints in full.
for opt_name, opt_value in [
    ('display.max_rows', 500),
    ('display.max_columns', 500),
    ('display.width', 1000),
]:
    pd.set_option(opt_name, opt_value)

# Chrome setup: no info bars or extensions, incognito, and silent
# downloads straight into pathxml.
options = webdriver.ChromeOptions()
for flag in ('disable-infobars', "--disable-extensions", "--incognito"):
    options.add_argument(flag)
options.add_experimental_option("prefs", {
    "download.default_directory": pathxml,
    "download.prompt_for_download": False,
    "download.directory_upgrade": True,
    "safebrowsing.enabled": True
})

driver = webdriver.Chrome(options=options)
driver.implicitly_wait(2)

# Page that hosts the table of interest.
driver.get('http://www.b3.com.br/pt_br/produtos-e-servicos/negociacao/renda-variavel/fundos-de-investimentos/fii/')
#driver.maximize_window()
time.sleep(2)

# The data lives inside an iframe: wait for it, then switch context into it.
wait = WebDriverWait(driver, 20)
frame = wait.until(EC.presence_of_element_located((By.ID, 'bvmf_iframe')))
driver.switch_to.frame(frame)

# Block until the table we want to scrape is present in the DOM.
wait.until(
    EC.presence_of_element_located((By.XPATH, '//table[@class="responsive"]')))
time.sleep(2)

# Let pandas parse every <table> in the rendered page; the first one is ours.
df = pd.read_html(driver.page_source)[0]
print(df)
回答1:
If you use requests, you won't get the data in the iframe. Which is what you seem to be looking for. When looking at your page, I can see the iframe table is referenced by the following URL http://bvmf.bmfbovespa.com.br/Fundos-Listados/FundosListados.aspx?tipoFundo=imobiliario&Idioma=pt-br
If that URL remains constant, you can use requests to fetch its content as shown below. Then you can use BeautifulSoup to parse the table.
from urllib.request import Request, urlopen

# Fetch the iframe's URL directly and dump the raw HTML response.
req = Request("http://bvmf.bmfbovespa.com.br/Fundos-Listados/FundosListados.aspx?tipoFundo=imobiliario&Idioma=pt-br")
# Use a context manager so the HTTP response is always closed
# (the original urlopen(...).read() leaked the connection).
with urlopen(req) as response:
    content = response.read()
print(content)
Edit: Alternatively, you can still request the main page. Use BeautifulSoup to find the iframe and read its src attribute, which gives you that new link. Then use another request to get the table data.
Edit2: Simple solution
from urllib.request import Request, urlopen
from urllib.parse import urljoin
from bs4 import BeautifulSoup

# Fetch the outer page and locate the iframe that hosts the table.
main_url = "http://www.b3.com.br/pt_br/produtos-e-servicos/negociacao/renda-variavel/fundos-de-investimentos/fii/"
with urlopen(Request(main_url)) as response:
    soup = BeautifulSoup(response.read(), 'html.parser')

iframe = soup.find('iframe', id='bvmf_iframe')
# Fail with a clear message instead of an opaque TypeError when the
# iframe is missing or has no src.
if iframe is None or not iframe.get('src'):
    raise RuntimeError('iframe "bvmf_iframe" not found on the page')

# The src attribute may be a relative URL; resolve it against the page URL.
iframe_url = urljoin(main_url, iframe['src'])
with urlopen(Request(iframe_url)) as response:
    soup = BeautifulSoup(response.read(), 'html.parser')

table = soup.find('table')
print(table)
回答2:
Here's an example of getting the table data using requests and BeautifulSoup. Note that neither is part of standard library and they need to be installed (with pip, for example).
import requests
from bs4 import BeautifulSoup

url = 'http://bvmf.bmfbovespa.com.br/Fundos-Listados/FundosListados.aspx?tipoFundo=imobiliario&Idioma=pt-br'
# Always pass a timeout: requests waits forever by default.
r = requests.get(url, timeout=30)
# Fail loudly on HTTP errors instead of silently parsing an error page.
r.raise_for_status()
# Name the parser explicitly; otherwise bs4 emits GuessedAtParserWarning
# and the result depends on which parsers happen to be installed.
soup = BeautifulSoup(r.content, 'html.parser')
table = soup.find('table')
rows = table.find_all('tr')
for row in rows:
    columns = row.find_all('td')
    if len(columns) == 4:  # skip header/malformed rows without 4 cells
        razao, fundo, segmento, codigo = columns
        print(razao.text)  # prints the company names
来源:https://stackoverflow.com/questions/61193148/from-selenium-to-requests