from bs4 import BeautifulSoup
from selenium import webdriver
import time
import sys

query_txt = input("크롤링할 내용 입력 :")

path = "C:\Temp\chromedriver_240\chromedriver.exe"
driver = webdriver.Chrome(path)

driver.get("https://www.naver.com")
time.sleep(2)

driver.find_element_by_id("query").send_keys(query_txt)
driver.find_element_by_id("search_btn").click()
driver.find_element_by_link_text("블로그 더보기").click()

full_html = driver.page_source
soup = BeautifulSoup(full_html, 'html.parser')

content_list = soup.find('ul', id='elThumbnailResultArea')
print(content_list)

content = content_list.find('a', 'sh_blog_title _sp_each_url _sp_each_title').get_text()
print(content)

for i in content_list:
    con = i.find('a', class_='sh_blog_title _sp_each_url _sp_each_title').get_text()
    print(con)
    print('\n')

I typed this code in while following an online course, but it keeps failing inside the loop. The line con = i.find('a', class_='sh_blog_title _sp_each_url _sp_each_title').get_text() raises the error 'find() takes no keyword arguments'.
2 Answers
慕桂英3389331
The problem is that you need .find_all() to collect all the <a> tags; .find() only ever returns a single tag (if one exists). When you loop over the single <ul> tag that .find() returned, you iterate over its children, and some of those children are plain text strings whose find() is the ordinary string method, which is why you see 'find() takes no keyword arguments':
import requests
from bs4 import BeautifulSoup
url = 'https://search.naver.com/search.naver?query=tree&where=post&sm=tab_nmr&nso='
full_html = requests.get(url).content
soup = BeautifulSoup(full_html, 'html.parser')
content_list = soup.find_all('a', class_='sh_blog_title _sp_each_url _sp_each_title')
for i in content_list:
    print(i.text)
    print('\n')
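
If you would rather keep the Selenium flow from your original script instead of switching to requests, the same fix applies to the HTML you already pull from driver.page_source. A minimal sketch, continuing from your own variable names and assuming the class names in your question are still what Naver serves:

full_html = driver.page_source
soup = BeautifulSoup(full_html, 'html.parser')

# find_all() returns a list of matching <a> tags, so every item in the loop is a Tag
for a_tag in soup.find_all('a', class_='sh_blog_title _sp_each_url _sp_each_title'):
    print(a_tag.get_text())
    print('\n')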