from collections import Counter
import requests
from bs4 import BeautifulSoup
from urllib import request
import urllib.parse
url = 'http://www.baidu.com.cn/s?wd=' + urllib.parse.quote('BeautifulSoup') + '&pn='  # wd is the search keyword; pn is the offset Baidu uses for pagination
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
    'Connection': 'keep-alive',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text.replace('<b>', '').replace('</b>', ''), 'lxml')
print(soup.find(id="3").find('a', 'c-showurl').string)
#for i in range(1, 11):
#    print(soup.find(id=i).find('a', 'c-showurl').string)

The problem is mainly in the last three lines: find(id="3") works fine, but find(id=i) throws an error. How can I fix this? Another question: why are the search results fetched through Python different from the results I get when searching on my computer?
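A likely cause, offered as a guess rather than a confirmed fix: HTML id attributes are strings, so find(id=i) with an integer i does not match id="1" in the page; find() then returns None and the chained .find('a', 'c-showurl') raises AttributeError. A minimal sketch of the loop using str(i) plus a None check (names follow the code above):

# Sketch: pass the id as a string so it matches the id="..." attribute,
# and skip result blocks that are missing from the fetched page.
for i in range(1, 11):
    result = soup.find(id=str(i))
    if result is None:
        continue  # the page served to a script may not contain this block
    link = result.find('a', 'c-showurl')
    if link is not None:
        print(link.string)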