def get_new_urls(self, page_url, soup):
    """Collect absolute URLs of encyclopedia entry links found in *soup*.

    page_url: URL of the page currently being parsed; relative hrefs are
              resolved against it.
    soup:     parsed document (BeautifulSoup or anything with a compatible
              ``find_all``).
    Returns a set of absolute URL strings (deduplicated by the set).
    """
    from urllib.parse import urljoin  # local import keeps the method self-contained

    new_urls = set()
    # Entry links look like /item/<word chars>, e.g. /item/Python
    # (the original comment mentioned the older /view/FLOSS.htm style,
    # but the pattern below matches the current /item/ form).
    links = soup.find_all('a', href=re.compile(r'/item/\w+'))
    for link in links:
        # Resolve the (possibly relative) href against the current page URL.
        new_urls.add(urljoin(page_url, link['href']))
    return new_urls
# NOTE(review): duplicated, truncated paste of the get_new_urls body above.
# The "…………" lines stand in for code elided when this page was scraped,
# so this fragment is NOT valid Python as-is.
new_urls = set()
# regex match: /view/FLOSS.htm (original comment; the pattern below actually targets /item/... links)
links = soup.find_all('a', href=re.compile(r'/item/\w+'))
for link in links:
…………
…………
return new_urls
2017-05-06
这个视频讲的是初学爬虫又不是初学 Python ,还说新手听不懂,对 Python 都没什么了解就先去看看 Python 入门好了。。。循序渐进不好么
2017-05-06
不得不说讲师的思路很清晰,我只学习了python的基础知识就完全能听懂了。但是教程似乎是python2的不是3的。希望讲师继续出新的教程。
2017-05-06
@弃F忆
python -m pip install --upgrade pip 升级pip
python -m pip install beautifulsoup4
按这个做法可以
python -m pip install --upgrade pip 升级pip
python -m pip install beautifulsoup4
按这个做法可以
2017-05-05
print('Third Method')
# Cookie-aware fetch: install a global opener whose HTTPCookieProcessor
# stores any cookies the server sets into a CookieJar.
cookie_jar = http.cookiejar.CookieJar()
cookie_opener = request.build_opener(request.HTTPCookieProcessor(cookie_jar))
request.install_opener(cookie_opener)
resp = request.urlopen(url)
print(resp.getcode())   # HTTP status code
print(cookie_jar)       # cookies collected during the request
print(resp.read().decode("utf-8"))
# Duplicate of the cookie-based fetch above (repeated in the pasted source).
jar = http.cookiejar.CookieJar()
cookie_handler = request.HTTPCookieProcessor(jar)
request.install_opener(request.build_opener(cookie_handler))
response3 = request.urlopen(url)
print(response3.getcode())
print(jar)
print(response3.read().decode("utf-8"))
2017-05-05
print('Second Method')
# Send a browser-like User-Agent by building an explicit Request object
# before opening the URL.
ua_request = request.Request(url)
ua_request.add_header('user-agent', 'Mozilla/5.0')
resp2 = request.urlopen(ua_request)
print(resp2.getcode())
print(len(resp2.read()))   # response body length in bytes
# Duplicate of the User-Agent fetch above (repeated in the pasted source).
spoofed = request.Request(url)
spoofed.add_header('user-agent', 'Mozilla/5.0')
response2 = request.urlopen(spoofed)
print(response2.getcode())
print(len(response2.read()))
2017-05-05
from urllib import request
import http.cookiejar

# Simplest fetch: open the URL directly — no headers, no cookie handling.
url = 'http://www.baidu.com'
print('First Method')
plain_response = request.urlopen(url)
print(plain_response.getcode())
print(len(plain_response.read()))   # response body length in bytes
import http.cookiejar

# Duplicate of the plain fetch above (repeated in the pasted source).
url = 'http://www.baidu.com'
print('First Method')
response_a = request.urlopen(url)
print(response_a.getcode())
print(len(response_a.read()))
2017-05-05