After modifying the tag-matching rules I tried crawling Sina blog (blog.sina.com.cn), but it failed after crawling only one page.
After some debugging it looks like html_parser is not returning the urls and data, but I can't figure out where it goes wrong. Source code attached.
import re
import urlparse

from bs4 import BeautifulSoup


class HtmlParser(object):

    def _get_new_urls(self, page_url, soup):
        # Collect links that look like article URLs, e.g.
        # href="http://blog.sina.com.cn/s/blog_b4474abf0102wmo4.html"
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r'/s/blog_\w+\.html'))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        # url
        res_data['url'] = page_url
        # title, e.g. <h2 class="titName SG_txta" id="t_b4474abf0102wmo4">ZUK Z2</h2>
        title_node = soup.find('div', class_="articalTitle").find("h2")
        res_data['title'] = title_node.get_text()
        # summary, e.g. <div class="articalContent newfont_family" id="sina_keyword_ad_area2"><div>
        summary_node = soup.find('div', class_="articalContent newfont_family").find("p")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
The output after running is:
craw 1 : http://blog.sina.com.cn/s/blog_b4474abf0102wmo4.html
craw failed
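
Since "craw failed" most likely comes from a bare try/except around the craw step in the main loop (as in the common tutorial template; that part isn't shown above), a minimal sketch for surfacing the real exception is to download one page and run the parser outside the crawler loop. The module name html_parser and the use of urllib2 here are assumptions, adjust them to match the project:

import traceback
import urllib2

from html_parser import HtmlParser  # assumed module name for the class above


def debug_parse(url):
    # Download the page directly and run the parser by itself,
    # so the actual exception is printed instead of being swallowed.
    html_cont = urllib2.urlopen(url).read()
    parser = HtmlParser()
    try:
        new_urls, new_data = parser.parse(url, html_cont)
        print 'urls:', len(new_urls)
        print 'data:', new_data
    except Exception:
        traceback.print_exc()


debug_parse('http://blog.sina.com.cn/s/blog_b4474abf0102wmo4.html')

If the traceback points at _get_new_data, the likely cause is one of the soup.find(...) calls returning None for that page.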