When I run the spider below, the crawl stops at the first URL:

spider_main.py:

```python
# coding:utf8
from baike_spider import url_manager, html_downloader, html_parser, html_outputer


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print("craw %d : %s \n" % (count, new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 1000:
                    break
                count += 1
            except:
                print('craw failed')
        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "https://baike.baidu.com/item/Python/407313"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
```

url_manager.py:

```python
# coding:utf8


class UrlManager(object):
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
```

html_downloader.py:

```python
# coding:utf8
import urllib.request


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()
```

html_parser.py:

```python
# coding:utf8
from bs4 import BeautifulSoup
import re
from urllib.parse import urlparse
# from urllib.parse import urljoin


class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # /item/123.htm
        links = soup.find_all('a', href=re.compile(r"/item/\d+\.htm"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)  # @UndefinedVariable
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        # url
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find("div", class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
```

html_outputer.py:

```python
# coding:utf8


class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')
        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")
        # ascii
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
```
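One quick check before changing anything: the bare `except:` in `craw` prints only "craw failed" and throws the exception away, so whatever breaks inside `parse` stays invisible and the URL queue never grows. A minimal, runnable sketch of the same pattern with the error surfaced (`flaky_parse` is a hypothetical stand-in for `self.parser.parse(...)`, not part of the posted code):

```python
# coding:utf8
# Catch Exception and print it instead of using a bare `except:`.

def flaky_parse():
    # hypothetical stand-in for self.parser.parse(new_url, html_cont)
    raise AttributeError("'function' object has no attribute 'urljoin'")

try:
    flaky_parse()
except Exception as e:         # instead of `except:`
    print('craw failed:', e)   # the real cause is now printed
```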
1 Answer
陌上点点
Emm... if only one entry is being output, then it's a loop problem. Under `for data in self.datas:` add a line `urldata = data`, then change the `data` references below it to `urldata`.
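Renaming the loop variable in `output_html` only touches the table-writing step, not how many pages get crawled, so it is unlikely to get past the first URL. In the code as posted, a more probable culprit is the import in `html_parser.py`: `from urllib.parse import urlparse` binds the `urlparse` function, so `urlparse.urljoin(...)` raises `AttributeError` on the first page; the bare `except` in `craw` swallows it, and no new URLs are ever queued. A sketch of the corrected call (`build_full_url` is a hypothetical helper mirroring the line in `_get_new_urls`):

```python
# coding:utf8
# urljoin lives at module level in urllib.parse; it is not an attribute
# of the urlparse() function that the posted code imports.
from urllib.parse import urljoin

def build_full_url(page_url, href):
    # hypothetical helper mirroring _get_new_urls in html_parser.py
    return urljoin(page_url, href)

print(build_full_url("https://baike.baidu.com/item/Python/407313", "/item/123.htm"))
# -> https://baike.baidu.com/item/123.htm
```

Note also that current Baidu Baike pages tend to link as `/item/<name>` rather than `/item/<digits>.htm`, so the regex `r"/item/\d+\.htm"` may match nothing even after the import is fixed.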
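Separately, once the crawl runs, `html_outputer.py` has a Python 3 pitfall: `"%s" % data['title'].encode('utf-8')` formats a bytes object, so the table cells come out as `b'...'` literals, and `open('output.html', 'w')` falls back to the platform default encoding. A sketch of the writing step with an explicit encoding and no manual `encode` calls (the sample `datas` row is made up for the demo):

```python
# coding:utf8
# In Python 3, str is already Unicode: write it directly and let the file
# handle encode to UTF-8, instead of %-formatting bytes into the HTML.
datas = [{'url': 'https://baike.baidu.com/item/Python/407313',
          'title': 'Python', 'summary': '...'}]  # made-up sample row

with open('output.html', 'w', encoding='utf-8') as fout:
    fout.write("<html><head><meta charset='utf-8'></head><body><table>")
    for data in datas:
        fout.write("<tr>")
        fout.write("<td>%s</td>" % data['url'])
        fout.write("<td>%s</td>" % data['title'])    # no .encode('utf-8')
        fout.write("<td>%s</td>" % data['summary'])
        fout.write("</tr>")
    fout.write("</table></body></html>")
```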