-
urllib2: add handlers for special scenarios
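A minimal sketch of what such a handler setup can look like, assuming a cookie-carrying opener plus a proxy (the proxy address is illustrative, not part of the course code):

import urllib2
import cookielib

# build an opener with extra handlers: cookies and an (illustrative) HTTP proxy
cj = cookielib.CookieJar()
opener = urllib2.build_opener(
    urllib2.HTTPCookieProcessor(cj),
    urllib2.ProxyHandler({'http': 'http://127.0.0.1:8087'})  # hypothetical proxy address
)
urllib2.install_opener(opener)  # from now on urllib2.urlopen goes through this opener
response = urllib2.urlopen('http://www.baidu.com')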
-
Create a BeautifulSoup object
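For reference, creating the object from a downloaded page usually looks like this (html_doc is a placeholder for the HTML string returned by the downloader):

from bs4 import BeautifulSoup

soup = BeautifulSoup(html_doc,               # the HTML document to parse (placeholder name)
                     'html.parser',          # use Python's built-in parser
                     from_encoding='utf-8')  # encoding of the source document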
-
BeautifulSoup
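Continuing from the soup object above, a hedged sketch of how nodes are typically searched and read (the link pattern mirrors the parser code further down):

import re

# find every link whose href matches /view/<digits>.htm
links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
for link in links:
    print link.name, link['href'], link.get_text()

# find a single node by tag name and CSS class
summary_node = soup.find('div', class_="lemma-summary")
print summary_node.get_text()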
-
Method 1:

import urllib2

response = urllib2.urlopen('http://www.baidu.com')
# get the status code
print response.getcode()
# read the content
cont = response.read()
-
Example code
-
Statically loaded content that does not require login
-
Some pages load their content asynchronously via Ajax
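The notes show no code for this case; one common approach (an assumption, not taken from the course) is to request the Ajax endpoint directly and decode its JSON response:

import json
import urllib2

# hypothetical Ajax endpoint; find the real one in the browser's network panel
ajax_url = 'http://example.com/api/items?page=1'
response = urllib2.urlopen(ajax_url)
data = json.loads(response.read())  # the endpoint returns JSON instead of HTML
print data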
-
Some pages can only be accessed after the user logs in
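A hedged sketch of how such pages are often fetched with urllib2: keep cookies in an opener, POST the login form, then request the protected page (the URLs and form fields here are hypothetical):

import urllib
import urllib2
import cookielib

cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))

# hypothetical login endpoint and form fields
login_data = urllib.urlencode({'username': 'user', 'password': 'pass'})
opener.open('http://example.com/login', login_data)  # POST; the session cookie lands in cj

# later requests through the same opener carry the login cookie
html = opener.open('http://example.com/protected_page').read()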
-
Web page downloader
-
Crawler architecture
-
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []  # list of collected data dicts

    # collect one parsed data record
    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    # write the collected data out as an HTML table
    def output_html(self):
        fout = open('output.html', 'w')  # write the result to output.html, 'w' = write mode
        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")
        # the file is written as ASCII, so encode the text fields as UTF-8
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data["url"])
            fout.write("<td>%s</td>" % data["title"].encode("UTF-8"))
            fout.write("<td>%s</td>" % data["summary"].encode("UTF-8"))
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
-
from bs4 import BeautifulSoup
import re
import urlparse


class HtmlParser(object):
    # collect links to other entry pages (/view/<digits>.htm) found on this page
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)  # make the relative URL absolute
            new_urls.add(new_full_url)
        return new_urls

    # extract the title and summary of the current page
    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>...</h1></dd>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary">...</div>
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
-
import urllib2


class HtmlDownloader(object):  # download the content of a web page
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:  # check whether the request succeeded
            return None
        return response.read()
-
class UrlManager(object):
    def __init__(self):  # initialize the two URL sets
        self.new_urls = set()
        self.old_urls = set()

    # add a single new URL to the manager
    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    # add a batch of new URLs to the manager
    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)  # reuse the single-URL method

    # check whether the manager still holds URLs waiting to be crawled
    def has_new_url(self):
        return len(self.new_urls) != 0

    # hand out one URL to crawl next
    def get_new_url(self):
        new_url = self.new_urls.pop()  # return one URL and remove it from new_urls
        self.old_urls.add(new_url)     # remember it in old_urls
        return new_url
-
from baike_spider import url_manager, html_downloader, html_parser, \
    html_outputer


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():          # while there are URLs waiting to be crawled
            new_url = self.urls.get_new_url()   # take one out
            print 'craw %d: %s' % (count, new_url)
            html_cont = self.downloader.download(new_url)
            new_urls, new_data = self.parser.parse(new_url, html_cont)
            self.urls.add_new_urls(new_urls)
            self.outputer.collect_data(new_data)
            count = count + 1
            if count == 1000:                   # stop after 1000 pages
                break
        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "http://baike.baidu.com/view/21087.htm"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)