Python Crawler Study Notes (3) — Distributed Crawling

Yanwei Liu
Mar 30, 2019


Imports and Basic Setup

import multiprocessing as mp
import time
from urllib.request import urlopen, urljoin
from bs4 import BeautifulSoup
import re

base_url = "http://127.0.0.1:4000/"

# DON'T OVER CRAWL THE WEBSITE OR YOU MAY NEVER VISIT AGAIN
if base_url != "http://127.0.0.1:4000/":
    restricted_crawl = True
else:
    restricted_crawl = False
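
One note before the multiprocessing version below: on Windows, and on macOS where Python 3.8+ defaults to the "spawn" start method, pool code must live under a main guard, otherwise every worker process re-imports and re-executes the module. A minimal sketch; main() here is a hypothetical wrapper around the crawl loop shown later:

def main():
    ...  # the distributed crawl loop from the last section goes here

if __name__ == '__main__':
    main()  # required on spawn-based platforms so workers don't re-run the module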

Define the Crawl and Parse Functions

def crawl(url):
    response = urlopen(url)
    time.sleep(0.1)  # pause 0.1 s to throttle requests
    return response.read().decode()


def parse(html):
    soup = BeautifulSoup(html, 'lxml')
    urls = soup.find_all('a', {"href": re.compile('^/.+?/$')})
    title = soup.find('h1').get_text().strip()
    page_urls = set([urljoin(base_url, url['href']) for url in urls])  # deduplicate
    url = soup.find('meta', {'property': "og:url"})['content']
    return title, page_urls, url
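
A quick smoke test of the two helpers, assuming the test site is being served locally at base_url; it fetches the front page and prints what parse() extracts:

html = crawl(base_url)
title, page_urls, url = parse(html)
print(title)                    # text of the page's <h1>
print(len(page_urls), 'links')  # deduplicated internal links
print(url)                      # canonical URL from the og:url meta tag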

Sequential Crawling

# track crawled URLs (seen) and URLs still to crawl (unseen)
unseen = set([base_url,])
seen = set()

count, t1 = 1, time.time()

while len(unseen) != 0:  # still have URLs to visit
    if restricted_crawl and len(seen) > 20:
        break

    print('\nCrawling...')
    htmls = [crawl(url) for url in unseen]

    print('\nParsing...')
    results = [parse(html) for html in htmls]

    print('\nAnalysing...')
    seen.update(unseen)  # mark the just-crawled URLs as seen
    unseen.clear()       # nothing left unseen

    for title, page_urls, url in results:
        print(count, title, url)
        count += 1
        unseen.update(page_urls - seen)  # queue newly found URLs

print('Total time: %.1f s' % (time.time()-t1, ))
This run takes 52.3 seconds.

Distributed Crawling

unseen = set([base_url,])
seen = set()

pool = mp.Pool(4)
count, t1 = 1, time.time()

while len(unseen) != 0:
    if restricted_crawl and len(seen) > 20:
        break

    print('\nDistributed Crawling...')
    crawl_jobs = [pool.apply_async(crawl, args=(url,)) for url in unseen]
    htmls = [j.get() for j in crawl_jobs]  # collect results from the workers

    print('\nDistributed Parsing...')
    parse_jobs = [pool.apply_async(parse, args=(html,)) for html in htmls]
    results = [j.get() for j in parse_jobs]

    print('\nAnalysing...')
    seen.update(unseen)  # mark the just-crawled URLs as seen
    unseen.clear()       # nothing left unseen

    for title, page_urls, url in results:
        print(count, title, url)
        count += 1
        unseen.update(page_urls - seen)  # queue newly found URLs

print('Total time: %.1f s' % (time.time()-t1, ))
This run takes 16 seconds.
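
Since the crawler spends most of its time waiting on the network rather than computing, a thread pool often gives a comparable speedup without pickling HTML between processes. A minimal sketch using multiprocessing.pool.ThreadPool, which exposes the same apply_async API and drops into the loop above unchanged:

from multiprocessing.pool import ThreadPool

pool = ThreadPool(4)  # threads instead of processes; same interface as mp.Pool
crawl_jobs = [pool.apply_async(crawl, args=(url,)) for url in unseen]
htmls = [j.get() for j in crawl_jobs]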
