

Thread-Pool Multi-threaded Crawlers in PHP and Python: Detailed Examples

This article describes thread-pool multi-threaded crawlers implemented in PHP and in Python, walking through a complete working example of each. Readers who need this functionality can use them for reference.

A multi-threaded crawler fetches pages concurrently, which improves crawl throughput considerably. Below are thread-pool crawler examples in PHP and in Python.

PHP example (using the pthreads extension):

<?php
class Connect extends Worker // pthreads Worker: one instance per pool thread
{
    public function __construct()
    {
    }

    public function getConnection()
    {
        if (!self::$ch) {
            self::$ch = curl_init();
            curl_setopt(self::$ch, CURLOPT_TIMEOUT, 2);
            curl_setopt(self::$ch, CURLOPT_RETURNTRANSFER, 1);
            curl_setopt(self::$ch, CURLOPT_HEADER, 0);
            curl_setopt(self::$ch, CURLOPT_NOSIGNAL, true);
            curl_setopt(self::$ch, CURLOPT_USERAGENT, "Firefox");
            curl_setopt(self::$ch, CURLOPT_FOLLOWLOCATION, 1);
        }
        /* do some exception/error stuff here maybe */
        return self::$ch;
    }

    public function closeConnection()
    {
        curl_close(self::$ch);
    }

    /**
     * Note that the handle is stored statically, which for pthreads means thread-local.
     */
    protected static $ch;
}

class Query extends Threaded
{
    public function __construct($url)
    {
        $this->url = $url;
    }

    public function run()
    {
        $ch = $this->worker->getConnection();
        curl_setopt($ch, CURLOPT_URL, $this->url);
        $page  = curl_exec($ch);
        $info  = curl_getinfo($ch);
        $error = curl_error($ch);
        $this->deal_data($this->url, $page, $info, $error);
        $this->result = $page;
    }

    function deal_data($url, $page, $info, $error)
    {
        $parts = explode(".", $url);
        $id = $parts[1];
        if ($info['http_code'] != 200) {
            $this->show_msg($id, $error);
        } else {
            $this->show_msg($id, "OK");
        }
    }

    function show_msg($id, $msg)
    {
        echo $id . "\t$msg\n";
    }

    public function getResult()
    {
        return $this->result;
    }

    protected $url;
    protected $result;
}

function check_urls_multi_pthreads()
{
    global $check_urls; // the URLs to crawl
    $check_urls = array(
        'http://xxx.com' => "xx site",
    );
    $pool = new Pool(10, "Connect", array()); // pool of 10 worker threads
    foreach ($check_urls as $url => $name) {
        $pool->submit(new Query($url));
    }
    $pool->shutdown();
}

check_urls_multi_pthreads();

Python multi-threading (one thread per task, no pool):

from threading import Thread

def handle(sid):
    # fetch and process the data for this id
    pass

class MyThread(Thread):
    """Worker thread that processes a single id."""
    def __init__(self, sid):
        Thread.__init__(self)
        self.sid = sid

    def run(self):
        handle(self.sid)

threads = []
for i in range(1, 11):
    t = MyThread(i)
    threads.append(t)
    t.start()

for t in threads:
    t.join()
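On Python 3.2 and later, the standard library's concurrent.futures.ThreadPoolExecutor provides the same pool-of-workers pattern without a hand-rolled Thread subclass. Below is a minimal sketch; the urls list and the fetch() helper are illustrative placeholders, not part of the original examples.

# Minimal thread-pool sketch with concurrent.futures (Python 3.2+).
# The urls list and fetch() helper are placeholders for illustration.
from concurrent.futures import ThreadPoolExecutor, as_completed
import urllib.request

urls = ['http://example.com/', 'http://example.org/']  # placeholder URLs

def fetch(url):
    # Download one page and return its URL and body size.
    with urllib.request.urlopen(url, timeout=5) as resp:
        return url, len(resp.read())

with ThreadPoolExecutor(max_workers=10) as pool:  # 10 threads, like the PHP Pool above
    futures = [pool.submit(fetch, u) for u in urls]
    for future in as_completed(futures):
        try:
            url, size = future.result()
            print('{}\t{} bytes'.format(url, size))
        except Exception as exc:
            print('error: {}'.format(exc))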

Python thread-pool crawler:

from queue import Queue
from threading import Thread, Lock
import urllib.parse
import socket
import re
import time

seen_urls = set(['/'])
lock = Lock()

class Fetcher(Thread):
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True  # daemon threads exit with the main program
        self.start()

    def run(self):
        while True:
            url = self.tasks.get()
            print(url)
            # Fetch the page over a raw socket from the local test server
            sock = socket.socket()
            sock.connect(('localhost', 3000))
            get = 'GET {} HTTP/1.0\r\nHost: localhost\r\n\r\n'.format(url)
            sock.sendall(get.encode('ascii'))
            response = b''
            chunk = sock.recv(4096)
            while chunk:
                response += chunk
                chunk = sock.recv(4096)
            links = self.parse_links(url, response)
            with lock:  # guard the shared seen_urls set
                for link in links.difference(seen_urls):
                    self.tasks.put(link)
                seen_urls.update(links)
            self.tasks.task_done()

    def parse_links(self, fetched_url, response):
        if not response:
            print('error: {}'.format(fetched_url))
            return set()
        if not self._is_html(response):
            return set()
        urls = set(re.findall(r'''(?i)href=["']?([^\s"'<>]+)''',
                              self.body(response)))
        links = set()
        for url in urls:
            normalized = urllib.parse.urljoin(fetched_url, url)
            parts = urllib.parse.urlparse(normalized)
            if parts.scheme not in ('', 'http', 'https'):
                continue
            host = parts.hostname
            if host and host != 'localhost':  # only follow local links
                continue
            defragmented, frag = urllib.parse.urldefrag(parts.path)
            links.add(defragmented)
        return links

    def body(self, response):
        body = response.split(b'\r\n\r\n', 1)[1]
        return body.decode('utf-8')

    def _is_html(self, response):
        head, body = response.split(b'\r\n\r\n', 1)
        headers = dict(h.split(': ') for h in head.decode().split('\r\n')[1:])
        return headers.get('Content-Type', '').startswith('text/html')

class ThreadPool:
    def __init__(self, num_threads):
        self.tasks = Queue()
        for _ in range(num_threads):
            Fetcher(self.tasks)

    def add_task(self, url):
        self.tasks.put(url)

    def wait_completion(self):
        self.tasks.join()

if __name__ == '__main__':
    start = time.time()
    pool = ThreadPool(4)
    pool.add_task("/")
    pool.wait_completion()
    print('{} URLs fetched in {:.1f} seconds'.format(
        len(seen_urls), time.time() - start))
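Note that the crawler hardcodes localhost:3000 as its target, so it needs an HTTP server running there before it will fetch anything. One quick way to stand one up for testing (this helper is an assumption for illustration, not part of the original article) is Python's built-in http.server:

# Hypothetical test server: serves the current directory on localhost:3000
# so the Fetcher threads above have pages to crawl.
from http.server import HTTPServer, SimpleHTTPRequestHandler

if __name__ == '__main__':
    HTTPServer(('localhost', 3000), SimpleHTTPRequestHandler).serve_forever()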
