

Crawler Proxy Pool: Analyzing the Python3WebSpider Source Code Testing Process


This article analyzes the testing process for the proxy pool crawler source code from Python3WebSpider. The example code is explained in detail and should serve as a useful reference for study or work.

Using Metaclass Attributes

Code

This section mainly concerns the use of metaclasses.

The idea is to read certain attributes of the crawler class that the metaclass generates. Here those attributes are the crawl functions: every crawl function starts with the same prefix, and the metaclass collects their names into a list so they can be called one after another. The goal is that crawling a new site only requires adding another crawl function, while the rest of the class needs no changes.

Partial code:

class ProxyMetaclass(type):
  def __new__(cls, name, bases, attrs):
    count = 0
    attrs['__CrawlFunc__'] = []
    # collect the name of every method whose name contains 'crawl_'
    for k, v in attrs.items():
      if 'crawl_' in k:
        attrs['__CrawlFunc__'].append(k)
        count += 1
    attrs['__CrawlFuncCount__'] = count
    return type.__new__(cls, name, bases, attrs)


class Crawler(object, metaclass=ProxyMetaclass):
  def get_proxies(self, callback):
    # call the crawl method whose name was passed in as a string
    proxies = []
    for proxy in eval("self.{}()".format(callback)):
      print('Successfully fetched proxy', proxy)
      proxies.append(proxy)
    return proxies

  def crawl_daili66(self, page_count=4):
    """
    Fetch proxies from daili66
    :param page_count: number of pages
    :return: proxies
    """
    # get_page and pq (pyquery) are imported elsewhere; see the full test script below
    start_url = 'http://www.66ip.cn/{}.html'
    urls = [start_url.format(page) for page in range(1, page_count + 1)]
    for url in urls:
      print('Crawling', url)
      html = get_page(url)
      if html:
        doc = pq(html)
        trs = doc('.containerbox table tr:gt(0)').items()
        for tr in trs:
          ip = tr.find('td:nth-child(1)').text()
          port = tr.find('td:nth-child(2)').text()
          yield ':'.join([ip, port])
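To see the mechanism in isolation, here is a minimal, self-contained sketch that stubs out all network work. DemoCrawler, crawl_site_a, crawl_site_b and the addresses are made up for illustration and are not part of the project:

class ProxyMetaclass(type):
  def __new__(cls, name, bases, attrs):
    # gather the names of all crawl_-prefixed methods defined in the class body
    attrs['__CrawlFunc__'] = [k for k in attrs if 'crawl_' in k]
    attrs['__CrawlFuncCount__'] = len(attrs['__CrawlFunc__'])
    return type.__new__(cls, name, bases, attrs)


class DemoCrawler(object, metaclass=ProxyMetaclass):
  def crawl_site_a(self):
    # stub: a real implementation would fetch and parse a page here
    yield '1.2.3.4:8080'

  def crawl_site_b(self):
    yield '5.6.7.8:3128'


crawler = DemoCrawler()
print(crawler.__CrawlFunc__)       # ['crawl_site_a', 'crawl_site_b']
print(crawler.__CrawlFuncCount__)  # 2
for name in crawler.__CrawlFunc__:
  # getattr gives the same dynamic dispatch as eval("self.{}()".format(name))
  print(list(getattr(crawler, name)()))

Adding a third crawl_-prefixed method would automatically appear in __CrawlFunc__ with no other changes, which is exactly the extensibility the article describes. Note that getattr avoids evaluating a string as code, so it is the safer equivalent of the eval call used in the original listing.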

Test Method

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time  : 12/19/19 4:10 PM
# @Author : yon
# @Email  : @qq.com
# @File  : test

import json
import re
from pyquery import PyQuery as pq

# NOTE: this script also depends on a get_page() HTTP helper that the listing
# does not define; in the Python3WebSpider project it comes from a separate
# utils module. A sketch of a possible implementation follows this listing.


class ProxyMetaclass(type):
  def __new__(cls, name, bases, attrs):
    count = 0
    attrs['__CrawlFunc__'] = []
    for k, v in attrs.items():
      print("Printing k")
      print(k)
      print("Printing v")
      print(v)
      if 'crawl_' in k:
        attrs['__CrawlFunc__'].append(k)
        count += 1
    attrs['__CrawlFuncCount__'] = count
    return type.__new__(cls, name, bases, attrs)


class Crawler(object, metaclass=ProxyMetaclass):
  def get_proxies(self, callback):
    proxies = []
    for proxy in eval("self.{}()".format(callback)):
      print('Successfully fetched proxy', proxy)
      proxies.append(proxy)
    return proxies

  def crawl_daili66(self, page_count=4):
    """
    Fetch proxies from daili66
    :param page_count: number of pages
    :return: proxies
    """
    start_url = 'http://www.66ip.cn/{}.html'
    urls = [start_url.format(page) for page in range(1, page_count + 1)]
    for url in urls:
      print('Crawling', url)
      html = get_page(url)
      if html:
        doc = pq(html)
        trs = doc('.containerbox table tr:gt(0)').items()
        for tr in trs:
          ip = tr.find('td:nth-child(1)').text()
          port = tr.find('td:nth-child(2)').text()
          yield ':'.join([ip, port])

  def crawl_ip3366(self):
    for page in range(1, 4):
      start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
      html = get_page(start_url)
      # \s* matches the whitespace (including line breaks) between the tags
      ip_address = re.compile(r'<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
      re_ip_address = ip_address.findall(html)
      for address, port in re_ip_address:
        result = address + ':' + port
        yield result.replace(' ', '')

  def crawl_kuaidaili(self):
    for i in range(1, 4):
      start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
      html = get_page(start_url)
      if html:
        ip_address = re.compile('<td data-title="IP">(.*?)</td>')
        re_ip_address = ip_address.findall(html)
        port = re.compile('<td data-title="PORT">(.*?)</td>')
        re_port = port.findall(html)
        for address, port in zip(re_ip_address, re_port):
          address_port = address + ':' + port
          yield address_port.replace(' ', '')

  def crawl_xicidaili(self):
    for i in range(1, 3):
      start_url = 'http://www.xicidaili.com/nn/{}'.format(i)
      headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Cookie': '_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',
        'Host': 'www.xicidaili.com',
        'Referer': 'http://www.xicidaili.com/nn/3',
        'Upgrade-Insecure-Requests': '1',
      }
      html = get_page(start_url, options=headers)
      if html:
        find_trs = re.compile('<tr class.*?>(.*?)</tr>', re.S)
        trs = find_trs.findall(html)
        for tr in trs:
          find_ip = re.compile(r'<td>(\d+\.\d+\.\d+\.\d+)</td>')
          re_ip_address = find_ip.findall(tr)
          find_port = re.compile(r'<td>(\d+)</td>')
          re_port = find_port.findall(tr)
          for address, port in zip(re_ip_address, re_port):
            address_port = address + ':' + port
            yield address_port.replace(' ', '')

  # NOTE: the original listing defines crawl_ip3366 twice; this second
  # definition shadows the first one in the class body.
  def crawl_ip3366(self):
    for i in range(1, 4):
      start_url = 'http://www.ip3366.net/?stype=1&page={}'.format(i)
      html = get_page(start_url)
      if html:
        find_tr = re.compile('<tr>(.*?)</tr>', re.S)
        trs = find_tr.findall(html)
        for s in range(1, len(trs)):
          find_ip = re.compile(r'<td>(\d+\.\d+\.\d+\.\d+)</td>')
          re_ip_address = find_ip.findall(trs[s])
          find_port = re.compile(r'<td>(\d+)</td>')
          re_port = find_port.findall(trs[s])
          for address, port in zip(re_ip_address, re_port):
            address_port = address + ':' + port
            yield address_port.replace(' ', '')

  def crawl_iphai(self):
    start_url = 'http://www.iphai.com/'
    html = get_page(start_url)
    if html:
      find_tr = re.compile('<tr>(.*?)</tr>', re.S)
      trs = find_tr.findall(html)
      for s in range(1, len(trs)):
        find_ip = re.compile(r'<td>\s+(\d+\.\d+\.\d+\.\d+)\s+</td>', re.S)
        re_ip_address = find_ip.findall(trs[s])
        find_port = re.compile(r'<td>\s+(\d+)\s+</td>', re.S)
        re_port = find_port.findall(trs[s])
        for address, port in zip(re_ip_address, re_port):
          address_port = address + ':' + port
          yield address_port.replace(' ', '')

  def crawl_data5u(self):
    start_url = 'http://www.data5u.com/free/gngn/index.shtml'
    headers = {
      'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
      'Accept-Encoding': 'gzip, deflate',
      'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
      'Cache-Control': 'max-age=0',
      'Connection': 'keep-alive',
      'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',
      'Host': 'www.data5u.com',
      'Referer': 'http://www.data5u.com/free/index.shtml',
      'Upgrade-Insecure-Requests': '1',
      'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
    }
    html = get_page(start_url, options=headers)
    if html:
      ip_address = re.compile(r'<span><li>(\d+\.\d+\.\d+\.\d+)</li>.*?<li class="port.*?>(\d+)</li>', re.S)
      re_ip_address = ip_address.findall(html)
      for address, port in re_ip_address:
        result = address + ':' + port
        yield result.replace(' ', '')


class Getter():
  def __init__(self):
    self.crawler = Crawler()

  def run(self):
    print('Getter started')
    for callback_label in range(self.crawler.__CrawlFuncCount__):
      print(callback_label)
      callback = self.crawler.__CrawlFunc__[callback_label]
      print(callback)
      # # fetch the proxies
      # proxies = self.crawler.get_proxies(callback)
      # sys.stdout.flush()
      # for proxy in proxies:
      #   self.redis.add(proxy)


if __name__ == '__main__':
  get = Getter()
  get.run()
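The test script relies on a get_page() helper that the listing never defines; in the Python3WebSpider project it is provided by a separate utils module. Below is a minimal sketch of what such a helper could look like, built on requests. This is an assumption-based illustration; the exact implementation in the project may differ:

import requests

BASE_HEADERS = {
  'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) '
                'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36'
}


def get_page(url, options=None):
  """Fetch url and return its HTML text, or None on any failure.

  Sketch only: the real helper in the project may handle retries,
  encodings, or logging differently.
  """
  # merge per-site headers (e.g. Cookie, Referer) over the base headers
  headers = dict(BASE_HEADERS, **(options or {}))
  print('Fetching', url)
  try:
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code == 200:
      return response.text
  except requests.RequestException:
    print('Failed to fetch', url)
  return None

With such a helper on the import path, running the script prints every attribute name the metaclass inspects, followed by the index and name of each collected crawl function; the commented-out lines in Getter.run() show where the fetched proxies would be pushed into Redis in the full proxy pool.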