Building a Local Proxy IP Pool

One common way to get around anti-crawling measures is to build your own pool of proxy IPs. This chapter shows how to set up such a pool using the free proxies listed on Xici Proxy (xicidaili.com).

1. Crawl proxy IPs with requests and store them in MySQL

import requests
from scrapy.selector import Selector
import pymysql

conn = pymysql.connect(host='127.0.0.1', user='root', passwd='***', db='scrapy_test', port=3306, charset='utf8')
cursor = conn.cursor()

def crawl_ips():
    """
    Crawl the high-anonymity proxies listed on Xici Proxy and store them in the local database.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0"}
    # Walk through the listing pages
    for i in range(2, 2890):
        response = requests.get("http://www.xicidaili.com/nn/{0}".format(i), headers=headers)
        # Parse the page content
        selector = Selector(text=response.text)
        all_trs = selector.css('#ip_list tr')
        ip_list = []
        for tr in all_trs[1:]:  # skip the table header row
            speed_str = tr.css(".bar::attr(title)").extract_first("")
            # The title looks like "0.123秒"; fall back to 0 when it is missing
            speed = float(speed_str.split("秒")[0]) if speed_str else 0.0
            all_texts = tr.css("td::text").extract()

            ip = all_texts[0]
            port = all_texts[1]
            proxy_type = all_texts[5]

            ip_list.append((ip, port, speed, proxy_type))

        try:
            print(ip_list)
            sql = 'insert into proxy_ip (ip, port, speed, proxy_type) values (%s, %s, %s, %s)'
            for ip_info in ip_list:
                if ip_info[0] and ip_info[1] and ip_info[2] and ip_info[3]:
                    cursor.execute(sql, (ip_info[0], ip_info[1], ip_info[2], ip_info[3]))
                    print('Inserted one proxy')
                    conn.commit()
        except pymysql.Error as e:
            print(e)
            conn.rollback()
    # Close the connection only after all pages have been crawled
    conn.close()

2. A utility class that fetches a random IP from the database

import requests
import pymysql

conn = pymysql.connect(host='127.0.0.1', user='root', passwd='***', db='scrapy_test', port=3306, charset='utf8')
cursor = conn.cursor()

class GetIP(object):
    """
    A helper class for fetching a usable proxy IP from the database.
    """

    def delete_ip(self, ip):
        """
        Delete an invalid IP record from the database.
        :param ip:
        :return:
        """
        delete_sql = """
                    delete from proxy_ip where ip='{0}'
                """.format(ip)
        cursor.execute(delete_sql)
        conn.commit()
        return True

    def judge_ip(self, proxy_type, ip, port):
        """
        Check whether the given proxy actually works.
        :param proxy_type:
        :param ip:
        :param port:
        :return:
        """
        http_url = "http://www.baidu.com"
        # requests expects lowercase scheme keys such as "http" / "https"
        proxy_type = proxy_type.lower()
        proxy_url = "{0}://{1}:{2}".format(proxy_type, ip, port)
        try:
            proxy_dict = {
                proxy_type: proxy_url,
            }
            # A timeout keeps a dead proxy from hanging the whole check
            response = requests.get(http_url, proxies=proxy_dict, timeout=10)
        except Exception as e:
            print("invalid ip and port")
            self.delete_ip(ip)
            return False
        else:
            code = response.status_code
            if 200 <= code < 300:
                print("有效的ip", proxy_url)
                return True
            else:
                print("无用的ip", proxy_url)
                self.delete_ip(ip)
                return False

    def get_random_ip(self):
        """
        Fetch one random IP from the database and make sure it still works.
        :return:
        """
        random_sql = """
            SELECT ip, port, proxy_type FROM proxy_ip
            ORDER BY RAND()
            LIMIT 1
        """
        # Execute the SQL statement
        cursor.execute(random_sql)
        for ip_info in cursor.fetchall():
            ip = ip_info[0]
            port = ip_info[1]
            proxy_type = ip_info[2]

            # Check whether this IP is still valid
            judge_re = self.judge_ip(proxy_type, ip, port)
            if judge_re:
                # Lowercase the scheme so the result is a well-formed proxy URL
                return "{0}://{1}:{2}".format(proxy_type.lower(), ip, port)
            else:
                return self.get_random_ip()
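A quick way to sanity-check the class is to run the module directly; this minimal sketch just prints one validated proxy URL:

if __name__ == '__main__':
    get_ip = GetIP()
    # Prints something like "http://1.2.3.4:8080" once a working proxy is found
    print(get_ip.get_random_ip())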

3. Set the proxy through a downloader middleware

class RandomProxy(object):
    """
    Attach a random proxy IP to every outgoing request.
    """

    def process_request(self, request, spider):
        get_ip = GetIP()
        request.meta['proxy'] = get_ip.get_random_ip()
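For the middleware to take effect, it has to be enabled in the project's settings.py. The module path below is an assumption; adjust it to wherever RandomProxy actually lives in your project:

# settings.py
DOWNLOADER_MIDDLEWARES = {
    # hypothetical path: replace 'myproject' with your own project name
    'myproject.middlewares.RandomProxy': 543,
}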

4. Testing

import scrapy


class HttpbinSpider(scrapy.Spider):
    name = 'httpbin'
    allowed_domains = ['httpbin.org']
    start_urls = ['http://httpbin.org/ip']

    def parse(self, response):
        print('*' * 100)
        print(response.text)
        print('*' * 100)
        yield scrapy.Request(self.start_urls[0], callback=self.parse, dont_filter=True)
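Run the spider with scrapy crawl httpbin. httpbin.org/ip echoes back the IP it sees in the origin field, so if the middleware is working the printed address should be the proxy's IP rather than your own.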
