After setting up my blog over the summer break, I quickly found myself badly short of nice images, so I wrote this simple gallery crawler.

Formally it is a focused crawler; the targets are simple static image galleries as well as dynamic galleries that serve images behind pseudo-static URLs.

Features:

1. Fast: downloads gallery images at the full speed your connection allows.

2. Stable: handles exceptions in every corner of the code (really, every corner! 0_0) and pulls high-anonymity proxies from a foreign proxy-pool site (if you see a flood of exceptions, the gallery is most likely blocking foreign IPs; just adjust the proxy logic in the code).

3. Deduplication: removes duplicate images by comparing their MD5 hashes.

A small trick:

1. Estimate the total number of images in the gallery, and work out how many downloads are needed to fetch them all.

Manual steps: from a first sample run, count the duplicates (either dump the file names from the command line and compare them in Excel, or write the MD5 hashes to a file and compare those in Excel), then plug the counts into the mark-recapture (Lincoln-Petersen) formula to estimate the total number of images; finally, use a bit of elementary probability (the balls-into-bins / coupon-collector problem) to compute how many downloads are needed to collect them all. A small worked sketch follows.
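As a concrete sketch (the numbers and function names here are illustrative, not part of the script): if the first run fetched n1 images, a second run fetched n2, and d of the second batch turned out to be repeats of the first, the Lincoln-Petersen estimator gives roughly N ≈ n1·n2/d images in total; and since each request returns a uniformly random image, the coupon-collector bound says about N·H(N) downloads are needed on average to see them all.

def estimate_total(n1, n2, d):
    # Lincoln-Petersen mark-recapture estimate: n1 images in the first
    # sample, n2 in the second, d of the second sample are repeats.
    return n1 * n2 / d

def expected_downloads(total):
    # Coupon-collector expectation: with each request returning a uniformly
    # random image, collecting all `total` distinct images takes about
    # total * H(total) downloads on average.
    harmonic = sum(1 / k for k in range(1, total + 1))
    return total * harmonic

# Example: 200 images per run with 40 repeats -> about 1000 images in the
# gallery, and roughly 1000 * H(1000) ≈ 7500 downloads to collect them all.
print(expected_downloads(round(estimate_total(200, 200, 40))))

The full crawler script follows.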

import easygui as g
import os
import socket
import time
import hashlib
import sys
import random
import urllib.error
import urllib3
import requests
from bs4 import BeautifulSoup
from requests.exceptions import ReadTimeout, ConnectionError, RequestException


# Pick a random User-Agent string
def ChooseAgent():
    agents = [
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
        "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
        "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
    ]
    return random.choice(agents)


# Scrape a list of proxy IPs from the proxy-list page
def get_ip_list(url, headers):
    web_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    ips = soup.find_all('tr')
    ip_list = []
    for i in range(1, len(ips)):
        ip_info = ips[i]
        tds = ip_info.find_all('td')
        ip_list.append(tds[1].text + ':' + tds[2].text)
    return ip_list


# Pick a random proxy from the scraped list
def get_random_ip(ip_list):
    proxy_list = []
    for ip in ip_list:
        proxy_list.append('http://' + ip)
    proxy_ip = random.choice(proxy_list)
    proxies = {'http': proxy_ip}
    return proxies


# Image deduplication via MD5 hashes; `path` is the folder the images were saved to
def dedup_images(path):
    f = open(path + '/md5.txt', 'w')

    records = []

    image_paths = []

    # Collect the path of every image in the folder
    root, _, files = next(os.walk(path))
    for name in files:
        if name == 'md5.txt':
            continue  # skip the record file itself
        image_paths.append(path + '/' + str(name))

    # Compute each image's MD5 and pair it with its path
    for p in image_paths:
        hasher = hashlib.md5()
        with open(p, 'rb') as img:
            hasher.update(img.read())
        record = [p, hasher.hexdigest()]
        f.write(str(record) + '\n')
        records.append(record)
    f.close()

    # Compare MD5 values pairwise; when two match, delete one of the images
    m = 0
    while m < len(records):
        t = m + 1
        while t < len(records):
            if records[m][1] == records[t][1]:
                os.remove(records[t][0])
                del records[t]
            else:
                t += 1
        m += 1
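
# Note: a set of already-seen hashes would avoid the O(n^2) pairwise scan
# above. A minimal sketch of that variant (not part of the original script):
def dedup_images_fast(path):
    seen = set()
    root, _, files = next(os.walk(path))
    for name in files:
        full = path + '/' + name
        with open(full, 'rb') as img:
            digest = hashlib.md5(img.read()).hexdigest()
        if digest in seen:
            os.remove(full)  # same content as an earlier file, drop this copy
        else:
            seen.add(digest)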



# Core download routine
def downloadImages(quantity, nam, address, url):
    headers = {
        'Referer': url,
        'Connection': 'keep-alive'
    }
    headers["User-Agent"] = ChooseAgent()  # pick a random User-Agent for this run

    IP_url = 'http://www.xicidaili.com/nn/'
    ip_list = get_ip_list(IP_url, headers=headers)

    timeout = 20
    socket.setdefaulttimeout(timeout)  # set a timeout on the whole socket layer; later socket use in this file needs no extra setup
    sleep_download_time = 0

    for i in range(1, quantity + 1):

        try:
            time.sleep(sleep_download_time)  # adjust the delay between downloads to taste

            proxies = get_random_ip(ip_list)

            urllib3.disable_warnings()

            response = requests.get(url, headers=headers, proxies=proxies, verify=False)
            if response.status_code == requests.codes.ok:
                print("访问成功")
            _image = response.content

            name = address + '/' + nam + "%s" % i + '.png'


            with open(name, 'wb') as t:
                t.write(_image)
            response.close()
            print(proxies, "image %d saved" % i)
        except UnicodeDecodeError:
            print('-----UnicodeDecodeError url:', url)
        except urllib.error.URLError:
            print("-----urlError url:", url)
        except socket.timeout:
            print("-----socket timeout:", url)
        except ReadTimeout:
            print("timeout")
        except ConnectionError:
            print("connection error")
        except RequestException as e:
            # ReadTimeout and ConnectionError are subclasses of RequestException,
            # so the catch-all handler has to come last.
            print(e)


if __name__ == '__main__':
    sys.setrecursionlimit(100000)  # raise the recursion limit to 100,000

    fields_list = ['*Gallery source URL', 'File name prefix', 'Number of downloads']
    values_list = ['', 'images', '1']
    know = g.multenterbox(msg='Fields marked with * are required', title='Automatic image download script', fields=fields_list, values=values_list)
    an = know[0]
    nam = know[1]
    num = int(know[2])

    address = g.diropenbox(msg='Please choose a folder to save into', title='Automatic image download script')


    check = an[:4]

    if check != 'http':
        ans = 'http://' + an
    else:
        ans = an

    downloadImages(num, nam, address, ans)
    dedup_images(address)

I'm still learning about crawlers; feel free to write to me and swap notes.


only love & learning