python遍历爬取一堆URL图片到指定目录


把一堆图片的URL放在urls.txt中,同时在该脚本同目录下建立一个result目录;

把下面脚本也放在同目录下命名为crawlimg.py,在IDLE中按F5,大功告成。

#!/usr/bin/python
#-*- coding: utf-8 -*-

import urllib.request
import urllib
import re,os
import time
import hashlib
import requests
import random


# 可行的
def gogogo():
    """Download every image URL listed in urls.txt into ./result/.

    Each image is saved as <md5-of-url><original extension> so that
    re-running the script skips files fetched on a previous run.
    Sleeps 1-3 seconds between downloads to be polite to the server.
    Expects urls.txt and the result/ directory to exist beside the script.
    """
    # Header entries for the urlopen-with-opener variant (method 1 in the
    # original post).  Currently unused — kept so the header-install path
    # can be re-enabled easily:
    #   opener = urllib.request.build_opener()
    #   opener.addheaders = headers
    #   urllib.request.install_opener(opener)
    #   urllib.request.urlretrieve(requestUrl, filename)
    headers = [
        ('Host', 'productimg.xbiao.com'),
        ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'),
        ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
        ('Accept-Language', 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'),
        ('Accept-Encoding', 'gzip, deflate'),
        ('Referer', 'http://www.google.com'),
        ('Connection', 'keep-alive'),
        ('Cache-Control', 'max-age=0')
    ]

    print('run go go go')
    # NOTE: the file handle is named url_file (not f) so the image-write
    # handle below cannot shadow it, as it did in the original.
    with open('urls.txt') as url_file:
        for url in url_file:
            # Strip the trailing newline (and stray whitespace) before
            # hashing so identical URLs always map to the same file name.
            request_url = url.strip()
            if not request_url:
                continue  # tolerate blank lines in urls.txt
            basename = hashlib.md5(request_url.encode()).hexdigest()
            extname = os.path.splitext(request_url)[1]

            print('basename:' + basename)
            filename = './result/{}{}'.format(basename, extname)
            print('filename:' + filename)

            # Skip files already downloaded on a previous run.
            if os.path.exists(filename):
                print('filename exists-repeat')
                continue

            print('url:' + url)

            try:
                pic_content = urllib.request.urlopen(request_url).read()
                # Responses under ~4 KB are assumed to be error pages rather
                # than real images — TODO confirm this threshold.
                if len(pic_content) > 4000:
                    # with-block guarantees the file is closed even if the
                    # write fails (original left the handle open).
                    with open(filename, 'wb') as out:
                        out.write(pic_content)
            except Exception as e:
                # BUG FIX: the original printed the undefined name 'imgPath'
                # here, turning every download failure into a NameError.
                print(request_url + " error: " + str(e))

            time.sleep(random.randint(1, 3))


# Script entry point: crawl every URL listed in urls.txt (see gogogo above).
if __name__ == "__main__":
    gogogo()

整体结构

    /*
     * Header出错记得header是二维数组结构
     * 主要逻辑以下部分:
     * 1. 用php将json或html爬下来,保存到tmps下产生一堆log文件
     * 2. 根据log格式化出来excel1
     * 3. 将excel1的图片列单独拷贝成txt,用python 进行爬取放到tmps/imgs目录下
     * 4. 然后再用php产生带图的excel2
     */

原文链接:https://blog.yongit.com/note/1573028.html