Scraping Website Images Through Proxies with Python (Multithreaded)


I. Features:
1. Scrape proxy servers with multiple threads, then verify them, also multithreaded.
Note: the proxies are scraped from http://www.cnproxy.com/ (only 8 pages were used for testing).
2. Collect image URLs from a target website, then download the images with multiple threads, each download going through a randomly chosen verified proxy.
II. Implementation

The code is as follows:

#!/usr/bin/env python
#coding:utf-8

import urllib2
import re
import threading
import time
import random

rawProxyList = []
checkedProxyList = []
imgurl_list = []

# Scrape the proxy listing site
portdicts = {'v':"3",'m':"4",'a':"2",'l':"9",'q':"0",'b':"5",'i':"7",'w':"6",'r':"8",'c':"1"}
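# Example: the listing renders each port as obfuscated JavaScript such as
# document.write(":"+l+q+w+v); every letter maps to one digit via portdicts,
# so "l+q+w+v" decodes to port "9063".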
targets = []
for i in xrange(1, 9):
    target = r"http://www.cnproxy.com/proxy%d.html" % i
    targets.append(target)

# Regex for extracting proxy entries from the listing pages
p = re.compile(r'''<tr><td>(.+?)<SCRIPT type=text/javascript>document.write\(":"\+(.+?)\)</SCRIPT></td><td>(.+?)</td><td>.+?</td><td>(.+?)</td></tr>''')
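# Capture groups: (1) IP address, (2) obfuscated port script arguments,
# (3) proxy type, (4) location (GBK-encoded on the page).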

# Thread class that scrapes proxies
class ProxyGet(threading.Thread):
    def __init__(self, target):
        threading.Thread.__init__(self)
        self.target = target

    def getProxy(self):
        print "Fetching proxy list page: " + self.target
        req = urllib2.urlopen(self.target)
        result = req.read()
        matchs = p.findall(result)
        for row in matchs:
            ip = row[0]
            # Decode the obfuscated port: each script variable maps to one digit
            port = ''.join(map(lambda x: portdicts[x], row[1].split('+')))
            agent = row[2]
            addr = row[3].decode("cp936").encode("utf-8")
            proxy = [ip, port, addr]
            rawProxyList.append(proxy)

    def run(self):
        self.getProxy()

# Thread class that verifies proxies
class ProxyCheck(threading.Thread):
    def __init__(self, proxyList):
        threading.Thread.__init__(self)
        self.proxyList = proxyList
        self.timeout = 5
        self.testUrl = "http://www.baidu.com/"
        self.testStr = "030173"
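        # "030173" comes from Baidu's ICP licence number shown in the page
        # footer; finding it confirms the proxy returned the real page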

    def checkProxy(self):
        cookies = urllib2.HTTPCookieProcessor()
        for proxy in self.proxyList:
            proxyHandler = urllib2.ProxyHandler({"http": r'http://%s:%s' % (proxy[0], proxy[1])})
            opener = urllib2.build_opener(cookies, proxyHandler)
            opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0')]
            t1 = time.time()

            try:
                req = opener.open(self.testUrl, timeout=self.timeout)
                result = req.read()
                timeused = time.time() - t1
                # str.find returns -1 when the marker string is absent
                if result.find(self.testStr) >= 0:
                    checkedProxyList.append((proxy[0], proxy[1], proxy[2], timeused))
            except Exception:
                continue

    def run(self):
        self.checkProxy()

# Collect image URLs from the target site
def imgurlList(url_home):
    global imgurl_list
    home_page = urllib2.urlopen(url_home)
    url_re = re.compile(r'<li><a href="(.+?)" target="_blank" rel="nofollow">')
    pic_re = re.compile(r'<img src="(.*?\.\w{3,4})"')
    url_list = re.findall(url_re, home_page.read())
    for url in url_list:
        url_page = urllib2.urlopen(url_home + url)
        for imgurl in re.findall(pic_re, url_page.read()):
            imgurl_list.append(imgurl)

# Thread class that downloads images
class getPic(threading.Thread):
    def __init__(self, imgurl_list):
        threading.Thread.__init__(self)
        self.imgurl_list = imgurl_list
        self.timeout = 5

    def downloadimg(self):
        for imgurl in self.imgurl_list:
            pic_suffix = imgurl.split('.')[-1]  # image file extension
            pic_name = str(random.randint(0, 10000000000)) + '.' + pic_suffix
            cookies = urllib2.HTTPCookieProcessor()
            randomCheckedProxy = random.choice(checkedProxyList)  # pick a random verified proxy
            proxyHandler = urllib2.ProxyHandler({"http": r'http://%s:%s' % (randomCheckedProxy[0], randomCheckedProxy[1])})
            opener = urllib2.build_opener(cookies, proxyHandler)
            opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0')]
            try:
                data_img = opener.open(imgurl, timeout=self.timeout)
                f = open(pic_name, 'wb')
                f.write(data_img.read())
                f.close()
            except Exception:
                continue

    def run(self):
        self.downloadimg()

if __name__ == "__main__":
    getThreads = []
    checkThreads = []
    getPicThreads = []
    imgurlList('http://www.ivsky.com')

    # One thread per target page to scrape proxies
    for i in range(len(targets)):
        t = ProxyGet(targets[i])
        getThreads.append(t)

    for i in range(len(getThreads)):
        getThreads[i].start()

    for i in range(len(getThreads)):
        getThreads[i].join()

    print '.' * 10 + "Scraped %s proxies in total" % len(rawProxyList) + '.' * 10

    # Verify with 20 threads: split the scraped proxies into 20 chunks, one per
    # thread; (n + 19) / 20 is integer ceiling division in Python 2, so every
    # proxy lands in exactly one chunk
    chunk = (len(rawProxyList) + 19) / 20
    for i in range(20):
        t = ProxyCheck(rawProxyList[chunk * i:chunk * (i + 1)])
        checkThreads.append(t)

    for i in range(len(checkThreads)):
        checkThreads[i].start()

    for i in range(len(checkThreads)):
        checkThreads[i].join()

    print '.' * 10 + "%s proxies passed verification" % len(checkedProxyList) + '.' * 10

    # Download with 20 threads; each downloader picks a random verified proxy
    chunk = (len(imgurl_list) + 19) / 20
    for i in range(20):
        t = getPic(imgurl_list[chunk * i:chunk * (i + 1)])
        getPicThreads.append(t)

    for i in range(len(getPicThreads)):
        getPicThreads[i].start()

    for i in range(len(getPicThreads)):
        getPicThreads[i].join()

    print '.' * 10 + "%s images downloaded in total" % len(imgurl_list) + '.' * 10

    # Sort verified proxies by response time and persist them
    f = open("proxy_list.txt", 'w+')
    for proxy in sorted(checkedProxyList, key=lambda x: x[3]):
        f.write("%s:%s\t%s\t%s\n" % (proxy[0], proxy[1], proxy[2], proxy[3]))
    f.close()
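The listing above is Python 2 only (urllib2, xrange, print statements). For readers on Python 3, here is a minimal sketch of the same proxy-check idea using urllib.request; check_proxy is an illustrative helper, not part of the original script, though it reuses the script's test URL and marker string:

import time
import urllib.request

def check_proxy(ip, port, test_url="http://www.baidu.com/", marker="030173", timeout=5):
    # Route HTTP traffic through the proxy under test
    handler = urllib.request.ProxyHandler({"http": "http://%s:%s" % (ip, port)})
    opener = urllib.request.build_opener(handler)
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    t1 = time.time()
    try:
        result = opener.open(test_url, timeout=timeout).read()
    except Exception:
        return None
    # read() returns bytes in Python 3, so encode the marker before searching
    if marker.encode() in result:
        return time.time() - t1  # response time in seconds
    return None

A call such as check_proxy("1.2.3.4", "8080") (placeholder address) returns the response time on success and None otherwise.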

III. Test Results:

The output is as follows:

# ls
proxy_getpic.py
# python proxy_getpic.py
Fetching proxy list page: http://www.cnproxy.com/proxy1.html
Fetching proxy list page: http://www.cnproxy.com/proxy2.html
Fetching proxy list page: http://www.cnproxy.com/proxy3.html
Fetching proxy list page: http://www.cnproxy.com/proxy4.html
Fetching proxy list page: http://www.cnproxy.com/proxy5.html
Fetching proxy list page: http://www.cnproxy.com/proxy6.html
Fetching proxy list page: http://www.cnproxy.com/proxy7.html
Fetching proxy list page: http://www.cnproxy.com/proxy8.html
..........Scraped 800 proxies in total..........
..........458 proxies passed verification..........
..........154 images downloaded in total..........
# cat proxy_list.txt | more
173.213.113.111:3128    United States   0.432188987732
173.213.113.111:8089    United States   0.441318035126
173.213.113.111:7808    United States   0.444597005844
110.4.24.170:80 香港 香港移动通讯有限公司       0.489440202713
211.142.236.135:8080    湖南省株洲市 移动       0.490673780441
211.142.236.135:8081    湖南省株洲市 移动       0.518096923828
211.142.236.135:8000    湖南省株洲市 移动       0.51860499382
211.142.236.135:8082    湖南省株洲市 移动       0.520448207855
# ls
1001117689.jpg  3097883176.jpg  5234319709.jpg  7012274766.jpg  8504924248.jpg
1076458640.jpg  3144369522.jpg  5387877704.jpg  7106183143.jpg  867723868.jpg
1198548712.jpg  3161307031.jpg  5572092752.jpg  7361254661.jpg  8746315373.jpg
165738192.jpg   3228008315.jpg  5575388077.jpg  7389537793.jpg  8848973192.jpg
1704512138.jpg  3306931164.jpg  5610740708.jpg  7407358698.jpg  8973834958.jpg
1742167711.jpg  3320152673.jpg  5717429022.jpg  7561176207.jpg  8976862152.jpg
...............
