最近学员不断面试,不时听到令人惊喜的消息,类似应届专科毕业生获得7k月薪,小美女应聘月薪11k等等,看到学员开心的笑容令人开心。在POPTEST学习很辛苦,每天学员起早贪黑,回家10点正常,11,12点回家不稀奇,学员也有自己的生活,有的时候会让我去给他们刷榜,其实学员在前面的学习中已经有了写这样脚本的基础,所以我这里通过爬虫技术爬下代理ip,然后采用多进程的方法来访问www.poptest.cn,实现利用代理ip地址访问网站,间接说明如何帮学员刷榜,~_~!!!!!,脚本如下:
# coding: utf-8
__author__ = 'zzg'
import requests
from bs4 import BeautifulSoup
import multiprocessing
import time
def getProxyIp():
    """Scrape proxy servers from the first 11 listing pages of xicidaili.com.

    Returns:
        list of dict: each dict is suitable for the ``proxies`` argument of
        ``requests.get`` — keys 'http' and/or 'https' mapped to
        'scheme://ip:port' strings.
    """
    proxy = []
    # Browser-like User-Agent so the listing site serves us the page.
    # Hoisted out of the loop: it is identical for every request.
    header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
                            'AppleWebKit/537.36 (KHTML, like Gecko) '
                            'Ubuntu Chromium/44.0.2403.89 '
                            'Chrome/44.0.2403.89 '
                            'Safari/537.36'}
    for i in range(1, 12):
        print(i)  # progress indicator, one line per listing page
        r = requests.get('http://www.xicidaili.com/nt/{0}'.format(i), headers=header)
        # Explicit parser: bare BeautifulSoup(html) warns and may pick a
        # different parser per machine.
        soup = BeautifulSoup(r.text, 'html.parser')
        table = soup.find('table', attrs={'id': 'ip_list'})
        # First <tr> is the column-header row; skip it.
        for item in table.find_all('tr')[1:]:
            tds = item.find_all('td')
            temp_dict = {}
            # Column 6 holds the proxy type, e.g. 'HTTP' or 'HTTPS'.
            kind = tds[6].get_text().lower()
            # NOTE(review): 'http' is a substring of 'https', so an HTTPS
            # proxy intentionally(?) gets BOTH keys; preserved as-is since
            # the target site is plain http.
            if 'http' in kind:
                temp_dict['http'] = "http://{0}:{1}".format(tds[2].get_text(), tds[3].get_text())
            if 'https' in kind:
                temp_dict['https'] = "https://{0}:{1}".format(tds[2].get_text(), tds[3].get_text())
            proxy.append(temp_dict)
    return proxy
def brash(proxy_dict):
    """Hit the target site once through the given proxy.

    Args:
        proxy_dict: mapping for requests' ``proxies`` argument,
            e.g. {'http': 'http://1.2.3.4:80'}.

    Prints 'successful' when the request completes within the timeout,
    'failed' otherwise. Always returns None.
    """
    header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
                            'AppleWebKit/537.36 (KHTML, like Gecko) '
                            'Ubuntu Chromium/44.0.2403.89 '
                            'Chrome/44.0.2403.89 '
                            'Safari/537.36'}
    try:
        # 1-second timeout: a proxy slower than that is useless here.
        # Response body is irrelevant, so the result is not kept.
        requests.get("http://WWW.POPTEST.CN", headers=header,
                     proxies=proxy_dict, timeout=1)
    except Exception:
        # Dead/slow proxies are expected; report and move on.
        # ('except Exception, e' was Python-2-only syntax and 'e' was unused.)
        print("failed")
    else:
        print("successful")
    time.sleep(0.5)  # small pause so requests are not fired back-to-back
    return None
if __name__ == '__main__':
    # Number of scraping rounds, entered by the user.
    # int(input()) replaces bare input(): under Python 2 bare input()
    # eval()s the typed text — arbitrary code execution on user input.
    final = int(input())
    t = 0
    while t < final:
        t += 1
        proxies = getProxyIp()  # proxies from the first 11 listing pages
        # Run the whole proxy list 5 times so addresses that failed on an
        # earlier pass get extra chances to connect.
        for _ in range(5):
            # Fan the proxy list out over 32 worker processes.
            pool = multiprocessing.Pool(processes=32)
            results = [pool.apply_async(brash, (p,)) for p in proxies]
            for res in results:
                res.get()  # propagate any worker exception; brash returns None
            pool.close()
            pool.join()