This article uses Python 3's built-in urllib module to write a lightweight, simple crawler. As for how to locate the URL of a specific element on a page, you can look up Firefox's Firebug plugin or Chrome's built-in developer tools.
1. Opening a URL
re = urllib.request.urlopen('URL')
What you open can also be a urllib.request.Request object, and a data argument can follow; when data is passed in, the request automatically becomes a POST.
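A minimal sketch of both cases (httpbin.org is used here only as a stand-in test site):

import urllib.request
import urllib.parse

# Plain GET: pass the URL string directly
resp = urllib.request.urlopen('http://httpbin.org/get')

# POST: build a Request and attach encoded data (bytes);
# once data is present, the request is sent as a POST automatically
data = urllib.parse.urlencode({'name': 'test'}).encode('utf-8')
req = urllib.request.Request('http://httpbin.org/post', data=data)
resp = urllib.request.urlopen(req)
print(resp.getcode())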
2. Attributes and methods of the urllib.request.Request(url, data=None, headers={}) object
full_url
type
host
data
selector
method
get_method()
add_header(key, val)
add_unredirected_header(key, val)
has_header(header)
remove_header(header)
get_full_url()
set_proxy(host, type)
get_header(header_name, default=None)
header_items()
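A short sketch exercising a few of the attributes and methods above (example.com and the proxy address are only placeholders):

import urllib.request

req = urllib.request.Request('http://www.example.com/index.html',
                             headers={'User-Agent': 'Mozilla/5.0'})
print(req.full_url)          # 'http://www.example.com/index.html'
print(req.type, req.host)    # 'http' 'www.example.com'
print(req.get_method())      # 'GET' (becomes 'POST' once data is attached)
req.add_header('Referer', 'http://www.example.com/')
print(req.has_header('Referer'))   # True
print(req.header_items())          # list of (name, value) pairs
req.set_proxy('127.0.0.1:8080', 'http')   # send this request through a proxy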
3. Methods available on the opened (response) object:
re.read() reads the content; to save it, first create a file in the appropriate mode, then write what you read into that file.
re.geturl() returns the URL of the opened object.
re.info() returns the responding server's header information.
re.getcode() returns the HTTP status code of the response.
urllib.parse.urlencode() converts a dict of POST data into the format needed to open the page.
json.loads() can convert JSON text into a dict of key-value pairs.
Headers can be passed in as a dict when making the request, to disguise how you are accessing the site; they can also be appended with the Request object's add_header(key, val).
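A minimal sketch tying these pieces together (httpbin.org is only a stand-in target; the variable is named re to match the lines above):

import json
import urllib.request
import urllib.parse

headers = {'User-Agent': 'Mozilla/5.0'}                          # disguise the access as a browser
data = urllib.parse.urlencode({'kw': 'python'}).encode('utf-8')  # dict -> POST body
req = urllib.request.Request('http://httpbin.org/post', data=data, headers=headers)
re = urllib.request.urlopen(req)

text = re.read().decode('utf-8')   # read the body
print(re.geturl())                 # URL actually opened (after redirects)
print(re.info())                   # response headers from the server
print(re.getcode())                # HTTP status code

result = json.loads(text)          # JSON text -> dict of key-value pairs
print(result.get('form'))

# To save the content, open a file in a matching mode and write it out
with open('result.json', 'w', encoding='utf-8') as f:
    f.write(text)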
4. When you know a file's URL, you can download it and save it locally with this method:
urllib.request.urlretrieve('http://wx1.sinaimg.cn/mw600/9bbc284bgy1ffkuafn4xtj20dw0jgh08.jpg','bc.jpg')
5. Implementing login (POST)
(1) Using a session to keep the login state
import requests

# getXSRF(), baseurl, url, headers_base, email and password are assumed to be defined elsewhere
login_data = {
    '_xsrf': getXSRF(baseurl),   # anti-CSRF token scraped from the login page
    'password': password,
    'remember_me': 'true',
    'email': email,
}
session = requests.session()
content = session.post(url, headers=headers_base, data=login_data)
s = session.get("http://www.zhihu.com", verify=False)
print(s.text)
(2) Logging in with cookies
import urllib.request
import urllib.parse
import http.cookiejar

# self.ua, loginURL and loginHeaders are assumed to be defined elsewhere
post = {
    'ua': self.ua,
    'TPL_checkcode': '',
    'CtrlVersion': '1,0,0,7',
    'TPL_password': '',
}
# Encode the POST data (it must be bytes in Python 3)
postData = urllib.parse.urlencode(post).encode('utf-8')
# Build an opener that keeps cookies across requests
cookie = http.cookiejar.LWPCookieJar()
cookieHandler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(cookieHandler, urllib.request.HTTPHandler)
# First login attempt to fetch the captcha; build the request
request = urllib.request.Request(loginURL, postData, loginHeaders)
# Get the response to the first login attempt
response = opener.open(request)
# Read its content
content = response.read().decode('gbk')
Common encodings used by websites include utf-8, gbk, gb2312, gb18030, and so on.
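A small helper sketch that tries these encodings in turn when decoding a response body (decode_page is just an illustrative name, and this is a heuristic, not a full charset detector):

def decode_page(raw_bytes):
    # Try the encodings commonly used by Chinese sites until one succeeds
    for enc in ('utf-8', 'gbk', 'gb2312', 'gb18030'):
        try:
            return raw_bytes.decode(enc)
        except UnicodeDecodeError:
            continue
    # Last resort: decode with replacement characters
    return raw_bytes.decode('utf-8', errors='replace')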
6. Using a proxy
If the same IP accesses a server too many times in a short period, the server will block it, so we often need a proxy to get around this. The method is as follows:
proxy_support = urllib.request.ProxyHandler({'type': 'proxy IP:port'})   # e.g. {'http': '1.2.3.4:8080'}
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)   # optional: install the opener globally
opener.open(url)                        # or call the opener directly to use the proxy
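Putting it together, a minimal runnable sketch (127.0.0.1:8080 is only a placeholder and must be replaced with a real proxy):

import urllib.request

proxy_support = urllib.request.ProxyHandler({'http': '127.0.0.1:8080'})
opener = urllib.request.build_opener(proxy_support)

# Option 1: install the opener globally, after which plain urlopen() goes through the proxy
urllib.request.install_opener(opener)
print(urllib.request.urlopen('http://www.baidu.com').getcode())

# Option 2: call the opener directly without installing it
print(opener.open('http://www.baidu.com').getcode())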
Note: for more complex needs you can use the more comprehensive Scrapy framework.
Appendix: a crawler I wrote to check whether public proxies work. It first fetches proxy addresses from a website, then uses each proxy to visit Baidu; if Baidu's page can be retrieved, the proxy address is saved.
import threading, time, pickle, re
import urllib.request


class ProxyCheck(threading.Thread):
    def __init__(self, proxylist):
        threading.Thread.__init__(self)
        self.proxylist = proxylist
        self.timeout = 5
        self.test_url = 'http://www.baidu.com'
        self.test_str = '11000002000001'
        self.checkedProxyList = []

    def checkProxy(self):
        cookies = urllib.request.HTTPCookieProcessor()
        for proxy in self.proxylist:
            proxy_handler = urllib.request.ProxyHandler(
                {'http': r'%s://%s:%s' % (proxy[0], proxy[1], proxy[2])})
            opener = urllib.request.build_opener(cookies, proxy_handler)
            opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                                                '(KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36')]
            urllib.request.install_opener(opener)
            t1 = time.time()
            try:
                req = urllib.request.urlopen(self.test_url, timeout=self.timeout)
                result = req.read().decode('utf-8')
                timeused = time.time() - t1
                pos = result.find(self.test_str)
                if pos > 1:
                    self.checkedProxyList.append((proxy[0], proxy[1], proxy[2], proxy[3], timeused))
                    print((proxy[0], proxy[1], proxy[2], proxy[3], timeused))
                else:
                    continue
            except:
                continue

    # def sort(self):
    #     self.checkedProxyList.sort(key=lambda x: x[4])

    def save(self, filename):
        with open("%s.txt" % filename, 'w') as f:
            for proxy in self.checkedProxyList:
                f.write("{}\t{}:{}\t{}\t{}\n".format(*proxy))
        with open("%s.pickle" % filename, 'wb') as fb:
            pickle.dump(self.checkedProxyList, fb)

    def run(self):
        self.checkProxy()
        self.save("checked-50")


class xiciProxy:
    def __init__(self):
        self.alllist = []

    def grep(self, url):
        req = urllib.request.Request(url)
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                                     '(KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36')
        result1 = urllib.request.urlopen(req)
        result2 = result1.read().decode('utf-8')

        regex = r"<td>(\d+\.\d+\.\d+\.\d+)</td>\n.*?" \
                r"<td>(\d+)</td>\n.*?" \
                r"\n.*?" \
                r"<a href=.*?>(.*?)</a>\n.*?" \
                r"\n.*?" \
                r"\n.*?" \
                r"<td>(HTTPS?)</td>"
        get = re.findall(regex, result2)
        proxylist = []
        for i in get:
            proxylist.append((i[3], i[0], i[1], i[2]))
        return proxylist

    def save(self, filename):
        with open("%s.txt" % filename, 'w') as f:
            for proxy in self.alllist:
                f.write("{}\t{}:{}\t{}\n".format(*proxy))
        with open("%s.pickle" % filename, 'wb') as fb:
            pickle.dump(self.alllist, fb)

    def run(self):
        for i in range(51, 1951):
            url = "http://www.xicidaili.com/nn/{}".format(i)
            print(url)
            proxylist = self.grep(url)
            self.alllist += proxylist
            if i % 50 == 0:
                self.save("xiciproxy-{}".format(i))
                self.alllist = []


with open("xiciproxy-50.pickle", "rb") as fb:
    proxylist = pickle.load(fb)
ProxyCheck(proxylist).run()