Serialized study notes from working through 廖雪峰 (Liao Xuefeng)'s Python crawler tutorial:
1 Basic usage of urllib
import urllib.request

def load_data():
    url = "http://www.baidu.com/"
    # a plain HTTP GET request
    # response is the HTTP response object
    response = urllib.request.urlopen(url)
    print(response)
    # read the body; the result is of type bytes
    data = response.read()
    print(data)
    # convert the fetched bytes into a string
    str_data = data.decode("utf-8")
    print(str_data)
    # write the data to a file
    with open("baidu.html", "w", encoding="utf-8") as f:
        f.write(str_data)
    # convert a string into bytes
    str_name = "baidu"
    bytes_name = str_name.encode("utf-8")
    print(bytes_name)
    # the two types you meet when scraping: str and bytes
    # if you fetched bytes but need to write a string: decode("utf-8")
    # if you have a str but need to write bytes: encode("utf-8")

load_data()
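A side note on decoding: the page is not always utf-8. A minimal sketch, assuming the server declares a charset in its Content-Type header, that reads the charset from the response instead of hardcoding it:

import urllib.request

response = urllib.request.urlopen("http://www.baidu.com/")
# get_content_charset() parses the Content-Type response header;
# fall back to utf-8 when the server does not declare a charset
charset = response.headers.get_content_charset() or "utf-8"
html = response.read().decode(charset)
print(charset)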
2 Escaping request parameters:
import urllib.parse
import urllib.request

def get_params():
    url = "http://www.baidu.com/s?"
    params = {
        "wd": "中文",
        "key": "zhang",
        "value": "san"
    }
    # urlencode percent-escapes values that are not URL-safe, e.g. "中文"
    str_params = urllib.parse.urlencode(params)
    final_url = url + str_params
    response = urllib.request.urlopen(final_url)
    print(response.read().decode("utf-8"))

get_params()
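Related: urlencode takes a whole dict, while urllib.parse.quote escapes a single string. A quick sketch comparing the two (the expected output is shown in the comments):

import urllib.parse

# quote escapes one string; urlencode builds a full query string from a dict
print(urllib.parse.quote("中文"))              # %E4%B8%AD%E6%96%87
print(urllib.parse.urlencode({"wd": "中文"}))  # wd=%E4%B8%AD%E6%96%87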
3 Adding request header information:
def load_baidu():
    url = "https://www.baidu.com"
    header = {
        # the browser identification string
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
        # "haha": "hehe"
    }
    # create the request object with the headers attached
    request = urllib.request.Request(url, headers=header)
    # headers can also be added dynamically afterwards
    request.add_header("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36")
    # request the data (urlopen itself takes no headers parameter, so pass the Request object)
    response = urllib.request.urlopen(request)
    print(response)
    data = response.read().decode("utf-8")
4 Picking a random User-Agent from a list:
import random
import urllib.request

def load_baidu():
    url = "http://www.baidu.com"
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.9.168 Version/11.50"
    ]
    # a different browser identity for every request
    random_user_agent = random.choice(user_agent_list)
    request = urllib.request.Request(url)
    # attach the chosen User-Agent header
    request.add_header("User-Agent", random_user_agent)
    # request the data
    response = urllib.request.urlopen(request)
    data = response.read().decode("utf-8")
    # print the header we sent
    print(request.get_header("User-agent"))
    # the complete url of the request
    final_url = request.get_full_url()
    print(final_url)
    # the response headers
    # print(response.headers)
    # all of the request headers at once
    # request_headers = request.headers
    # print(request_headers)
    # second way to read a single request header
    # note: urllib stores the header name with only the first letter capitalized
    request_headers = request.get_header("User-agent")
    # print(request_headers)
    with open("02header.html", "w") as f:
        f.write(data)

load_baidu()
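The same idea can be packaged into a small helper so every call picks a fresh identity; a sketch with names of my own choosing (fetch, USER_AGENT_LIST):

import random
import urllib.request

USER_AGENT_LIST = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0",
]

def fetch(url):
    # build a fresh Request with a randomly chosen User-Agent on every call
    headers = {"User-Agent": random.choice(USER_AGENT_LIST)}
    request = urllib.request.Request(url, headers=headers)
    return urllib.request.urlopen(request).read()

print(fetch("http://www.baidu.com")[:100])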
5 Handlers, openers, and third-party SSL certificates:
import urllib.request

def handler_opener():
    # urlopen has no built-in way to add a proxy, so we build that feature ourselves
    # SSL (Secure Sockets Layer) relies on third-party CA digital certificates
    # http uses port 80, https uses port 443
    # urlopen can request data because it is built on handlers internally
    url = "https://blog.cn.net/m0_37499059/article/details/79003731"
    # create our own handler
    handler = urllib.request.HTTPHandler()
    # create our own opener from it
    opener = urllib.request.build_opener(handler)
    # use our opener's open method to request the data
    response = opener.open(url)
    data = response.read().decode("utf-8")
    with open("02header.html", "w") as f:
        f.write(data)

handler_opener()
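On the SSL side, the opener mechanism also lets you control certificate handling: HTTPSHandler accepts an ssl context. A sketch using standard-library defaults (not from the original notes):

import ssl
import urllib.request

# build an opener whose HTTPSHandler uses an explicit SSL context
context = ssl.create_default_context()
opener = urllib.request.build_opener(urllib.request.HTTPSHandler(context=context))
response = opener.open("https://www.baidu.com")
print(response.status)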
6 Accessing through a proxy:
def create_proxy_handler():
    url = "https://blog.cn.net/m0_37499059/article/details/79003731"
    # add a proxy, written as {"scheme": "ip:port"} (a free proxy here)
    proxy = {
        "http": "120.77.249.46:8080"
    }
    # the proxy handler
    proxy_handler = urllib.request.ProxyHandler(proxy)
    # create our own opener
    opener = urllib.request.build_opener(proxy_handler)
    # send the request through the proxy ip
    response = opener.open(url)
    data = response.read().decode("utf-8")
    print(data)
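If every subsequent request should go through the same proxy, urllib can install the opener globally; a minimal sketch reusing the same free proxy as above:

import urllib.request

proxy_handler = urllib.request.ProxyHandler({"http": "120.77.249.46:8080"})
opener = urllib.request.build_opener(proxy_handler)
# install_opener makes this opener the default, so plain urlopen goes through the proxy
urllib.request.install_opener(opener)
response = urllib.request.urlopen("http://www.baidu.com")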
7 Picking a proxy from a list and using it to make the request:
def proxy_user():
    proxy_list = [
        {"https": "106.75.226.36:808"},
        {"https": "61.135.217.7:80"},
        {"https": "125.70.13.77:8080"},
        {"https": "118.190.95.35:9001"}
    ]
    for proxy in proxy_list:
        print(proxy)
        # build a handler from the proxy we are iterating over
        proxy_handler = urllib.request.ProxyHandler(proxy)
        # create the opener
        opener = urllib.request.build_opener(proxy_handler)
        try:
            response = opener.open("http://www.baidu.com", timeout=1)
            data = response.read()
            print(data)
        except Exception as e:
            print(e)

proxy_user()
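To pick one proxy at random, as the section title suggests, rather than trying each in turn, random.choice does it; a short self-contained sketch:

import random
import urllib.request

proxy_list = [
    {"https": "106.75.226.36:808"},
    {"https": "61.135.217.7:80"},
]
# choose a single proxy at random instead of iterating over all of them
proxy = random.choice(proxy_list)
opener = urllib.request.build_opener(urllib.request.ProxyHandler(proxy))
try:
    print(opener.open("http://www.baidu.com", timeout=1).read())
except Exception as e:
    print(e)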
8 Handling a paid proxy:
def money_proxy_use():
    # first way to send a request through a paid proxy:
    # 1. the proxy ip with the credentials embedded
    # money_proxy = {"http": "username:pwd@192.168.12.11:8080"}
    # 2. the proxy handler
    # proxy_handler = urllib.request.ProxyHandler(money_proxy)
    # 3. build an opener from the handler
    # opener = urllib.request.build_opener(proxy_handler)
    # 4. send the request with open
    # opener.open("http://www.baidu.com")

    # second way: authenticate against the paid proxy explicitly
    # 1. the credentials and proxy address
    use_name = "abcname"
    pwd = "123456"
    proxy_money = "123.158.63.130:8888"
    # 2. create a password manager and add the username and password
    password_manager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    # a URI identifies a resource; a URL (resource locator) is one kind of URI
    password_manager.add_password(None, proxy_money, use_name, pwd)
    # 3. create a handler that can authenticate against the proxy
    handle_auth_proxy = urllib.request.ProxyBasicAuthHandler(password_manager)
    # 4. build an opener from the handler
    opener_auth = urllib.request.build_opener(handle_auth_proxy)
    # 5. send the request
    response = opener_auth.open("http://www.baidu.com")
    print(response.read())
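For comparison, requests (introduced in the next section) handles an authenticated proxy by embedding the credentials in the proxy URL; username, password, and address here are the same placeholders as above:

import requests

# placeholder credentials and proxy address
proxies = {"http": "http://abcname:123456@123.158.63.130:8888"}
response = requests.get("http://www.baidu.com", proxies=proxies)
print(response.status_code)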
9 The requests library
import requests

url = 'http://www.baidu.com'
response = requests.get(url)
# the content attribute returns bytes
data = response.content.decode('utf-8')
# the text attribute returns a decoded str
data = response.text
print(type(data))
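One caveat about text: requests guesses the encoding from the response headers, and the guess can be wrong. A sketch that inspects the guess and overrides it before reading text:

import requests

response = requests.get('http://www.baidu.com')
print(response.encoding)     # the encoding requests guessed from the headers
response.encoding = 'utf-8'  # override it before touching .text
print(response.text[:100])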
10 Request headers with the requests library
import requests

class RequestSpider(object):
    def __init__(self):
        url = 'https://www.baidu.com'
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
        }
        self.response = requests.get(url, headers=headers)

    def run(self):
        data = self.response.content
        # 1. the request headers that were sent
        request_headers = self.response.request.headers
        # 2. the response headers
        response_headers = self.response.headers
        # 3. the response status code
        code = self.response.status_code
        # 4. the cookies sent with the request
        request_cookie = self.response.request._cookies
        print(request_cookie)
        # 5. the cookies set by the response
        response_cookie = self.response.cookies
        print(response_cookie)

RequestSpider().run()
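Since run() already reads status_code, it is worth knowing raise_for_status(), which turns error status codes into exceptions; a minimal sketch:

import requests

response = requests.get('https://www.baidu.com')
# raises requests.exceptions.HTTPError for 4xx/5xx, does nothing on success
response.raise_for_status()
print(response.status_code)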
11 AUTH authentication on an intranet (e.g. Apache):
import requests

# send a POST request (the url and form fields are placeholders to fill in)
url = 'http://example.com/login'
data = {
}
response = requests.post(url, data=data)

# an intranet service may require authentication (placeholder credentials)
user = 'username'
pwd = 'password'
auth = (user, pwd)
response = requests.get(url, auth=auth)
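The (user, pwd) tuple is shorthand for HTTP basic auth; the explicit class spells out the same thing. A sketch with a placeholder url and credentials:

import requests
from requests.auth import HTTPBasicAuth

# url and credentials are placeholders
response = requests.get('http://example.com/intranet',
                        auth=HTTPBasicAuth('username', 'password'))
print(response.status_code)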
12 Requesting through a proxy with requests
import requests

url = 'http://www.baidu.com'
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
}
free_proxy = {'http': '27.17.45.90:43411'}
response = requests.get(url=url, headers=headers, proxies=free_proxy)
print(response.status_code)
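The proxies dict can map each scheme separately; requests picks the entry matching the target url's scheme. A sketch reusing the same free proxy for both schemes, plus a timeout (my addition, not in the original):

import requests

proxies = {
    'http': 'http://27.17.45.90:43411',
    'https': 'http://27.17.45.90:43411',
}
response = requests.get('http://www.baidu.com', proxies=proxies, timeout=5)
print(response.status_code)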
13 Handling SSL certificates
import requests

url = 'https://www.12306.cn/mormhweb/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
}
# https sites normally present a certificate signed by a third-party CA
# 12306 is https, but (at the time) its certificate was self-issued, not CA-signed
# the workaround: tell requests to skip certificate verification
response = requests.get(url=url, headers=headers, verify=False)
data = response.content.decode()
with open('03-ssl.html', 'w') as f:
    f.write(data)
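Note that verify=False makes urllib3 emit an InsecureRequestWarning on every request; it can be silenced explicitly. A sketch (skipping verification is a deliberate trade-off, only for sites where you accept the risk):

import requests
import urllib3

# suppress the InsecureRequestWarning that verify=False triggers
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
response = requests.get('https://www.12306.cn/mormhweb/', verify=False)
print(response.status_code)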