Crawling Tieba Images

First use of urllib to make the data requests for a crawler:
urllib.request.urlopen(url) issues a GET request
urllib.parse.quote() URL-encodes Chinese text
urllib.request.urlretrieve(url, filename) downloads url and saves it as filename
The crawled images are saved to a local directory; a short sketch of these calls, followed by the full script, is given below.
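A minimal, self-contained sketch of the three calls in isolation (the search keyword and the sample image URL are illustrative placeholders, not from the original script):

from urllib.request import urlopen, urlretrieve
from urllib.parse import quote

# quote() percent-encodes non-ASCII characters so they are safe inside a URL
keyword = quote('贴吧')                       # -> '%E8%B4%B4%E5%90%A7'
url = 'https://www.baidu.com/s?wd=' + keyword

# urlopen() sends a GET request and returns a response object
response = urlopen(url)
print(response.code)                          # 200 on success

# urlretrieve() downloads a URL straight into a local file
# (the image URL below is only a placeholder)
# urlretrieve('https://example.com/sample.jpg', 'sample.jpg')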

The full code is as follows:

from urllib.request import urlopen, urlretrieve, Request
from urllib.parse import quote

import ssl

# Skip HTTPS certificate verification to avoid SSL errors when fetching https URLs
ssl._create_default_https_context = ssl._create_unverified_context


def search_baidu(wd='千峰'):
    # URL of the network resource (the search interface)
    url = 'https://www.baidu.com/s?wd=%s'
    # Build a Request object that wraps the request URL and headers
    request = Request(url % quote(wd),
                      headers={
                          'Cookie': 'BIDUPSID=585E43DE7CB2B860C3B9C269E2C4D929; PSTM=1579580069; BD_UPN=12314753; BAIDUID=10484BA386BB3BF20C7E02FBB519B4CD:FG=1; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BDSFRCVID=cV-OJeC62ZCrRLTrhEVfKF6Sa27L6EvTH6f3T38pC4vGwLFcuDPiEG0PSM8g0KubwmWVogKKBmOTHnuF_2uxOjjg8UtVJeC6EG0Ptf8g0f5; H_BDCLCKID_SF=tb4qoCDbJDt3qPbNq4Tq5b-eKxRJa4r-HD7yW6rJaDvjS66Oy4oTj6DlLp5NWp3H2gTp0qvIJfOHhC3sjxo-3MvBbGbnJ5vj0bnqK-313JOhsMJYQft20htIeMtjBbQabKjU-J7jWhvIDq72y-ThQlRX5q79atTMfNTJ-qcH0KQpsIJM5-DWbT8EjH62btt_tJPDoK5P; delPer=0; BD_CK_SAM=1; PSINO=2; BDUSS=9uYVhkbEEzWW5JblR0LXlqeGR3b3p5N2t1Q0NzR3puOXhBNW1tR3ZnTXlXV1ZmRVFBQUFBJCQAAAAAAAAAAAEAAACjAMfnzt7H6bXEztLP8bfJ0akAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADLMPV8yzD1fbW; BDUSS_BFESS=9uYVhkbEEzWW5JblR0LXlqeGR3b3p5N2t1Q0NzR3puOXhBNW1tR3ZnTXlXV1ZmRVFBQUFBJCQAAAAAAAAAAAEAAACjAMfnzt7H6bXEztLP8bfJ0akAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADLMPV8yzD1fbW; BD_HOME=1; COOKIE_SESSION=4904_0_8_6_11_24_0_2_7_4_1_1_0_0_15_0_1597883995_0_1597888884%7C9%231182301_18_1597881542%7C9; BDRCVFR[Qs-Y_7gNldt]=OjjlczwSj8nXy4Grjf8mvqV; H_PS_PSSID=; sug=3; sugstore=0; ORIGIN=0; bdime=0',
                          'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML,'
                                        ' like Gecko) Chrome/84.0.4147.125 Safari/537.36'
                      })
    response = urlopen(request)  # send the request
    assert response.code == 200
    print('Request succeeded')
    # Read the response data
    bytes_ = response.read()
    with open('%s.html' % wd, 'wb') as file:
        file.write(bytes_)


def download_img(url):
    # Extract the file name from the URL
    filename = url[url.rfind('/') + 1:]
    urlretrieve(url, filename)


if __name__ == '__main__':
    # search_baidu()
    download_img('https://www.dy2018.com/d/file/html/gndy/dyzz/2020-08-20/7a861af82beb6e25cd6729988c545c61.jpg')
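
The script above downloads just one image into the working directory. As a follow-up, here is a minimal sketch (not part of the original code) of how the same urlopen/urlretrieve calls could be combined with a regular expression to save every image of a Tieba post into a directory; the post URL, the imgsrc.baidu.com pattern, and the 'images' directory name are all illustrative assumptions.

import os
import re
from urllib.request import Request, urlopen, urlretrieve


def crawl_tieba_images(page_url, save_dir='images'):
    # Create the target directory if it does not exist yet
    os.makedirs(save_dir, exist_ok=True)
    # Fetch the post page with a browser-like User-Agent
    request = Request(page_url, headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                      ' (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36'
    })
    html = urlopen(request).read().decode('utf-8', errors='ignore')
    # Assumed pattern: post images are served from imgsrc.baidu.com;
    # adjust the regular expression to the actual page markup
    img_urls = re.findall(r'src="(https?://imgsrc\.baidu\.com/[^"]+?\.jpg)"', html)
    for img_url in img_urls:
        filename = img_url[img_url.rfind('/') + 1:]
        urlretrieve(img_url, os.path.join(save_dir, filename))


# crawl_tieba_images('https://tieba.baidu.com/p/1234567890')  # hypothetical post URL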