import requests
import time
from bs4 import BeautifulSoup

# Browser-style request headers: the User-Agent disguises the scraper as a browser,
# and the Cookie was copied from the author's own session, so it may expire.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36',
    'Cookie': 'gr_user_id=c6f58a39-ea25-4f58-b448-545070192c4e; 59a81cc7d8c04307ba183d331c373ef6_gr_last_sent_cs1=N%2FA; grwng_uid=9ec14ad9-5ac0-4bb1-81c1-bc60d2685710; _uab_collina=154660443606130958890473; xz_guid_4se=7cdf79ce-1836-4fcb-acfa-4ebdf3a01a01; abtest_ABTest4SearchDate=b; TY_SESSION_ID=93f80c09-2ebd-4436-b321-46e658b852cb; 59a81cc7d8c04307ba183d331c373ef6_gr_session_id=fa326b17-1b31-4da3-923f-35c7b967328c; 59a81cc7d8c04307ba183d331c373ef6_gr_last_sent_sid_with_cs1=fa326b17-1b31-4da3-923f-35c7b967328c; 59a81cc7d8c04307ba183d331c373ef6_gr_session_id_fa326b17-1b31-4da3-923f-35c7b967328c=true; xzuuid=4df005f6; rule_math=m5srppmeot'}

def get_info(url):
    # Fetch one chart page and print rank, singer, song and duration for every entry.
    web_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    # CSS selectors copied from the browser's "Copy selector" feature for the rank list.
    ranks = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > span.pc_temp_num')
    titles = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > a')
    times = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > span.pc_temp_tips_r > span')
    try:
        for rank, title, song_time in zip(ranks, titles, times):
            data = {
                'rank': rank.get_text().strip(),
                'singer': title.get_text().split('-')[0].strip(),
                'song': title.get_text().split('-')[1].strip(),
                'time': song_time.get_text().strip()
            }
            print(data)
            print()  # blank line between entries
    except IndexError as e:
        print(e)

if __name__ == '__main__':
    urls = ['https://www.kugou.com/yy/rank/home/{}-31308.html?from=rank'.format(number) for number in range(1, 6)]
    for url in urls:
        get_info(url)
        print('-------------------------- page separator --------------------------')
        time.sleep(1)

Knowledge points used:

  1. requests.get(url, headers=headers) from the requests library: headers is a set of disguise headers that makes the request look like it comes from a regular browser.
  2. BeautifulSoup(web_data.text, 'lxml') parses the fetched page; the select() method extracts elements using a CSS selector obtained via the browser's "Copy selector" feature, and get_text() returns the text inside an element (see the sketch after this list).
  3. split('-') splits a string on the '-' character; split('-')[0] takes the first part.
  4. ['https://www.kugou.com/yy/rank/home/{}-31308.html?from=rank'.format(number) for number in range(1, 6)] builds the list of page URLs with a list comprehension.
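
To make points 2 to 4 concrete, here is a minimal, self-contained sketch. It runs against an inline HTML string instead of the live site; the markup below only imitates the structure the selectors above expect and is an assumption, not the real kugou.com page.

from bs4 import BeautifulSoup

# Hypothetical HTML that only imitates the rank-list structure; the real page may differ.
html = '''
<div id="rankWrap"><div class="pc_temp_songlist"><ul>
  <li><span class="pc_temp_num">1</span><a>Singer A - Song A</a>
      <span class="pc_temp_tips_r"><span>3:45</span></span></li>
  <li><span class="pc_temp_num">2</span><a>Singer B - Song B</a>
      <span class="pc_temp_tips_r"><span>4:02</span></span></li>
</ul></div></div>
'''

soup = BeautifulSoup(html, 'lxml')
ranks = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > span.pc_temp_num')
titles = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > a')
times = soup.select('#rankWrap > div.pc_temp_songlist > ul > li > span.pc_temp_tips_r > span')

for rank, title, song_time in zip(ranks, titles, times):
    singer, song = title.get_text().split('-')  # point 3: split the "singer - song" text on '-'
    print({
        'rank': rank.get_text().strip(),        # point 2: get_text() returns the element's text
        'singer': singer.strip(),
        'song': song.strip(),
        'time': song_time.get_text().strip(),
    })

# Point 4: the list comprehension builds the five chart-page URLs (pages 1 to 5).
urls = ['https://www.kugou.com/yy/rank/home/{}-31308.html?from=rank'.format(number) for number in range(1, 6)]
print(urls[0])
print(urls[-1])

Running this prints two dictionaries in the same shape as get_info() produces, plus the first and last generated URL, without sending any network requests.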