import requests
from bs4 import BeautifulSoup

# QQ channel index pages to scrape for headline links.
# (The original list contained "http://ent.qq.com/" twice; the duplicate
# fetched the same page a second time and is removed here.)
url = [
    "http://ent.qq.com/",
    "http://finance.qq.com/",
    "http://stock.qq.com/hk/",
    "http://tech.qq.com/",
    "http://fashion.qq.com/",
    "http://mil.qq.com/mil_index.htm",
]

# Open the output file once in append mode for the whole run instead of
# re-opening it for every single headline; explicit UTF-8 so the Chinese
# titles are written consistently regardless of platform default encoding.
with open('1.txt', 'a', encoding='utf-8') as f:
    for page_url in url:
        # Fetch the page body as text; .text decodes using the encoding
        # detected from the response. The timeout prevents the script from
        # hanging indefinitely on an unresponsive host.
        wbdata = requests.get(page_url, timeout=10).text

        soup = BeautifulSoup(wbdata, 'lxml')
        # CSS selector targeting headline anchors:
        # <div class="text"> <em class="f14"> <a class="linkto">
        news_titles = soup.select("div.text > em.f14 > a.linkto")

        # Extract the title text and href from each matched anchor.
        for n in news_titles:
            data = {
                '标题': n.get_text(),
                '链接': n.get("href"),
            }
            print(data['标题'], data['链接'])  # echo what was scraped
            print(data['标题'], data['链接'], file=f)