I recently came across this shell interview question: fetch the 51CTO blog list sorted by date in descending order (http://oldboy.blog.51cto.com/2561410/1860985).
Having studied Python for a while, I wanted to see whether the same problem could be solved in Python.
Approach: fetch the page source with the requests module, extract the total page count with split(), and build the URL of every list page by string concatenation. Then fetch each list page with requests, pull out the key information with a regular expression, and assemble an HTML page from the results.
#coding:utf-8
import requests
import re, sys

# Python 2: force utf-8 as the default encoding so the mixed str/unicode
# formatting below does not raise UnicodeDecodeError
reload(sys)
sys.setdefaultencoding('utf-8')

# get the total number of pages from the pager text
def page():
    url = 'http://oldboy.blog.51cto.com/all/2561410'
    content = requests.get(url).content
    msg = re.findall(r'<center>(.*)</center>', content)[0]
    return msg.split('/')[-1].split()[0]
# extract the key information from every list page and assemble the HTML
def oldboy():
    uri = 'http://oldboy.blog.51cto.com/all/2561410/page/'
    num = page()
    res = []
    for i in range(int(num)):
        url = uri + str(i + 1)
        content = requests.get(url).content
        # each match is a (relative URL, gbk-encoded title) tuple
        res += re.findall(r'<a href="(/\d+/\d+)">(.*)</a>', content)
    # res looks like [('/2561410/1867160','2017\xd7\xee\xd0\xc2\xc6\xf3\xd2\xb5Shell\xc3\xe6\xca\xd4\xcc\xe2\xbc\xb0\xc6\xf3\xd2\xb5\xd4\xcb\xce\xac\xca\xb5\xd5\xbd\xb9\xb230\xb5\xc0\xb0\xb8\xc0\xfd'),...]
    html = ''
    for i in res:
        msg = i[1].decode('gbk')  # the page itself is gbk-encoded
        href = 'http://oldboy.blog.51cto.com' + i[0]
        html += "<p><br></p><p>{0}</p><p><a href='{1}' target='_blank'>{1}</a></p>".format(msg, href)
    # html looks like "<p><br></p><p>2017最新企业Shell面试题及企业运维实战共30道案例</p><p><a href='http://oldboy.blog.51cto.com/2561410/1867160' target='_blank'>http://oldboy.blog.51cto.com/2561410/1867160</a></p>..."
    with open('oldboy_blog.html', 'w') as f:
        f.write(html)

if __name__ == '__main__':
    oldboy()
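As a quick sanity check of the split() chain in page(), here is a minimal sketch; the pager text '1/7 页' below is only an assumed example of what the regex captures, not verified against the live page:

#coding:utf-8
# hypothetical pager text in the form "current/total 页"
msg = '1/7 页'
# split('/')[-1] -> '7 页', then split()[0] -> '7'
print msg.split('/')[-1].split()[0]   # prints: 7

Saving the full script as, say, get_blog.py and running python get_blog.py writes oldboy_blog.html to the current directory, which can be opened directly in a browser.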
Attached below is an alternative that fetches the blog list with the Scrapy framework.
#!/usr/bin/python
#coding:utf-8
import scrapy
from scrapy.selector import Selector
import os, sys

# Python 2: same default-encoding workaround as above
reload(sys)
sys.setdefaultencoding('utf-8')

class Blog(scrapy.Spider):
    name = 'Blog'

    def start_requests(self):
        urls = ['http://oldboy.blog.51cto.com/all/2561410']
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        baseurl = 'http://oldboy.blog.51cto.com'
        content = Selector(response=response)
        items = content.xpath('//div[@class="artList box"]//div[@class="modCon"]/ul/li')
        for i in range(len(items)):
            name = content.xpath('//div[@class="artList box"]//div[@class="modCon"]/ul/li[{}]/span[@class="artList_tit"]/a/text()'.format(i+1)).extract()
            url = content.xpath('//div[@class="artList box"]//div[@class="modCon"]/ul/li[{}]/span[@class="artList_tit"]/a/@href'.format(i+1)).extract()
            self.save(name[0], baseurl + url[0])
        current_page_num = content.xpath('//div[@class="artList box"]//div[@class="modCon"]/div//b/text()').extract()
        # for the first 4 pages the next-page link is the <a> at position
        # current page + 1; from page 5 onward it is always the 5th <a>
        if int(current_page_num[0]) < 5:
            next_url = content.xpath('//div[@class="artList box"]//div[@class="modCon"]/div//a[{}]/@href'.format(int(current_page_num[0]) + 1)).extract()
        else:
            next_url = content.xpath('//div[@class="artList box"]//div[@class="modCon"]/div//a[5]/@href').extract()
        if next_url:
            url = baseurl + next_url[0]
            yield scrapy.Request(url=url, callback=self.parse)

    def save(self, name, url):
        # append each entry to blog.html in the working directory
        filename = os.path.join(os.getcwd(), 'blog.html')
        with open(filename, 'a+') as f:
            f.write("<p><br></p><p>{0}</p><p><a href='{1}' target='_blank'>{1}</a></p>".format(name, url))
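The spider can be run without creating a full Scrapy project via the runspider command (blog_spider.py is just an assumed filename):

scrapy runspider blog_spider.py

Each parsed entry is appended to blog.html as the spider follows the next-page links, so the output ends up in the same title-plus-link format as the requests version.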