#coding=utf-8
import requests
from lxml import etree
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import sessionmaker
def requests_view(response):
    import webbrowser
    requests_url = response.url
    base_url = '<head><base href="%s">' % (requests_url)
    base_url = base_url.encode('utf-8')
    content = response.content.replace(b"<head>", base_url)
    tmp_html = open('tmp.html', 'wb')
    tmp_html.write(content)
    tmp_html.close()
    webbrowser.open_new_tab("tmp.html")
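# requests_view is a debugging aid: it injects a <base href> tag pointing at the
# original URL so relative links in the saved copy still resolve, then opens the
# local file in a browser. A usage sketch (it is only called, commented out,
# inside get_html below):
#     requests_view(response)  # preview exactly what the crawler received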
host = "http://sz.ganji.com/fang1/o{}"
max_page = 10  # renamed from "max" to avoid shadowing the builtin
engine = create_engine('mysql+mysqldb://root:root@192.168.33.30:3306/python?charset=utf8',echo=True,encoding='utf8')
Base = declarative_base()
class Ganji(Base):
    __tablename__ = 'ganji'
    id = Column(Integer, primary_key=True)
    title = Column(String(100))
    money = Column(String(100))
    info = Column(String(100))
    create_time = Column(String(30))

    def __repr__(self):
        # The model has no "username" attribute; represent rows by their title.
        return '%s(%r)' % (self.__class__.__name__, self.title)
# Run once on first use to create the table, then comment out again:
# Base.metadata.create_all(engine)
# exit()
def save_data(title, money, info):
    import datetime
    # Create a session:
    DBSession = sessionmaker(bind=engine)
    session = DBSession()
    # Create a new Ganji record, stamped with the current time
    # (formatted to fit the String(30) column):
    create_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    new_ganji = Ganji(title=title, money=money, info=info, create_time=create_time)
    # Add it to the session:
    session.add(new_ganji)
    # Commit to persist it to the database:
    session.commit()
    # Close the session:
    session.close()
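# Note: sessionmaker is normally built once at module scope and reused, rather
# than on every call as above. A minimal sketch of that pattern (an assumption,
# not part of the original script):
#     DBSession = sessionmaker(bind=engine)  # module level, created once
#     def save_data(title, money, info):
#         session = DBSession()
#         ...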
def get_html(url):
    headers = {'Referer': 'http://callback.ganji.com/firewall/valid/1902788594.do?namespace=ganji_zufang_list_pc&url=http%3A%2F%2Fsz.ganji.com%2Ffang1%2F',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36'}
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        # requests_view(response)  # uncomment to preview the fetched page
        html = etree.HTML(response.content.decode('utf-8'))
        items = html.xpath(".//div[@class='f-main-list']/div/div")
        print(len(items))
        for i in items:
            title = i.xpath(".//dd[@class='dd-item title']/a/text()")
            money = i.xpath(".//dd[@class='dd-item info']/div[@class='price']/span/text()")
            info = i.xpath(".//dd[@class='dd-item size']/span/text()")
            print(info)
            # Join the extracted text nodes and strip surrounding whitespace:
            title = ' '.join(title).strip()
            money = ' '.join(money).strip()
            info = ' '.join(info).strip()
            if len(title) > 0 and len(money) > 0 and len(info) > 0:
                save_data(title, money, info)
            else:
                print("No data extracted for this item")
    else:
        print("Request failed")
try:
    for i in range(1, max_page):
        url = host.format(i)
        print(url)
        get_html(url)
except Exception as e:
    print(str(e))
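# To spot-check what was stored, a minimal sketch reusing the engine and model
# above (assumption: the crawl above has already populated the table):
check_session = sessionmaker(bind=engine)()
for row in check_session.query(Ganji).order_by(Ganji.id.desc()).limit(5):
    print(row.title, row.money, row.info)
check_session.close()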