作者:XksA

爬虫准备

1、先获取薪资和学历、工作经验要求

由于拉勾网的数据是动态加载的,需要我们先分析它的加载方式。分析方法如下:

F12分析页面数据存储位置

我们发现网页内容是通过post请求得到的,返回数据是json格式,那我们直接拿到json数据即可。

我们只需要薪资和学历、工作经验还有单个招聘信息,返回json数据字典中对应的英文为:positionId,salary, education, workYear(positionId为单个招聘信息详情页面编号)。相关操作代码如下:

根据获取到的positionId来访问招聘信息详细页面根据positionId还原访问链接:

position_url = []
def read_csv():
# 读取文件内容
with open(r'G:\lagou_anv.csv', 'r', newline='') as file_test:
# 读文件
reader = csv.reader(file_test)
i = 0
for row in reader:
if i != 0 :
# 根据positionID补全链接
url_single = "https://www.lagou.com/jobs/%s.html"%row[0]
position_url.append(url_single)
i = i + 1
print('一共有:'+str(i-1)+'个')
print(position_url)访问招聘信息详情页面,获取职位描述(岗位职责和岗位要求)并清理数据:
def get_info():
    """Fetch each job-detail page and split its description into duties
    and requirements, writing each part to its own file.

    Relies on module-level names defined elsewhere in this script:
    ``position_urls``, ``headers``, ``get_response``, ``write_file``
    and ``write_file2``.
    """
    for position_url in position_urls:
        work_duty = ''
        work_requirement = ''
        response00 = get_response(position_url, headers=headers)
        time.sleep(1)  # throttle requests to avoid being blocked
        content = response00.xpath('//*[@id="job_detail"]/dd[2]/div/p/text()')
        # --- data cleaning, first numbered list: job duties ---
        j = 0
        for i in range(len(content)):
            content[i] = content[i].replace('\xa0', ' ')
            if content[i][0].isdigit():
                if j == 0:
                    # First numbered item of the duties list.
                    content[i] = content[i][2:].replace('、', ' ')
                    content[i] = re.sub('[;;.0-9。]', '', content[i])
                    work_duty = work_duty + content[i] + '/'
                    j = j + 1
                elif content[i][0] == '1' and not content[i][1].isdigit():
                    # A second item numbered "1" marks the start of the
                    # requirements list — stop collecting duties here.
                    break
                else:
                    content[i] = content[i][2:].replace('、', ' ')
                    content[i] = re.sub('[、;;.0-9。]', '', content[i])
                    work_duty = work_duty + content[i] + '/'
            m = i  # index where the duties section ended
        # Job duties
        write_file(work_duty)
        print(work_duty)
        # --- data cleaning, second numbered list: job requirements ---
        j = 0
        for i in range(m, len(content)):
            content[i] = content[i].replace('\xa0', ' ')
            if content[i][0].isdigit():
                if j == 0:
                    content[i] = content[i][2:].replace('、', ' ')
                    content[i] = re.sub('[、;;.0-9。]', '', content[i])
                    work_requirement = work_requirement + content[i] + '/'
                    j = j + 1
                elif content[i][0] == '1' and not content[i][1].isdigit():
                    # Limit the range to this list only.
                    break
                else:
                    content[i] = content[i][2:].replace('、', ' ')
                    content[i] = re.sub('[、;;.0-9。]', '', content[i])
                    work_requirement = work_requirement + content[i] + '/'
        # Job requirements
        write_file2(work_requirement)
        print(work_requirement)
        print("-----------------------------")
运行结果:

四种图可视化数据 + 数据清理方式

矩形树图:
# 1.矩形树图可视化学历要求
from pyecharts import TreeMap
education_table = {}
for x in education:
education_table[x] = education.count(x)
key = []
values = []
for k,v in education_table.items():
key.append(k)
values.append(v)

data = []
for i in range(len(key)) :
dict_01 = {"value": 40, "name": "我是A"}
dict_01["value"] = values[i]
dict_01["name"] = key[i]
data.append(dict_01)
tree_map = TreeMap("矩形树图", width=1200, height=600)
tree_map.add("学历要求",data, is_label_show=True, label_pos='inside')玫瑰饼图:
# 2. Rose pie chart: visualize the salaries
import re
import math


def assort_salary(str_01):
    """Parse a salary string and return its numeric midpoint.

    Example: ``'20k-30k'`` -> ``25.0``.  A single-value string such as
    ``'15k'`` returns that value itself as a float.
    """
    # Raw string so the \d escape is passed to the regex engine intact.
    reg_str01 = r"(\d+)"
    res_01 = re.findall(reg_str01, str_01)
    if len(res_01) == 2:
        a0 = int(res_01[0])
        b0 = int(res_01[1])
    else:
        # Single number: use it for both endpoints.
        a0 = int(res_01[0])
        b0 = int(res_01[0])
    return (a0 + b0) / 2

from pyecharts import Pie
salary_table = {}
for x in salary:
salary_table[x] = salary.count(x)

key = ['5k以下','5k-10k','10k-20k','20k-30k','30k-40k','40k以上']
a0,b0,c0,d0,e0,f0=[0,0,0,0,0,0]

for k,v in salary_table.items():
ave_salary = math.ceil(assort_salary(k))
print(ave_salary)
if ave_salary < 5:
a0 = a0 + v
elif ave_salary in range(5,10):
b0 = b0 +v
elif ave_salary in range(10,20):
c0 = c0 +v
elif ave_salary in range(20,30):
d0 = d0 +v
elif ave_salary in range(30,40):
e0 = e0 +v
else :
f0 = f0 + v
values = [a0,b0,c0,d0,e0,f0]

pie = Pie("薪资玫瑰图", title_pos='center', width=900)
pie.add("salary",key,values,center=[40, 50],is_random=True,radius=[30, 75],rosetype="area",is_legend_show=False,is_label_show=True)普通柱状图:
# 3.工作经验要求柱状图可视化
from pyecharts import Bar
workYear_table = {}
for x in workYear:
workYear_table[x] = workYear.count(x)
key = []
values = []
for k,v in workYear_table.items():
key.append(k)
values.append(v)
bar = Bar("柱状图")
bar.add("workYear", key, values, is_stack=True,center= (40,60))词云图:
# 4. Word cloud of the most frequent terms in the requirement texts.
import jieba
from pyecharts import WordCloud
import pandas as pd
import re, numpy

# Raw string so the Windows backslashes are taken literally
# (avoids invalid-escape warnings).
stopwords_path = r'H:\PyCoding\Lagou_analysis\stopwords.txt'


def read_txt():
    """Segment the requirements text with jieba, drop stopwords, and
    render the 200 most frequent words as an HTML word cloud."""
    with open(r"G:\lagou\Content\ywkf_requirement.txt", encoding='gbk') as file:
        text = file.read()
    content = text
    # Strip punctuation and line breaks before segmentation.
    content = re.sub('[,,。.\r\n]', '', content)
    segment = jieba.lcut(content)
    words_df = pd.DataFrame({'segment': segment})
    # quoting=3 (csv.QUOTE_NONE): take stopwords.txt lines verbatim.
    stopwords = pd.read_csv(stopwords_path, index_col=False, quoting=3,
                            sep="\t", names=['stopword'], encoding='utf-8')
    words_df = words_df[~words_df.segment.isin(stopwords.stopword)]
    # Count occurrences per word.  NOTE: dict-based column renaming in
    # .agg() was removed in pandas 1.0; named aggregation is the
    # supported equivalent and yields the same "计数" column.
    words_stat = words_df.groupby('segment').agg(计数=('segment', numpy.size))
    words_stat = words_stat.reset_index().sort_values(by=["计数"], ascending=False)
    test = words_stat.head(200).values
    codes = [test[i][0] for i in range(0, len(test))]
    counts = [test[i][1] for i in range(0, len(test))]
    wordcloud = WordCloud(width=1300, height=620)
    wordcloud.add("必须技能", codes, counts, word_size_range=[20, 100])
    wordcloud.render(r"H:\PyCoding\Lagou_analysis\cloud_pit\ywkf_bxjn.html")