原始数据:
ID FL APP AA LA SC LC HON SMS EXP DRV AMB GSP POT KJ SUIT
0 1 6 7 2 5 8 7 8 8 3 8 9 7 5 7 10
1 2 9 10 5 8 10 9 9 10 5 9 9 8 8 8 10
2 3 7 8 3 6 9 8 9 7 4 9 9 8 6 8 10
3 4 5 6 8 5 6 5 9 2 8 4 5 8 7 6 5
4 5 6 8 8 8 4 4 9 5 8 5 5 8 8 7 7
5 6 7 7 7 6 8 7 10 5 9 6 5 8 6 6 6
6 7 9 9 8 8 8 8 8 8 10 8 10 8 9 8 10
7 8 9 9 9 8 9 9 8 8 10 9 10 9 9 9 10
8 9 9 9 7 8 8 8 8 5 9 8 9 8 8 8 10
9 10 4 7 10 2 10 10 7 10 3 10 10 10 9 3 10
10 11 4 7 10 0 10 8 3 9 5 9 10 8 10 2 5
11 12 4 7 10 4 10 10 7 8 2 8 8 10 10 3 7
12 13 6 9 8 10 5 4 9 4 4 4 5 4 7 6 8
13 14 8 9 8 9 6 3 8 2 5 2 6 6 7 5 6
14 15 4 8 8 7 5 4 10 2 7 5 3 6 6 4 6
15 16 6 9 6 7 8 9 8 9 8 8 7 6 8 6 10
16 17 8 7 7 7 9 5 8 6 6 7 8 6 6 7 8
17 18 6 8 8 4 8 8 6 4 3 3 6 7 2 6 4
18 19 6 7 8 4 7 8 5 4 4 2 6 8 3 5 4
19 20 4 8 7 8 8 9 10 5 2 6 7 9 8 8 9
20 21 3 8 6 8 8 8 10 5 3 6 7 8 8 5 8
21 22 9 8 7 8 9 10 10 10 3 10 8 10 8 10 8
22 23 7 10 7 9 9 9 10 10 3 9 9 10 9 10 8
23 24 9 8 7 10 8 10 10 10 2 9 7 9 9 10 8
24 25 6 9 7 7 4 5 9 3 2 4 4 4 4 5 4
25 26 7 8 7 8 5 4 8 2 3 4 5 6 5 5 6
26 27 2 10 7 9 8 9 10 5 3 5 6 7 6 4 5
27 28 6 3 5 3 5 3 5 0 0 3 3 0 0 5 0
28 29 4 3 4 3 3 0 0 0 0 4 4 0 0 5 0
29 30 4 6 5 6 9 4 10 3 1 3 3 2 2 7 3
30 31 5 5 4 7 8 4 10 3 2 5 5 3 4 8 3
31 32 3 3 5 7 7 9 10 3 2 5 3 7 5 5 2
32 33 2 3 5 7 7 9 10 3 2 2 3 6 4 5 2
33 34 3 4 6 4 3 3 8 1 1 3 3 3 2 5 2
34 35 6 7 4 3 3 0 9 0 1 0 2 3 1 5 3
35 36 9 8 5 5 6 6 8 2 2 2 4 5 6 6 3
36 37 4 9 6 4 10 8 8 9 1 3 9 7 5 3 2
37 38 4 9 6 6 9 9 7 9 1 2 10 8 5 5 2
38 39 10 6 9 10 9 10 10 10 10 10 8 10 10 10 10
39 40 10 6 9 10 9 10 10 10 10 10 10 10 10 10 10
40 41 10 7 8 0 2 1 2 0 10 2 0 3 0 0 10
41 42 10 3 8 0 1 1 0 0 10 0 0 0 0 0 10
42 43 3 4 9 8 2 4 5 3 6 2 1 3 3 3 8
43 44 7 7 7 6 9 8 8 6 8 8 10 8 8 6 5
44 45 9 6 10 9 7 7 10 2 1 5 5 7 8 4 5
45 46 9 8 10 10 7 9 10 3 1 5 7 9 9 4 4
46 47 0 7 10 3 5 0 10 0 0 2 2 0 0 0 0
47 48 0 6 10 1 5 0 10 0 0 2 2 0 0 0 0
import pandas as pd
import numpy as np
import math as math
import numpy as np
from numpy import *
from scipy.stats import bartlett
from factor_analyzer import *
import numpy.linalg as nlg
from sklearn.cluster import KMeans
from matplotlib import cm
import matplotlib.pyplot as plt
def main():
    """Run a factor analysis of the applicant-rating data set.

    Reads ``./data/applicant.csv`` and then:
      1. prints the Pearson correlation matrix and shows it as a heat map;
      2. computes the KMO measure and a Bartlett test as sampling-adequacy checks;
      3. extracts five factors from the eigendecomposition of the correlation
         matrix, rotates the loadings, and derives factor-score coefficients;
      4. prints each applicant's five factor scores and a weighted composite
         score, and plots the composite scores as bar charts.

    Side effects only (console output and matplotlib windows); returns None.
    """
    df = pd.read_csv("./data/applicant.csv")
    df2 = df.copy()
    print("\n原始数据:\n", df2)
    del df2['ID']  # ID is an identifier, not a measured variable

    # Pearson correlation coefficients between the 15 rating variables.
    df2_corr = df2.corr()
    print("\n相关系数:\n", df2_corr)

    # Heat map of the correlation matrix.
    cmap = cm.Blues
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Renamed from 'map' to avoid shadowing the builtin.
    heatmap = ax.imshow(df2_corr, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)
    plt.title('correlation coefficient--headmap')
    ax.set_yticks(range(len(df2_corr.columns)))
    ax.set_yticklabels(df2_corr.columns)
    ax.set_xticks(range(len(df2_corr)))
    ax.set_xticklabels(df2_corr.columns)
    plt.colorbar(heatmap)
    plt.show()

    # Kaiser-Meyer-Olkin measure of sampling adequacy.
    def kmo(dataset_corr):
        """Return the overall KMO statistic for a correlation matrix.

        KMO = sum(r^2) / (sum(r^2) + sum(partial r^2)), diagonals excluded,
        where the partial correlations come from the inverse correlation matrix.
        """
        corr_inv = np.linalg.inv(dataset_corr)
        nrow_inv_corr, ncol_inv_corr = dataset_corr.shape
        # A holds the anti-image (negative partial) correlations; symmetric.
        A = np.ones((nrow_inv_corr, ncol_inv_corr))
        for i in range(nrow_inv_corr):
            for j in range(i, ncol_inv_corr):
                A[i, j] = -(corr_inv[i, j]) / (math.sqrt(corr_inv[i, i] * corr_inv[j, j]))
                A[j, i] = A[i, j]
        dataset_corr = np.asarray(dataset_corr)
        # Diagonals of both matrices square to 1, so subtracting diag(A)^2
        # removes the self-correlations from each sum.
        kmo_num = np.sum(np.square(dataset_corr)) - np.sum(np.square(np.diagonal(A)))
        kmo_denom = kmo_num + np.sum(np.square(A)) - np.sum(np.square(np.diagonal(A)))
        return kmo_num / kmo_denom

    print("\nKMO测度:", kmo(df2_corr))

    # Bartlett's test, applied to the rows of the correlation matrix as in the
    # original analysis. NOTE(review): scipy's bartlett() is a test for equal
    # variances, not Bartlett's test of sphericity -- confirm this is intended.
    df2_corr1 = df2_corr.values
    print("\n巴特利特球形检验:", bartlett(df2_corr1[0], df2_corr1[1], df2_corr1[2], df2_corr1[3], df2_corr1[4],
                                          df2_corr1[5], df2_corr1[6], df2_corr1[7], df2_corr1[8], df2_corr1[9],
                                          df2_corr1[10], df2_corr1[11], df2_corr1[12], df2_corr1[13], df2_corr1[14]))

    # Eigenvalues and eigenvectors of the correlation matrix R.
    eig_value, eigvector = nlg.eig(df2_corr)
    eig = pd.DataFrame()
    eig['names'] = df2_corr.columns
    eig['eig_value'] = eig_value
    eig.sort_values('eig_value', ascending=False, inplace=True)
    print("\n特征值\n:", eig)
    eig1 = pd.DataFrame(eigvector)
    eig1.columns = df2_corr.columns
    eig1.index = df2_corr.columns
    print("\n特征向量\n", eig1)

    # Number of common factors m: smallest m whose eigenvalues account for
    # at least 85% of total variance (m turns out to be 5 for this data).
    for m in range(1, 15):
        if eig['eig_value'][:m].sum() / eig['eig_value'].sum() >= 0.85:
            print("\n公因子个数:", m)
            break

    # Factor loading matrix: loading[j, i] = sqrt(lambda_i) * v[j, i].
    # NOTE(review): eig_value/eigvector are used in the order nlg.eig returned
    # them, which is not guaranteed to be descending -- confirm the first five
    # columns really correspond to the five largest eigenvalues.
    A = np.mat(np.zeros((15, 5)))
    for i in range(5):
        for j in range(15):
            # Original wrote A[j:, i] (a slice); only row j matters, so index it directly.
            A[j, i] = sqrt(eig_value[i]) * eigvector[j, i]
    a = pd.DataFrame(A)
    a.columns = ['factor1', 'factor2', 'factor3', 'factor4', 'factor5']
    a.index = df2_corr.columns
    print("\n因子载荷阵\n", a)

    # Inject our loadings into a FactorAnalyzer instance so we can reuse the
    # library's communality / variance summaries.
    fa = FactorAnalyzer(n_factors=5)
    fa.loadings_ = a
    print("\n特殊因子方差:\n", fa.get_communalities())  # communalities: shared-variance contribution per variable
    var = fa.get_factor_variance()  # (variance, proportional, cumulative) contributions
    print("\n解释的总方差(即贡献率):\n", var)

    # Rotate the loading matrix for interpretability (varimax by default).
    rotator = Rotator()
    b = pd.DataFrame(rotator.fit_transform(fa.loadings_))
    b.columns = ['factor1', 'factor2', 'factor3', 'factor4', 'factor5']
    b.index = df2_corr.columns
    print("\n因子旋转:\n", b)

    # Factor-score coefficients via the regression method: R^{-1} * B.
    X1 = np.mat(df2_corr)
    X1 = nlg.inv(X1)
    b = np.mat(b)
    factor_score = np.dot(X1, b)
    factor_score = pd.DataFrame(factor_score)
    factor_score.columns = ['factor1', 'factor2', 'factor3', 'factor4', 'factor5']
    factor_score.index = df2_corr.columns
    print("\n因子得分:\n", factor_score)

    # Each applicant's score on the five factors: raw data * coefficients.
    fa_t_score = np.dot(np.mat(df2), np.mat(factor_score))
    print("\n应试者的五个因子得分:\n", pd.DataFrame(fa_t_score))

    # Composite score: weight each factor by its variance contribution and
    # normalise by the cumulative contribution (0.864198).
    # NOTE(review): these constants were copied from `var` above -- they will
    # be stale if the input data changes; consider deriving them from `var`.
    wei = [[0.50092], [0.137087], [0.097055], [0.079860], [0.049277]]
    fa_t_score = np.dot(fa_t_score, wei) / 0.864198
    fa_t_score = pd.DataFrame(fa_t_score)
    fa_t_score.columns = ['综合得分']
    fa_t_score.insert(0, 'ID', range(1, 49))
    print("\n综合得分:\n", fa_t_score)
    print("\n综合得分:\n", fa_t_score.sort_values(by='综合得分', ascending=False).head(6))

    # Bar chart of every applicant's composite score.
    plt.figure()
    ax1 = plt.subplot(111)
    X = fa_t_score['ID']
    Y = fa_t_score['综合得分']
    plt.bar(X, Y, color="#87CEFA")
    plt.title('result00')
    ax1.set_xticks(range(len(fa_t_score)))
    ax1.set_xticklabels(fa_t_score.index)
    plt.show()

    # Bar chart of the top-five applicants by composite score.
    fa_t_score1 = fa_t_score.sort_values(by='综合得分', ascending=False).head()
    plt.figure()  # fix: start a fresh figure instead of drawing over the previous axes
    X1 = fa_t_score1['ID']
    Y1 = fa_t_score1['综合得分']
    plt.bar(X1, Y1, color="#87CEFA")
    plt.title('result01')
    plt.show()
# Script entry point: run the full factor-analysis pipeline when executed directly.
if __name__ == '__main__':
    main()