The maximal information coefficient (MIC) can capture nonlinear correlations. Its drawback is that it is very slow on large datasets, because the grid search involves a large number of loops: with MINE(alpha=0.6, c=15), alpha=0.6 is the empirically recommended parameter, and for a sample of size N the search examines grids of up to roughly N**0.6 cells, so this exponent can be lowered when necessary to cut the cost. This article does the computation with the minepy library and accelerates it with multi-process parallelism via joblib. One could also consider implementing the algorithm by hand with numpy and accelerating it with numba for a further speedup.
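As a quick sanity check on the nonlinearity claim (a minimal sketch; the quadratic relation, sample size, and seed are arbitrary illustrations, not part of the original analysis), MIC scores a noiseless y = x**2 dependence near 1 while the Pearson coefficient stays near 0:

import numpy as np
from scipy.stats import pearsonr
from minepy import MINE

rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, 1000)
y = x ** 2                            # deterministic but nonlinear dependence

m = MINE(alpha=0.6, c=15)
m.compute_score(x, y)
print("MIC:", m.mic())                # near 1: dependence detected
print("Pearson:", pearsonr(x, y)[0])  # near 0: a linear measure misses it

The full script used for the pairwise computation follows.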

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from minepy import MINE
import itertools
import seaborn as sns
import matplotlib as mpl
from joblib import Parallel, delayed
from sklearn.decomposition import PCA
#keep pandas from using scientific notation; lambda x: '%.2f' % x gives 2 decimals (.0f for 0, .3f for 3)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
#show all columns
pd.set_option('display.max_columns', None)
#show all rows
pd.set_option('display.max_rows', None)
#silence the SettingWithCopyWarning
pd.set_option('mode.chained_assignment', None)
#keep numpy from using scientific notation; threshold=np.inf would disable the '...' truncation
np.set_printoptions(suppress=True)
# matplotlib cannot render Chinese out of the box; set a font that supports it
mpl.rcParams['font.family'] = 'SimHei'
# step two: fix rendering of the minus sign on axes with negative values
plt.rcParams['axes.unicode_minus'] = False

def entropy(data):
    # data is a pd.Series
    p_data = data.value_counts()           # counts occurrence of each value
    entropy = scipy.stats.entropy(p_data)  # get entropy from counts
    return entropy
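# quick check (illustrative): entropy(pd.Series([1, 1, 2, 2])) gives ln(2) ≈ 0.693,
# since the counts [2, 2] form a uniform distribution over two values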

def mic(x, y):
    m = MINE(alpha=0.6, c=15)
    m.compute_score(x, y)
    return m.mic()

def mic_df(df, pairs):
    # score each (col1, col2) pair and collect the results into a DataFrame
    pf = pd.DataFrame(columns=["指标1", "指标2", "最大信息系数"])
    for i, (c1, c2) in enumerate(pairs):
        pf.at[i, '指标1'] = c1
        pf.at[i, '指标2'] = c2
        pf.at[i, '最大信息系数'] = mic(df[c1].values, df[c2].values)
    return pf
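# usage sketch (column names are hypothetical):
#   mic_df(df, [('colA', 'colB'), ('colA', 'colC')])
# returns a DataFrame with one row per scored pair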

def lst_split(total, step):
    # cut range(total) into (start, stop) chunks of at most `step` items
    ls_step = []
    for i in range(0, total, step):
        ls_step.append((i, min(i + step, total)))
    return ls_step
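# example (illustrative): lst_split(10, 4) -> [(0, 4), (4, 8), (8, 10)];
# the final chunk is shorter when step does not divide the total evenly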


if __name__ == '__main__':
    path_pkl = r"D:\data\2023\端到端全网\端到端全量小区全量指标.pkl"

    #col1=['TCP建立成功率(无线侧)', 'TCP建立成功率(无线侧)','TCP建立平均时延(无线侧)(毫秒)',  'TCP建立成功率(%)', 'TCP建立成功率(核心网)', '端到端时延(毫秒)', 'TCP建立平均时延(核心网)(毫秒)',  '首TCP三步握手至首GET的时延(毫秒)']
    df_s = pd.read_pickle(path_pkl)
    df = df_s
    df.dropna(inplace=True, how='any')
    #compute the maximal information coefficient for every column pair
    col = df.columns
    ls = list(itertools.product(col, col))  # (col1, col2) tuples, one per pair
    # ls = ls[0:6]  # uncomment to smoke-test on a handful of pairs first

    #split the pair list into chunks and score each chunk in its own process
    lst_1 = lst_split(len(ls), 4)
    lst = []
    for i in lst_1:
        lst.append(ls[i[0]:i[1]])
    tasks = [delayed(mic_df)(df, chunk) for chunk in lst]
    res = Parallel(n_jobs=4)(tasks)
    data = pd.concat(res)
    path_1 = r"D:\data\2023\端到端全网\端到端全量小区互信息1.xlsx"
    data.to_excel(path_1)
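As noted above, if the run is still slow on a very large sample, the alpha exponent can be lowered to bound the grid search more tightly (a hedged suggestion; 0.4 is an arbitrary illustration and should be benchmarked against alpha=0.6 on a sample of the data):

# a smaller alpha caps the grid search at roughly N**alpha cells per pair,
# trading some sensitivity for speed; 0.4 is illustrative, not a recommendation
m_fast = MINE(alpha=0.4, c=15)

The second script below computes the standard deviation, information entropy, and three correlation measures (Pearson, Spearman, Kendall) for the same kind of metric table.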

import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
import seaborn as sns
import matplotlib as mpl
from chardet.universaldetector import UniversalDetector
from sklearn.decomposition import PCA
#keep pandas from using scientific notation; lambda x: '%.2f' % x gives 2 decimals (.0f for 0, .3f for 3)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
#show all columns
pd.set_option('display.max_columns', None)
#show all rows
pd.set_option('display.max_rows', None)
#silence the SettingWithCopyWarning
pd.set_option('mode.chained_assignment', None)
#keep numpy from using scientific notation; threshold=np.inf would disable the '...' truncation
np.set_printoptions(suppress=True)
# matplotlib cannot render Chinese out of the box; set a font that supports it
mpl.rcParams['font.family'] = 'SimHei'
# step two: fix rendering of the minus sign on axes with negative values
plt.rcParams['axes.unicode_minus'] = False

def entropy(data):
    # data is a pd.Series
    p_data = data.value_counts()           # counts occurrence of each value
    entropy = scipy.stats.entropy(p_data)  # get entropy from counts
    return entropy

def encode_to_utf8(filename, des_encode):
    # detect the file's original encoding, feeding it line by line
    with open(filename, 'rb') as f:
        detector = UniversalDetector()
        for line in f.readlines():
            detector.feed(line)
            if detector.done:
                break
        detector.close()  # finalize detection before reading the result
        original_encode = detector.result['encoding']

    # read the raw file content
    with open(filename, 'rb') as f:
        file_content = f.read()

    # decode with the detected encoding, re-encode, and overwrite in place
    file_decode = file_content.decode(original_encode, 'ignore')
    file_encode = file_decode.encode(des_encode)
    with open(filename, 'wb') as f:
        f.write(file_encode)
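# usage sketch (the path is hypothetical):
#   encode_to_utf8(r"D:\data\sample.csv", des_encode="utf-8")
# the file is rewritten in place, so keep a backup if the source encoding matters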

##loading helpers

def load_Table(file_path, col_name):
    # dispatch on the file extension; normalize CSV files to UTF-8 first
    name, ext = os.path.splitext(file_path)
    if ext == '.csv':
        encode_to_utf8(file_path, des_encode="utf-8")
        df_read = pd.read_csv(file_path, usecols=col_name)
    elif ext == '.xls':
        df_read = pd.read_excel(file_path, usecols=col_name)
    elif ext == '.xlsx':
        df_read = pd.read_excel(file_path, usecols=col_name)
    return df_read
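# usage sketch (path and column list are hypothetical):
#   df = load_Table(r"D:\data\sample.xlsx", col_name=["colA", "colB"])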

if __name__ == '__main__':
    path_s = r"D:\data\2023\指标合一\端到端12项指标小区级-0310.csv"
    encode_to_utf8(path_s, des_encode="utf-8")
    df_s = pd.read_csv(path_s)
    #keep only the metric columns for the calculations
    df = df_s.iloc[:, 5:]

    #standard deviation of each metric
    sr_std = df.std()

    #information entropy of each metric
    dict_e = {}
    for c in df.columns:
        data = df[c]
        d = entropy(data)
        dict_e.update({c: d})
    sr_entropy = pd.Series(dict_e)
    #merge the standard deviations and entropies into one table
    df_entropy = sr_entropy.to_frame()
    df_std = sr_std.to_frame()
    df_1 = pd.merge(df_std, df_entropy, left_index=True, right_index=True)
    df_1.rename(columns={"0_x": "标准差", "0_y": "信息熵"}, inplace=True)
    df_1.to_csv(r"D:\data\2023\指标合一\端到端12项指标小区级_std_entropy_0322.csv")
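    # the merged table has one row per metric; shape sketch (values hypothetical):
    #          标准差  信息熵
    # 指标A     3.21   2.45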


    #correlation analysis: Pearson, then Spearman, then Kendall
    corr = df.corr()
    corr = corr.reset_index()
    corr.rename(columns={"index": "指标"}, inplace=True)
    df_pearson = pd.melt(corr, id_vars="指标", value_name="pearson")
    df_pearson["相关系数绝对值"] = df_pearson["pearson"].abs()
    df_pearson["分指标相关性排名"] = df_pearson.groupby("指标", group_keys=False)["相关系数绝对值"].apply(lambda x: x.rank(method="first"))
    df_pearson["pearson排名"] = df_pearson["相关系数绝对值"].rank(method="first")

    corr = df.corr(method="spearman")
    corr = corr.reset_index()
    corr.rename(columns={"index": "指标"}, inplace=True)
    df_spearman = pd.melt(corr, id_vars="指标", value_name="spearman")
    df_spearman["spearman绝对值"] = df_spearman["spearman"].abs()
    df_spearman["spearman分指标排名"] = df_spearman.groupby("指标", group_keys=False)["spearman绝对值"].apply(lambda x: x.rank(method="first"))
    df_spearman["spearman排名"] = df_spearman["spearman绝对值"].rank(method="first")

    corr = df.corr(method="kendall")
    corr = corr.reset_index()
    corr.rename(columns={"index": "指标"}, inplace=True)
    df_kendall = pd.melt(corr, id_vars="指标", value_name="kendall")
    df_kendall["kendall绝对值"] = df_kendall["kendall"].abs()
    df_kendall["kendall分指标排名"] = df_kendall.groupby("指标", group_keys=False)["kendall绝对值"].apply(lambda x: x.rank(method="first"))
    df_kendall["kendall排名"] = df_kendall["kendall绝对值"].rank(method="first")

    #the three melted frames share the same row order, so align them by position
    df_corr_3 = pd.concat(
        [df_pearson,
         df_spearman[["spearman", "spearman绝对值", "spearman分指标排名", "spearman排名"]],
         df_kendall[["kendall", "kendall绝对值", "kendall分指标排名", "kendall排名"]]],
        axis=1)
    df_corr_3.to_csv(r"D:\data\2023\指标合一\端到端12项指标小区级_相关性_0322.csv")
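Since seaborn and matplotlib are already imported, the correlation structure can also be checked visually with a heatmap. This is a minimal sketch meant to be appended to the end of the main block above (it assumes df is still in scope; the figure size, colormap, and annotation format are arbitrary choices):

    #optional visual check on the Pearson correlation matrix
    fig, ax = plt.subplots(figsize=(10, 8))
    sns.heatmap(df.corr(), annot=True, fmt=".2f", cmap="coolwarm", ax=ax)
    plt.tight_layout()
    plt.show()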