Part 1 () walked through the theory and derivation. There are plenty of write-ups online explaining the theory, but very few that actually implement it. Talk is cheap, show me the code.
Part 2 focuses on implementing GMM: first a procedural implementation to verify that the algorithm is correct, then an object-oriented wrapper, and finally an analysis and comparison against the sklearn source code.
Algorithm flow:
1. Procedural implementation
The test data here is generated from three Gaussian distributions, whose parameters are stored in mu (n_components * n_features) and covariance (n_components * n_features * n_features).
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from scipy.stats import multivariate_normal
# Generate three clusters from three different Gaussian distributions as data for the GMM algorithm
num1, mu1, covar1 = 400, [0.5, 0.5], np.array([[1, 0.5], [0.5, 3]])
X1 = np.random.multivariate_normal(mu1, covar1, num1)
# the second cluster
num2, mu2, covar2 = 600, [5.5, 2.5], np.array([[2, 1], [1, 2]])
X2 = np.random.multivariate_normal(mu2, covar2, num2)
# the third cluster
num3, mu3, covar3 = 1000, [1, 7], np.array([[6, 2], [2, 1]])
X3 = np.random.multivariate_normal(mu3, covar3, num3)
# stack them together
X = np.vstack((X1, X2, X3))
# compute the log-likelihood of a clustering result
def logLikelihood(X, class_prior, mu, covariance):
    m, n_components = len(X), len(class_prior)
    pdfs = np.zeros((m, n_components))
    for j in range(n_components):
        pdfs[:, j] = class_prior[j] * multivariate_normal.pdf(X, mu[j], covariance[j])
    return np.mean(np.log(pdfs.sum(axis=1)))
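As a quick sanity check (my own addition, not part of the original flow), this function can be evaluated at the true generating parameters to get a baseline value that a good fit should approach:
# baseline: log-likelihood under the true generating parameters
true_prior = np.array([num1, num2, num3]) / (num1 + num2 + num3)
true_mu = np.array([mu1, mu2, mu3])
true_cov = np.array([covar1, covar2, covar3])
print(logLikelihood(X, true_prior, true_mu, true_cov))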
Plot a scatter of the data above:
y = np.hstack((np.zeros(len(X1)), np.ones(len(X2)), 2 * np.ones(len(X3))))
plt.figure(figsize=(10, 8))
plt.axis([-10, 15, -5, 15])
plt.scatter(X1[:, 0], X1[:, 1], s=5)
plt.scatter(X2[:, 0], X2[:, 1], s=5)
plt.scatter(X3[:, 0], X3[:, 1], s=5)
# draw the Gaussian ellipses (plot_cov_ellipse is defined in Section 2 below)
plot_cov_ellipse(covar1, mu1)
plot_cov_ellipse(covar2, mu2)
plot_cov_ellipse(covar3, mu3)
plt.show()
print(len(y))
n_components = 3  # number of clusters (components)
class_prior = np.ones(n_components) / n_components  # prior probability that a sample belongs to each component
m, n = X.shape
print(m, n)
# initialize the cluster centers
W = np.random.random((m, n_components))
mu = np.random.random((n_components, n))
minCol = np.min(X, axis=0)
maxCol = np.max(X, axis=0)
mu = minCol + mu * (maxCol - minCol)
covariance = np.zeros((n_components, n, n))
dist = np.tile(np.sum(X * X, axis=1).reshape((m, 1)), (1, n_components)) + np.tile(np.sum(mu * mu, axis=1).T, (m, 1)) - 2 * np.dot(X, mu.T)
labels = np.argmin(dist, axis=1)
for i in range(n_components):
    clusterX = X[labels == i, :]
    class_prior[i] = clusterX.shape[0] / m
    covariance[i, :, :] = np.cov(clusterX.T)
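The tiled expression above is just an expanded form of the pairwise squared Euclidean distances between samples and centers; a quick check against scipy (my addition) confirms this:
# sanity check: the tile-based expression equals pairwise squared Euclidean distances
from scipy.spatial.distance import cdist
print(np.allclose(dist, cdist(X, mu, 'sqeuclidean')))  # expect True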
Visualize the initialized cluster centers:
# plot the initialized distributions
plt.scatter(X[:, 0], X[:, 1], c=y)
for j in range(n_components):
    plot_cov_ellipse(covariance[j], mu[j])
As the figure shows, the initialized cluster centers are still far from the true ones.
Now run the first iteration:
pdfs = np.zeros((m, n_components))
# round 1, E-step
for j in range(n_components):
    pdfs[:, j] = class_prior[j] * multivariate_normal.pdf(X, mu[j], covariance[j])
W = pdfs / np.sum(pdfs, axis=1).reshape(-1, 1)
# round 1, M-step
class_prior = np.sum(W, axis=0) / np.sum(W)
mu = np.zeros((n_components, n))
covariance = np.zeros((n_components, n, n))
for j in range(n_components):
    mu[j] = np.average(X, axis=0, weights=W[:, j])
    cov = 0
    for i in range(m):
        tmp = (X[i, :] - mu[j, :]).reshape(-1, 1)
        cov += W[i, j] * np.dot(tmp, tmp.T)
    covariance[j, :, :] = cov / np.sum(W[:, j])
# round 1: plot the cluster centers and Gaussian ellipses
plt.scatter(X[:, 0], X[:, 1], c=y)
for j in range(n_components):
    plot_cov_ellipse(covariance[j], mu[j])
Now the second iteration:
# round 2, E-step
for j in range(n_components):
    pdfs[:, j] = class_prior[j] * multivariate_normal.pdf(X, mu[j], covariance[j])
W = pdfs / np.sum(pdfs, axis=1).reshape(-1, 1)
# round 2, M-step
class_prior = np.sum(W, axis=0) / np.sum(W)
mu = np.zeros((n_components, n))
covariance = np.zeros((n_components, n, n))
for j in range(n_components):
    mu[j] = np.average(X, axis=0, weights=W[:, j])
    cov = 0
    for i in range(m):
        tmp = (X[i, :] - mu[j, :]).reshape(-1, 1)
        cov += W[i, j] * np.dot(tmp, tmp.T)
    covariance[j, :, :] = cov / np.sum(W[:, j])
# round 2: plot the cluster centers and Gaussian ellipses
plt.scatter(X[:, 0], X[:, 1], c=y)
for j in range(n_components):
    plot_cov_ellipse(covariance[j], mu[j])
The first two iterations change things considerably; the components start moving toward the cluster centers. Now look at round 3:
# round 3, E-step
for j in range(n_components):
    pdfs[:, j] = class_prior[j] * multivariate_normal.pdf(X, mu[j], covariance[j])
W = pdfs / np.sum(pdfs, axis=1).reshape(-1, 1)
# round 3, M-step
class_prior = np.sum(W, axis=0) / np.sum(W)
mu = np.zeros((n_components, n))
covariance = np.zeros((n_components, n, n))
for j in range(n_components):
    mu[j] = np.average(X, axis=0, weights=W[:, j])
    cov = 0
    for i in range(m):
        tmp = (X[i, :] - mu[j, :]).reshape(-1, 1)
        cov += W[i, j] * np.dot(tmp, tmp.T)
    covariance[j, :, :] = cov / np.sum(W[:, j])
# round 3: plot the cluster centers and Gaussian ellipses
plt.scatter(X[:, 0], X[:, 1], c=y)
for j in range(n_components):
    plot_cov_ellipse(covariance[j], mu[j])
Skipping the intermediate rounds, here is round 9:
# round 9, E-step
for j in range(n_components):
    pdfs[:, j] = class_prior[j] * multivariate_normal.pdf(X, mu[j], covariance[j])
W = pdfs / np.sum(pdfs, axis=1).reshape(-1, 1)
# round 9, M-step
class_prior = np.sum(W, axis=0) / np.sum(W)
mu = np.zeros((n_components, n))
covariance = np.zeros((n_components, n, n))
for j in range(n_components):
    mu[j] = np.average(X, axis=0, weights=W[:, j])
    cov = 0
    for i in range(m):
        tmp = (X[i, :] - mu[j, :]).reshape(-1, 1)
        cov += W[i, j] * np.dot(tmp, tmp.T)
    covariance[j, :, :] = cov / np.sum(W[:, j])
# round 9: plot the cluster centers and Gaussian ellipses
plt.scatter(X[:, 0], X[:, 1], c=y)
for j in range(n_components):
    plot_cov_ellipse(covariance[j], mu[j])
# round 16, E-step
for j in range(n_components):
    pdfs[:, j] = class_prior[j] * multivariate_normal.pdf(X, mu[j], covariance[j])
W = pdfs / np.sum(pdfs, axis=1).reshape(-1, 1)
# round 16, M-step
class_prior = np.sum(W, axis=0) / np.sum(W)
mu = np.zeros((n_components, n))
covariance = np.zeros((n_components, n, n))
for j in range(n_components):
    mu[j] = np.average(X, axis=0, weights=W[:, j])
    cov = 0
    for i in range(m):
        tmp = (X[i, :] - mu[j, :]).reshape(-1, 1)
        cov += W[i, j] * np.dot(tmp, tmp.T)
    covariance[j, :, :] = cov / np.sum(W[:, j])
# round 16: plot the cluster centers and Gaussian ellipses
plt.scatter(X[:, 0], X[:, 1], c=y)
for j in range(n_components):
    plot_cov_ellipse(covariance[j], mu[j])
# round 27, E-step
for j in range(n_components):
    pdfs[:, j] = class_prior[j] * multivariate_normal.pdf(X, mu[j], covariance[j])
W = pdfs / np.sum(pdfs, axis=1).reshape(-1, 1)
# round 27, M-step
class_prior = np.sum(W, axis=0) / np.sum(W)
mu = np.zeros((n_components, n))
covariance = np.zeros((n_components, n, n))
for j in range(n_components):
    mu[j] = np.average(X, axis=0, weights=W[:, j])
    cov = 0
    for i in range(m):
        tmp = (X[i, :] - mu[j, :]).reshape(-1, 1)
        cov += W[i, j] * np.dot(tmp, tmp.T)
    covariance[j, :, :] = cov / np.sum(W[:, j])
# round 27: plot the cluster centers and Gaussian ellipses
plt.scatter(X[:, 0], X[:, 1], c=y)
for j in range(n_components):
    plot_cov_ellipse(covariance[j], mu[j])
From the experiments above, the algorithm code is essentially correct and the clustering eventually converges.
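All of those rounds repeat the same two steps, so the whole run can be collapsed into one loop with a convergence test on the log-likelihood. Here is a minimal sketch of that loop (my own consolidation, reusing the variables and the logLikelihood helper from above, with an assumed tolerance of 1e-9; the covariance update is vectorized in the same spirit as sklearn's M-step):
# consolidated EM loop: iterate E and M steps until the log-likelihood stabilizes
eps, maxIter = 1e-9, 10000
prev = logLikelihood(X, class_prior, mu, covariance)
for it in range(maxIter):
    # E-step: responsibilities
    for j in range(n_components):
        pdfs[:, j] = class_prior[j] * multivariate_normal.pdf(X, mu[j], covariance[j])
    W = pdfs / np.sum(pdfs, axis=1).reshape(-1, 1)
    # M-step: priors, means, covariances
    class_prior = np.sum(W, axis=0) / np.sum(W)
    for j in range(n_components):
        mu[j] = np.average(X, axis=0, weights=W[:, j])
        diff = X - mu[j]
        covariance[j] = np.dot(W[:, j] * diff.T, diff) / np.sum(W[:, j])
    cur = logLikelihood(X, class_prior, mu, covariance)
    if abs(cur - prev) < eps:
        break
    prev = cur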
2. Object-oriented implementation
import numpy as np
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
# Ellipse-plotting code adapted from https://github.com/SJinping/Gaussian-ellipse/blob/master/gaussian_%20ellipse.py
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
    """
    Plots an `nstd` sigma error ellipse based on the specified covariance
    matrix (`cov`). Additional keyword arguments are passed on to the
    ellipse patch artist.
    Parameters
    ----------
    cov : The 2x2 covariance matrix to base the ellipse on
    pos : The location of the center of the ellipse. Expects a 2-element
        sequence of [x0, y0].
    nstd : The radius of the ellipse in numbers of standard deviations.
        Defaults to 2 standard deviations.
    ax : The axis that the ellipse will be plotted on. Defaults to the
        current axis.
    Additional keyword arguments are passed on to the ellipse patch.
    Returns
    -------
    A matplotlib ellipse artist
    """
    def eigsorted(cov):
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]
    if ax is None:
        ax = plt.gca()
    vals, vecs = eigsorted(cov)
    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    # Width and height are "full" widths, not radius
    width, height = 2 * nstd * np.sqrt(vals)
    ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
    ax.add_artist(ellip)
    return ellip
def plot(data, mu, covariance, class_label):
    plt.scatter(data[:, 0], data[:, 1], c=class_label)
    n_components = len(mu)
    for j in range(n_components):
        plot_cov_ellipse(covariance[j], mu[j])
    plt.show()
class GaussianMixtureModel:
    def __init__(self, n_components, maxIter=1e4, eps=1e-9):
        self.n_components = n_components
        self.class_prior = np.ones(n_components) / n_components
        self.mu = None
        self.covariance = None
        self.W = None
        self.pdfs = None
        self.eps = eps
        self.maxIter = maxIter

    def __initParameters(self, X):
        '''
        Initialize the model parameters mu, sigma and class_prior
        :param X:
        :return:
        '''
        m, n = X.shape
        self.W = np.random.random((m, self.n_components))
        self.mu = np.random.random((self.n_components, n))
        minCol = np.min(X, axis=0)
        maxCol = np.max(X, axis=0)
        self.mu = minCol + self.mu * (maxCol - minCol)
        self.covariance = np.zeros((self.n_components, n, n))
        dist = np.tile(np.sum(X * X, axis=1).reshape((m, 1)), (1, self.n_components)) + np.tile(
            np.sum(self.mu * self.mu, axis=1).T,
            (m, 1)) - 2 * np.dot(X, self.mu.T)
        self.pdfs = np.zeros((m, self.n_components))
        labels = np.argmin(dist, axis=1)
        for i in range(self.n_components):
            clusterX = X[labels == i, :]
            self.class_prior[i] = clusterX.shape[0] / m
            self.covariance[i, :, :] = np.cov(clusterX.T)

    def train(self, X):
        '''
        Fit the parameters with the EM algorithm. Iteration stops when either
        1. the iteration count reaches the upper limit, or
        2. the change in the log-likelihood falls below the threshold eps
        :param X:
        :return:
        '''
        self.__initParameters(X)
        num = 0
        preLogLikelihood = self.__logLikelihood(X)
        while num < self.maxIter:
            self.__expectation(X)
            self.__maximize(X)
            plot(X, self.mu, self.covariance, y)  # y is the global label array, used only for coloring
            num += 1
            logLikelihood = self.__logLikelihood(X)
            if abs(logLikelihood - preLogLikelihood) < self.eps:
                break
            preLogLikelihood = logLikelihood

    # Compute the log-likelihood under the current priors, means and covariances
    def __logLikelihood(self, X):
        for j in range(self.n_components):
            self.pdfs[:, j] = self.class_prior[j] * multivariate_normal.pdf(X, self.mu[j], self.covariance[j])
        return np.mean(np.log(np.sum(self.pdfs, axis=1)))

    # E-step of EM: the probability that sample x_i comes from the k-th Gaussian
    def __expectation(self, X):
        for j in range(self.n_components):
            self.pdfs[:, j] = self.class_prior[j] * multivariate_normal.pdf(X, self.mu[j], self.covariance[j])
        self.W = self.pdfs / np.sum(self.pdfs, axis=1).reshape(-1, 1)

    def __maximize(self, X):
        '''
        N_k is the total responsibility assigned to component k.
        Update the class priors, component means and covariances.
        :return:
        '''
        m, n = X.shape
        self.class_prior = np.sum(self.W, axis=0) / np.sum(self.W)
        for j in range(self.n_components):
            self.mu[j] = np.average(X, axis=0, weights=self.W[:, j])
            cov = 0
            for i in range(m):
                tmp = (X[i, :] - self.mu[j, :]).reshape(-1, 1)
                cov += self.W[i, j] * np.dot(tmp, tmp.T)
            self.covariance[j, :, :] = cov / np.sum(self.W[:, j])
# Generate three clusters from three different Gaussian distributions as data for the GMM algorithm
num1, mu1, covar1 = 400, [0.5, 0.5], np.array([[1, 0.5], [0.5, 3]])
X1 = np.random.multivariate_normal(mu1, covar1, num1)
# the second cluster
num2, mu2, covar2 = 600, [5.5, 2.5], np.array([[2, 1], [1, 2]])
X2 = np.random.multivariate_normal(mu2, covar2, num2)
# the third cluster
num3, mu3, covar3 = 1000, [1, 7], np.array([[6, 2], [2, 1]])
X3 = np.random.multivariate_normal(mu3, covar3, num3)
# stack them together
Mydata = np.vstack((X1, X2, X3))
# true labels, used only for coloring the plots
y = np.hstack((np.zeros(len(X1)), np.ones(len(X2)), 2 * np.ones(len(X3))))
print(len(y))
myGMM = GaussianMixtureModel(3)
myGMM.train(Mydata)
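The class does not expose a predict method; as a minimal sketch (my addition, assuming train has been called), hard cluster assignments can be read off the final responsibility matrix W:
# hard assignments: the component with the largest responsibility per sample
labels = np.argmax(myGMM.W, axis=1)
plot(Mydata, myGMM.mu, myGMM.covariance, labels)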
3. Source code analysis
GMM in sklearn is implemented in the mixture module, which contains GaussianMixture and BayesianGaussianMixture; both classes inherit from BaseMixture.
This thoroughly object-oriented code is impressively robust. Compared with my own object-oriented version, there is clearly a lot of room for improvement.
First, the utility functions:
import numpy as np
from scipy import linalg
from .base import BaseMixture, _check_shape
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..utils.extmath import row_norms
###############################################################################
# Gaussian mixture shape checkers used by the GaussianMixture class
# Utility function: check that each component weight lies in [0, 1] and that the weights sum to 1
def _check_weights(weights, n_components):
    """Check the user provided 'weights'.
    Parameters
    ----------
    weights : array-like, shape (n_components,)
        The proportions of components of each mixture.
    n_components : int
        Number of components.
    Returns
    -------
    weights : array, shape (n_components,)
    """
    weights = check_array(weights, dtype=[np.float64, np.float32],
                          ensure_2d=False)
    _check_shape(weights, (n_components,), 'weights')
    # check range
    if (any(np.less(weights, 0.)) or
            any(np.greater(weights, 1.))):
        raise ValueError("The parameter 'weights' should be in the range "
                         "[0, 1], but got max value %.5f, min value %.5f"
                         % (np.min(weights), np.max(weights)))
    # check normalization
    if not np.allclose(np.abs(1. - np.sum(weights)), 0.):
        raise ValueError("The parameter 'weights' should be normalized, "
                         "but got sum(weights) = %.5f" % np.sum(weights))
    return weights
def _check_means(means, n_components, n_features):
    """Validate the provided 'means'.
    Parameters
    ----------
    means : array-like, shape (n_components, n_features)
        The centers of the current components.
    n_components : int
        Number of components.
    n_features : int
        Number of features.
    Returns
    -------
    means : array, (n_components, n_features)
    """
    means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False)
    _check_shape(means, (n_components, n_features), 'means')
    return means
def _check_precision_positivity(precision, covariance_type):
    """Check a precision vector is positive-definite."""
    if np.any(np.less_equal(precision, 0.0)):
        raise ValueError("'%s precision' should be "
                         "positive" % covariance_type)

def _check_precision_matrix(precision, covariance_type):
    """Check a precision matrix is symmetric and positive-definite."""
    if not (np.allclose(precision, precision.T) and
            np.all(linalg.eigvalsh(precision) > 0.)):
        raise ValueError("'%s precision' should be symmetric, "
                         "positive-definite" % covariance_type)

def _check_precisions_full(precisions, covariance_type):
    """Check the precision matrices are symmetric and positive-definite."""
    for prec in precisions:
        _check_precision_matrix(prec, covariance_type)
def _check_precisions(precisions, covariance_type, n_components, n_features):
    """Validate user provided precisions.
    Parameters
    ----------
    precisions : array-like
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)
    covariance_type : string
    n_components : int
        Number of components.
    n_features : int
        Number of features.
    Returns
    -------
    precisions : array
    """
    precisions = check_array(precisions, dtype=[np.float64, np.float32],
                             ensure_2d=False,
                             allow_nd=covariance_type == 'full')
    precisions_shape = {'full': (n_components, n_features, n_features),
                        'tied': (n_features, n_features),
                        'diag': (n_components, n_features),
                        'spherical': (n_components,)}
    _check_shape(precisions, precisions_shape[covariance_type],
                 '%s precision' % covariance_type)
    _check_precisions = {'full': _check_precisions_full,
                         'tied': _check_precision_matrix,
                         'diag': _check_precision_positivity,
                         'spherical': _check_precision_positivity}
    _check_precisions[covariance_type](precisions, covariance_type)
    return precisions
###############################################################################
# Gaussian mixture parameters estimators (used by the M-Step)
def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar):
    """Estimate the full covariance matrices.
    Parameters
    ----------
    resp : array-like, shape (n_samples, n_components)
    X : array-like, shape (n_samples, n_features)
    nk : array-like, shape (n_components,)
    means : array-like, shape (n_components, n_features)
    reg_covar : float
    Returns
    -------
    covariances : array, shape (n_components, n_features, n_features)
        The covariance matrix of the current components.
    """
    n_components, n_features = means.shape
    covariances = np.empty((n_components, n_features, n_features))
    for k in range(n_components):
        diff = X - means[k]
        covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]
        covariances[k].flat[::n_features + 1] += reg_covar
    return covariances
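# Aside (my addition, not part of the sklearn source): the one-liner
# np.dot(resp[:, k] * diff.T, diff) / nk[k] vectorizes the per-sample
# outer-product loop from my implementation above. A quick check on made-up data:
_rng = np.random.default_rng(0)
_Xd, _r = _rng.normal(size=(50, 2)), _rng.random(50)
_diff = _Xd - _Xd.mean(axis=0)
assert np.allclose(np.dot(_r * _diff.T, _diff) / _r.sum(),
                   sum(_r[i] * np.outer(_diff[i], _diff[i]) for i in range(50)) / _r.sum())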
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar):
    """Estimate the tied covariance matrix.
    Parameters
    ----------
    resp : array-like, shape (n_samples, n_components)
    X : array-like, shape (n_samples, n_features)
    nk : array-like, shape (n_components,)
    means : array-like, shape (n_components, n_features)
    reg_covar : float
    Returns
    -------
    covariance : array, shape (n_features, n_features)
        The tied covariance matrix of the components.
    """
    avg_X2 = np.dot(X.T, X)
    avg_means2 = np.dot(nk * means.T, means)
    covariance = avg_X2 - avg_means2
    covariance /= nk.sum()
    covariance.flat[::len(covariance) + 1] += reg_covar
    return covariance
def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):
    """Estimate the diagonal covariance vectors.
    Parameters
    ----------
    responsibilities : array-like, shape (n_samples, n_components)
    X : array-like, shape (n_samples, n_features)
    nk : array-like, shape (n_components,)
    means : array-like, shape (n_components, n_features)
    reg_covar : float
    Returns
    -------
    covariances : array, shape (n_components, n_features)
        The covariance vector of the current components.
    """
    avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
    avg_means2 = means ** 2
    avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
    return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar
def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar):
    """Estimate the spherical variance values.
    Parameters
    ----------
    responsibilities : array-like, shape (n_samples, n_components)
    X : array-like, shape (n_samples, n_features)
    nk : array-like, shape (n_components,)
    means : array-like, shape (n_components, n_features)
    reg_covar : float
    Returns
    -------
    variances : array, shape (n_components,)
        The variance values of each components.
    """
    return _estimate_gaussian_covariances_diag(resp, X, nk,
                                               means, reg_covar).mean(1)
def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
    """Estimate the Gaussian distribution parameters.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The input data array.
    resp : array-like, shape (n_samples, n_components)
        The responsibilities for each data sample in X.
    reg_covar : float
        The regularization added to the diagonal of the covariance matrices.
    covariance_type : {'full', 'tied', 'diag', 'spherical'}
        The type of precision matrices.
    Returns
    -------
    nk : array-like, shape (n_components,)
        The numbers of data samples in the current components.
    means : array-like, shape (n_components, n_features)
        The centers of the current components.
    covariances : array-like
        The covariance matrix of the current components.
        The shape depends of the covariance_type.
    """
    nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
    # resp.T: (n_components, n_samples), X: (n_samples, n_features)
    means = np.dot(resp.T, X) / nk[:, np.newaxis]
    covariances = {"full": _estimate_gaussian_covariances_full,
                   "tied": _estimate_gaussian_covariances_tied,
                   "diag": _estimate_gaussian_covariances_diag,
                   "spherical": _estimate_gaussian_covariances_spherical
                   }[covariance_type](resp, X, nk, means, reg_covar)
    return nk, means, covariances
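# Aside (my addition, not part of the sklearn source): means = resp.T @ X / nk
# is exactly the weighted average my implementation computes via np.average:
_rng = np.random.default_rng(1)
_Xd, _resp = _rng.normal(size=(40, 2)), _rng.random((40, 3))
_nk = _resp.sum(axis=0)
_means = np.dot(_resp.T, _Xd) / _nk[:, np.newaxis]
assert all(np.allclose(_means[k], np.average(_Xd, axis=0, weights=_resp[:, k]))
           for k in range(3))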
def _compute_precision_cholesky(covariances, covariance_type):
    """Compute the Cholesky decomposition of the precisions.
    Parameters
    ----------
    covariances : array-like
        The covariance matrix of the current components.
        The shape depends of the covariance_type.
    covariance_type : {'full', 'tied', 'diag', 'spherical'}
        The type of precision matrices.
    Returns
    -------
    precisions_cholesky : array-like
        The cholesky decomposition of sample precisions of the current
        components. The shape depends of the covariance_type.
    """
    estimate_precision_error_message = (
        "Fitting the mixture model failed because some components have "
        "ill-defined empirical covariance (for instance caused by singleton "
        "or collapsed samples). Try to decrease the number of components, "
        "or increase reg_covar.")
    # When each component has its own covariance matrix, each one is inverted
    # via its Cholesky decomposition
    if covariance_type in 'full':
        n_components, n_features, _ = covariances.shape
        precisions_chol = np.empty((n_components, n_features, n_features))
        for k, covariance in enumerate(covariances):
            try:
                cov_chol = linalg.cholesky(covariance, lower=True)
            except linalg.LinAlgError:
                raise ValueError(estimate_precision_error_message)
            precisions_chol[k] = linalg.solve_triangular(cov_chol,
                                                         np.eye(n_features),
                                                         lower=True).T
    elif covariance_type == 'tied':
        _, n_features = covariances.shape
        try:
            cov_chol = linalg.cholesky(covariances, lower=True)
        except linalg.LinAlgError:
            raise ValueError(estimate_precision_error_message)
        precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features),
                                                  lower=True).T
    else:
        # spherical: each component's covariance is a single parameter,
        # i.e. all diagonal entries are equal (isotropic)
        # diag: each component's covariance is a D-dimensional diagonal matrix
        # with distinct diagonal entries
        if np.any(np.less_equal(covariances, 0.0)):
            raise ValueError(estimate_precision_error_message)
        precisions_chol = 1. / np.sqrt(covariances)
    return precisions_chol
###############################################################################
# Gaussian mixture probability estimators
def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):
    """Compute the log-det of the cholesky decomposition of matrices.
    Parameters
    ----------
    matrix_chol : array-like
        Cholesky decompositions of the matrices.
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)
    covariance_type : {'full', 'tied', 'diag', 'spherical'}
    n_features : int
        Number of features.
    Returns
    -------
    log_det_precision_chol : array-like, shape (n_components,)
        The determinant of the precision matrix for each component.
    """
    if covariance_type == 'full':
        n_components, _, _ = matrix_chol.shape
        log_det_chol = (np.sum(np.log(
            matrix_chol.reshape(
                n_components, -1)[:, ::n_features + 1]), 1))
    elif covariance_type == 'tied':
        log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))
    elif covariance_type == 'diag':
        log_det_chol = (np.sum(np.log(matrix_chol), axis=1))
    else:
        log_det_chol = n_features * (np.log(matrix_chol))
    return log_det_chol
# Compute the log probability of each sample under each component's Gaussian
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
    """Estimate the log Gaussian probability.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    means : array-like, shape (n_components, n_features)
    precisions_chol : array-like
        Cholesky decompositions of the precision matrices.
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)
    covariance_type : {'full', 'tied', 'diag', 'spherical'}
    Returns
    -------
    log_prob : array, shape (n_samples, n_components)
    """
    n_samples, n_features = X.shape
    n_components, _ = means.shape
    # det(precision_chol) is half of det(precision)
    log_det = _compute_log_det_cholesky(
        precisions_chol, covariance_type, n_features)
    if covariance_type == 'full':
        log_prob = np.empty((n_samples, n_components))
        for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
            y = np.dot(X, prec_chol) - np.dot(mu, prec_chol)
            log_prob[:, k] = np.sum(np.square(y), axis=1)
    elif covariance_type == 'tied':
        log_prob = np.empty((n_samples, n_components))
        for k, mu in enumerate(means):
            y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
            log_prob[:, k] = np.sum(np.square(y), axis=1)
    elif covariance_type == 'diag':
        precisions = precisions_chol ** 2
        log_prob = (np.sum((means ** 2 * precisions), 1) -
                    2. * np.dot(X, (means * precisions).T) +
                    np.dot(X ** 2, precisions.T))
    elif covariance_type == 'spherical':
        precisions = precisions_chol ** 2
        log_prob = (np.sum(means ** 2, 1) * precisions -
                    2 * np.dot(X, means.T * precisions) +
                    np.outer(row_norms(X, squared=True), precisions))
    return -.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
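To convince myself that this Cholesky route really reproduces the Gaussian log-density, here is a small standalone check for the 'full' case against scipy's multivariate_normal (my addition, using an arbitrary SPD covariance):
# verify: Cholesky-based log-prob (full case) matches scipy's logpdf
import numpy as np
from scipy import linalg
from scipy.stats import multivariate_normal

rng = np.random.default_rng(2)
d = 2
A = rng.normal(size=(d, d))
cov = A @ A.T + d * np.eye(d)  # an arbitrary SPD covariance
mean = rng.normal(size=d)
Xs = rng.normal(size=(5, d))

cov_chol = linalg.cholesky(cov, lower=True)
prec_chol = linalg.solve_triangular(cov_chol, np.eye(d), lower=True).T
ys = np.dot(Xs, prec_chol) - np.dot(mean, prec_chol)
log_det = np.sum(np.log(np.diag(prec_chol)))
log_prob = -.5 * (d * np.log(2 * np.pi) + np.sum(np.square(ys), axis=1)) + log_det
print(np.allclose(log_prob, multivariate_normal.logpdf(Xs, mean, cov)))  # expect True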
Looking at these utility functions: when computing the probability that each sample comes from a given Gaussian, the sample is plugged into the corresponding Gaussian density, which requires the inverse and the determinant of the covariance matrix. The source code works with the Cholesky decomposition of the covariance matrix, which is much more efficient. I took a shortcut and computed it directly with multivariate_normal from scipy.stats; note that directly inverting the covariance matrix can easily fail when the matrix is singular. The other optimization is the handling of different covariance structures (my implementation only handles the most general case, 'full'): _compute_precision_cholesky above distinguishes four cases.
The four covariance_type cases are:
full: each component has its own covariance matrix, and each one is inverted via its Cholesky decomposition.
tied: all components share one common covariance matrix.
spherical: each component's covariance is described by a single parameter, i.e. all diagonal entries are equal (isotropic).
diag: each component's covariance is a diagonal matrix over the D features, with entries that differ across components.
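A quick way to see the four parameterizations side by side is to fit sklearn's GaussianMixture on the demo data with each covariance_type and look at the shape of covariances_ (a sketch, assuming Mydata from Section 2):
# shapes of covariances_ under the four covariance types (2 features, 3 components)
from sklearn.mixture import GaussianMixture
for ct in ['full', 'tied', 'diag', 'spherical']:
    gm = GaussianMixture(n_components=3, covariance_type=ct, random_state=0).fit(Mydata)
    print(ct, np.shape(gm.covariances_))
# expected: full (3, 2, 2), tied (2, 2), diag (3, 2), spherical (3,)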
Below is the class implementation of the Gaussian mixture model:
The main steps are the same as in my implementation, except that many edge cases are handled and there is a lot of validation and preprocessing code. The flow is: constructor initialization, then model parameter initialization, then the iterated EM algorithm (E-step and M-step).
class GaussianMixture(BaseMixture):
    """Gaussian Mixture.
    Representation of a Gaussian mixture model probability distribution.
    This class allows to estimate the parameters of a Gaussian mixture
    distribution.
    Read more in the :ref:`User Guide <gmm>`.
    .. versionadded:: 0.18
    Parameters
    ----------
    n_components : int, defaults to 1.
        The number of mixture components.
    covariance_type : {'full' (default), 'tied', 'diag', 'spherical'}
        String describing the type of covariance parameters to use.
        Must be one of:
        'full'
            each component has its own general covariance matrix
        'tied'
            all components share the same general covariance matrix
        'diag'
            each component has its own diagonal covariance matrix
        'spherical'
            each component has its own single variance
    tol : float, defaults to 1e-3.
        The convergence threshold. EM iterations will stop when the
        lower bound average gain is below this threshold.
    reg_covar : float, defaults to 1e-6.
        Non-negative regularization added to the diagonal of covariance.
        Allows to assure that the covariance matrices are all positive.
    max_iter : int, defaults to 100.
        The number of EM iterations to perform.
    n_init : int, defaults to 1.
        The number of initializations to perform. The best results are kept.
    init_params : {'kmeans', 'random'}, defaults to 'kmeans'.
        The method used to initialize the weights, the means and the
        precisions.
        Must be one of::
            'kmeans' : responsibilities are initialized using kmeans.
            'random' : responsibilities are initialized randomly.
    weights_init : array-like, shape (n_components, ), optional
        The user-provided initial weights, defaults to None.
        If it None, weights are initialized using the `init_params` method.
    means_init : array-like, shape (n_components, n_features), optional
        The user-provided initial means, defaults to None,
        If it None, means are initialized using the `init_params` method.
    precisions_init : array-like, optional.
        The user-provided initial precisions (inverse of the covariance
        matrices), defaults to None.
        If it None, precisions are initialized using the 'init_params' method.
        The shape depends on 'covariance_type'::
            (n_components,) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    warm_start : bool, default to False.
        If 'warm_start' is True, the solution of the last fitting is used as
        initialization for the next call of fit(). This can speed up
        convergence when fit is called several times on similar problems.
        In that case, 'n_init' is ignored and only a single initialization
        occurs upon the first call.
        See :term:`the Glossary <warm_start>`.
    verbose : int, default to 0.
        Enable verbose output. If 1 then it prints the current
        initialization and each iteration step. If greater than 1 then
        it prints also the log probability and the time needed
        for each step.
    verbose_interval : int, default to 10.
        Number of iteration done before the next print.
    Attributes
    ----------
    weights_ : array-like, shape (n_components,)
        The weights of each mixture components.
    means_ : array-like, shape (n_components, n_features)
        The mean of each mixture component.
    covariances_ : array-like
        The covariance of each mixture component.
        The shape depends on `covariance_type`::
            (n_components,) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
    precisions_ : array-like
        The precision matrices for each component in the mixture. A precision
        matrix is the inverse of a covariance matrix. A covariance matrix is
        symmetric positive definite so the mixture of Gaussian can be
        equivalently parameterized by the precision matrices. Storing the
        precision matrices instead of the covariance matrices makes it more
        efficient to compute the log-likelihood of new samples at test time.
        The shape depends on `covariance_type`::
            (n_components,) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
    precisions_cholesky_ : array-like
        The cholesky decomposition of the precision matrices of each mixture
        component. A precision matrix is the inverse of a covariance matrix.
        A covariance matrix is symmetric positive definite so the mixture of
        Gaussian can be equivalently parameterized by the precision matrices.
        Storing the precision matrices instead of the covariance matrices makes
        it more efficient to compute the log-likelihood of new samples at test
        time. The shape depends on `covariance_type`::
            (n_components,) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
    converged_ : bool
        True when convergence was reached in fit(), False otherwise.
    n_iter_ : int
        Number of step used by the best fit of EM to reach the convergence.
    lower_bound_ : float
        Lower bound value on the log-likelihood (of the training data with
        respect to the model) of the best fit of EM.
    See Also
    --------
    BayesianGaussianMixture : Gaussian mixture model fit with a variational
        inference.
    """
    def __init__(self, n_components=1, covariance_type='full', tol=1e-3,
                 reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
                 weights_init=None, means_init=None, precisions_init=None,
                 random_state=None, warm_start=False,
                 verbose=0, verbose_interval=10):
        super().__init__(
            n_components=n_components, tol=tol, reg_covar=reg_covar,
            max_iter=max_iter, n_init=n_init, init_params=init_params,
            random_state=random_state, warm_start=warm_start,
            verbose=verbose, verbose_interval=verbose_interval)
        self.covariance_type = covariance_type
        self.weights_init = weights_init
        self.means_init = means_init
        self.precisions_init = precisions_init
    def _check_parameters(self, X):
        """Check the Gaussian mixture parameters are well defined."""
        _, n_features = X.shape
        if self.covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError("Invalid value for 'covariance_type': %s "
                             "'covariance_type' should be in "
                             "['spherical', 'tied', 'diag', 'full']"
                             % self.covariance_type)
        if self.weights_init is not None:
            self.weights_init = _check_weights(self.weights_init,
                                               self.n_components)
        if self.means_init is not None:
            self.means_init = _check_means(self.means_init,
                                           self.n_components, n_features)
        if self.precisions_init is not None:
            self.precisions_init = _check_precisions(self.precisions_init,
                                                     self.covariance_type,
                                                     self.n_components,
                                                     n_features)
    def _initialize(self, X, resp):
        """Initialization of the Gaussian mixture parameters.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        resp : array-like, shape (n_samples, n_components)
        """
        n_samples, _ = X.shape
        weights, means, covariances = _estimate_gaussian_parameters(
            X, resp, self.reg_covar, self.covariance_type)
        weights /= n_samples
        self.weights_ = (weights if self.weights_init is None
                         else self.weights_init)
        self.means_ = means if self.means_init is None else self.means_init
        if self.precisions_init is None:
            self.covariances_ = covariances
            self.precisions_cholesky_ = _compute_precision_cholesky(
                covariances, self.covariance_type)
        elif self.covariance_type == 'full':
            self.precisions_cholesky_ = np.array(
                [linalg.cholesky(prec_init, lower=True)
                 for prec_init in self.precisions_init])
        elif self.covariance_type == 'tied':
            self.precisions_cholesky_ = linalg.cholesky(self.precisions_init,
                                                        lower=True)
        else:
            self.precisions_cholesky_ = self.precisions_init
    def _m_step(self, X, log_resp):
        """M step.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        log_resp : array-like, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        """
        n_samples, _ = X.shape
        self.weights_, self.means_, self.covariances_ = (
            _estimate_gaussian_parameters(X, np.exp(log_resp), self.reg_covar,
                                          self.covariance_type))
        self.weights_ /= n_samples
        self.precisions_cholesky_ = _compute_precision_cholesky(
            self.covariances_, self.covariance_type)
    def _estimate_log_prob(self, X):
        return _estimate_log_gaussian_prob(
            X, self.means_, self.precisions_cholesky_, self.covariance_type)

    def _estimate_log_weights(self):
        return np.log(self.weights_)

    def _compute_lower_bound(self, _, log_prob_norm):
        return log_prob_norm

    def _check_is_fitted(self):
        check_is_fitted(self, ['weights_', 'means_', 'precisions_cholesky_'])

    def _get_parameters(self):
        return (self.weights_, self.means_, self.covariances_,
                self.precisions_cholesky_)

    def _set_parameters(self, params):
        (self.weights_, self.means_, self.covariances_,
         self.precisions_cholesky_) = params
        # Attributes computation
        _, n_features = self.means_.shape
        if self.covariance_type == 'full':
            self.precisions_ = np.empty(self.precisions_cholesky_.shape)
            for k, prec_chol in enumerate(self.precisions_cholesky_):
                self.precisions_[k] = np.dot(prec_chol, prec_chol.T)
        elif self.covariance_type == 'tied':
            self.precisions_ = np.dot(self.precisions_cholesky_,
                                      self.precisions_cholesky_.T)
        else:
            self.precisions_ = self.precisions_cholesky_ ** 2
    def _n_parameters(self):
        """Return the number of free parameters in the model."""
        _, n_features = self.means_.shape
        if self.covariance_type == 'full':
            cov_params = self.n_components * n_features * (n_features + 1) / 2.
        elif self.covariance_type == 'diag':
            cov_params = self.n_components * n_features
        elif self.covariance_type == 'tied':
            cov_params = n_features * (n_features + 1) / 2.
        elif self.covariance_type == 'spherical':
            cov_params = self.n_components
        mean_params = n_features * self.n_components
        return int(cov_params + mean_params + self.n_components - 1)
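    # Worked example (my addition), for the demo configuration n_features=2,
    # n_components=3, covariance_type='full':
    #   cov_params  = 3 * 2 * (2 + 1) / 2 = 9
    #   mean_params = 2 * 3 = 6
    #   weights     = 3 - 1 = 2   ->  9 + 6 + 2 = 17 free parameters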
    # Compute BIC, a criterion for evaluating the clustering model
    def bic(self, X):
        """Bayesian information criterion for the current model on the input X.
        Parameters
        ----------
        X : array of shape (n_samples, n_dimensions)
        Returns
        -------
        bic : float
            The lower the better.
        """
        return (-2 * self.score(X) * X.shape[0] +
                self._n_parameters() * np.log(X.shape[0]))
    # Compute AIC, a criterion for evaluating the clustering model
    def aic(self, X):
        """Akaike information criterion for the current model on the input X.
        Parameters
        ----------
        X : array of shape (n_samples, n_dimensions)
        Returns
        -------
        aic : float
            The lower the better.
        """
        return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
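Both criteria penalize the free-parameter count, so they can drive model selection. A minimal sketch (my addition, assuming Mydata from Section 2) that picks n_components by BIC:
# choose the number of components by BIC (the smallest value wins)
from sklearn.mixture import GaussianMixture
bics = [GaussianMixture(n_components=k, random_state=0).fit(Mydata).bic(Mydata)
        for k in range(1, 7)]
print(np.argmin(bics) + 1)  # ideally 3, matching the generating process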