-0.017612	14.053064	0

-1.395634	4.662541	1

-0.752157	6.538620	0

-1.322371	7.152853	0

0.423363	11.054677	0

0.406704	7.067335	1

0.667394	12.741452	0

-2.460150	6.866805	1

0.569411	9.548755	0

-0.026632	10.427743	0

0.850433	6.920334	1

1.347183	13.175500	0

1.176813	3.167020	1

-1.781871	9.097953	0

-0.566606	5.749003	1

0.931635	1.589505	1

-0.024205	6.151823	1

-0.036453	2.690988	1

-0.196949	0.444165	1

1.014459	5.754399	1

1.985298	3.230619	1

-1.693453	-0.557540	1

-0.576525	11.778922	0

-0.346811	-1.678730	1

-2.124484	2.672471	1

1.217916	9.597015	0

-0.733928	9.098687	0

-3.642001	-1.618087	1

0.315985	3.523953	1

1.416614	9.619232	0

-0.386323	3.989286	1

0.556921	8.294984	1

1.224863	11.587360	0

-1.347803	-2.406051	1

1.196604	4.951851	1

0.275221	9.543647	0

0.470575	9.332488	0

-1.889567	9.542662	0

-1.527893	12.150579	0

-1.185247	11.309318	0

-0.445678	3.297303	1

1.042222	6.105155	1

-0.618787	10.320986	0

1.152083	0.548467	1

0.828534	2.676045	1

-1.237728	10.549033	0

-0.683565	-2.166125	1

0.229456	5.921938	1

-0.959885	11.555336	0

0.492911	10.993324	0

0.184992	8.721488	0

-0.355715	10.325976	0

-0.397822	8.058397	0

0.824839	13.730343	0

1.507278	5.027866	1

0.099671	6.835839	1

-0.344008	10.717485	0

1.785928	7.718645	1

-0.918801	11.560217	0

-0.364009	4.747300	1

-0.841722	4.119083	1

0.490426	1.960539	1

-0.007194	9.075792	0

0.356107	12.447863	0

0.342578	12.281162	0

-0.810823	-1.466018	1

2.530777	6.476801	1

1.296683	11.607559	0

0.475487	12.040035	0

-0.783277	11.009725	0

0.074798	11.023650	0

-1.337472	0.468339	1

-0.102781	13.763651	0

-0.147324	2.874846	1

0.518389	9.887035	0

1.015399	7.571882	0

-1.658086	-0.027255	1

1.319944	2.171228	1

2.056216	5.019981	1

-0.851633	4.375691	1

-1.510047	6.061992	0

-1.076637	-3.181888	1

1.821096	10.283990	0

3.010150	8.401766	1

-1.099458	1.688274	1

-0.834872	-1.733869	1

-0.846637	3.849075	1

1.400102	12.628781	0

1.752842	5.468166	1

0.078557	0.059736	1

0.089392	-0.715300	1

1.825662	12.693808	0

0.197445	9.744638	0

0.126117	0.922311	1

-0.679797	1.220530	1

0.677983	2.556666	1

0.761349	10.693862	0

-2.168791	0.143632	1

1.388610	9.341997	0

0.317029	14.739025	0

 

#!/usr/bin/python
# coding=utf-8
import copy
import numpy as np
import matplotlib.pyplot as plt
# 加载数据
def loadDataSet(fileName):
    """Load a tab-separated numeric data file into a float matrix.

    Each non-empty line holds tab-separated float fields; by this file's
    convention the last field is the class label (0/1).

    Parameters
    ----------
    fileName : str
        Path to the data file.

    Returns
    -------
    np.ndarray
        Array of shape (n_samples, n_fields).
    """
    dataMat = []
    # 'with' guarantees the handle is closed (the original opened the file
    # and never closed it)
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            # skip blank lines instead of crashing on float('') — the data
            # block at the top of this file interleaves blank lines
            if curLine == ['']:
                continue
            dataMat.append(list(map(float, curLine)))
    return np.array(dataMat)

# 计算欧几里得距离
def distEclud(vecA, vecB):
    """Return the Euclidean distance between two equal-length vectors."""
    diff = vecA - vecB
    return np.sqrt(np.sum(diff ** 2))

# 构建聚簇中心,取k个(此例中为4)随机质心
def randCent(dataSet, k):
    """Return k random initial centroids inside the data's bounding box.

    Each coordinate is drawn uniformly between that column's min and max.
    The last column of ``dataSet`` is treated as the class label and is
    excluded, matching how kMeans slices the data (``dataSet[i,:-1]``).

    Parameters
    ----------
    dataSet : np.ndarray
        (n_samples, n_features + 1) array; last column is the label.
    k : int
        Number of centroids to generate.

    Returns
    -------
    np.ndarray
        (k, n_features) array of centroids.
    """
    n = dataSet.shape[1] - 1          # feature columns only (label excluded)
    centroid = np.zeros((k, n))
    for j in range(n):
        minJ = dataSet[:, j].min()    # column minimum
        rangeJ = float(dataSet[:, j].max() - minJ)  # column spread
        # Uniform samples in [minJ, minJ + rangeJ).  The original wrapped
        # this in a bare ``mat(...)`` call, but ``mat`` is never imported
        # (only ``numpy as np``), so it raised NameError at runtime.
        centroid[:, j] = minJ + rangeJ * np.random.rand(k)
    return centroid

# k-means 聚类算法
# k-means clustering
def kMeans(dataSet, k, distMeans =distEclud, createCent = randCent):
    # Inputs: data set (last column = label, ignored for distance), k,
    #         a distance function, and a centroid-initialisation function.
    # Outputs: per-iteration snapshots of (centroids, cluster assignments)
    #          plus the number of reassignments made in each iteration.
    m = dataSet.shape[0];counts=[]      # m = number of samples; counts logs reassignments per iteration
    clusterAssments=[];centroids=[]
    clusterAssment = np.array(np.zeros((m,2)))    # per-sample state:
    # column 0 = index of the assigned centroid, column 1 = squared distance to it
    centroid = createCent(dataSet, k)   # start from k random centroids
    clusterChanged = True   # loop-termination flag: converged when no point moves
    while clusterChanged:    # iterate while any assignment changed
        clusterChanged = False
        count=0
        for i in range(m):  # assign each point to its nearest centroid
            minDist = np.inf; minIndex = -1;
            for j in range(k):    # test every centroid
                distJI = distMeans(centroid[j,:], dataSet[i,:-1])# distance, label column excluded
                if distJI < minDist:# keep the closest centroid seen so far
                    minDist = distJI; minIndex = j  # point i is (currently) nearest to centroid j
            if clusterAssment[i,0] != minIndex: # assignment changed for this point
                clusterChanged = True;count+=1  # so another iteration is needed
                #print(clusterAssment[i,0],'-->',minIndex)
            clusterAssment[i,:] = minIndex,minDist**2   # record assignment + squared distance

        for cent in range(k):   # recompute each centroid
            ptsInClust = dataSet[clusterAssment[:,0] == cent][:,:-1]   # rows assigned to this centroid, label dropped
            centroid[cent,:] = np.mean(ptsInClust, axis = 0)  # centroid = mean of its members
                                                           # NOTE: an empty cluster yields nan here
                                                         # axis=0 averages down the columns

        
        # Pitfall: appending ``centroid`` directly would store references to
        # the same array, so every snapshot would end up identical.
#         centroids.append(centroid)
#         clusterAssments.append(clusterAssment)
        if clusterChanged==True:
            centroids.append(copy.copy(centroid))
            clusterAssments.append(copy.copy(clusterAssment))
            counts.append(count)
    return centroids, clusterAssments,counts
# --------------------test----------------------------------------------------
# Run k-means on the test data and visualise each recorded iteration.
datMat=loadDataSet('data.txt')
myCentroids,clustAssings,counts = kMeans(datMat,4)
print(myCentroids)
t=len(clustAssings)
print(t)
counts # [58, 23, 3, 1]

fig1=plt.figure(1,figsize=(15,20))  # one figure holding all iteration subplots
#plt.xlabel('x')
#plt.ylabel('y')
for i in range(4):
    # 32*10+i+1 -> subplot codes 321..324 (3-row x 2-col grid)
    ax=fig1.add_subplot(32*10+i+1)
    # derive marker size/colour from the cluster index so clusters are distinguishable
    s=clustAssings[i][:,0]+30
    c=clustAssings[i][:,0]+20
    ax.scatter(datMat[:,0],datMat[:,1],s,c)
    # overlay that iteration's centroids as red '+' markers
    ax.scatter(myCentroids[i][:,0],myCentroids[i][:,1],s=150,c='r',marker='+')

plt.show()
"""