from collections import Counter
from math import log

# Emission counts for the four HMM tags:
#   s = single-character word, b = word begin, m = word middle, e = word end
hmm_model = {tag: Counter() for tag in 'sbme'}

# dict.txt is a word-frequency file: one "<word> <count>" pair per line.
with open('dict.txt', 'r', encoding="utf-8") as f:
    for line in f:
        lines = line.split(' ')
        word, freq = lines[0], int(lines[1])
        if len(word) == 1:
            # single-character word
            hmm_model['s'][word] += freq
        else:
            # multi-character word: first character
            hmm_model['b'][word[0]] += freq
            # last character
            hmm_model['e'][word[-1]] += freq
            if len(word) > 2:
                # interior characters
                for ch in word[1:-1]:
                    hmm_model['m'][ch] += freq

# log of the total emission count per tag; used to turn raw counts
# into log-probabilities P(char | tag) in hmm_cut().
log_total = {tag: log(sum(hmm_model[tag].values())) for tag in 'sbme'}
# The transition probabilities below largely determine segmentation accuracy.
# They are hand-tuned here, but could instead be learned: tag a corpus with
# s/b/m/e labels, estimate the transition frequencies from the tagged data,
# and use those estimates (or a trained model's outputs) in place of these constants.
# Hand-tuned tag-transition probabilities, stored in log space.
# Only legal tag bigrams appear; any absent pair has probability 0.
trans = {pair: log(p) for pair, p in {
    'ss': 0.3,
    'sb': 0.6,
    'bm': 0.3,
    'be': 0.85,
    'mm': 0.3,
    'me': 0.7,
    'es': 0.3,
    'eb': 0.7,
}.items()}
# Viterbi maximum-path search over the tag lattice
def viterbi(nodes):
    """Return the most probable tag string for ``nodes``.

    ``nodes`` is a list with one dict per character, mapping each tag
    ('s', 'b', 'm', 'e') to its emission log-score.  Paths are extended
    one character at a time, keeping only the best path ending in each
    tag, and only extensions whose tag bigram exists in ``trans``.
    """
    # Map from candidate tag string -> accumulated log score.
    paths = nodes[0]
    for step in range(1, len(nodes)):
        prev_paths, paths = paths, {}
        for tag, score in nodes[step].items():
            # Score every legal extension of the surviving paths.
            candidates = {
                path + tag: prev_score + score + trans[path[-1] + tag]
                for path, prev_score in prev_paths.items()
                if path[-1] + tag in trans
            }
            # Keep only the best path ending in this tag
            # (first max in insertion order, as before).
            best = max(candidates, key=candidates.get)
            paths[best] = candidates[best]
    # Best complete path overall.
    return max(paths, key=paths.get)

def hmm_cut(s):
    """Segment string ``s`` into a list of words with the HMM tagger.

    Each character is scored per tag with add-one-smoothed emission
    log-probabilities, viterbi() selects the best tag sequence, and
    consecutive characters are merged into words based on the tags.
    Returns [] for an empty input.
    """
    if not s:
        # Guard: viterbi() and s[0] below would fail on empty input.
        return []
    # log((count + 1) / total) for every character of s, per tag.
    nodes = [{tag: log(counts[ch] + 1) - log_total[tag]
              for tag, counts in hmm_model.items()} for ch in s]
    tags = viterbi(nodes)
    words = [s[0]]
    for i in range(1, len(s)):
        if tags[i] in ('b', 's'):
            # 'b' (word begin) or 's' (single) starts a new word.
            words.append(s[i])
        else:
            # 'm' or 'e' continues the current word.
            words[-1] += s[i]
    return words
if __name__ == '__main__':
    # Demo: segment a sample sentence and print words space-separated.
    segmented = hmm_cut(u'今天天气不错')
    print(' '.join(segmented))