The code below is based on the article 零基础入门深度学习(6) - 长短时记忆网络(LSTM) ("Deep Learning from Scratch, Part 6: Long Short-Term Memory Networks"); I have only fixed a few small bugs that were present in the original code. For the theory behind LSTMs, and for anything in the code that is unclear, that article is a very accessible reference.
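For reference, here is a sketch of the forward pass the code implements, written with the variable names used in the code below; the equation numbering follows the referenced article, and * denotes element-wise multiplication:

f_t  = sigmoid(wfh·h_{t-1} + wfx·x_t + bf)    (forget gate, Eq. 1)
i_t  = sigmoid(wih·h_{t-1} + wix·x_t + bi)    (input gate, Eq. 2)
c~_t = tanh(wch·h_{t-1} + wcx·x_t + bc)       (candidate state, Eq. 3)
c_t  = f_t * c_{t-1} + i_t * c~_t             (cell state, Eq. 4)
o_t  = sigmoid(woh·h_{t-1} + wox·x_t + bo)    (output gate, Eq. 5)
h_t  = o_t * tanh(c_t)                        (output, Eq. 6)

The backward pass computes, at each time step k, the error terms of the gates and the candidate state (Eq. 9-12 in the article) and propagates delta_h back to time k-1 (Eq. 8).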
import numpy as np
# Sigmoid activation, used by the three gates
class SigmoidActivator(object):
    def forward(self, weighted_input):
        return 1 / (1 + np.exp(-weighted_input))
    def backward(self, output):
        # Derivative expressed in terms of the sigmoid's output
        return output * (1 - output)
# Tanh activation, used for the candidate and cell states
class TanhActivator(object):
    def forward(self, weighted_input):
        return 2 / (1 + np.exp(-2 * weighted_input)) - 1
    def backward(self, output):
        # Derivative expressed in terms of the tanh's output
        return 1 - output * output
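# The gradient check at the bottom of the file originally imported
# IdentityActivator from the author's DL.cnn module. A minimal local
# definition is included here as an assumption, to keep this file
# self-contained: forward is the identity, backward is all ones.
class IdentityActivator(object):
    def forward(self, weighted_input):
        return weighted_input
    def backward(self, output):
        return np.ones_like(output)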
class LstmLayer(object):
    def __init__(self, input_width, state_width, learning_rate):
        self.input_width = input_width
        self.state_width = state_width
        self.learning_rate = learning_rate
        # Activation function for the gates
        self.gate_activator = SigmoidActivator()
        # Activation function for the state/output
        self.output_activator = TanhActivator()
        # Current time step, initialized to 0
        self.times = 0
        # Cell state vectors c at each time step
        self.c_list = self.init_state_vec()
        # Output vectors h at each time step
        self.h_list = self.init_state_vec()
        # Forget gate f at each time step
        self.f_list = self.init_state_vec()
        # Input gate i at each time step
        self.i_list = self.init_state_vec()
        # Output gate o at each time step
        self.o_list = self.init_state_vec()
        # Candidate state c~ at each time step
        self.ct_list = self.init_state_vec()
        # Forget gate weight matrices wfh, wfx and bias bf
        self.wfh, self.wfx, self.bf = self.init_weight_mat()
        # Input gate weight matrices wih, wix and bias bi
        self.wih, self.wix, self.bi = self.init_weight_mat()
        # Output gate weight matrices woh, wox and bias bo
        self.woh, self.wox, self.bo = self.init_weight_mat()
        # Cell state weight matrices wch, wcx and bias bc
        self.wch, self.wcx, self.bc = self.init_weight_mat()
    def init_state_vec(self):
        '''
        Initialize a list of state vectors, with a zero vector for time 0
        '''
        state_vec_list = []
        state_vec_list.append(np.zeros((self.state_width, 1)))
        return state_vec_list
    def init_weight_mat(self):
        '''
        Initialize the weight matrices and the bias
        '''
        wh = np.random.uniform(-1e-4, 1e-4, (self.state_width, self.state_width))
        wx = np.random.uniform(-1e-4, 1e-4, (self.state_width, self.input_width))
        b = np.zeros((self.state_width, 1))
        return wh, wx, b
    def forward(self, x):
        '''
        Forward pass, following Eq. 1-6
        '''
        self.times += 1
        # Forget gate
        fg = self.calc_gate(x, self.wfx, self.wfh, self.bf, self.gate_activator)
        self.f_list.append(fg)
        # Input gate
        ig = self.calc_gate(x, self.wix, self.wih, self.bi, self.gate_activator)
        self.i_list.append(ig)
        # Output gate
        og = self.calc_gate(x, self.wox, self.woh, self.bo, self.gate_activator)
        self.o_list.append(og)
        # Candidate state
        ct = self.calc_gate(x, self.wcx, self.wch, self.bc, self.output_activator)
        self.ct_list.append(ct)
        # Cell state
        c = fg * self.c_list[self.times - 1] + ig * ct
        self.c_list.append(c)
        # Output
        h = og * self.output_activator.forward(c)
        self.h_list.append(h)
    def calc_gate(self, x, wx, wh, b, activator):
        '''
        Compute one gate's activation
        '''
        # Output of the LSTM at the previous time step
        h = self.h_list[self.times - 1]
        net = np.dot(wh, h) + np.dot(wx, x) + b
        gate = activator.forward(net)
        return gate
    def backward(self, x, delta_h, activator):
        '''
        Backward pass of the LSTM training algorithm
        (activator is accepted for interface consistency but is not used below)
        '''
        self.calc_delta(delta_h, activator)
        self.calc_gradient(x)
    def calc_delta(self, delta_h, activator):
        # Initialize the delta (error term) lists for every time step
        self.delta_h_list = self.init_delta()   # deltas of the output
        self.delta_o_list = self.init_delta()   # deltas of the output gate
        self.delta_i_list = self.init_delta()   # deltas of the input gate
        self.delta_f_list = self.init_delta()   # deltas of the forget gate
        self.delta_ct_list = self.init_delta()  # deltas of the candidate state
        # Store the current time step's delta, passed down from the layer above
        self.delta_h_list[-1] = delta_h
        # Iterate backward through time, computing the delta at each step
        for k in range(self.times, 0, -1):
            self.calc_delta_k(k)
    def init_delta(self):
        '''
        Initialize a list of delta vectors, one per time step
        '''
        delta_list = []
        for i in range(self.times + 1):
            delta_list.append(np.zeros((self.state_width, 1)))
        return delta_list
    def calc_delta_k(self, k):
        '''
        Given delta_h at time k, compute delta_f, delta_i, delta_o and
        delta_ct at time k, as well as delta_h at time k-1
        '''
        # Values saved during the forward pass at time k
        ig = self.i_list[k]
        og = self.o_list[k]
        fg = self.f_list[k]
        ct = self.ct_list[k]
        c = self.c_list[k]
        c_prev = self.c_list[k - 1]
        tanh_c = self.output_activator.forward(c)
        delta_k = self.delta_h_list[k]
        # delta_o per Eq. 9
        delta_o = (delta_k * tanh_c * self.gate_activator.backward(og))
        # delta_f per Eq. 10
        delta_f = (delta_k * og * (1 - tanh_c * tanh_c) * c_prev * self.gate_activator.backward(fg))
        # delta_i per Eq. 11
        delta_i = (delta_k * og * (1 - tanh_c * tanh_c) * ct * self.gate_activator.backward(ig))
        # delta_ct per Eq. 12
        delta_ct = (delta_k * og * (1 - tanh_c * tanh_c) * ig * self.output_activator.backward(ct))
        # delta_h at time k-1 per Eq. 8
        delta_h_pre = (np.dot(delta_o.T, self.woh) + np.dot(delta_i.T, self.wih)
                       + np.dot(delta_f.T, self.wfh) + np.dot(delta_ct.T, self.wch)).T
        # Save all the deltas
        self.delta_h_list[k - 1] = delta_h_pre
        self.delta_f_list[k] = delta_f
        self.delta_i_list[k] = delta_i
        self.delta_o_list[k] = delta_o
        self.delta_ct_list[k] = delta_ct
    # Compute the gradients
    def calc_gradient(self, x):
        # Initialize the forget gate's weight-gradient matrices and bias gradient
        self.wfh_grad, self.wfx_grad, self.bf_grad = self.init_weight_gradient_mat()
        # Initialize the input gate's weight-gradient matrices and bias gradient
        self.wih_grad, self.wix_grad, self.bi_grad = self.init_weight_gradient_mat()
        # Initialize the output gate's weight-gradient matrices and bias gradient
        self.woh_grad, self.wox_grad, self.bo_grad = self.init_weight_gradient_mat()
        # Initialize the cell state's weight-gradient matrices and bias gradient
        self.wch_grad, self.wcx_grad, self.bc_grad = self.init_weight_gradient_mat()
        # Gradients of the weights applied to the previous output h
        for t in range(self.times, 0, -1):
            # Gradients at time step t
            (wfh_grad, bf_grad, wih_grad, bi_grad,
             woh_grad, bo_grad, wch_grad, bc_grad) = self.calc_gradient_t(t)
            # The actual gradient is the sum of the gradients over all time steps
            self.wfh_grad += wfh_grad
            self.bf_grad += bf_grad
            self.wih_grad += wih_grad
            self.bi_grad += bi_grad
            self.woh_grad += woh_grad
            self.bo_grad += bo_grad
            self.wch_grad += wch_grad
            self.bc_grad += bc_grad
        # Gradients of the weights applied to the current input x
        # (backward only receives the final x, so only the last time step's
        # contribution is computed here)
        xt = x.T
        self.wfx_grad = np.dot(self.delta_f_list[-1], xt)
        self.wix_grad = np.dot(self.delta_i_list[-1], xt)
        self.wox_grad = np.dot(self.delta_o_list[-1], xt)
        self.wcx_grad = np.dot(self.delta_ct_list[-1], xt)
    def init_weight_gradient_mat(self):
        '''
        Initialize the gradient matrices with zeros
        '''
        wh_grad = np.zeros((self.state_width, self.state_width))
        wx_grad = np.zeros((self.state_width, self.input_width))
        b_grad = np.zeros((self.state_width, 1))
        return wh_grad, wx_grad, b_grad
    def calc_gradient_t(self, t):
        '''
        Compute the weight gradients at time step t
        '''
        h_pre = self.h_list[t - 1].T
        wfh_grad = np.dot(self.delta_f_list[t], h_pre)
        bf_grad = self.delta_f_list[t]
        wih_grad = np.dot(self.delta_i_list[t], h_pre)
        bi_grad = self.delta_i_list[t]
        woh_grad = np.dot(self.delta_o_list[t], h_pre)
        bo_grad = self.delta_o_list[t]
        wch_grad = np.dot(self.delta_ct_list[t], h_pre)
        bc_grad = self.delta_ct_list[t]
        return wfh_grad, bf_grad, wih_grad, bi_grad, woh_grad, bo_grad, wch_grad, bc_grad
    # Update the weights with gradient descent
    def update(self):
        self.wfh -= self.learning_rate * self.wfh_grad
        self.wfx -= self.learning_rate * self.wfx_grad
        self.bf -= self.learning_rate * self.bf_grad
        self.wih -= self.learning_rate * self.wih_grad
        self.wix -= self.learning_rate * self.wix_grad
        self.bi -= self.learning_rate * self.bi_grad
        self.woh -= self.learning_rate * self.woh_grad
        self.wox -= self.learning_rate * self.wox_grad
        self.bo -= self.learning_rate * self.bo_grad
        self.wch -= self.learning_rate * self.wch_grad
        self.wcx -= self.learning_rate * self.wcx_grad
        self.bc -= self.learning_rate * self.bc_grad
    # Reset the internal state (used by the gradient check)
    def reset_state(self):
        # Reset the current time step to 0
        self.times = 0
        # Cell state vectors c at each time step
        self.c_list = self.init_state_vec()
        # Output vectors h at each time step
        self.h_list = self.init_state_vec()
        # Forget gate f at each time step
        self.f_list = self.init_state_vec()
        # Input gate i at each time step
        self.i_list = self.init_state_vec()
        # Output gate o at each time step
        self.o_list = self.init_state_vec()
        # Candidate state c~ at each time step
        self.ct_list = self.init_state_vec()
# Gradient check
def data_set():
    x = [np.array([[1], [2], [3]]), np.array([[2], [3], [4]])]
    d = np.array([[1], [2]])
    return x, d
def gradient_check():
    '''
    Gradient check
    '''
    # Use an error function that sums all components of the output
    error_function = lambda o: o.sum()
    lstm = LstmLayer(3, 2, 1e-3)
    # Run the forward pass
    x, d = data_set()
    lstm.forward(x[0])
    lstm.forward(x[1])
    # Build the sensitivity map
    sensitivity_array = np.ones(lstm.h_list[-1].shape, dtype=np.float64)
    # Compute the gradients (IdentityActivator is defined at the top of the file)
    lstm.backward(x[1], sensitivity_array, IdentityActivator())
    # Check the gradient of wfh entry by entry against a numerical estimate
    epsilon = 1e-4
    for i in range(lstm.wfh.shape[0]):
        for j in range(lstm.wfh.shape[1]):
            lstm.wfh[i][j] += epsilon
            lstm.reset_state()
            lstm.forward(x[0])
            lstm.forward(x[1])
            err1 = error_function(lstm.h_list[-1])
            lstm.wfh[i][j] -= 2 * epsilon
            lstm.reset_state()
            lstm.forward(x[0])
            lstm.forward(x[1])
            err2 = error_function(lstm.h_list[-1])
            expect_grad = (err1 - err2) / (2 * epsilon)
            lstm.wfh[i][j] += epsilon
            print('weights(%d,%d): expected - actual %.4e - %.4e' % (i, j, expect_grad, lstm.wfh_grad[i][j]))
    return lstm
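To try the check, the file can be run directly; a minimal driver (a sketch, assuming the definitions above) is:

if __name__ == '__main__':
    # Prints one expected/actual gradient pair per entry of wfh
    gradient_check()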