Implementing an LSTM in Python
阿新 · Published 2018-12-28
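This post implements a single LSTM layer from scratch with NumPy: the forward pass, backpropagation through time for the error terms and weight gradients, and a numerical gradient check.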
import numpy as np
# An LSTM layer class (input_width is the dimension of the input vector,
# state_width is the dimension of the state vector, learning_rate is the
# learning rate)
class LstmLayer(object):
    def __init__(self, input_width, state_width, learning_rate):
        self.input_width = input_width
        self.state_width = state_width
        self.learning_rate = learning_rate
        # Activation function for the gates
        self.gate_activator = SigmoidActivator()
        # Activation function for the output
        self.output_activator = TanhActivator()
        # Current time step, initialized to t0
        self.times = 0
        # Cell state vectors c for each time step
        self.c_list = self.init_state_vec()
        # Output vectors h for each time step
        self.h_list = self.init_state_vec()
        # Forget gate f for each time step
        self.f_list = self.init_state_vec()
        # Input gate i for each time step
        self.i_list = self.init_state_vec()
        # Output gate o for each time step
        self.o_list = self.init_state_vec()
        # Candidate state c~ for each time step
        self.ct_list = self.init_state_vec()
        # Forget gate weight matrices Wfh, Wfx and bias bf
        self.Wfh, self.Wfx, self.bf = self.init_weight_mat()
        # Input gate weight matrices Wih, Wix and bias bi
        self.Wih, self.Wix, self.bi = self.init_weight_mat()
        # Output gate weight matrices Woh, Wox and bias bo
        self.Woh, self.Wox, self.bo = self.init_weight_mat()
        # Cell state weight matrices Wch, Wcx and bias bc
        self.Wch, self.Wcx, self.bc = self.init_weight_mat()
    def init_state_vec(self):
        # Initialize a state list holding a zero vector for t0
        state_vec_list = []
        state_vec_list.append(np.zeros((self.state_width, 1)))
        return state_vec_list

    def init_weight_mat(self):
        # Initialize weight matrices with small random values, biases with zeros
        Wh = np.random.uniform(-1e-4, 1e-4, (self.state_width, self.state_width))
        Wx = np.random.uniform(-1e-4, 1e-4, (self.state_width, self.input_width))
        b = np.zeros((self.state_width, 1))
        return Wh, Wx, b
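    # For reference, forward() below implements the standard LSTM cell
    # equations; the symbols match the attribute names above:
    #   f_t  = sigmoid(Wfh . h_{t-1} + Wfx . x_t + bf)   (forget gate)
    #   i_t  = sigmoid(Wih . h_{t-1} + Wix . x_t + bi)   (input gate)
    #   o_t  = sigmoid(Woh . h_{t-1} + Wox . x_t + bo)   (output gate)
    #   c~_t = tanh(Wch . h_{t-1} + Wcx . x_t + bc)      (candidate state)
    #   c_t  = f_t * c_{t-1} + i_t * c~_t                (cell state)
    #   h_t  = o_t * tanh(c_t)                           (output)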
    def forward(self, x):
        # Forward pass for one time step (Eq. 1 through Eq. 6; see the
        # reference equations above)
        self.times += 1
        # Forget gate
        fg = self.calc_gate(x, self.Wfx, self.Wfh, self.bf, self.gate_activator)
        self.f_list.append(fg)
        # Input gate
        ig = self.calc_gate(x, self.Wix, self.Wih, self.bi, self.gate_activator)
        self.i_list.append(ig)
        # Output gate
        og = self.calc_gate(x, self.Wox, self.Woh, self.bo, self.gate_activator)
        self.o_list.append(og)
        # Candidate state
        ct = self.calc_gate(x, self.Wcx, self.Wch, self.bc, self.output_activator)
        self.ct_list.append(ct)
        # Cell state
        c = fg * self.c_list[self.times - 1] + ig * ct
        self.c_list.append(c)
        # Output
        h = og * self.output_activator.forward(c)
        self.h_list.append(h)

    def calc_gate(self, x, Wx, Wh, b, activator):
        # Compute one gate's activation
        h = self.h_list[self.times - 1]  # previous LSTM output
        net = np.dot(Wh, h) + np.dot(Wx, x) + b
        gate = activator.forward(net)
        return gate
    def backward(self, x, delta_h, activator):
        # One step of the LSTM training algorithm: backpropagate the error
        # terms, then compute the weight gradients
        self.calc_delta(delta_h, activator)
        self.calc_gradient(x)

    def update(self):
        # Apply one gradient-descent step to all weights
        self.Wfh -= self.learning_rate * self.Wfh_grad
        self.Wfx -= self.learning_rate * self.Wfx_grad
        self.bf -= self.learning_rate * self.bf_grad
        self.Wih -= self.learning_rate * self.Wih_grad
        self.Wix -= self.learning_rate * self.Wix_grad
        self.bi -= self.learning_rate * self.bi_grad
        self.Woh -= self.learning_rate * self.Woh_grad
        self.Wox -= self.learning_rate * self.Wox_grad
        self.bo -= self.learning_rate * self.bo_grad
        self.Wch -= self.learning_rate * self.Wch_grad
        self.Wcx -= self.learning_rate * self.Wcx_grad
        self.bc -= self.learning_rate * self.bc_grad
    def calc_delta(self, delta_h, activator):
        # Initialize the error terms for every time step
        self.delta_h_list = self.init_delta()   # output error terms
        self.delta_o_list = self.init_delta()   # output gate error terms
        self.delta_i_list = self.init_delta()   # input gate error terms
        self.delta_f_list = self.init_delta()   # forget gate error terms
        self.delta_ct_list = self.init_delta()  # candidate state error terms
        # Store the current time step's error term, passed down from the
        # layer above
        self.delta_h_list[-1] = delta_h
        # Iterate backwards, computing the error terms at each time step
        for k in range(self.times, 0, -1):
            self.calc_delta_k(k)

    def init_delta(self):
        # Initialize the error-term list with zero vectors
        delta_list = []
        for i in range(self.times + 1):
            delta_list.append(np.zeros((self.state_width, 1)))
        return delta_list
    def calc_delta_k(self, k):
        # From delta_h at time k, compute delta_f, delta_i, delta_o and
        # delta_ct at time k, plus delta_h at time k-1
        # Fetch the forward-pass values at time k
        ig = self.i_list[k]
        og = self.o_list[k]
        fg = self.f_list[k]
        ct = self.ct_list[k]
        c = self.c_list[k]
        c_prev = self.c_list[k - 1]
        tanh_c = self.output_activator.forward(c)
        delta_k = self.delta_h_list[k]
        # delta_o, per Eq. 9 of the original article
        delta_o = (delta_k * tanh_c * self.gate_activator.backward(og))
        delta_f = (delta_k * og * (1 - tanh_c * tanh_c) * c_prev * self.gate_activator.backward(fg))
        delta_i = (delta_k * og * (1 - tanh_c * tanh_c) * ct * self.gate_activator.backward(ig))
        delta_ct = (delta_k * og * (1 - tanh_c * tanh_c) * ig * self.output_activator.backward(ct))
        delta_h_prev = (np.dot(delta_o.transpose(), self.Woh) +
                        np.dot(delta_i.transpose(), self.Wih) +
                        np.dot(delta_f.transpose(), self.Wfh) +
                        np.dot(delta_ct.transpose(), self.Wch)).transpose()
        # Save all the delta values
        self.delta_h_list[k - 1] = delta_h_prev
        self.delta_f_list[k] = delta_f
        self.delta_i_list[k] = delta_i
        self.delta_o_list[k] = delta_o
        self.delta_ct_list[k] = delta_ct
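    # The gate deltas above follow from the chain rule applied to
    # h_t = o_t * tanh(c_t) and c_t = f_t * c_{t-1} + i_t * c~_t:
    # delta_o flows through tanh(c_t) directly, while delta_f, delta_i and
    # delta_ct flow through the cell state (hence the og * (1 - tanh_c^2)
    # factor); delta_h_prev then collects the error reaching h_{t-1}
    # through all four weight matrices.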
    def calc_gradient(self, x):
        # Initialize the weight gradient matrices and bias gradients for the
        # forget gate, input gate, output gate and cell state
        self.Wfh_grad, self.Wfx_grad, self.bf_grad = self.init_weight_gradient_mat()
        self.Wih_grad, self.Wix_grad, self.bi_grad = self.init_weight_gradient_mat()
        self.Woh_grad, self.Wox_grad, self.bo_grad = self.init_weight_gradient_mat()
        self.Wch_grad, self.Wcx_grad, self.bc_grad = self.init_weight_gradient_mat()
        # Gradients of the weights on the previous output h
        for t in range(self.times, 0, -1):
            # Gradient at each time step
            (Wfh_grad, bf_grad,
             Wih_grad, bi_grad,
             Woh_grad, bo_grad,
             Wch_grad, bc_grad) = self.calc_gradient_t(t)
            # The actual gradient is the sum over all time steps
            self.Wfh_grad += Wfh_grad
            self.bf_grad += bf_grad
            self.Wih_grad += Wih_grad
            self.bi_grad += bi_grad
            self.Woh_grad += Woh_grad
            self.bo_grad += bo_grad
            self.Wch_grad += Wch_grad
            self.bc_grad += bc_grad
        # Gradients of the weights on the current input x; only the final
        # time step's delta is used, since x is the input at that step
        xt = x.transpose()
        self.Wfx_grad = np.dot(self.delta_f_list[-1], xt)
        self.Wix_grad = np.dot(self.delta_i_list[-1], xt)
        self.Wox_grad = np.dot(self.delta_o_list[-1], xt)
        self.Wcx_grad = np.dot(self.delta_ct_list[-1], xt)
    def init_weight_gradient_mat(self):
        # Initialize the gradient matrices with zeros
        Wh_grad = np.zeros((self.state_width, self.state_width))
        Wx_grad = np.zeros((self.state_width, self.input_width))
        b_grad = np.zeros((self.state_width, 1))
        return Wh_grad, Wx_grad, b_grad

    def calc_gradient_t(self, t):
        # Gradients of the weights at time step t
        h_prev = self.h_list[t - 1].transpose()
        Wfh_grad = np.dot(self.delta_f_list[t], h_prev)
        bf_grad = self.delta_f_list[t]
        Wih_grad = np.dot(self.delta_i_list[t], h_prev)
        bi_grad = self.delta_i_list[t]
        Woh_grad = np.dot(self.delta_o_list[t], h_prev)
        bo_grad = self.delta_o_list[t]
        Wch_grad = np.dot(self.delta_ct_list[t], h_prev)
        bc_grad = self.delta_ct_list[t]
        return Wfh_grad, bf_grad, Wih_grad, bi_grad, Woh_grad, bo_grad, Wch_grad, bc_grad
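    # Note: the bias gradients above are simply the deltas themselves,
    # since the derivative of net = Wh.h + Wx.x + b with respect to b is 1.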
    def reset_state(self):
        # Reset the current time step to t0 and clear all stored state:
        # cell states c, outputs h, forget gates f, input gates i,
        # output gates o, and candidate states c~
        self.times = 0
        self.c_list = self.init_state_vec()
        self.h_list = self.init_state_vec()
        self.f_list = self.init_state_vec()
        self.i_list = self.init_state_vec()
        self.o_list = self.init_state_vec()
        self.ct_list = self.init_state_vec()
def data_set():
    # A tiny fixed data set: a two-step input sequence and its targets
    x = [np.array([[1], [2], [3]]),
         np.array([[2], [3], [4]])]
    d = np.array([[1], [2]])
    return x, d
class IdentityActivator(object):
    # Identity activation, used by gradient_check() and test() so the
    # error term is passed through unchanged
    def forward(self, weighted_input):
        return weighted_input

    def backward(self, weighted_input):
        return 1.0
# Activation functions: sigmoid for the gates, tanh for the state/output
class SigmoidActivator(object):
    def forward(self, weighted_input):
        return 1.0 / (1.0 + np.exp(-weighted_input))

    def backward(self, output):
        return output * (1 - output)

class TanhActivator(object):
    def forward(self, weighted_input):
        return 2.0 / (1.0 + np.exp(-2 * weighted_input)) - 1.0

    def backward(self, output):
        return 1 - output * output
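# Note: backward() on both activators takes the *output* of forward(), not
# the pre-activation input; for y = sigmoid(z), dy/dz = y * (1 - y), and for
# y = tanh(z), dy/dz = 1 - y^2, so the values cached during the forward pass
# can be reused directly during backpropagation.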
def gradient_check():
    # Gradient check: compare the analytic gradient with a numerical one
    # Use an error function that simply sums all output components
    error_function = lambda o: o.sum()
    lstm = LstmLayer(3, 2, 1e-3)
    # Forward pass over the sequence
    x, d = data_set()
    lstm.forward(x[0])
    lstm.forward(x[1])
    # Sensitivity map: d(error)/d(h) is all ones for the sum error function
    sensitivity_array = np.ones(lstm.h_list[-1].shape, dtype=np.float64)
    # Analytic gradient
    lstm.backward(x[1], sensitivity_array, IdentityActivator())
    # Check each entry of Wfh numerically
    epsilon = 10e-4
    for i in range(lstm.Wfh.shape[0]):
        for j in range(lstm.Wfh.shape[1]):
            lstm.Wfh[i, j] += epsilon
            lstm.reset_state()
            lstm.forward(x[0])
            lstm.forward(x[1])
            err1 = error_function(lstm.h_list[-1])
            lstm.Wfh[i, j] -= 2 * epsilon
            lstm.reset_state()
            lstm.forward(x[0])
            lstm.forward(x[1])
            err2 = error_function(lstm.h_list[-1])
            expect_grad = (err1 - err2) / (2 * epsilon)
            lstm.Wfh[i, j] += epsilon
            print('weights(%d,%d): expected - actual %.4e - %.4e' % (
                i, j, expect_grad, lstm.Wfh_grad[i, j]))
    return lstm
def test():
    l = LstmLayer(3, 2, 1e-3)
    x, d = data_set()
    l.forward(x[0])
    l.forward(x[1])
    l.backward(x[1], d, IdentityActivator())
    return l
if __name__ == '__main__':
    gradient_check()
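Running `gradient_check()` should print matching expected/actual values for each entry of `Wfh`, confirming the backward pass. As a minimal sketch of a full training step (the `(h - d)` error term below assumes a squared-error loss on the final output, which is an illustrative choice, not something the code above prescribes):

    lstm = LstmLayer(3, 2, 1e-3)
    x, d = data_set()
    # Forward through the whole sequence
    lstm.forward(x[0])
    lstm.forward(x[1])
    # Error term on the final output for a squared-error loss (assumed here)
    delta_h = lstm.h_list[-1] - d
    # Backpropagate, apply one gradient-descent step, then reset for the
    # next sequence
    lstm.backward(x[1], delta_h, IdentityActivator())
    lstm.update()
    lstm.reset_state()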