RNN (2): Implementing an LSTM with TensorFlow

The LSTM forward pass, without training iterations

The most basic LSTM structure; loss computation and backpropagation are not involved.

import tensorflow as tf
import numpy as np

inputs = tf.placeholder(tf.float32, shape=(32, 40, 5))  # 32 is the batch_size, 40 time steps, 5 features per step
lstm_cell_1 = tf.nn.rnn_cell.LSTMCell(num_units=128)    # instantiate an LSTM cell with 128 output units

print("output_size:",lstm_cell_1.output_size)
print("state_size:",lstm_cell_1.state_size)
print(lstm_cell_1.state_size.h)
print(lstm_cell_1.state_size.c)

output,state=tf.nn.dynamic_rnn(
    cell=lstm_cell_1,
    inputs=inputs,
    dtype=tf.float32
)
# dynamic_rnn unrolls the cell along the time dimension of `inputs`, collecting the
# per-step outputs in `output` and the final hidden state in `state`.
# (`inputs` has three dimensions: batch_size, then time steps, then the data of each step.)
# A sketch of passing an explicit initial state follows the printed output below.

print("第一個輸入的最後一個序列的預測輸出:",output[1,-1,:])
print("output.shape:",output.shape)
print("len of state tuple",len(state))
print("state.h.shape:",state.h.shape)
print("state.c.shape:",state.c.shape)


#>>>
output_size: 128
state_size: LSTMStateTuple(c=128, h=128)
128
128
last-step output of the first sequence in the batch: Tensor("strided_slice:0", shape=(128,), dtype=float32)
output.shape: (32, 40, 128)
len of state tuple 2
state.h.shape: (32, 128)
state.c.shape: (32, 128)
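
By default dynamic_rnn starts from an all-zero state and infers its shape from `dtype`. Below is a minimal standalone sketch of supplying the state explicitly instead, using the same TF1.x API as above (`zero_state` and the `initial_state` argument are part of that API; the variable names are mine):

import tensorflow as tf

batch_size = 32
inputs = tf.placeholder(tf.float32, shape=(batch_size, 40, 5))
cell = tf.nn.rnn_cell.LSTMCell(num_units=128)

# an explicit all-zero LSTMStateTuple(c, h); equivalent to the default behaviour
init_state = cell.zero_state(batch_size, tf.float32)

output, state = tf.nn.dynamic_rnn(
    cell=cell,
    inputs=inputs,
    initial_state=init_state,  # dtype can be omitted once initial_state is given
)

Because `state` is itself an LSTMStateTuple of (32, 128) tensors, it can be fed back as the next call's initial_state to carry the hidden state across segments of a long sequence.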

Classifying MNIST digits with an LSTM
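
Each 28×28 image is fed to the LSTM as a sequence: its 28 rows are the time steps (nsteps=28), and the 28 pixels of a row are the features for that step (diminput=28). A minimal numpy sketch of the reshape the training loop performs, on a toy array rather than real MNIST data:

import numpy as np

batch = np.zeros((16, 784), dtype=np.float32)  # toy stand-in for a flattened MNIST batch
seq = batch.reshape((16, 28, 28))              # batch_size x nsteps x diminput
print(seq.shape)                               # (16, 28, 28): row t is the input at time step t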

# imports and loading of the MNIST data

import tensorflow as tf
import input_data   # the MNIST loader script from the TF1 tutorials (downloads/reads the data set)
import numpy as np
import matplotlib.pyplot as plt

mnist = input_data.read_data_sets("data/", one_hot=True)
trainimgs, trainlabels, testimgs, testlabels \
 = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels 
ntrain, ntest, dim, nclasses \
 = trainimgs.shape[0], testimgs.shape[0], trainimgs.shape[1], trainlabels.shape[1]
print ("MNIST loaded")
diminput = 28          # pixels per image row = features per time step
dimhidden = 128        # LSTM hidden units
dimoutput = nclasses   # 10 digit classes
nsteps = 28            # image rows = time steps
weights = {
    'hidden': tf.Variable(tf.random_normal([diminput, dimhidden])),
    'out': tf.Variable(tf.random_normal([dimhidden, dimoutput]))
}
biases = {
    'hidden': tf.Variable(tf.random_normal([dimhidden])),
    'out': tf.Variable(tf.random_normal([dimoutput]))
}
def RNN(X, W, B, nsteps, name):
    print(X.shape, '---')
    # fold the time steps into the batch dimension: (batch*nsteps, diminput)
    X = tf.reshape(X, [-1, diminput])
    # per-step linear projection into the hidden dimension
    X = tf.matmul(X, W['hidden']) + B['hidden']
    # restore the time dimension: (batch, nsteps, dimhidden)
    X = tf.reshape(X, [-1, nsteps, dimhidden])
    print(X.shape)
    with tf.variable_scope(name) as scope:
        #scope.reuse_variables()
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(dimhidden, forget_bias=1.0)
        lstm_o, lstm_s = tf.nn.dynamic_rnn(cell=lstm_cell, inputs=X, dtype=tf.float32)
    # classify from the last time step: lstm_o has shape (batch, nsteps, dimhidden)
    resultOut = tf.matmul(lstm_o[:, -1, :], W['out']) + B['out']
    return {
        'X': X,
        'lstm_o': lstm_o, 'lstm_s': lstm_s, 'resultOut': resultOut
    }
learning_rate = 0.001
x = tf.placeholder('float', [None, nsteps, diminput])
y = tf.placeholder('float', [None, dimoutput])

myrnn = RNN(x, weights, biases, nsteps, 'basic')
pred = myrnn['resultOut']
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
accr = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)), tf.float32))
init = tf.global_variables_initializer()
training_epochs = 33
batch_size = 16
display_step = 1
sess = tf.Session()
sess.run(init)

for epoch in range(training_epochs):
    avg_cost = 0.       # note: the log below was recorded with this initialized to 100, hence the ~100 offset in its cost values
    total_batch = 100   # a fixed number of mini-batches per epoch, not the full training set
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        batch_xs = batch_xs.reshape((batch_size, nsteps, diminput))
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        # accumulate the average loss over the epoch
        avg_cost += sess.run(cost, feed_dict=feeds) / total_batch
    if epoch % display_step == 0:
        print("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        feeds = {x: batch_xs, y: batch_ys}
        train_acc = sess.run(accr, feed_dict=feeds)
        print(" Training accuracy: %.3f" % (train_acc))
        testimgs = testimgs.reshape((ntest, nsteps, diminput))
        feeds = {x: testimgs, y: testlabels}
        test_acc = sess.run(accr, feed_dict=feeds)
        print(" Test accuracy: %.3f" % (test_acc))
Epoch: 000/033 cost: 101.797383542
 Training accuracy: 0.688
 Test accuracy: 0.461
Epoch: 001/033 cost: 101.269138204
 Training accuracy: 0.438
 Test accuracy: 0.549
Epoch: 002/033 cost: 101.139203327
 Training accuracy: 0.688
 Test accuracy: 0.614
Epoch: 003/033 cost: 100.965362185
 Training accuracy: 0.938
 Test accuracy: 0.619
Epoch: 004/033 cost: 100.914383653
 Training accuracy: 0.875
 Test accuracy: 0.648
Epoch: 005/033 cost: 100.813317066
 Training accuracy: 0.625
 Test accuracy: 0.656
Epoch: 006/033 cost: 100.781623098
 Training accuracy: 0.875
 Test accuracy: 0.708
Epoch: 007/033 cost: 100.710710035
 Training accuracy: 1.000
 Test accuracy: 0.716
Epoch: 008/033 cost: 100.684573339
 Training accuracy: 1.000
 Test accuracy: 0.745
Epoch: 009/033 cost: 100.635698693
 Training accuracy: 0.875
 Test accuracy: 0.751
Epoch: 010/033 cost: 100.622099145
 Training accuracy: 0.938
 Test accuracy: 0.763
Epoch: 011/033 cost: 100.562925613
 Training accuracy: 0.750
 Test accuracy: 0.763
Epoch: 012/033 cost: 100.592214927
 Training accuracy: 0.812
 Test accuracy: 0.771
Epoch: 013/033 cost: 100.544024273
 Training accuracy: 0.938
 Test accuracy: 0.769
Epoch: 014/033 cost: 100.516522627
 Training accuracy: 0.875
 Test accuracy: 0.791
Epoch: 015/033 cost: 100.479632292
 Training accuracy: 0.938
 Test accuracy: 0.801
Epoch: 016/033 cost: 100.471150137
 Training accuracy: 0.938
 Test accuracy: 0.816
Epoch: 017/033 cost: 100.431061392
 Training accuracy: 0.875
 Test accuracy: 0.807
Epoch: 018/033 cost: 100.464853102
 Training accuracy: 0.812
 Test accuracy: 0.798
Epoch: 019/033 cost: 100.445183915
 Training accuracy: 0.750
 Test accuracy: 0.828
Epoch: 020/033 cost: 100.399013084
 Training accuracy: 1.000
 Test accuracy: 0.804
Epoch: 021/033 cost: 100.393008129
 Training accuracy: 0.938
 Test accuracy: 0.833
Epoch: 022/033 cost: 100.413909222
 Training accuracy: 0.812
 Test accuracy: 0.815