
TensorFlow Batch Training

A quick look at how to feed batches of data into TensorFlow and train on them, comparing mini-batch training with stochastic (single-sample) training.

The code is as follows:

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
sess = tf.Session()
batch_size = 20
x_vals = np.random.normal(1, 0.1, 100)   # 100 inputs drawn from N(1, 0.1)
y_vals = np.repeat(10., 100)             # all 100 targets are 10
x_data = tf.placeholder(shape=[None,1],dtype=tf.float32)   # None lets the placeholder accept any batch size
y_target = tf.placeholder(shape=[None,1],dtype=tf.float32)
A = tf.Variable(tf.random_normal(shape=[1,1]))             # the single parameter to learn
my_output = tf.matmul(x_data,A)                            # linear model: y = x * A
loss = tf.reduce_mean(tf.square(my_output - y_target))
my_opt = tf.train.GradientDescentOptimizer(0.02)
train_step = my_opt.minimize(loss)
init = tf.global_variables_initializer() # !!! A must be initialized before running the graph !!!
sess.run(init)
loss_batch = []
loss_stochastic = []
for i in range(100):
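    # Batch step: draw batch_size random indices and feed the points as a (batch_size, 1) column vector.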
    rand_index1 = np.random.choice(100,size=batch_size)
    rand_x1 = np.transpose([x_vals[rand_index1]])
    rand_y1 = np.transpose([y_vals[rand_index1]])
    sess.run(train_step, feed_dict={x_data: rand_x1, y_target: rand_y1})
    if (i + 1) % 5 == 0:
        print('Step1# '+str(i+1)+' A1 = '+str(sess.run(A)))
        temp_loss = sess.run(loss,feed_dict={x_data:rand_x1,y_target:rand_y1})
        print('Loss1 = '+str(temp_loss))
        loss_batch.append(temp_loss)
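    # Stochastic step: draw a single random point and feed it as a (1, 1) array.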
    rand_index2 = np.random.choice(100)
    rand_x2 = [[x_vals[rand_index2]]]
    rand_y2 = [[y_vals[rand_index2]]]
    sess.run(train_step, feed_dict={x_data: rand_x2, y_target: rand_y2})
    if (i + 1) % 5 == 0:
        print('Normal_step2#' + str(i + 1) + ' A2 = ' + str(sess.run(A)))
        temp_loss = sess.run(loss, feed_dict={x_data: rand_x2, y_target: rand_y2})
        print('Normal_loss2 = ' + str(temp_loss))
        loss_stochastic.append(temp_loss)
plt.plot(range(0,100,5),loss_stochastic,'b-',label='Stochastic Loss')
plt.plot(range(0,100,5),loss_batch,'r--',label='Batch Loss (size=20)')
plt.legend() # show the curve labels on the plot
plt.show()
Visualization result: a plot of the stochastic loss (blue solid line) and the batch loss (red dashed line), each recorded every 5 steps; the batch loss curve is typically much smoother, since each gradient is averaged over 20 points instead of computed from a single one.
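As an aside not covered in the original post, the same mini-batch feeding can also be expressed with the tf.data input pipeline instead of sampling indices by hand with np.random.choice. The sketch below is a minimal alternative assuming TensorFlow 1.x; the names dataset, x_batch and y_batch are illustrative only:

import numpy as np
import tensorflow as tf

sess = tf.Session()
x_vals = np.random.normal(1, 0.1, 100).astype(np.float32)
y_vals = np.repeat(10., 100).astype(np.float32)

# Dataset that shuffles the 100 points and yields batches of 20 indefinitely.
dataset = tf.data.Dataset.from_tensor_slices((x_vals, y_vals))
dataset = dataset.shuffle(buffer_size=100).repeat().batch(20)
x_batch, y_batch = dataset.make_one_shot_iterator().get_next()

A = tf.Variable(tf.random_normal(shape=[]))        # scalar parameter, same model y = A * x
loss = tf.reduce_mean(tf.square(x_batch * A - y_batch))
train_step = tf.train.GradientDescentOptimizer(0.02).minimize(loss)

sess.run(tf.global_variables_initializer())
for i in range(100):
    _, temp_loss = sess.run([train_step, loss])
    if (i + 1) % 5 == 0:
        print('Step ' + str(i + 1) + ' A = ' + str(sess.run(A)) + ' Loss = ' + str(temp_loss))

With a pipeline like this no feed_dict is needed: the iterator tensors are consumed directly by the graph, and the batching logic lives in one place.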