(Tensorflow之三)神經網路程式一
阿新 • • 發佈:2019-01-10
import tensorflow as tf from numpy.random import RandomState #輸入層(設有3個輸入變數1x3) rdm = RandomState(1) IN = rdm.rand(10000,3) #隨機生層10個3維陣列,取值範圍在0~1之間; x =tf.placeholder(tf.float32,shape=(None,3),name='x_input') #設定輸入層到隱含層隨機權重(設隱含層有3個神經元3x3) in_to_hd_w1 = tf.Variable(tf.random_normal([3,3], stddev=1,seed=1)) #隱含層3個神經元的值(1x3) hd_layer = tf.matmul(x,in_to_hd_w1) #隱含層到輸出層隨機權重(3x1) hd_to_out_w2 = tf.Variable(tf.random_normal([3,1], stddev=1,seed=1)) #訓練輸出值,輸出值Y非零即1 OUT = [[int(x1+x2+x3<1)] for (x1,x2,x3) in IN] y = tf.placeholder(tf.float32,shape=(None,1),name='y_output') #僅有一個輸出值(1x1) yy = tf.matmul(hd_layer,hd_to_out_w2) sess = tf.Session() #注:初始話全部函式 sess.run(tf.global_variables_initializer()); #訓練 #損失函式 learning_rate = 0.001 data_size = 10 loss = -tf.reduce_mean(y*tf.log(tf.clip_by_value(yy,1e-10,1.0))) training = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss); #設定訓練的輪數 time_s = 1000 #設定每次訓練的個數 batch = 10 for i in range(0,10000,1): sess.run(training,feed_dict={x:IN[i:(i+1)*data_size],y:OUT[i:(i+1)*data_size]}) if(i%1000==0): print("loss is:") print(sess.run(loss,feed_dict={x:IN,y:OUT})) #print("in value is:") #print(IN[i]) #print("out value is:") #print(OUT[i]) #print("predict out is:") #print(sess.run(yy,feed_dict={x:IN[i:i+1]}))
執行結果:
loss is: 1.44677 loss is: 1.02309 loss is: 0.737368 loss is: 0.49516 loss is: 0.037638 loss is: 0.0149277 loss is: 0.0121234 loss is: 0.010189 loss is: 0.00865641 loss is: 0.00752897可以看到:損失值越來越小,學習趨於收斂;