
TensorFlow Code-Along, Part 1: Linear Regression

Linear Regression


Reference: link
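The listing below fits a straight line to 17 data points by gradient descent. Concretely, the model and the cost it minimizes are

$$\hat{y} = Wx + b, \qquad J(W, b) = \frac{1}{2n}\sum_{i=1}^{n}\bigl(Wx_i + b - y_i\bigr)^2, \quad n = 17,$$

and each optimizer step moves the parameters along the negative gradient, e.g. $W \leftarrow W - \eta\,\partial J/\partial W$, with learning rate $\eta$ = learn_rate = 0.01.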
# -*- coding: utf-8 -*-

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Hyperparameters
learn_rate = 0.01     # gradient-descent step size
train_epochs = 1000   # number of passes over the training set
step = 50             # log the cost every `step` epochs

# Training data (17 samples)
train_x = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                      7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                      2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = train_x.shape[0]
print(n_samples)  # 17

# Take a look at how the data points are distributed
plt.scatter(train_x, train_y)
plt.show()

# Input placeholders
X = tf.placeholder(tf.float32, name="X")
Y = tf.placeholder(tf.float32, name="Y")

# Model parameters, initialized with np.random.rand(),
# which draws a uniform random sample from [0, 1)
W = tf.Variable(np.random.rand(), name="weight")
b = tf.Variable(np.random.rand(), name="bias")

# Linear model: element-wise multiply, then add the bias
pred = tf.add(tf.multiply(X, W), b)

# Cost: (halved) mean squared error
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)

# Gradient descent drives the cost toward its minimum;
# learn_rate controls the size of each step
optimizer = tf.train.GradientDescentOptimizer(learn_rate).minimize(cost)

# Variable initializer
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(train_epochs):
        # One gradient-descent step per training sample
        for (x, y) in zip(train_x, train_y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        # Periodically report the cost and the current W, b
        if (epoch + 1) % step == 0:
            c = sess.run(cost, feed_dict={X: train_x, Y: train_y})
            print("epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))

    print("Optimization finished")
    training_cost = sess.run(cost, feed_dict={X: train_x, Y: train_y})
    print("Training cost=", training_cost,
          "W=", sess.run(W), "b=", sess.run(b), '\n')

    # W and b are now trained, so we have the fitted line.
    # 'ro': red circles for the data points; label feeds the legend.
    plt.plot(train_x, train_y, 'ro', label='Original data')
    plt.plot(train_x, sess.run(W) * train_x + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

    # Test data
    test_x = np.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
    test_y = np.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])
    print("Testing... (Mean square loss Comparison)")
    # Same cost expression, evaluated on the test set
    testing_cost = sess.run(
        tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_x.shape[0]),
        feed_dict={X: test_x, Y: test_y})
    print("Testing cost=", testing_cost)
    print("Absolute mean square loss difference:",
          abs(training_cost - testing_cost))

    plt.plot(test_x, test_y, 'bo', label='Testing data')
    plt.plot(train_x, sess.run(W) * train_x + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()
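As a sanity check on where gradient descent should be heading, here is a minimal sketch (my addition, not part of the original post) that solves the same least-squares problem in closed form with NumPy. After 1000 epochs the printed W and b are still drifting toward this optimum:

# Closed-form least squares on the same 17 training points
import numpy as np

train_x = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                      7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                      2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])

# Design matrix [x, 1]; lstsq returns the exact least-squares W and b,
# plus the sum of squared residuals
A = np.stack([train_x, np.ones_like(train_x)], axis=1)
(W, b), residuals, _, _ = np.linalg.lstsq(A, train_y, rcond=None)
print("closed-form W=", W, "b=", b)
print("closed-form cost=", residuals[0] / (2 * len(train_x)))  # same 1/(2n) scaling as above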

Printed output:
17
epoch: 0050 cost= 0.115379333 W= 0.3592451 b= 0.012641402
epoch: 0100 cost= 0.110939667 W= 0.35272354 b= 0.05955659
epoch: 0150 cost= 0.107012950 W= 0.34658992 b= 0.1036815
epoch: 0200 cost= 0.103539936 W= 0.34082115 b= 0.14518192
epoch: 0250 cost= 0.100468241 W= 0.3353954 b= 0.18421417
epoch: 0300 cost= 0.097751491 W= 0.3302924 b= 0.22092499
epoch: 0350 cost= 0.095348708 W= 0.32549286 b= 0.25545272
epoch: 0400 cost= 0.093223654 W= 0.32097882 b= 0.28792647
epoch: 0450 cost= 0.091344208 W= 0.31673303 b= 0.31846946
epoch: 0500 cost= 0.089682057 W= 0.3127401 b= 0.34719515
epoch: 0550 cost= 0.088212043 W= 0.30898446 b= 0.37421298
epoch: 0600 cost= 0.086911999 W= 0.3054522 b= 0.39962384
epoch: 0650 cost= 0.085762337 W= 0.30213004 b= 0.42352298
epoch: 0700 cost= 0.084745593 W= 0.29900542 b= 0.44600105
epoch: 0750 cost= 0.083846480 W= 0.2960666 b= 0.46714252
epoch: 0800 cost= 0.083051346 W= 0.29330263 b= 0.48702663
epoch: 0850 cost= 0.082348242 W= 0.29070315 b= 0.5057272
epoch: 0900 cost= 0.081726506 W= 0.2882582 b= 0.52331626
epoch: 0950 cost= 0.081176721 W= 0.28595862 b= 0.5398589
epoch: 1000 cost= 0.080690555 W= 0.28379577 b= 0.55541867
Optimization finished
Training cost= 0.080690555 W= 0.28379577 b= 0.55541867

Testing... (Mean square loss Comparison)
Testing cost= 0.07644593
Absolute mean square loss difference: 0.0042446256

Process finished with exit code 0
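One practical note: the listing uses TensorFlow 1.x APIs (tf.placeholder, tf.Session, tf.train.GradientDescentOptimizer) that are no longer in the default namespace in TensorFlow 2.x. Assuming you only have TF 2.x installed, the standard compatibility shim below should let the script run unchanged:

# Replace `import tensorflow as tf` at the top of the script with:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode so placeholder/Session work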