
TensorFlow (1.15.0): Recognizing Blurry Handwritten Digits in an Image

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Fetch the MNIST data
# tf.disable_v2_behavior()
mnist = input_data.read_data_sets("./MNIST_data", one_hot=True)
tf.compat.v1.disable_eager_execution()
tf.compat.v1.reset_default_graph()

Extracting ./MNIST_data\train-images-idx3-ubyte.gz
Extracting ./MNIST_data\train-labels-idx1-ubyte.gz
Extracting ./MNIST_data\t10k-images-idx3-ubyte.gz
Extracting ./MNIST_data\t10k-labels-idx1-ubyte.gz

# Build the model
x = tf.compat.v1.placeholder(tf.float32, [None, 784])  # MNIST data dimensions: 28*28 = 784
y = tf.compat.v1.placeholder(tf.float32, [None, 10])   # digits 0-9 => 10 classes
print('Input data:', mnist.train.images)
print('Input data shape:', mnist.train.images.shape)
Input data: [[0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 ...
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]]
Input data shape: (55000, 784)
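For reference, read_data_sets also carves out a validation split alongside train and test; with the default validation_size this gives 55,000 / 5,000 / 10,000 examples. A quick check (a minimal sketch using the mnist object loaded above):

# Inspect the three splits produced by read_data_sets (default validation_size=5000)
print(mnist.train.num_examples)       # 55000
print(mnist.validation.num_examples)  # 5000
print(mnist.test.num_examples)        # 10000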
# Define the weight and bias parameters
W = tf.compat.v1.Variable(tf.compat.v1.random_normal([784, 10]))
b = tf.compat.v1.Variable(tf.compat.v1.zeros([10]))
# Forward propagation
# MNIST recognition is a multi-class classification problem, so the forward pass uses the softmax function
pred = tf.compat.v1.nn.softmax(tf.compat.v1.matmul(x, W) + b)
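As a quick aside on why softmax is the right fit here: it maps the 10 raw scores to a probability distribution over the classes that sums to 1. A minimal NumPy sketch with hypothetical scores (not taken from the model):

import numpy as np
scores = np.array([2.0, 1.0, 0.1])               # hypothetical class scores
probs = np.exp(scores) / np.sum(np.exp(scores))  # softmax
print(probs, probs.sum())                        # ~[0.659 0.242 0.099], sums to 1.0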



# Define the backward pass (starting from the loss function)

# Define the loss function (cross-entropy)
cost = tf.reduce_mean(tf.compat.v1.reduce_sum(-y * tf.compat.v1.log(pred), 1))
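To see what this loss computes: with one-hot labels, -y * log(pred) keeps only the negative log-probability of the true class, reduce_sum over axis 1 gives one loss per sample, and reduce_mean averages over the batch. A small NumPy check with hypothetical values:

import numpy as np
y_true = np.array([[0., 1., 0.]])     # one-hot label: class 1
y_pred = np.array([[0.1, 0.7, 0.2]])  # hypothetical softmax output
loss = np.mean(np.sum(-y_true * np.log(y_pred), axis=1))
print(loss)                           # -log(0.7) ≈ 0.357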

# Set the learning rate
learning_rate = 0.01

# Define the optimizer (gradient descent)
# During training, first instantiate the GradientDescentOptimizer, which performs gradient updates at the given learning rate:
# optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
# Then call minimize() on it, passing in the loss; once the outer training loop starts, the optimizer nudges the parameters in the direction that minimizes the loss on every iteration.
# The differentiation and backpropagation for the whole process happen automatically inside the optimizer.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate).minimize(cost)
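For intuition, minimize() is roughly equivalent to computing the gradients yourself and applying W := W - learning_rate * dW. A hand-rolled sketch of the same update (illustrative only; the script actually uses the optimizer defined above):

# Manual equivalent of one gradient-descent step, for illustration
grad_W, grad_b = tf.compat.v1.gradients(cost, [W, b])
manual_step = tf.compat.v1.group(
    tf.compat.v1.assign_sub(W, learning_rate * grad_W),
    tf.compat.v1.assign_sub(b, learning_rate * grad_b))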

# Training parameters
training_epochs = 25
batch_size = 100
display_step = 1  # print the intermediate state after every epoch

# Model saving
saver = tf.compat.v1.train.Saver()
model_path = "log/521model/521model.cpkt"
# Launch the Session

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    for epoch in range(training_epochs):
        # Track the average loss across the epoch
        avg_loss = 0.
        total_batch = int(mnist.train.num_examples/batch_size)  # 550 batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, batch_cost = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            # Compute average loss
            avg_loss += batch_cost / total_batch
        # Display progress during training
        if (epoch+1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_loss))

    print("Finished!")
    # Test the model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Compute accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print('Test Accuracy=', sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
    # Save the model to disk
    save_path = saver.save(sess, model_path)
    print("Model saved in file: %s" % save_path)
Epoch: 0001 cost= 8.707644066
Epoch: 0002 cost= 4.318993017
Epoch: 0003 cost= 2.987426304
Epoch: 0004 cost= 2.345541562
Epoch: 0005 cost= 1.975136930
Epoch: 0006 cost= 1.735651165
Epoch: 0007 cost= 1.568852337
Epoch: 0008 cost= 1.445016423
Epoch: 0009 cost= 1.349485886
Epoch: 0010 cost= 1.272925699
Epoch: 0011 cost= 1.210046346
Epoch: 0012 cost= 1.157291133
Epoch: 0013 cost= 1.112357476
Epoch: 0014 cost= 1.073480450
Epoch: 0015 cost= 1.039473375
Epoch: 0016 cost= 1.009348478
Epoch: 0017 cost= 0.982443332
Epoch: 0018 cost= 0.958243066
Epoch: 0019 cost= 0.936229289
Epoch: 0020 cost= 0.916171892
Epoch: 0021 cost= 0.897779908
Epoch: 0022 cost= 0.880702540
Epoch: 0023 cost= 0.864933019
Epoch: 0024 cost= 0.850226059
Epoch: 0025 cost= 0.836456704
Finished!
Test Accuracy= 0.8312
Model saved in file: log/521model/521model.cpkt
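The test block above scores the model by comparing the index of the largest softmax output against the index of the 1 in the one-hot label, then averaging the boolean matches. A NumPy illustration of the same mechanics with hypothetical values:

import numpy as np
preds  = np.array([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])  # hypothetical model outputs
labels = np.array([[0., 1., 0.],   [0., 0., 1.]])      # one-hot labels
correct = np.equal(np.argmax(preds, 1), np.argmax(labels, 1))
print(correct.astype(np.float32).mean())               # 0.5: one of two correct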
mnist.test.images
array([[0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.],
       ...,
       [0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.]], dtype=float32)
mnist.test.labels
array([[0., 0., 0., ..., 1., 0., 0.],
       [0., 0., 1., ..., 0., 0., 0.],
       [0., 1., 0., ..., 0., 0., 0.],
       ...,
       [0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.],
       [0., 0., 0., ..., 0., 0., 0.]])
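Since the data was loaded with one_hot=True, each label row is a 10-vector with a single 1 at the digit's index; np.argmax recovers the plain digits. A quick sketch:

import numpy as np
digits = np.argmax(mnist.test.labels, axis=1)  # one-hot rows -> digit classes
print(digits[:10])                             # first ten test labels as plain digits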
import pylab
# Load the saved model
print("Starting 2nd session...")
with tf.compat.v1.Session() as sess2:
    # Initialize the variables
    sess2.run(tf.compat.v1.global_variables_initializer())
    # Restore the model's variables
    saver.restore(sess2, model_path)
    # Test the model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Compute accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print('Test Accuracy=', sess2.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
    output = tf.argmax(pred, 1)
    batch_xs, batch_ys = mnist.train.next_batch(2)  # grab the next two images from the training set; each call advances by batch_size images
    outputval, predv = sess2.run([output, pred], feed_dict={x: batch_xs})
    print(outputval, predv, batch_ys)

    # Display the images being predicted
    im = batch_xs[0]
    im = im.reshape(-1, 28)
    pylab.imshow(im)
    pylab.show()

    im = batch_xs[1]
    im = im.reshape(-1, 28)
    pylab.imshow(im)
    pylab.show()
Starting 2nd session...
INFO:tensorflow:Restoring parameters from log/521model/521model.cpkt
Test Accuracy= 0.8312
[1 0] [[7.0687776e-09 9.9622285e-01 7.3762261e-04 2.0796645e-03 9.3633993e-08
  5.9373881e-04 1.5098205e-06 1.4338111e-04 1.1239976e-04 1.0865904e-04]
 [1.0000000e+00 2.7836682e-22 1.0759993e-15 3.9601209e-14 5.1557825e-20
  2.7401588e-09 1.8195964e-13 3.6589468e-17 6.1134836e-16 6.5753848e-19]] [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
 [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]

[Image: the first plotted digit, predicted as 1]

[Image: the second plotted digit, predicted as 0]
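To run the restored model on your own (possibly blurry) digit image, the picture just needs to match the MNIST format: 28x28 grayscale, white digit on black background, flattened to 784 floats in [0, 1]. A hedged sketch using Pillow; my_digit.png is a hypothetical file, and the code assumes it runs inside the second session above, after saver.restore:

import numpy as np
from PIL import Image

img = Image.open("my_digit.png").convert("L").resize((28, 28))  # 28x28 grayscale
arr = 1.0 - np.asarray(img, dtype=np.float32) / 255.0           # invert: MNIST digits are white-on-black
arr = arr.reshape(1, 784)                                       # flatten to a single 784-vector
print(sess2.run(tf.argmax(pred, 1), feed_dict={x: arr}))        # predicted digit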