
MNIST Handwritten Digit Recognition with TensorFlow

Task Goals

  • Get familiar with the MNIST dataset
  • Build and test a model

Environment

OS: Windows 10
Python version: 3.6
IDE: PyCharm
TensorFlow version: 1.x


Getting to Know the MNIST Dataset

MNIST dataset: MNIST dataset download page
  The MNIST dataset comes from the US National Institute of Standards and Technology (NIST). The training set consists of digits handwritten by 250 different people, 50% of them high school students and 50% staff of the Census Bureau; the test set contains handwritten digits in the same proportions.
  The images are stored as raw bytes, so we need to read them into NumPy arrays before we can train and test the algorithm.
Reading the MNIST dataset

mnist = input_data.read_data_sets("mnist_data", one_hot=True)
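
As a quick sanity check (not part of the original code), you can print the shapes of the arrays that read_data_sets returns; with one_hot=True the standard split has 55000 training, 5000 validation, and 10000 test images, so the shapes come out as:

print(mnist.train.images.shape)       # (55000, 784)  flattened 28x28 images
print(mnist.train.labels.shape)       # (55000, 10)   one-hot labels
print(mnist.validation.images.shape)  # (5000, 784)
print(mnist.test.images.shape)        # (10000, 784)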

Building and Testing the Model

Code

def getMnistModel(savemodel,is_train):
    """
    :param savemodel:  模型儲存路徑
    :param is_train: true為訓練,false為測試模型
    :return:None
    """
    mnist = input_data.read_data_sets("mnist_data", one_hot=True)
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32,shape=[None,784]) # 784 = 28*28*1: flattened 28x28 single-channel images
        y_true = tf.placeholder(tf.float32,shape=[None,10]) # 10 classes, one-hot; float32 so the labels match the logits dtype in the loss

    with tf.variable_scope("conv1"):
        w_conv1 = tf.Variable(tf.random_normal([10,10,1,32])) # 10x10 kernels, 1 input channel, 32 filters -> 32 feature maps
        b_conv1 = tf.Variable(tf.constant(0.0,shape=[32]))
        x_reshape = tf.reshape(x,[-1,28,28,1]) # n images of 28x28 with a single channel
        conv1 = tf.nn.relu(tf.nn.conv2d(x_reshape,w_conv1,strides=[1,1,1,1],padding="SAME")+b_conv1) # the middle two 1s in strides are the step size in height/width; padding="SAME" pads the borders automatically
        pool1 = tf.nn.max_pool(conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding="SAME") # 2x2 pooling window (middle two values of ksize), moved 2 pixels per step; padding="SAME" zero-pads the border when needed
    with tf.variable_scope("conv2"):
        w_conv2 = tf.Variable(tf.random_normal([10,10,32,64]))
        b_conv2 = tf.Variable(tf.constant(0.0,shape=[64]))
        conv2 = tf.nn.relu(tf.nn.conv2d(pool1,w_conv2,strides=[1,1,1,1],padding="SAME")+b_conv2)
        pool2 = tf.nn.max_pool(conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding="SAME")
    with tf.variable_scope("fc"):
        w_fc = tf.Variable(tf.random_normal([7*7*64,10])) # after two rounds of conv + 2x2 pooling: 28 / 2 / 2 = 7, so the flattened feature map has 7*7*64 values
        b_fc = tf.Variable(tf.constant(0.0,shape=[10]))
        xfc_reshape = tf.reshape(pool2,[-1,7*7*64])
        y_predict = tf.matmul(xfc_reshape,w_fc)+b_fc
    with tf.variable_scope("loss"):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y_predict))
    with tf.variable_scope("optimizer"):
        train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
    with tf.variable_scope("acc"):
        equal_list = tf.equal(tf.argmax(y_true,1),tf.argmax(y_predict,1))
        accuracy = tf.reduce_mean(tf.cast(equal_list,tf.float32))

    # TensorBoard
    # tf.summary.histogram records the distribution of a tensor as a histogram
    # tf.summary.scalar records a scalar value
    # Summary: everything we want to display in TensorBoard
    tf.summary.histogram("weight",w_fc)
    tf.summary.histogram("bias",b_fc)
    tf.summary.scalar("loss",loss)
    tf.summary.scalar("acc",accuracy)
    merged = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        filewriter = tf.summary.FileWriter("tfboard",graph=sess.graph)
        if is_train:
            for i in range(500):
                x_train,y_train = mnist.train.next_batch(100)
                sess.run(train_op,feed_dict={x:x_train,y_true:y_train})
                summary = sess.run(merged,feed_dict={x:x_train,y_true:y_train})
                filewriter.add_summary(summary,i)
                print("第%d訓練,準確率為%f"%(i+1,sess.run(accuracy,feed_dict={x:x_train,y_true:y_train})))
            saver.save(sess,savemodel)
        else:
            count = 0.0
            epochs = 100
            saver.restore(sess, savemodel)
            for i in range(epochs):
                x_test, y_test = mnist.test.next_batch(1)  # evaluate on the test set, one image at a time
                pred = sess.run(y_predict, feed_dict={x: x_test})
                true_label = y_test.argmax(axis=1)[0]   # y_test is a one-hot NumPy array
                pred_label = pred.argmax(axis=1)[0]
                print("Image %d, true label: %d, predicted label: %d" % (i + 1, true_label, pred_label))
                if true_label == pred_label:
                    count = count + 1
            print("Accuracy: %.2f%%" % (count * 100 / epochs))

Viewing TensorBoard

   TensorBoard is TensorFlow's visualization tool. It helps you make sense of the complex, hard-to-follow computations that arise when training large neural networks, and it can display the images, scalar curves, and network structure recorded during training; a stripped-down logging sketch follows the steps below.

  • Copy the directory where your training logs were saved
  • Open a terminal
    Run the command: tensorboard --logdir=D:\GongCheng\mnist\tfboard
    i.e. tensorboard --logdir= followed by the path to the log folder
  • Open the address printed by the command in a browser
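
For reference, the minimal logging pattern that produces the event files TensorBoard reads from the tfboard directory is sketched here. It is just a stripped-down version of what the training code above already does, with a dummy variable standing in for the real loss:

import tensorflow as tf

loss = tf.Variable(1.0)             # stand-in for the real loss tensor
tf.summary.scalar("loss", loss)     # register a scalar summary
merged = tf.summary.merge_all()     # merge all registered summary ops into one

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter("tfboard", graph=sess.graph)  # event files are written here
    for step in range(10):
        summary = sess.run(merged)
        writer.add_summary(summary, step)  # one data point per step
    writer.close()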

Complete code

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def getMnistModel(savemodel,is_train):
    """
    :param savemodel:  模型儲存路徑
    :param is_train: True為訓練,False為測試模型
    :return:None
    """
    mnist = input_data.read_data_sets("mnist_data", one_hot=True)
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32,shape=[None,784]) # 784 = 28*28*1: flattened 28x28 single-channel images
        y_true = tf.placeholder(tf.float32,shape=[None,10]) # 10 classes, one-hot; float32 so the labels match the logits dtype in the loss

    with tf.variable_scope("conv1"):
        w_conv1 = tf.Variable(tf.random_normal([10,10,1,32])) # 10x10 kernels, 1 input channel, 32 filters -> 32 feature maps
        b_conv1 = tf.Variable(tf.constant(0.0,shape=[32]))
        x_reshape = tf.reshape(x,[-1,28,28,1]) # n images of 28x28 with a single channel
        conv1 = tf.nn.relu(tf.nn.conv2d(x_reshape,w_conv1,strides=[1,1,1,1],padding="SAME")+b_conv1) # the middle two 1s in strides are the step size in height/width; padding="SAME" pads the borders automatically
        pool1 = tf.nn.max_pool(conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding="SAME") # 2x2 pooling window (middle two values of ksize), moved 2 pixels per step; padding="SAME" zero-pads the border when needed
    with tf.variable_scope("conv2"):
        w_conv2 = tf.Variable(tf.random_normal([10,10,32,64]))
        b_conv2 = tf.Variable(tf.constant(0.0,shape=[64]))
        conv2 = tf.nn.relu(tf.nn.conv2d(pool1,w_conv2,strides=[1,1,1,1],padding="SAME")+b_conv2)
        pool2 = tf.nn.max_pool(conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding="SAME")
    with tf.variable_scope("fc"):
        w_fc = tf.Variable(tf.random_normal([7*7*64,10])) # after two rounds of conv + 2x2 pooling: 28 / 2 / 2 = 7, so the flattened feature map has 7*7*64 values
        b_fc = tf.Variable(tf.constant(0.0,shape=[10]))
        xfc_reshape = tf.reshape(pool2,[-1,7*7*64])
        y_predict = tf.matmul(xfc_reshape,w_fc)+b_fc
    with tf.variable_scope("loss"):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y_predict))
    with tf.variable_scope("optimizer"):
        train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
    with tf.variable_scope("acc"):
        equal_list = tf.equal(tf.argmax(y_true,1),tf.argmax(y_predict,1))
        accuracy = tf.reduce_mean(tf.cast(equal_list,tf.float32))

    # TensorBoard
    # tf.summary.histogram records the distribution of a tensor as a histogram
    # tf.summary.scalar records a scalar value
    # Summary: everything we want to display in TensorBoard
    tf.summary.histogram("weight",w_fc)
    tf.summary.histogram("bias",b_fc)
    tf.summary.scalar("loss",loss)
    tf.summary.scalar("acc",accuracy)
    merged = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        filewriter = tf.summary.FileWriter("tfboard",graph=sess.graph)
        if is_train:
            for i in range(500):
                x_train,y_train = mnist.train.next_batch(100)
                sess.run(train_op,feed_dict={x:x_train,y_true:y_train})
                summary = sess.run(merged,feed_dict={x:x_train,y_true:y_train})
                filewriter.add_summary(summary,i)
                print("第%d訓練,準確率為%f"%(i+1,sess.run(accuracy,feed_dict={x:x_train,y_true:y_train})))
            saver.save(sess,savemodel)
        else:
            count = 0.0
            epochs = 100
            saver.restore(sess, savemodel)
            for i in range(epochs):
                x_test, y_test = mnist.test.next_batch(1)  # evaluate on the test set, one image at a time
                pred = sess.run(y_predict, feed_dict={x: x_test})
                true_label = y_test.argmax(axis=1)[0]   # y_test is a one-hot NumPy array
                pred_label = pred.argmax(axis=1)[0]
                print("Image %d, true label: %d, predicted label: %d" % (i + 1, true_label, pred_label))
                if true_label == pred_label:
                    count = count + 1
            print("Accuracy: %.2f%%" % (count * 100 / epochs))
if __name__ == '__main__':
    modelPath = "model/mnist_model"
    getMnistModel(modelPath,True)
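
Running the script as-is trains for 500 steps and saves the checkpoint to model/mnist_model. To evaluate the saved model afterwards, run the script again with the second argument set to False:

    getMnistModel(modelPath, False)  # restores model/mnist_model and reports accuracy on 100 test images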