
[Peking University] 9 TensorFlow 1.x: Implementing a Custom MNIST Dataset



1 Testing the Trained Network on Arbitrary Images

The input for testing is a digit image with black digits on a white background. Two preprocessing steps are required before testing:
(1) Resize the image matrix to 28*28 to match the network's input.
(2) Convert the image to a black-and-white image with white digits on a black background.

mnist_app.py file

import tensorflow as tf
import numpy as np
from PIL import Image
import mnist_backward
import mnist_forward

def restore_model(testPicArr):
    # Rebuild the previously defined computation graph with tf.Graph()
    with tf.Graph().as_default() as tg:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        # Call the forward-propagation function forward() defined in mnist_forward
        y = mnist_forward.forward(x, None)
        # The predicted value is the index with the highest probability
        preValue = tf.argmax(y, 1)
        # Instantiate a saver that restores the moving-average shadow variables
        variable_averages = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            # Load the most recently saved model from the checkpoint
            ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                preValue = sess.run(preValue, feed_dict={x: testPicArr})
                return preValue
            else:
                print("No checkpoint file found")
                return -1

# Preprocessing: resize, convert to grayscale, binarize
def pre_pic(picName):
    img = Image.open(picName)
    reIm = img.resize((28, 28), Image.ANTIALIAS)
    # Convert the image to grayscale
    im_arr = np.array(reIm.convert('L'))
    # Binarize the image to filter out noise (the threshold can be tuned during debugging)
    threshold = 50
    # The model expects white digits on a black background, but the input is black on white,
    # so each pixel is replaced by 255 minus its original value to get the inverted image.
    for i in range(28):
        for j in range(28):
            im_arr[i][j] = 255 - im_arr[i][j]
            if (im_arr[i][j] < threshold):
                im_arr[i][j] = 0
            else:
                im_arr[i][j] = 255
    # Reshape to 1 row of 784 values and cast to float32
    # (the network expects pixel values as floats between 0 and 1)
    nm_arr = im_arr.reshape([1, 784])
    nm_arr = nm_arr.astype(np.float32)
    # Scale the values from 0-255 down to floats between 0 and 1
    img_ready = np.multiply(nm_arr, 1.0 / 255.0)
    return img_ready

def application():
    # Number of images to recognize
    testNum = int(input("input the number of test pictures:"))
    for i in range(testNum):
        # Path and file name of the image to recognize
        testPic = input("the path of test picture:")
        # Preprocess the image
        testPicArr = pre_pic(testPic)
        # Get the prediction
        preValue = restore_model(testPicArr)
        print("The prediction number is:", preValue)

def main():
    application()

if __name__ == '__main__':
    main()
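A side note on the per-pixel loop in pre_pic: the inversion and thresholding can also be written with vectorized NumPy operations. The sketch below is not part of the course code; it assumes the same 28*28 grayscale input and the same threshold of 50, and the name pre_pic_vectorized is made up for illustration.

import numpy as np
from PIL import Image

def pre_pic_vectorized(picName, threshold=50):
    # Resize and convert to grayscale, as in pre_pic
    reIm = Image.open(picName).resize((28, 28), Image.ANTIALIAS)
    # Invert so the digits become white on a black background
    im_arr = 255 - np.array(reIm.convert('L'))
    # Binarize with the same threshold used in pre_pic
    im_arr = np.where(im_arr < threshold, 0, 255)
    # Flatten to 1x784 and scale to floats between 0 and 1
    return (im_arr.reshape([1, 784]) / 255.0).astype(np.float32)

It produces the same 1x784 float array as pre_pic, so it could be swapped in without changing restore_model.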

2 Creating the Dataset

2.1 Overview

(1) The dataset can be written as a binary tfrecords file. Packing the images and labels into this format first and then reading the data through tfrecords improves memory utilization.
(2) The training data is stored with the tf.train.Example protocol, which represents each feature of the training data as key-value pairs.
(3) SerializeToString() serializes the data into a string for storage; a minimal round-trip sketch follows.
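For intuition about items (2) and (3), here is a minimal standalone round trip. It is not part of the course code and the feature values are made up for illustration: it builds one Example, serializes it exactly as writer.write() would store it, and parses it back with the protobuf API.

import tensorflow as tf

# Build an Example with a bytes feature and a one-hot int64 feature (illustrative values)
example = tf.train.Example(features=tf.train.Features(feature={
    'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[b'\x00\x01\x02'])),
    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]))
}))

# Serialize to a byte string; this is what gets written to the tfrecords file
serialized = example.SerializeToString()

# Parse the byte string back into an Example and read the features out
parsed = tf.train.Example.FromString(serialized)
print(parsed.features.feature['label'].int64_list.value)    # [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
print(parsed.features.feature['img_raw'].bytes_list.value)  # [b'\x00\x01\x02']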

2.2 Generating the tfrecords File

writer = tf.python_io.TFRecordWriter(tfRecordName)
        # Wrap each image and its label in an Example
        example = tf.train.Example(features=tf.train.Features(feature={
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),  # img_raw holds the raw image bytes
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=labels))  # labels is the image's one-hot label
        }))
        # Serialize the Example and write it to the file
        writer.write(example.SerializeToString())
    # Close the writer
    writer.close()

2.3 Parsing the tfrecords File

# string_input_producer builds a first-in, first-out filename queue that the file reader pulls from
    filename_queue = tf.train.string_input_producer([tfRecord_path], shuffle=True)
    # Create a reader
    reader = tf.TFRecordReader()
    # Each sample read is stored in serialized_example for deserialization; the label and image keys
    # must match the keys used when the tfrecords file was written, and the label length gives the number of classes.
    _, serialized_example = reader.read(filename_queue)
    # Parse the tf.train.Example protocol buffer into tensors
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([10], tf.int64),
                                           'img_raw': tf.FixedLenFeature([], tf.string)
                                       })
    # Decode the img_raw string into 8-bit unsigned integers
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    # Reshape to a single row of 784 values
    img.set_shape([784])
    # Scale the pixel values to floats between 0 and 1
    img = tf.cast(img, tf.float32) * (1. / 255)
    label = tf.cast(features['label'], tf.float32)

2.4 Complete Code for Generating the Custom Dataset

Each line of the label file has the format: image file name + space + label.
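For illustration only, the file name in the sketch below is hypothetical and not taken from the dataset; it shows how write_tfRecord turns one such line into an image-path component and a one-hot label.

# Hypothetical label-file line: "<image file name> <digit label>"
line = "img_12.jpg 7"          # illustrative example, not an actual file from the dataset
value = line.split()           # -> ['img_12.jpg', '7']
labels = [0] * 10
labels[int(value[1])] = 1      # one-hot label that will be written into the tfrecords file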

mnist_generateds.py file

#mnist_generateds.py
# coding:utf-8
import tensorflow as tf
import numpy as np
from PIL import Image
import os
image_train_path = './mnist_data_jpg/mnist_train_jpg_60000/'
label_train_path = './mnist_data_jpg/mnist_train_jpg_60000.txt'
tfRecord_train = './data/mnist_train.tfrecords'
image_test_path = './mnist_data_jpg/mnist_test_jpg_10000/'
label_test_path = './mnist_data_jpg/mnist_test_jpg_10000.txt'
tfRecord_test = './data/mnist_test.tfrecords'
data_path = './data'
resize_height = 28
resize_width = 28
# Write the tfrecords file
def write_tfRecord(tfRecordName, image_path, label_path):
    # Create a writer
    writer = tf.python_io.TFRecordWriter(tfRecordName)
    num_pic = 0
    f = open(label_path, 'r')
    contents = f.readlines()
    f.close()
    # Loop over every image and its label
    for content in contents:
        value = content.split()
        img_path = image_path + value[0]
        img = Image.open(img_path)
        img_raw = img.tobytes()  # convert the image to raw binary data
        labels = [0] * 10
        labels[int(value[1])] = 1
        # Wrap each image and its label in an Example
        example = tf.train.Example(features=tf.train.Features(feature={
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=labels))
        }))
        # Serialize the Example and write it to the file
        writer.write(example.SerializeToString())
        num_pic += 1  # increment the counter after each image is written
        print("the number of picture:", num_pic)
    # Close the writer
    writer.close()
    print("write tfrecord successful")
def generate_tfRecord():
    isExists = os.path.exists(data_path)
    if not isExists:
        os.makedirs(data_path)
        print('The directory was created successfully')
    else:
        print('directory already exists')
    write_tfRecord(tfRecord_train, image_train_path, label_train_path)
    write_tfRecord(tfRecord_test, image_test_path, label_test_path)
# Parse the tfrecords file
def read_tfRecord(tfRecord_path):
    # string_input_producer builds a first-in, first-out filename queue that the file reader pulls from
    filename_queue = tf.train.string_input_producer([tfRecord_path], shuffle=True)
    # Create a reader
    reader = tf.TFRecordReader()
    # Each sample read is stored in serialized_example for deserialization; the label and image keys
    # must match the keys used when the tfrecords file was written, and the label length gives the number of classes.
    _, serialized_example = reader.read(filename_queue)
    # Parse the tf.train.Example protocol buffer into tensors
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([10], tf.int64),  # 10 is the number of label classes
                                           'img_raw': tf.FixedLenFeature([], tf.string)
                                       })
    # Decode the img_raw string into 8-bit unsigned integers
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    # Reshape to a single row of 784 values
    img.set_shape([784])
    # Scale the pixel values to floats between 0 and 1
    img = tf.cast(img, tf.float32) * (1. / 255)
    label = tf.cast(features['label'], tf.float32)
    # Return the image and the label
    return img, label
def get_tfrecord(num, isTrain=True):
    if isTrain:
        tfRecord_path = tfRecord_train
    else:
        tfRecord_path = tfRecord_test
    img, label = read_tfRecord(tfRecord_path)
    # Randomly read and shuffle one batch of data
    img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                    batch_size=num,
                                                    num_threads=2,  # number of reader threads
                                                    capacity=1000,
                                                    min_after_dequeue=700)
    # The returned images and labels are a randomly drawn batch of batch_size samples
    return img_batch, label_batch
def main():
    generate_tfRecord()
if __name__ == '__main__':
    main()
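After generate_tfRecord() has produced the two files, a quick sanity check can confirm that batches come back with the expected shapes. This is a minimal sketch, not part of the course code, and it assumes ./data/mnist_train.tfrecords already exists.

# check_tfrecord.py (illustrative helper, not in the original project)
import tensorflow as tf
import mnist_generateds

def check_tfrecord():
    # Build the batch-reading ops for a small batch of 5 samples
    img_batch, label_batch = mnist_generateds.get_tfrecord(5, isTrain=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Start the input-queue threads, pull one batch, then shut the threads down
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        xs, ys = sess.run([img_batch, label_batch])
        print(xs.shape, ys.shape)  # expected: (5, 784) (5, 10)
        coord.request_stop()
        coord.join(threads)

if __name__ == '__main__':
    check_tfrecord()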

In the backward-propagation script mnist_backward.py and the test script mnist_test.py, replace the interface that supplies the images and labels, and use a thread coordinator as shown in the skeleton below:

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# the batch fetching of images and labels goes here
coord.request_stop()
coord.join(threads)

mnist_backward.py file

The thread-coordinator code is enclosed between the ################################################ marker lines.

#mnist_backward.py
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os
import mnist_generateds  # 1
BATCH_SIZE = 200
LEARNING_RATE_BASE = 0.1
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.0001
STEPS = 50000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "mnist_model"
# Manually specify the total number of training samples: 60,000
train_num_examples = 60000  # size of the training set
def backward():
    x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
    y = mnist_forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        train_num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')
    saver = tf.train.Saver()
    # Fetch batch_size images and labels at a time
    ################################################
    img_batch, label_batch = mnist_generateds.get_tfrecord(BATCH_SIZE, isTrain=True)  # 3
    ################################################
    
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        ################################################
        # Use multiple threads to speed up the batch fetching of images and labels
        coord = tf.train.Coordinator()  # 4
        # Start the input-queue threads
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # 5
        ################################################
        
        for i in range(STEPS):
            ################################################
            # Fetch a batch of images and labels
            xs, ys = sess.run([img_batch, label_batch])  # 6
            ################################################
            
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
        ################################################
        # Shut down the thread coordinator
        coord.request_stop()  # 7
        coord.join(threads)  # 8
        ################################################
def main():
    backward()  # 9
if __name__ == '__main__':
    main()

mnist_test.py file

The thread-coordinator code is enclosed between the ################################################ marker lines.

import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward
import mnist_generateds
TEST_INTERVAL_SECS = 5
# Manually specify the total number of test samples: 10,000
TEST_NUM = 10000  # 1
def test():
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
        y = mnist_forward.forward(x, None)
        ema = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        ################################################
        # Use the get_tfrecord function to read the full test set of 10,000 images
        img_batch, label_batch = mnist_generateds.get_tfrecord(TEST_NUM, isTrain=False)  # 2
        ################################################
        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(mnist_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    ################################################
                    # Use multiple threads to speed up the batch fetching of images and labels
                    coord = tf.train.Coordinator()  # 3
                    # Start the input-queue threads
                    threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # 4
                    
                    # Fetch a batch of images and labels
                    xs, ys = sess.run([img_batch, label_batch])  # 5
                    ################################################
                    accuracy_score = sess.run(accuracy, feed_dict={x: xs, y_: ys})
                    print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
                    ################################################
                    # Shut down the thread coordinator
                    coord.request_stop()  # 6
                    coord.join(threads)  # 7
                    ################################################
                    
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(TEST_INTERVAL_SECS)
def main():
    test()  # 8
if __name__ == '__main__':
    main()