A Simple Fully Connected Neural Network for MNIST
阿新 · Published 2019-02-18
Handwritten digit recognition with a fully connected neural network. It performs worse than a CNN; this is just for practice.
1. mnist_inference.py
#coding:utf-8
import tensorflow as tf

# Parameters describing the network architecture.
INPUT_NODE = 784     # each 28x28 image is flattened into a 784-dim vector
OUTPUT_NODE = 10     # one logit per digit class
LAYER1_NODE = 500    # size of the single hidden layer

def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weight", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    # When a regularizer is supplied, add the L2 penalty for these weights
    # to the "losses" collection; train.py sums that collection into the loss.
    if regularizer is not None:
        tf.add_to_collection("losses", regularizer(weights))
    return weights

def inference(input_tensor, regularizer):
    # Hidden layer: fully connected with ReLU activation.
    with tf.variable_scope("layer1"):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    # Output layer: returns raw logits; softmax is applied inside the loss op.
    with tf.variable_scope("layer2"):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2
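The forward pass can be sanity-checked on its own before wiring up training. A minimal sketch, assuming mnist_inference.py is on the import path (the standalone shape check here is illustrative, not part of the original post):

#coding:utf-8
import tensorflow as tf
import mnist_inference

# Build the graph once and confirm the logits have the expected shape.
x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
logits = mnist_inference.inference(x, None)  # no regularizer needed for a shape check
print(logits.shape)  # expect (?, 10)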
2. train.py
#coding:utf-8
import tensorflow as tf
import os
from tensorflow.examples.tutorials.mnist import input_data
# Load the forward-propagation code defined above.
import mnist_inference

# Training hyperparameters.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8      # base learning rate for exponential decay
LEARNING_RATE_DECAY = 0.99    # learning-rate decay rate
REGULARIZATION_RATE = 0.0001  # weight of the L2 regularization term
TRAIN_STEP = 30000
MOVING_AVERAGE_DECAY = 0.99   # moving-average decay rate
MODEL_SAVE_PATH = "./model"
MODEL_NAME = "model.ckpt"
def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-input")
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    # global_step is marked non-trainable: gradient descent never touches it;
    # the optimizer only increments it as a step counter.
    global_step = tf.Variable(0, trainable=False)

    # Maintain moving averages of all trainable variables (the weights and
    # biases), driven by the decay rate and the current step count.
    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_average_op = variable_average.apply(tf.trainable_variables())

    # Loss: mean cross entropy plus the L2 terms that mnist_inference
    # placed in the "losses" collection.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))

    # Decay the learning rate exponentially, once per epoch:
    # lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps)
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)

    # Define the training step; minimize() increments global_step by 1 each run.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Group the ops so a single run both updates the parameters through
    # backpropagation and refreshes their moving-average shadow copies.
    with tf.control_dependencies([train_step, variable_average_op]):
        train_op = tf.no_op(name="train")
    # Saver for writing checkpoints.
    saver = tf.train.Saver()

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAIN_STEP):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            # Report the loss and checkpoint the model every 1000 steps.
            if i % 1000 == 0:
                print("step %d, loss %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    train(mnist)

if __name__ == "__main__":
    tf.app.run()
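Because the Saver checkpoints the variables together with their moving-average shadows, evaluation can restore the averaged values in place of the raw weights. A minimal evaluation sketch under that assumption; the file name mnist_eval.py and the validation-accuracy computation are illustrative additions, not part of the original post:

#coding:utf-8
# mnist_eval.py (hypothetical companion script)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import train

def evaluate(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-input")
    y = mnist_inference.inference(x, None)  # no regularization at eval time
    correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    # Map each variable to its moving-average shadow so the averaged
    # weights are restored instead of the raw ones.
    variable_average = tf.train.ExponentialMovingAverage(train.MOVING_AVERAGE_DECAY)
    saver = tf.train.Saver(variable_average.variables_to_restore())
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            acc = sess.run(accuracy, feed_dict={x: mnist.validation.images, y_: mnist.validation.labels})
            print("validation accuracy %g" % acc)

def main(argv=None):
    evaluate(input_data.read_data_sets("/tmp/data", one_hot=True))

if __name__ == "__main__":
    tf.app.run()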