The difference between "SAME" and "VALID" padding in TensorFlow
阿新 · Published 2018-12-18
With VALID padding, the output size is: new_height = new_width = ⌈(W − F + 1) / S⌉
With SAME padding, the output size is: new_height = new_width = ⌈W / S⌉
where W is the input size, F is the filter size, S is the stride, and ⌈ ⌉ denotes the ceiling function.
It follows that with SAME padding, a convolution or pooling layer preserves the image size only when the stride S is 1 (with VALID padding, the filter size F would also have to be 1).
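As a quick check of these formulas, here is a minimal sketch (assuming the TensorFlow 1.x API that the full example below also uses). With W = 28, F = 3, and S = 2, VALID gives ⌈(28 − 3 + 1) / 2⌉ = 13 while SAME gives ⌈28 / 2⌉ = 14, and the shapes can be read off the graph without even running a session:

import tensorflow as tf

# A dummy 28x28 single-channel input: [batch, height, width, channels]
x = tf.placeholder(tf.float32, [1, 28, 28, 1])
# A single 3x3 filter: [filter_height, filter_width, in_channels, out_channels]
w = tf.Variable(tf.random_normal([3, 3, 1, 1]))

# The same input, filter, and stride (S = 2) under both padding modes
conv_valid = tf.nn.conv2d(x, w, strides=[1, 2, 2, 1], padding='VALID')
conv_same = tf.nn.conv2d(x, w, strides=[1, 2, 2, 1], padding='SAME')
pool_same = tf.nn.max_pool(x, ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1], padding='SAME')

print(conv_valid.get_shape())  # (1, 13, 13, 1): ceil((28 - 3 + 1) / 2) = 13
print(conv_same.get_shape())   # (1, 14, 14, 1): ceil(28 / 2) = 14
print(pool_same.get_shape())   # (1, 14, 14, 1): pooling follows the same rule

The same rule explains the shape comments in the AlexNet-style MNIST example below: three stride-2 SAME pooling layers take the 28x28 input to 14x14, 7x7, and finally 4x4 (⌈7/2⌉ = 4), which is why the first fully connected weight matrix has 4*4*256 input units.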
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Training hyperparameters
learning_rate = 0.001
training_iters = 130000
batch_size = 64
display_step = 20

# Network parameters
n_input = 784    # input dimension (28x28 flattened)
n_classes = 10   # number of label classes
dropout = 0.8    # keep probability for dropout

# Placeholder inputs
with tf.variable_scope('Input'):
    x = tf.placeholder(tf.float32, [None, n_input])
    y = tf.placeholder(tf.float32, [None, n_classes])
    keep_prob = tf.placeholder(tf.float32)

# Convolution (stride 1, so SAME padding keeps the spatial size)
def conv2d(name, l_input, w, b):
    return tf.nn.relu(tf.nn.bias_add(
        tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME'), b),
        name=name)

# Max pooling (SAME padding, so the output size is ceil(W / k2))
def max_pool(name, l_input, k1, k2):
    return tf.nn.max_pool(l_input, ksize=[1, k1, k1, 1],
                          strides=[1, k2, k2, 1], padding='SAME', name=name)

# Local response normalization
def norm(name, l_input, lsize=4):
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0,
                     beta=0.75, name=name)

# Build the whole network
def alex_net(_X, _weights, _biases, _dropout):
    # Reshape the flat input vector into an image tensor
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])

    # First convolutional block
    with tf.variable_scope('Conv1'):
        conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])     # 28x28
        pool1 = max_pool('pool1', conv1, k1=3, k2=2)                     # 14x14
        norm1 = norm('norm1', pool1, lsize=4)
        norm1 = tf.nn.dropout(norm1, _dropout)
        tf.summary.histogram('conv', conv1)
        tf.summary.histogram('norm', norm1)

    # Second convolutional block
    with tf.variable_scope('Conv2'):
        conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])  # 14x14
        pool2 = max_pool('pool2', conv2, k1=3, k2=2)                     # 7x7
        norm2 = norm('norm2', pool2, lsize=4)
        norm2 = tf.nn.dropout(norm2, _dropout)
        tf.summary.histogram('conv', conv2)
        tf.summary.histogram('norm', norm2)

    # Third convolutional block
    with tf.variable_scope('Conv3'):
        conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])  # 7x7
        pool3 = max_pool('pool3', conv3, k1=3, k2=2)                     # 4x4
        norm3 = norm('norm3', pool3, lsize=4)
        norm3 = tf.nn.dropout(norm3, _dropout)
        tf.summary.histogram('conv', conv3)
        tf.summary.histogram('norm', norm3)

    # Fully connected layers: flatten the feature maps into a vector first
    with tf.variable_scope('FC'):
        dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]])
        dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')
        dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2')
        tf.summary.histogram('fc_out', dense2)

    # Output layer
    with tf.variable_scope('Out'):
        out = tf.matmul(dense2, _weights['out']) + _biases['out']
        tf.summary.histogram('pred', out)
    return out

# All network parameters
weights = {
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
    'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
    'wd1': tf.Variable(tf.random_normal([4 * 4 * 256, 1024])),
    'wd2': tf.Variable(tf.random_normal([1024, 1024])),
    'out': tf.Variable(tf.random_normal([1024, n_classes]))
}
biases = {
    'bc1': tf.Variable(tf.random_normal([64])),
    'bc2': tf.Variable(tf.random_normal([128])),
    'bc3': tf.Variable(tf.random_normal([256])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'bd2': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# Build the model
pred = alex_net(x, weights, biases, keep_prob)

# Loss and training step
with tf.variable_scope('Cost'):
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    tf.summary.scalar('cost', cost)
with tf.variable_scope('Train'):
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluation
with tf.variable_scope('Accuracy'):
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    tf.summary.scalar('Acc', accuracy)

# Initialize all variables (tf.initialize_all_variables is deprecated)
init = tf.global_variables_initializer()

# Launch a training session
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('./log', sess.graph)  # write the graph to file
    merge_op = tf.summary.merge_all()                    # merge all summaries
    step = 1
    # Keep training until the maximum number of iterations is reached
    while step * batch_size < training_iters:
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)  # fetch a batch
        _, result = sess.run([optimizer, merge_op],
                             {x: batch_xs, y: batch_ys, keep_prob: dropout})
        writer.add_summary(result, step)  # record summaries to file
        if step % display_step == 0:
            # Evaluate accuracy and loss on the current batch (dropout disabled)
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print("Iter " + str(step * batch_size) +
                  ", Minibatch Loss= " + "{:.6f}".format(loss) +
                  ", Training Accuracy= " + "{:.5f}".format(acc))
        step += 1
    print("Optimization Finished!")
    # Test accuracy on the first 256 test images
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
                                        y: mnist.test.labels[:256],
                                        keep_prob: 1.}))
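Since the script writes the merged summaries and the graph to ./log, the training curves and the scoped graph (Input, Conv1 through Conv3, FC, Out) can be inspected by pointing TensorBoard at that directory, e.g. `tensorboard --logdir=./log`.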