Training Your Own Dataset with TensorFlow (Part 2)
阿新 • Published: 2018-12-15
Training your own dataset with TensorFlow: defining the neural network

The previous post, Training your own dataset with TensorFlow (Part 1), covered how to build your own dataset. The next step is to define the forward-propagation process, that is, the network itself. This post uses a classic convolutional architecture: two convolutional layers, two max-pooling layers, and two fully connected layers, with a softmax applied at the end.
import tensorflow as tf

# Configuration parameters
# Image size
IMAGE_SIZE = 128
NUM_CHANNELS = 3
NUM_LABELS = 2
# Filter size and depth of the first convolutional layer
CONV1_DEEP = 64
CONV1_SIZE = 5
# Filter size and depth of the second convolutional layer
CONV2_DEEP = 128
CONV2_SIZE = 5
# Number of nodes in the fully connected layer
FC_SIZE = 512
def get_Weight(shape, regularizer_rate=None):  # create a weight variable; pass a regularization rate to enable L2 regularization (defaults to None)
    Weight = tf.Variable(tf.truncated_normal(shape=shape, stddev=0.1), dtype=tf.float32)  # random initialization, as recommended by the TensorFlow API
    if regularizer_rate is not None:
        regularizer = tf.contrib.layers.l2_regularizer(regularizer_rate)
        tf.add_to_collection('losses', regularizer(Weight))
    return Weight

def get_biase(shape):  # create a bias variable
    biase = tf.Variable(tf.constant(value=0.1, shape=shape), dtype=tf.float32)  # constant initialization at 0.1, as recommended by the TensorFlow API
    return biase

def create_conv2d(x, w):  # convolutional layer
    conv2d = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')  # stride 1, SAME padding
    return conv2d

def max_pooling(x):  # max-pooling layer
    pool = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # ksize 2, stride 2, SAME padding
    return pool

def create_fc(x, w, b):  # fully connected layer
    fc = tf.matmul(x, w) + b
    return fc
# Define the forward-propagation process.
# A new parameter `train` is added to distinguish the training phase from the test phase.
# Dropout is used in this network; it further improves robustness and helps prevent overfitting.
# Dropout is applied only during training.
def inference(input_tensor, train, regularizer_rate):
    with tf.variable_scope('layer1-conv1'):
        conv1_Weights = get_Weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP])  # 5*5*64
        conv1_baises = get_biase([CONV1_DEEP])
        conv1 = tf.nn.bias_add(create_conv2d(input_tensor, conv1_Weights), conv1_baises)
        conv1 = tf.nn.relu(conv1)  # ReLU activation

    with tf.name_scope('layer2-pool1'):  # output: 64*64*64
        pool1 = max_pooling(conv1)

    with tf.variable_scope('layer3-conv2'):
        conv2_Weights = get_Weight([CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP])  # 5*5*128
        conv2_biases = get_biase([CONV2_DEEP])
        conv2 = tf.nn.bias_add(create_conv2d(pool1, conv2_Weights), conv2_biases)
        conv2 = tf.nn.relu(conv2)

    with tf.name_scope('layer4-pool2'):  # output: 32*32*128
        pool2 = max_pooling(conv2)

    pool_shape = pool2.get_shape().as_list()
    # pool_shape is [batch_size, 32, 32, 128].
    # The length of the flattened vector is the product of the feature map's height, width and depth.
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    # Use tf.reshape to flatten the output of layer 4 into one vector per example in the batch.
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])
    # Declare the variables of the fifth layer (fully connected) and implement its forward pass.
    with tf.variable_scope('layer5-fc1'):
        fc1_Weights = get_Weight([nodes, FC_SIZE], regularizer_rate)
        fc1_biases = get_biase([FC_SIZE])
        fc1 = tf.nn.relu(create_fc(reshaped, fc1_Weights, fc1_biases))
        # Apply dropout during training to prevent overfitting.
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    # Declare the variables of the sixth layer (fully connected) and implement its forward pass.
    with tf.variable_scope('layer6-fc2'):
        fc2_Weights = get_Weight([FC_SIZE, NUM_LABELS], regularizer_rate)
        fc2_biases = get_biase([NUM_LABELS])
        logit = create_fc(fc1, fc2_Weights, fc2_biases)
        # fc2 = tf.nn.relu(logit)  # use an activation here if the optional softmax layer below is added
    # with tf.variable_scope('layer7-softmax'):
    #     # A softmax layer can be appended after the fully connected layers;
    #     # the loss function would then need to be changed accordingly.
    #     sm_Weight = get_Weight([FC_SIZE, NUM_LABELS])
    #     sm_biases = get_biase([NUM_LABELS])
    #     sm = tf.matmul(fc2, sm_Weight) + sm_biases
    #     logit = tf.nn.softmax(sm)
    return logit
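
To see how this forward pass is meant to be used, here is a minimal sketch (not from the original post) that wires inference up to an input placeholder and a loss. The module name forward, the batch size, the placeholder names, and the choice of sparse softmax cross-entropy are all assumptions for illustration; the real training code is the subject of the next post. Note that the batch size must be a concrete number, because inference reshapes the pooled features with pool_shape[0].

# Minimal usage sketch. Assumptions: the code above is saved as forward.py;
# BATCH_SIZE, the placeholder names and the loss choice are illustrative only.
import tensorflow as tf
import forward  # hypothetical module name for the network definition above

BATCH_SIZE = 16  # must be fixed: inference() reshapes with pool_shape[0]

x = tf.placeholder(tf.float32,
                   [BATCH_SIZE, forward.IMAGE_SIZE, forward.IMAGE_SIZE, forward.NUM_CHANNELS],
                   name='x-input')
y_ = tf.placeholder(tf.int64, [BATCH_SIZE], name='y-input')  # integer class labels

# Forward pass: train=True enables dropout, regularizer_rate adds L2 terms to the 'losses' collection
logits = forward.inference(x, train=True, regularizer_rate=0.0001)  # shape [BATCH_SIZE, NUM_LABELS]

# One common way to combine the prediction loss with the collected L2 terms
cross_entropy = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=logits))
loss = cross_entropy + tf.add_n(tf.get_collection('losses'))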
The next post will cover the back-propagation (training) process. If you spot any mistakes, please point them out~~