
Defining the forward propagation module - inference

As requested, the inference module is posted here for reference.

The network structure inside it can be defined however you like.

Separating out the inference module improves the readability and maintainability of the code. High cohesion, low coupling?
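
For example, a separate evaluation script can rebuild exactly the same network simply by importing this module, so the architecture is defined in one place only. A minimal sketch, assuming the module below is saved as forward.py (the file name and placeholder name are my assumptions, not part of the original code):

# eval.py - hypothetical evaluation script that reuses the shared inference module
import tensorflow as tf
import forward  # assumed file name of the module shown below

x = tf.placeholder(tf.float32, [None, forward.IMAGE_SIZE, forward.IMAGE_SIZE, forward.NUM_CHANNELS], name='x-input')
# Evaluation graph: dropout disabled (train=False) and no regularization terms collected
y = forward.inference(x, False, None)

The inference module itself follows: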

import tensorflow as tf

# Parameters describing the network structure
INPUT_NODE = 128*128
OUTPUT_NODE = 62

IMAGE_SIZE = 128
NUM_CHANNELS = 1
NUM_LABELS = OUTPUT_NODE

# Filter size and depth of the first convolutional layer
CONV1_SIZE = 5
CONV1_DEEP = 32


# Filter size and depth of the second convolutional layer
CONV2_SIZE = 5
CONV2_DEEP = 64

# Number of nodes in the fully connected layer
FC_SIZE = 512
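
# Shape flow for a 128*128*1 input (SAME padding; each 2*2, stride-2 pooling halves height and width):
#   128*128*1 -> conv1 128*128*32 -> pool1 64*64*32 -> conv2 64*64*64
#   -> pool2 32*32*64 -> flatten 32*32*64 = 65536 -> fc1 512 -> fc2 62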


# Forward propagation of the network
def inference(input_tensor, train, regularizer):
    # Layer 1: convolution. For a 128*128*1 input it outputs a 128*128*32 tensor
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))

        # Use a 5*5 filter with depth 32, stride 1, and zero (SAME) padding
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    # Layer 2: max pooling with a 2*2 filter and stride 2; outputs a 64*64*32 tensor
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Layer 3: convolution; outputs a 64*64*64 tensor
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable("weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))

        # Use a 5*5 filter with depth 64, stride 1, and zero (SAME) padding
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # Layer 4: max pooling with a 2*2 filter and stride 2; outputs a 32*32*64 tensor
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Layer 5: fully connected layer; its input is the 32*32*64 tensor from pool2
    # First flatten it into a single long vector
    # Equivalently: reshaped = tf.reshape(pool2, [-1, 32*32*64])
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [-1, nodes])

    # Layer 5: fully connected layer. The flattened input vector has length 32*32*64 = 65536
    # and the output vector has length 512. Dropout is added here; dropout is usually
    # applied only to the fully connected layers.
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable('weight', [nodes, FC_SIZE], initializer=tf.truncated_normal_initializer(stddev=0.1))

        # Only the weights of the fully connected layers need regularization
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable('bias', [FC_SIZE], initializer=tf.constant_initializer(0.1))

        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
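        # Note: in TF1 the second positional argument of tf.nn.dropout is keep_prob,
        # so 0.5 keeps each activation with probability 0.5 and scales the kept ones by 2.
        # Dropout is applied only when building the training graph (train=True).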
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    # Layer 6: fully connected layer; input length 512, output length NUM_LABELS = 62
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable('weight', [FC_SIZE, NUM_LABELS], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable('bias', [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    return logit
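
For completeness, here is a rough sketch of how the returned logit and the 'losses' collection might be combined into a total loss on the training side. The module name forward, the regularization weight, and the learning rate are assumptions for illustration, not part of the original post:

# train.py - hypothetical training-side loss assembly
import tensorflow as tf
import forward  # assumed file name of the module above

x = tf.placeholder(tf.float32, [None, forward.IMAGE_SIZE, forward.IMAGE_SIZE, forward.NUM_CHANNELS], name='x-input')
y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE], name='y-input')

regularizer = tf.contrib.layers.l2_regularizer(0.0001)     # assumed L2 weight
logits = forward.inference(x, True, regularizer)           # training graph: dropout on

# Cross-entropy over the 62 classes, plus the L2 terms that inference() added
# to the 'losses' collection for the fully connected weights.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits)
loss = tf.reduce_mean(cross_entropy) + tf.add_n(tf.get_collection('losses'))

train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)   # assumed learning rate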