
AlexNet (TensorFlow Implementation)

Portal: my GitHub blog
Portal: my cnblogs blog

Knowledge required for this section:

  1. If you lack the basics, watch the deep learning video series first
  2. TensorFlow (a quick version check follows this list)
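The code below targets the TensorFlow 1.x API (tf.placeholder, tf.layers, tf.Session), so it is worth confirming the installed version up front; a minimal sketch:

import tensorflow as tf

# The implementation below uses tf.placeholder and tf.layers,
# both of which belong to the TensorFlow 1.x API.
print(tf.__version__)  # expect a 1.x version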

Dataset download link:

To be uploaded later
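One note on the download: with TensorFlow 1.x, the read_data_sets helper used in the code below fetches MNIST on its own, so waiting for the upload is not strictly necessary; a minimal sketch:

import tensorflow.examples.tutorials.mnist.input_data as input_data

# read_data_sets downloads the MNIST files into MNIST_data/ on
# first run if they are not already present (TensorFlow 1.x).
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(mnist.train.images.shape)  # (55000, 784)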

First, the free network-architecture diagram from Baidu Baike:

Baidu Baike diagram

Next, Andrew Ng's highly intuitive network-architecture diagram:

Network architecture diagram
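To see where the shape comments in the code below come from, apply the standard convolution output formula, floor((n + 2p - f) / s) + 1, per spatial dimension. A small helper (conv_out_size is my own name, not from the post) checks both the modified conv1 and the original paper's conv1:

def conv_out_size(n, f, s, p=0):
    # floor((n + 2p - f) / s) + 1 for one spatial dimension
    return (n + 2 * p - f) // s + 1

# modified conv1: 28x28 input, kernel 7, stride 3, VALID padding -> 8x8
print(conv_out_size(28, f=7, s=3))         # 8
# pool1: 8x8 input, pool 2, stride 2 -> 4x4
print(conv_out_size(8, f=2, s=2))          # 4
# original AlexNet conv1: 224x224, kernel 11, stride 4, padding 2 -> 55x55
print(conv_out_size(224, f=11, s=4, p=2))  # 55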

Next, my own TensorFlow implementation:

The AlexNet network:

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)  # load the MNIST dataset

'''AlexNet was originally designed for 224x224 three-channel color images.
For convenience, MNIST is used here instead and some network parameters
were modified (the changes are noted below). It performs well on image
datasets; one place it surpasses LeNet is its use of ReLU.'''


class AlexNet:
    def __init__(self):
        self.in_x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1], name="in_x")
        self.in_y = tf.placeholder(dtype=tf.float32, shape=[None, 10], name="in_y")
        # Conv layer (batch, 28, 28, 1) -> (batch, 8, 8, 96)
        # (original paper: filters=96, kernel_size=11, strides=(4, 4))
        self.conv1 = tf.layers.Conv2D(filters=96, kernel_size=7, strides=(3, 3),
                                      kernel_initializer=tf.truncated_normal_initializer(stddev=tf.sqrt(1 / 48)))
        # Pooling layer (batch, 8, 8, 96) -> (batch, 4, 4, 96)
        # (original paper: pool_size=(3, 3), strides=(2, 2))
        self.pool1 = tf.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))
        # Conv layer (batch, 4, 4, 96) -> (batch, 4, 4, 256)
        # (original paper: filters=256, kernel_size=5)
        self.conv2 = tf.layers.Conv2D(filters=256, kernel_size=3, padding="SAME",
                                      kernel_initializer=tf.truncated_normal_initializer(stddev=tf.sqrt(1 / 128)))
        # Pooling layer (batch, 4, 4, 256) -> (batch, 2, 2, 256)
        # (original paper: pool_size=(3, 3), strides=(2, 2))
        self.pool2 = tf.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))
        # Conv layer (batch, 2, 2, 256) -> (batch, 2, 2, 384)
        # (original paper: filters=384, kernel_size=3)
        self.conv3 = tf.layers.Conv2D(filters=384, kernel_size=1, padding="SAME",
                                      kernel_initializer=tf.truncated_normal_initializer(stddev=tf.sqrt(1 / 192)))
        # Conv layer (batch, 2, 2, 384) -> (batch, 2, 2, 384)
        # (original paper: filters=384, kernel_size=3)
        self.conv4 = tf.layers.Conv2D(filters=384, kernel_size=1, padding="SAME",
                                      kernel_initializer=tf.truncated_normal_initializer(stddev=tf.sqrt(1 / 192)))
        # Conv layer (batch, 2, 2, 384) -> (batch, 2, 2, 256)
        # (original paper: filters=256, kernel_size=3)
        self.conv5 = tf.layers.Conv2D(filters=256, kernel_size=1, padding="SAME",
                                      kernel_initializer=tf.truncated_normal_initializer(stddev=tf.sqrt(1 / 128)))
        # Pooling layer (batch, 2, 2, 256) -> (batch, 1, 1, 256)
        # (original paper: pool_size=(3, 3), strides=(2, 2))
        self.pool3 = tf.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))
        # reshape(-1, 256) -> (batch, 256)  (original paper: units=9216)
        self.fc1 = tf.layers.Dense(units=256,
                                   kernel_initializer=tf.truncated_normal_initializer(stddev=tf.sqrt(1 / 128)))
        # (batch, 256) -> (batch, 128)  (original paper: units=4096)
        self.fc2 = tf.layers.Dense(units=128,
                                   kernel_initializer=tf.truncated_normal_initializer(stddev=tf.sqrt(1 / 64)))
        # (batch, 128) -> (batch, 128)  (original paper: units=4096)
        self.fc3 = tf.layers.Dense(units=128,
                                   kernel_initializer=tf.truncated_normal_initializer(stddev=tf.sqrt(1 / 64)))
        # (batch, 128) -> (batch, 10)  (original paper: depends on your number of classes)
        self.fc4 = tf.layers.Dense(units=10,
                                   kernel_initializer=tf.truncated_normal_initializer(stddev=tf.sqrt(1 / 5)))

    def forward(self):
        # Some AlexNet parameters and the training data were changed to make
        # training easier; the changed places are noted in __init__.
        self.conv1_out = tf.nn.relu(self.conv1(self.in_x))
        self.pool1_out = self.pool1(self.conv1_out)
        self.conv2_out = tf.nn.relu(self.conv2(self.pool1_out))
        self.pool2_out = self.pool2(self.conv2_out)
        self.conv3_out = tf.nn.relu(self.conv3(self.pool2_out))
        self.conv4_out = tf.nn.relu(self.conv4(self.conv3_out))
        self.conv5_out = tf.nn.relu(self.conv5(self.conv4_out))
        self.pool3_out = self.pool3(self.conv5_out)  # keep the layer and its output separate
        self.flat = tf.reshape(self.pool3_out, shape=[-1, 256])
        self.fc1_out = tf.nn.relu(self.fc1(self.flat))
        self.fc2_out = tf.nn.relu(self.fc2(self.fc1_out))
        self.fc3_out = tf.nn.relu(self.fc3(self.fc2_out))
        self.fc4_out = self.fc4(self.fc3_out)

    def backward(self):  # backward pass: loss and optimizer
        self.loss = tf.reduce_mean((self.fc4_out - self.in_y) ** 2)
        self.opt = tf.train.AdamOptimizer().minimize(self.loss)

    def acc(self):  # accuracy computation (optional; the network works without it)
        self.acc1 = tf.equal(tf.argmax(self.fc4_out, 1), tf.argmax(self.in_y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.acc1, dtype=tf.float32))


if __name__ == '__main__':
    net = AlexNet()  # create the AlexNet object
    net.forward()    # build the forward pass
    net.backward()   # build the loss and optimizer
    net.acc()        # build the accuracy op
    init = tf.global_variables_initializer()  # initialize all TensorFlow variables
    with tf.Session() as sess:
        sess.run(init)
        for i in range(10000):
            train_x, train_y = mnist.train.next_batch(100)  # fetch a batch of 100 training images and labels
            train_x_flat = train_x.reshape([-1, 28, 28, 1])  # reshape to NHWC
            # feed the batch through the network; fetch accuracy and loss
            acc, loss, _ = sess.run(fetches=[net.accuracy, net.loss, net.opt],
                                    feed_dict={net.in_x: train_x_flat, net.in_y: train_y})
            if i % 100 == 0:  # every 100 steps, print training accuracy and loss
                print("train accuracy:|", acc)
                print("train loss:|", loss)
                test_x, test_y = mnist.test.next_batch(100)  # fetch a batch of 100 test samples
                test_x_flat = test_x.reshape([-1, 28, 28, 1])  # same reshape as above
                test_acc, test_loss = sess.run(fetches=[net.accuracy, net.loss],
                                               feed_dict={net.in_x: test_x_flat, net.in_y: test_y})
                print('----------')
                print("test accuracy:|", test_acc)
                print("test loss:|", test_loss)
                print('--------------------')
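One design note: the loss above is mean squared error on raw logits, which works here but is unusual for classification; the conventional choice is softmax cross-entropy. A minimal sketch of the swap inside backward(), assuming the same fc4_out logits and one-hot in_y:

    def backward(self):  # alternative backward pass using softmax cross-entropy
        # softmax_cross_entropy_with_logits_v2 applies softmax internally,
        # so fc4_out stays as raw logits.
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.in_y,
                                                       logits=self.fc4_out))
        self.opt = tf.train.AdamOptimizer().minimize(self.loss)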

Finally, a screenshot from training:

Training screenshot