Learning TensorFlow: calling pretrained networks (AlexNet, VGG, ResNet, etc.)
Published: 2019-01-05
Once deep neural networks entered computer vision, an end-to-end network for training and prediction can be viewed as two parts: feature representation and task-specific decision making (classification, regression, and so on). When our own training data is small, we usually rely on a network that others have already pretrained to extract features, then append our own task-specific layers on top and fine-tune. The ILSVRC competition (a 1000-class classification problem) uses roughly 1.26 million training images, 50,000 validation images, and 100,000 test images (labels not released), and people generally build their own task-specific networks on top of the top-ranked networks from that competition.
This post briefly walks through how to call a pretrained VGG network from TensorFlow; other networks (AlexNet, ResNet, etc.) follow the same pattern. It has three parts: part one covers downloading the network architecture definition and the weight parameters, part two shows how to use the feature maps of the pretrained network, and part three lists reference material. Note: this was put together while learning and searching around, so please point out anything I have gotten wrong.
1. Downloading the network architecture definition and weight parameters
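The original download links are not reproduced here. The class in the next section expects the pretrained weights as a vgg16.npy file placed next to the Python file defining Vgg16; TensorFlow ports of VGG16 on GitHub (for example the machrisaa/tensorflow-vgg project, which the code below appears to be based on) distribute the weights in exactly this form. As a quick sanity check after downloading, a small sketch like the following (my own addition, assuming the file sits in the current directory) loads the weight file and lists the layers it contains:

import numpy as np

# vgg16.npy stores a pickled Python dict {layer_name: [weights, biases]};
# newer NumPy versions require allow_pickle=True to read it.
data_dict = np.load("vgg16.npy", encoding="latin1", allow_pickle=True).item()

for name in sorted(data_dict):
    weights, biases = data_dict[name]
    print(name, weights.shape, biases.shape)
# expected layer names: conv1_1 ... conv5_3, fc6, fc7, fc8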
2. Using the feature maps of a pretrained network (VGG16 as the example)
import inspect
import os
import time

import numpy as np
import tensorflow as tf

VGG_MEAN = [103.939, 116.779, 123.68]  # BGR channel means of the ImageNet training set


class Vgg16:
    def __init__(self, vgg16_npy_path=None):
        if vgg16_npy_path is None:
            # Default to a vgg16.npy file sitting next to this Python file
            path = inspect.getfile(Vgg16)
            path = os.path.abspath(os.path.join(path, os.pardir))
            path = os.path.join(path, "vgg16.npy")
            vgg16_npy_path = path
            print(path)

        # Load the pretrained network weights: a dict {layer_name: [weights, biases]}
        self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
        print("npy file loaded")

    def build(self, rgb):
        """
        Load variables from the npy file to build the VGG.

        :param rgb: rgb image [batch, height, width, 3], values scaled to [0, 1]
        """
        start_time = time.time()
        print("build model started")
        rgb_scaled = rgb * 255.0

        # Convert RGB to BGR and subtract the per-channel mean
        # (TensorFlow 1.x argument order for split/concat)
        red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
        assert red.get_shape().as_list()[1:] == [224, 224, 1]
        assert green.get_shape().as_list()[1:] == [224, 224, 1]
        assert blue.get_shape().as_list()[1:] == [224, 224, 1]
        bgr = tf.concat(axis=3, values=[
            blue - VGG_MEAN[0],
            green - VGG_MEAN[1],
            red - VGG_MEAN[2],
        ])
        assert bgr.get_shape().as_list()[1:] == [224, 224, 3]

        self.conv1_1 = self.conv_layer(bgr, "conv1_1")
        self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
        self.pool1 = self.max_pool(self.conv1_2, 'pool1')

        self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
        self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
        self.pool2 = self.max_pool(self.conv2_2, 'pool2')

        self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
        self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
        self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
        self.pool3 = self.max_pool(self.conv3_3, 'pool3')

        self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
        self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
        self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
        self.pool4 = self.max_pool(self.conv4_3, 'pool4')

        self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
        self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
        self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
        self.pool5 = self.max_pool(self.conv5_3, 'pool5')

        self.fc6 = self.fc_layer(self.pool5, "fc6")
        assert self.fc6.get_shape().as_list()[1:] == [4096]
        self.relu6 = tf.nn.relu(self.fc6)

        self.fc7 = self.fc_layer(self.relu6, "fc7")
        self.relu7 = tf.nn.relu(self.fc7)

        self.fc8 = self.fc_layer(self.relu7, "fc8")
        self.prob = tf.nn.softmax(self.fc8, name="prob")

        self.data_dict = None
        print("build model finished: %ds" % (time.time() - start_time))

    def avg_pool(self, bottom, name):
        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME', name=name)

    def max_pool(self, bottom, name):
        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                              padding='SAME', name=name)

    def conv_layer(self, bottom, name):
        with tf.variable_scope(name):
            filt = self.get_conv_filter(name)
            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
            conv_biases = self.get_bias(name)
            bias = tf.nn.bias_add(conv, conv_biases)
            relu = tf.nn.relu(bias)
            return relu

    def fc_layer(self, bottom, name):
        with tf.variable_scope(name):
            shape = bottom.get_shape().as_list()
            dim = 1
            for d in shape[1:]:
                dim *= d
            x = tf.reshape(bottom, [-1, dim])

            weights = self.get_fc_weight(name)
            biases = self.get_bias(name)

            # Fully connected layer. Note that the '+' operation automatically
            # broadcasts the biases.
            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
            return fc

    # The pretrained parameters are wrapped in tf.constant, so they stay frozen
    # and are never updated during training.
    def get_conv_filter(self, name):
        return tf.constant(self.data_dict[name][0], name="filter")

    def get_bias(self, name):
        return tf.constant(self.data_dict[name][1], name="biases")

    def get_fc_weight(self, name):
        return tf.constant(self.data_dict[name][0], name="weights")
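Before the segmentation use case below, here is a minimal usage sketch for a plain forward pass (my own illustration, not from the original post). It assumes TensorFlow 1.x, the class above saved as vgg16.py with vgg16.npy next to it, scikit-image for loading, and a hypothetical image file cat.jpg; as the docstring states, build() expects RGB batches of shape [batch, 224, 224, 3] with values in [0, 1].

import numpy as np
import tensorflow as tf
from skimage import io, transform

import vgg16  # the module containing the Vgg16 class above

images = tf.placeholder(tf.float32, [None, 224, 224, 3])  # RGB in [0, 1]
vgg = vgg16.Vgg16()
vgg.build(images)

# skimage's resize returns a float image scaled to [0, 1]
img = transform.resize(io.imread('cat.jpg'), (224, 224))
batch = img.reshape((1, 224, 224, 3)).astype(np.float32)

with tf.Session() as sess:
    prob, feat = sess.run([vgg.prob, vgg.pool5], feed_dict={images: batch})
    print(prob.shape)  # (1, 1000) ImageNet class probabilities
    print(feat.shape)  # (1, 7, 7, 512) pool5 feature map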
The VGG16 definition above serves as the feature extractor. Suppose we now have an input image, image, and want to do segmentation: we can train and test an end-to-end fully convolutional network. For this task, all we need from the pretrained part is the pool5 feature map (one possible sketch of the segmentation head is given after the snippet below).
# Above: your network definition, initialization, and data preprocessing...
vgg = vgg16.Vgg16()
vgg.build(image)
feature_map = vgg.pool5
mask = yournetwork(feature_map)
# Below: define the loss, the learning-rate schedule, and then train...
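To make those placeholder steps concrete, here is one possible minimal sketch of the yournetwork head and the loss/training setup (hypothetical, my own addition; it assumes TensorFlow 1.x, binary masks, and 224x224 inputs, and the real head depends on your task):

def yournetwork(feature_map, num_classes=1):
    # 1x1 convolution maps the 512 pool5 channels to per-pixel class scores,
    # then bilinear upsampling brings the 7x7 score map back to 224x224.
    logits = tf.layers.conv2d(feature_map, num_classes, kernel_size=1)
    return tf.image.resize_bilinear(logits, [224, 224])

# The mask returned above is treated as per-pixel logits
labels = tf.placeholder(tf.float32, [None, 224, 224, 1])  # ground-truth binary masks
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=mask))
# Only the decoder variables are trainable; the VGG weights are tf.constant and stay frozen.
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)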
3. References