TensorFlow變數共享解析
阿新 • • 發佈:2019-02-04
- name_scope: 為了更好地管理變數的名稱空間而提出的。比如在 tensorboard 中,因為引入了 name_scope,我們的 Graph 看起來才井然有序。
- variable_scope: 大部分情況下,跟 tf.get_variable() 配合使用,實現變數共享的功能。
- tf.name_scope() 並不會對 tf.get_variable() 建立的變數有任何影響。
-
1、未權值共享程式碼
import tensorflow as tf

# Let GPU memory grow on demand instead of reserving it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)


def my_image_filter():
    """Build the weights/biases of two conv layers with tf.Variable.

    tf.Variable always creates a brand-new variable, so every call to
    this function adds another 4 variables to the default graph -- there
    is no sharing. TensorFlow de-duplicates the names by appending
    "_1", "_2", ... suffixes.
    """
    conv1_weights = tf.Variable(tf.random_normal([5, 5, 32, 32]),
                                name="conv1_weights")
    conv1_biases = tf.Variable(tf.zeros([32]), name="conv1_biases")
    conv2_weights = tf.Variable(tf.random_normal([5, 5, 32, 32]),
                                name="conv2_weights")
    conv2_biases = tf.Variable(tf.zeros([32]), name="conv2_biases")
    return None


# First call creates one set of 4 variables.
result1 = my_image_filter()
# Another set of 4 variables is created in the second call.
result2 = my_image_filter()

# List every trainable variable currently in the graph (expect 8).
vs = tf.trainable_variables()
print('There are %d train_able_variables in the Graph: ' % len(vs))
for v in vs:
    print(v)
There are 8 train_able_variables in the Graph:
Tensor("conv1_weights/read:0", shape=(5, 5, 32, 32), dtype=float32)
Tensor("conv1_biases/read:0", shape=(32,), dtype=float32)
Tensor("conv2_weights/read:0", shape=(5, 5, 32, 32), dtype=float32)
Tensor("conv2_biases/read:0", shape=(32,), dtype=float32)
Tensor("conv1_weights_1/read:0", shape=(5, 5, 32, 32), dtype=float32)
Tensor("conv1_biases_1/read:0", shape=(32,), dtype=float32)
Tensor("conv2_weights_1/read:0", shape=(5, 5, 32, 32), dtype=float32)
Tensor("conv2_biases_1/read:0", shape=(32,), dtype=float32)
2、權值共享程式碼
import tensorflow as tf

# Let GPU memory grow on demand instead of reserving it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)


def conv_relu(kernel_shape, bias_shape):
    """Declare (or reuse) conv-layer parameters in the current variable_scope.

    tf.get_variable either creates "weights"/"biases" under the active
    scope, or -- when the scope has reuse enabled -- returns the existing
    variables with those names, which is what makes sharing possible.
    """
    # Create (or fetch) a variable named "weights".
    weights = tf.get_variable("weights", kernel_shape,
                              initializer=tf.random_normal_initializer())
    # Create (or fetch) a variable named "biases".
    biases = tf.get_variable("biases", bias_shape,
                             initializer=tf.constant_initializer(0.0))
    return None


def my_image_filter():
    """Define two conv layers, each inside its own variable_scope."""
    with tf.variable_scope("conv1"):
        # Variables created here will be named "conv1/weights", "conv1/biases".
        relu1 = conv_relu([5, 5, 32, 32], [32])
    with tf.variable_scope("conv2"):
        # Variables created here will be named "conv2/weights", "conv2/biases".
        return conv_relu([5, 5, 32, 32], [32])


with tf.variable_scope("image_filters") as scope:
    # Call my_image_filter twice; thanks to variable sharing the network
    # structure is only created once.
    result1 = my_image_filter()
    # After reuse_variables(), get_variable() returns existing variables
    # instead of creating new ones.
    scope.reuse_variables()
    result2 = my_image_filter()

# List every trainable variable currently in the graph (expect 4 --
# variable sharing worked).
vs = tf.trainable_variables()
print('There are %d train_able_variables in the Graph: ' % len(vs))
for v in vs:
    print(v)
There are 4 train_able_variables in the Graph:
Tensor("image_filters/conv1/weights/read:0", shape=(5, 5, 32, 32), dtype=float32)
Tensor("image_filters/conv1/biases/read:0", shape=(32,), dtype=float32)
Tensor("image_filters/conv2/weights/read:0", shape=(5, 5, 32, 32), dtype=float32)
Tensor("image_filters/conv2/biases/read:0", shape=(32,), dtype=float32)