Tensorboard 視覺化模板例子
阿新 • • 發佈:2018-11-07
Tensorboard 視覺化模板例子
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the MNIST dataset (labels encoded as one-hot vectors).
mnist = input_data.read_data_sets('../data/mnist' , one_hot= True)
# Mini-batch size and the number of batches per epoch.
batch_size = 100
n_batch = mnist.train.num_examples // batch_size
#參考概要
def variable_summaries(var, name='summaries'):
    """Attach TensorBoard summaries (mean/stddev/max/min scalars and a
    histogram) for the tensor `var` under the name scope `name`.

    NOTE(review): source indentation was stripped in extraction; structure
    reconstructed to the canonical TF tutorial layout — only the stddev
    computation lives in the inner 'stddev' scope.
    """
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)  # mean value
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # value distribution
#初始化權值
def weight_variable(shape):
    """Create a weight Variable of `shape`, initialized from a truncated
    normal distribution (stddev=0.1)."""
    with tf.name_scope('weight'):
        initial = tf.truncated_normal(shape, stddev=0.1)
        W = tf.Variable(initial, name='W')
        return W
#初始化偏置
def bias_variable(shape):
    """Create a bias Variable of `shape`, initialized from a truncated
    normal distribution (stddev=0.1)."""
    with tf.name_scope('biases'):
        initial = tf.truncated_normal(shape=shape, stddev=0.1)
        b = tf.Variable(initial, name='b')
        return b
#卷積層
def conv2d(x, W):
    """2-D convolution of `x` with kernel `W`, stride 1, SAME padding
    (output spatial size equals input size)."""
    with tf.name_scope('conv2d'):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 and SAME padding (halves each
    spatial dimension)."""
    with tf.name_scope('max_pool_2x2'):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Placeholders for input images and one-hot labels.
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784], name='x_input')  # flattened 28*28 image
    y = tf.placeholder(tf.float32, [None, 10], name='y_input')
    # Reshape x into a 4-D tensor [batch, in_height, in_width, in_channels].
    x_image = tf.reshape(x, [-1, 28, 28, 1], name='x_image')
    tf.summary.image('input_image', x_image, 12)  # show up to 12 input images in TensorBoard
with tf.name_scope('conv1'):
    # First convolutional layer: 5x5 kernels, 1 input channel -> 32 feature maps.
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])  # one bias per kernel
    variable_summaries(W_conv1, 'W_conv1')
    variable_summaries(b_conv1, 'b_conv1')
    # Convolve, add bias, apply ReLU, then 2x2 max-pool (28x28 -> 14x14).
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1, name='relu')
    h_pool1 = max_pool_2x2(h_conv1)
with tf.name_scope('conv2'):
    # Second convolutional layer: 5x5 kernels, 32 input channels -> 64 feature maps.
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])  # one bias per kernel
    variable_summaries(W_conv2, 'W_conv2')
    variable_summaries(b_conv2, 'b_conv2')
    # Convolve, add bias, apply ReLU, then 2x2 max-pool (14x14 -> 7x7).
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, name='relu')
    h_pool2 = max_pool_2x2(h_conv2)
# 28x28 input stays 28x28 after conv (SAME padding), 14x14 after pooling;
# second conv keeps 14x14, second pool gives 7x7.
# Result: 64 feature maps of size 7x7.
with tf.name_scope('fc1'):
    # First fully-connected layer: 7*7*64 inputs -> 1024 units.
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    # Flatten the pooled feature maps to one vector per example.
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    # Output of the first fully-connected layer.
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout keep-probability, fed at run time (0.7 for training, 1.0 for eval).
with tf.name_scope('dropout'):
    keep_pro = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_pro)
with tf.name_scope('fc2'):
    # Second fully-connected layer: 1024 units -> 10 class scores.
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    with tf.name_scope('softmax'):
        # Keep the raw logits separate from the softmax output:
        # softmax_cross_entropy_with_logits expects UN-normalized logits.
        logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        prediction = tf.nn.softmax(logits)
with tf.name_scope('cr_ep_loss'):
    # Cross-entropy loss computed from the raw logits.
    # BUG FIX: the original passed the already-softmaxed `prediction` as
    # `logits`, applying softmax twice, which flattens gradients and slows
    # (or stalls) training.
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    tf.summary.scalar('cr_ep_loss', cross_entropy)
with tf.name_scope('train'):
    # Minimize the cross-entropy with Adam (learning rate 1e-4).
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
    # Boolean vector: does the predicted class match the label?
    correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
    # Mean of the boolean vector cast to float = classification accuracy.
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
    tf.summary.scalar('accuracy', accuracy)
# Single op that evaluates every summary registered above.
merged = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Separate writers so train/test curves overlay in TensorBoard.
    train_writer = tf.summary.FileWriter('logs/train', sess.graph)
    test_writer = tf.summary.FileWriter('logs/test', sess.graph)
    for i in range(1001):
        # One optimization step with dropout enabled.
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run([merged, train_step], feed_dict={x: batch_xs, y: batch_ys, keep_pro: 0.7})
        # Re-evaluate summaries without dropout for the training curve.
        train_summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys, keep_pro: 1})
        train_writer.add_summary(train_summary, i)
        # One test batch per step for the test curve.
        batch_xs, batch_ys = mnist.test.next_batch(batch_size)
        test_summary, acc = sess.run([merged, accuracy], feed_dict={x: batch_xs, y: batch_ys, keep_pro: 1})
        test_writer.add_summary(test_summary, i)
        if i % 100 == 0:
            # Periodic full-set evaluation (train set capped at 10000 examples
            # to bound memory/time).
            test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_pro: 1.0})
            train_acc, loss = sess.run([accuracy, cross_entropy],
                                       feed_dict={x: mnist.train.images[:10000],
                                                  y: mnist.train.labels[0:10000],
                                                  keep_pro: 1.0})
            print('Iter :%s; loss%s; Train Accuracy:%s; Test Accuracy:%s' % (
                str(i), str(loss), str(train_acc), str(test_acc)))
tensorboard 顯示的Graphs
使用with tf.name_scope()定義名稱空間
train_writer = tf.summary.FileWriter('logs/train' , sess.graph) 語句寫入logs檔案
顯示loss和accuracy
記錄loss
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=prediction))
tf.summary.scalar('cr_ep_loss' , cross_entropy)
記錄accuracy
batch_xs , batch_ys = mnist.train.next_batch(batch_size)
sess.run([merged , train_step], feed_dict={x:batch_xs , y:batch_ys , keep_pro:0.7})
train_summary = sess.run(merged , feed_dict={x:batch_xs , y:batch_ys , keep_pro:1})
train_writer.add_summary(train_summary , i)
batch_xs, batch_ys = mnist.test.next_batch(batch_size)
test_summary,acc = sess.run([merged , accuracy] , feed_dict={x:batch_xs , y:batch_ys , keep_pro:1})
test_writer.add_summary(test_summary, i)
顯示權值偏置的統計資訊
定義統計函式:
def variable_summaries(var , name = 'summaries'):
with tf.name_scope(name):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean) #平均值
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev' , stddev) #標準差
tf.summary.scalar('max' , tf.reduce_max(var)) #最大值
tf.summary.scalar('min' , tf.reduce_min(var)) #最小值
tf.summary.histogram('histogram' , var) #直方圖
使用:
傳入需統計的變數和名稱空間
W_conv1 = weight_variable([5,5,1,32]) #5*5取樣視窗,32個卷積核從一個平面抽取特徵
b_conv1 = bias_variable([32]) #每一個卷積核一個偏置
variable_summaries(W_conv1 , 'W_conv1')
variable_summaries(b_conv1, 'b_conv1')
直方圖資訊:
顯示輸入圖片
記錄顯示圖片
x_image = tf.reshape(x , [-1,28,28,1] , name='x_image')
tf.summary.image('input_image', x_image,12) #12表示在tensorboard中顯示12張圖片