How to Train a Model into a .pb File with TensorFlow (Part 2): Reading from TFRecord
阿新 • Published 2019-02-02
Introduction
The previous post covered reading raw image files; this one covers reading from TFRecord. TFRecord is the data format TensorFlow provides for efficient input reading. How to create a TFRecord file is not covered here; there are plenty of tutorials online, so it is assumed you already know.
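For orientation only, here is a minimal sketch of how such a file could be written so that it matches the 'label' / 'image_raw' feature keys the reader below expects (the samples iterable and file name are placeholders, not from the original article):

import tensorflow as tf

def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

# samples: an iterable of (uint8 HxWx3 numpy array, int label) pairs.
writer = tf.python_io.TFRecordWriter('train.tfrecords')
for image_array, label in samples:
    example = tf.train.Example(features=tf.train.Features(feature={
        'label': _int64_feature(label),
        'image_raw': _bytes_feature(image_array.tobytes()),
    }))
    writer.write(example.SerializeToString())
writer.close()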
Training
Define the network structure. It is much like the previous post (again a placeholder with name="input", and so on), except that each conv layer now adds a line such as inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv2_2), i.e. batch normalization is applied. The effect is very noticeable: training speeds up considerably, whereas before, early training behaved like random guessing with accuracy around 0.5.
def build_network(height, width, channel):
    x = tf.placeholder(tf.float32, shape=[None, height, width, channel], name="input")
    y = tf.placeholder(tf.int32, shape=[None, n_classes], name="labels_placeholder")

    def weight_variable(shape, name="weights"):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial, name=name)

    def bias_variable(shape, name="biases"):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial, name=name)

    def conv2d(input, w):
        return tf.nn.conv2d(input, w, [1, 1, 1, 1], padding='SAME')

    def pool_max(input):
        return tf.nn.max_pool(input,
                              ksize=[1, 3, 3, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name='pool1')

    def fc(input, w, b):
        return tf.matmul(input, w) + b

    # conv1
    with tf.name_scope('conv1_1') as scope:
        kernel = weight_variable([3, 3, Channels, 64])
        biases = bias_variable([64])
        conv1_1 = tf.nn.bias_add(conv2d(x, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv1_1)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv1_1 = tf.nn.relu(conv_batch_norm, name=scope)

    with tf.name_scope('conv1_2') as scope:
        kernel = weight_variable([3, 3, 64, 64])
        biases = bias_variable([64])
        conv1_2 = tf.nn.bias_add(conv2d(output_conv1_1, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv1_2)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv1_2 = tf.nn.relu(conv_batch_norm, name=scope)

    pool1 = pool_max(output_conv1_2)

    # conv2
    with tf.name_scope('conv2_1') as scope:
        kernel = weight_variable([3, 3, 64, 128])
        biases = bias_variable([128])
        conv2_1 = tf.nn.bias_add(conv2d(pool1, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv2_1)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv2_1 = tf.nn.relu(conv_batch_norm, name=scope)

    with tf.name_scope('conv2_2') as scope:
        kernel = weight_variable([3, 3, 128, 128])
        biases = bias_variable([128])
        conv2_2 = tf.nn.bias_add(conv2d(output_conv2_1, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv2_2)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv2_2 = tf.nn.relu(conv_batch_norm, name=scope)

    pool2 = pool_max(output_conv2_2)

    # conv3
    with tf.name_scope('conv3_1') as scope:
        kernel = weight_variable([3, 3, 128, 256])
        biases = bias_variable([256])
        conv3_1 = tf.nn.bias_add(conv2d(pool2, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv3_1)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv3_1 = tf.nn.relu(conv_batch_norm, name=scope)

    with tf.name_scope('conv3_2') as scope:
        kernel = weight_variable([3, 3, 256, 256])
        biases = bias_variable([256])
        conv3_2 = tf.nn.bias_add(conv2d(output_conv3_1, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv3_2)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv3_2 = tf.nn.relu(conv_batch_norm, name=scope)

    # with tf.name_scope('conv3_3') as scope:
    #     kernel = weight_variable([3, 3, 256, 256])
    #     biases = bias_variable([256])
    #     output_conv3_3 = tf.nn.relu(conv2d(output_conv3_2, kernel) + biases, name=scope)

    pool3 = pool_max(output_conv3_2)

    # conv4 and conv5 of the full VGG are disabled here to keep the model small.
    '''
    # conv4
    with tf.name_scope('conv4_1') as scope:
        kernel = weight_variable([3, 3, 256, 512])
        biases = bias_variable([512])
        output_conv4_1 = tf.nn.relu(conv2d(pool3, kernel) + biases, name=scope)

    with tf.name_scope('conv4_2') as scope:
        kernel = weight_variable([3, 3, 512, 512])
        biases = bias_variable([512])
        output_conv4_2 = tf.nn.relu(conv2d(output_conv4_1, kernel) + biases, name=scope)

    with tf.name_scope('conv4_3') as scope:
        kernel = weight_variable([3, 3, 512, 512])
        biases = bias_variable([512])
        output_conv4_3 = tf.nn.relu(conv2d(output_conv4_2, kernel) + biases, name=scope)

    pool4 = pool_max(output_conv4_3)

    # conv5
    with tf.name_scope('conv5_1') as scope:
        kernel = weight_variable([3, 3, 512, 512])
        biases = bias_variable([512])
        output_conv5_1 = tf.nn.relu(conv2d(pool4, kernel) + biases, name=scope)

    with tf.name_scope('conv5_2') as scope:
        kernel = weight_variable([3, 3, 512, 512])
        biases = bias_variable([512])
        output_conv5_2 = tf.nn.relu(conv2d(output_conv5_1, kernel) + biases, name=scope)

    with tf.name_scope('conv5_3') as scope:
        kernel = weight_variable([3, 3, 512, 512])
        biases = bias_variable([512])
        output_conv5_3 = tf.nn.relu(conv2d(output_conv5_2, kernel) + biases, name=scope)

    pool5 = pool_max(output_conv5_3)
    '''

    # fc6
    with tf.name_scope('fc6') as scope:
        shape = int(np.prod(pool3.get_shape()[1:]))
        # kernel = weight_variable([shape, 4096])   # full VGG size
        # biases = bias_variable([4096])
        kernel = weight_variable([shape, 120])
        biases = bias_variable([120])
        pool5_flat = tf.reshape(pool3, [-1, shape])
        output_fc6 = tf.nn.relu(fc(pool5_flat, kernel, biases), name=scope)

    # fc7
    with tf.name_scope('fc7') as scope:
        # kernel = weight_variable([4096, 4096])
        # biases = bias_variable([4096])
        kernel = weight_variable([120, 100])
        biases = bias_variable([100])
        output_fc7 = tf.nn.relu(fc(output_fc6, kernel, biases), name=scope)

    # fc8
    with tf.name_scope('fc8') as scope:
        # kernel = weight_variable([4096, n_classes])
        kernel = weight_variable([100, n_classes])
        biases = bias_variable([n_classes])
        # No ReLU on the last layer: logits must be free to take negative values.
        output_fc8 = tf.identity(fc(output_fc7, kernel, biases), name=scope)

    finaloutput = tf.nn.softmax(output_fc8, name="softmax")
    # softmax_cross_entropy_with_logits expects raw logits, so pass output_fc8
    # here rather than the already-softmaxed finaloutput.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=output_fc8, labels=tf.cast(y, tf.float32)))
    optimize = tf.train.AdamOptimizer(lr).minimize(cost)

    prediction_labels = tf.argmax(finaloutput, axis=1, name="output")
    read_labels = tf.argmax(y, axis=1)
    correct_prediction = tf.equal(prediction_labels, read_labels)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    correct_times_in_batch = tf.reduce_sum(tf.cast(correct_prediction, tf.int32))

    return dict(
        x=x,
        y=y,
        lr=lr,
        optimize=optimize,
        correct_prediction=correct_prediction,
        correct_times_in_batch=correct_times_in_batch,
        cost=cost,
        accuracy=accuracy,
    )
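As a sanity check on the fc6 input size: the 150x200 input passes through three stride-2 'SAME' poolings, each of which rounds up, so the flattened feature entering fc6 is 19 x 25 x 256 = 121600 values. A quick check, not part of the training script:

import math

h, w = 150, 200            # H, W of the network input
for _ in range(3):         # pool1, pool2, pool3: stride 2, 'SAME' padding
    h, w = math.ceil(h / 2), math.ceil(w / 2)
print(h, w, h * w * 256)   # 19 25 121600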
The batch normalization helper used for the conv layers is defined as follows:
def my_batch_norm(inputs):
    # Learned per-channel scale (gamma) and shift (beta).
    scale = tf.Variable(tf.ones([inputs.get_shape()[-1]]), dtype=tf.float32)
    beta = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), dtype=tf.float32)
    # Statistics of the current mini-batch over the N, H, W axes. No moving
    # averages are tracked, so inference also uses the fed batch's statistics.
    batch_mean, batch_var = tf.nn.moments(inputs, [0, 1, 2])
    return inputs, batch_mean, batch_var, beta, scale
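For reference, tf.nn.batch_normalization(inputs, mean, var, beta, scale, eps) applies the standard transform scale * (x - mean) / sqrt(var + eps) + beta. A minimal NumPy sketch of the same arithmetic (function name is mine):

import numpy as np

def batch_norm_reference(x, mean, var, beta, scale, eps=0.001):
    # Element-wise, with mean/var broadcast over the channel axis.
    return scale * (x - mean) / np.sqrt(var + eps) + beta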
Training the network: as the code below shows, training and validation loss and accuracy are reported every epoch_delta steps, and the model is saved every 500 steps:
def train_network(graph, batch_size, num_epochs, pb_file_path):
    tra_image_batch, tra_label_batch = input_data.read_and_decode2stand(tfrecords_file=tra_data_dir,
                                                                        batch_size=batch_size)
    val_image_batch, val_label_batch = input_data.read_and_decode2stand(tfrecords_file=val_data_dir,
                                                                        batch_size=batch_size)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        epoch_delta = 100
        try:
            for epoch_index in range(num_epochs):
                learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-epoch_index / decay_speed)
                tra_images, tra_labels = sess.run([tra_image_batch, tra_label_batch])
                accuracy, mean_cost_in_batch, return_correct_times_in_batch, _ = sess.run(
                    [graph['accuracy'], graph['cost'], graph['correct_times_in_batch'], graph['optimize']],
                    feed_dict={
                        graph['x']: tra_images,
                        graph['lr']: learning_rate,
                        graph['y']: tra_labels
                    })
                if epoch_index % epoch_delta == 0:
                    # Accuracy and cost on the current training batch.
                    print("index[%s]".center(50, '-') % epoch_index)
                    print("Train: cost_in_batch:{},correct_in_batch:{},accuracy:{}".format(mean_cost_in_batch, return_correct_times_in_batch, accuracy))
                    # Accuracy and cost on a validation batch.
                    val_images, val_labels = sess.run([val_image_batch, val_label_batch])
                    mean_cost_in_batch, return_correct_times_in_batch = sess.run(
                        [graph['cost'], graph['correct_times_in_batch']],
                        feed_dict={
                            graph['x']: val_images,
                            graph['y']: val_labels
                        })
                    print("***Val: cost_in_batch:{},correct_in_batch:{},accuracy:{}".format(mean_cost_in_batch, return_correct_times_in_batch, return_correct_times_in_batch / batch_size))
                if epoch_index % 500 == 0:
                    # Freeze the graph: fold the current variable values into
                    # constants, keeping everything needed for the "output" node.
                    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["output"])
                    with tf.gfile.FastGFile(pb_file_path, mode='wb') as f:
                        f.write(constant_graph.SerializeToString())
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
            coord.join(threads)
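The learning rate decays exponentially from max_learning_rate toward min_learning_rate. A quick sketch of the schedule the loop above computes:

import math

max_learning_rate = 0.002
min_learning_rate = 0.0001
decay_speed = 2000.0

# lr(i) = lr_min + (lr_max - lr_min) * exp(-i / decay_speed)
for step in (0, 500, 2000, 5000):
    lr_i = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-step / decay_speed)
    print(step, round(lr_i, 6))   # decays from 0.002 toward 0.0001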
Note:
tra_images is 4-D data
image_batch: 4D tensor - [batch_size, height, width, channel]
tra_labels is 2-D data
label_batch: 2D tensor - [batch_size, n_classes]
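Inside the training session those shapes can be confirmed directly; a sketch, with the expected numbers assuming the batch_size, H, W, Channels and n_classes values used later in the full script:

tra_images, tra_labels = sess.run([tra_image_batch, tra_label_batch])
print(tra_images.shape)   # (40, 150, 200, 3) -> [batch_size, H, W, Channels]
print(tra_labels.shape)   # (40, 2)           -> [batch_size, n_classes]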
input_data.py:
def read_and_decode2stand(tfrecords_file, batch_size):
    '''Read and decode a tfrecord file and generate (image, label) batches.
    Args:
        tfrecords_file: path of the tfrecord file
        batch_size: number of images in each batch
    Returns:
        image_batch: 4D tensor - [batch_size, height, width, channel]
        label_batch: 2D tensor - [batch_size, n_classes]
    '''
    # Make an input queue from the tfrecord file.
    filename_queue = tf.train.string_input_producer([tfrecords_file])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    img_features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(img_features['image_raw'], tf.uint8)
    # All images in this dataset are 200x150; change H, W and channels
    # if you use another dataset.
    image = tf.reshape(image, [H, W, channels])
    image = tf.cast(image, tf.float32) * (1.0 / 255)
    image = tf.image.per_image_standardization(image)   # zero mean, unit variance
    label = tf.cast(img_features['label'], tf.int32)
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,
                                              capacity=2000)
    # Convert labels to one-hot encoding.
    label_batch = tf.one_hot(label_batch, depth=n_classes)
    label_batch = tf.cast(label_batch, dtype=tf.int32)
    label_batch = tf.reshape(label_batch, [batch_size, n_classes])
    print(label_batch)
    return image_batch, label_batch
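To eyeball one batch from a tfrecord file outside of training, something like this sketch should work (the file path is a placeholder):

import tensorflow as tf
import input_data

image_batch, label_batch = input_data.read_and_decode2stand('train.tfrecords', batch_size=4)
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    imgs, labels = sess.run([image_batch, label_batch])
    print(imgs.shape, labels.shape)   # (4, H, W, channels) and (4, n_classes)
    coord.request_stop()
    coord.join(threads)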
The complete training script:
# -*- coding: utf-8 -*-
"""
Train a small VGG-style CNN on TFRecord input and export a frozen .pb graph.
"""
import math

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import graph_util

import input_data

tra_data_dir = 'D://AutoSparePart//Train_Test_TF//train.tfrecords'
val_data_dir = 'D://AutoSparePart//Train_Test_TF//val.tfrecords'

max_learning_rate = 0.002  # 0.0002
min_learning_rate = 0.0001
decay_speed = 2000.0
lr = tf.placeholder(tf.float32)

W = 200
H = 150
Channels = 3
n_classes = 2

def my_batch_norm(inputs):
    # Learned per-channel scale (gamma) and shift (beta).
    scale = tf.Variable(tf.ones([inputs.get_shape()[-1]]), dtype=tf.float32)
    beta = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), dtype=tf.float32)
    # Statistics of the current mini-batch over the N, H, W axes. No moving
    # averages are tracked, so inference also uses the fed batch's statistics.
    batch_mean, batch_var = tf.nn.moments(inputs, [0, 1, 2])
    return inputs, batch_mean, batch_var, beta, scale

def build_network(height, width, channel):
    x = tf.placeholder(tf.float32, shape=[None, height, width, channel], name="input")
    y = tf.placeholder(tf.int32, shape=[None, n_classes], name="labels_placeholder")

    def weight_variable(shape, name="weights"):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial, name=name)

    def bias_variable(shape, name="biases"):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial, name=name)

    def conv2d(input, w):
        return tf.nn.conv2d(input, w, [1, 1, 1, 1], padding='SAME')

    def pool_max(input):
        return tf.nn.max_pool(input,
                              ksize=[1, 3, 3, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name='pool1')

    def fc(input, w, b):
        return tf.matmul(input, w) + b

    # conv1
    with tf.name_scope('conv1_1') as scope:
        kernel = weight_variable([3, 3, Channels, 64])
        biases = bias_variable([64])
        conv1_1 = tf.nn.bias_add(conv2d(x, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv1_1)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv1_1 = tf.nn.relu(conv_batch_norm, name=scope)

    with tf.name_scope('conv1_2') as scope:
        kernel = weight_variable([3, 3, 64, 64])
        biases = bias_variable([64])
        conv1_2 = tf.nn.bias_add(conv2d(output_conv1_1, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv1_2)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv1_2 = tf.nn.relu(conv_batch_norm, name=scope)

    pool1 = pool_max(output_conv1_2)

    # conv2
    with tf.name_scope('conv2_1') as scope:
        kernel = weight_variable([3, 3, 64, 128])
        biases = bias_variable([128])
        conv2_1 = tf.nn.bias_add(conv2d(pool1, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv2_1)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv2_1 = tf.nn.relu(conv_batch_norm, name=scope)

    with tf.name_scope('conv2_2') as scope:
        kernel = weight_variable([3, 3, 128, 128])
        biases = bias_variable([128])
        conv2_2 = tf.nn.bias_add(conv2d(output_conv2_1, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv2_2)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv2_2 = tf.nn.relu(conv_batch_norm, name=scope)

    pool2 = pool_max(output_conv2_2)

    # conv3
    with tf.name_scope('conv3_1') as scope:
        kernel = weight_variable([3, 3, 128, 256])
        biases = bias_variable([256])
        conv3_1 = tf.nn.bias_add(conv2d(pool2, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv3_1)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv3_1 = tf.nn.relu(conv_batch_norm, name=scope)

    with tf.name_scope('conv3_2') as scope:
        kernel = weight_variable([3, 3, 256, 256])
        biases = bias_variable([256])
        conv3_2 = tf.nn.bias_add(conv2d(output_conv3_1, kernel), biases)
        inputs, pop_mean, pop_var, beta, scale = my_batch_norm(conv3_2)
        conv_batch_norm = tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)
        output_conv3_2 = tf.nn.relu(conv_batch_norm, name=scope)

    # with tf.name_scope('conv3_3') as scope:
    #     kernel = weight_variable([3, 3, 256, 256])
    #     biases = bias_variable([256])
    #     output_conv3_3 = tf.nn.relu(conv2d(output_conv3_2, kernel) + biases, name=scope)

    pool3 = pool_max(output_conv3_2)

    # conv4 and conv5 of the full VGG are disabled here to keep the model small.
    '''
    # conv4
    with tf.name_scope('conv4_1') as scope:
        kernel = weight_variable([3, 3, 256, 512])
        biases = bias_variable([512])
        output_conv4_1 = tf.nn.relu(conv2d(pool3, kernel) + biases, name=scope)

    with tf.name_scope('conv4_2') as scope:
        kernel = weight_variable([3, 3, 512, 512])
        biases = bias_variable([512])
        output_conv4_2 = tf.nn.relu(conv2d(output_conv4_1, kernel) + biases, name=scope)

    with tf.name_scope('conv4_3') as scope:
        kernel = weight_variable([3, 3, 512, 512])
        biases = bias_variable([512])
        output_conv4_3 = tf.nn.relu(conv2d(output_conv4_2, kernel) + biases, name=scope)

    pool4 = pool_max(output_conv4_3)

    # conv5
    with tf.name_scope('conv5_1') as scope:
        kernel = weight_variable([3, 3, 512, 512])
        biases = bias_variable([512])
        output_conv5_1 = tf.nn.relu(conv2d(pool4, kernel) + biases, name=scope)

    with tf.name_scope('conv5_2') as scope:
        kernel = weight_variable([3, 3, 512, 512])
        biases = bias_variable([512])
        output_conv5_2 = tf.nn.relu(conv2d(output_conv5_1, kernel) + biases, name=scope)

    with tf.name_scope('conv5_3') as scope:
        kernel = weight_variable([3, 3, 512, 512])
        biases = bias_variable([512])
        output_conv5_3 = tf.nn.relu(conv2d(output_conv5_2, kernel) + biases, name=scope)

    pool5 = pool_max(output_conv5_3)
    '''

    # fc6
    with tf.name_scope('fc6') as scope:
        shape = int(np.prod(pool3.get_shape()[1:]))
        # kernel = weight_variable([shape, 4096])   # full VGG size
        # biases = bias_variable([4096])
        kernel = weight_variable([shape, 120])
        biases = bias_variable([120])
        pool5_flat = tf.reshape(pool3, [-1, shape])
        output_fc6 = tf.nn.relu(fc(pool5_flat, kernel, biases), name=scope)

    # fc7
    with tf.name_scope('fc7') as scope:
        # kernel = weight_variable([4096, 4096])
        # biases = bias_variable([4096])
        kernel = weight_variable([120, 100])
        biases = bias_variable([100])
        output_fc7 = tf.nn.relu(fc(output_fc6, kernel, biases), name=scope)

    # fc8
    with tf.name_scope('fc8') as scope:
        # kernel = weight_variable([4096, n_classes])
        kernel = weight_variable([100, n_classes])
        biases = bias_variable([n_classes])
        # No ReLU on the last layer: logits must be free to take negative values.
        output_fc8 = tf.identity(fc(output_fc7, kernel, biases), name=scope)

    finaloutput = tf.nn.softmax(output_fc8, name="softmax")
    # softmax_cross_entropy_with_logits expects raw logits, so pass output_fc8
    # here rather than the already-softmaxed finaloutput.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=output_fc8, labels=tf.cast(y, tf.float32)))
    optimize = tf.train.AdamOptimizer(lr).minimize(cost)

    prediction_labels = tf.argmax(finaloutput, axis=1, name="output")
    read_labels = tf.argmax(y, axis=1)
    correct_prediction = tf.equal(prediction_labels, read_labels)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    correct_times_in_batch = tf.reduce_sum(tf.cast(correct_prediction, tf.int32))

    return dict(
        x=x,
        y=y,
        lr=lr,
        optimize=optimize,
        correct_prediction=correct_prediction,
        correct_times_in_batch=correct_times_in_batch,
        cost=cost,
        accuracy=accuracy,
    )

def train_network(graph, batch_size, num_epochs, pb_file_path):
    tra_image_batch, tra_label_batch = input_data.read_and_decode2stand(tfrecords_file=tra_data_dir,
                                                                        batch_size=batch_size)
    val_image_batch, val_label_batch = input_data.read_and_decode2stand(tfrecords_file=val_data_dir,
                                                                        batch_size=batch_size)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        epoch_delta = 100
        try:
            for epoch_index in range(num_epochs):
                learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-epoch_index / decay_speed)
                tra_images, tra_labels = sess.run([tra_image_batch, tra_label_batch])
                accuracy, mean_cost_in_batch, return_correct_times_in_batch, _ = sess.run(
                    [graph['accuracy'], graph['cost'], graph['correct_times_in_batch'], graph['optimize']],
                    feed_dict={
                        graph['x']: tra_images,
                        graph['lr']: learning_rate,
                        graph['y']: tra_labels
                    })
                if epoch_index % epoch_delta == 0:
                    # Accuracy and cost on the current training batch.
                    print("index[%s]".center(50, '-') % epoch_index)
                    print("Train: cost_in_batch:{},correct_in_batch:{},accuracy:{}".format(mean_cost_in_batch, return_correct_times_in_batch, accuracy))
                    # Accuracy and cost on a validation batch.
                    val_images, val_labels = sess.run([val_image_batch, val_label_batch])
                    mean_cost_in_batch, return_correct_times_in_batch = sess.run(
                        [graph['cost'], graph['correct_times_in_batch']],
                        feed_dict={
                            graph['x']: val_images,
                            graph['y']: val_labels
                        })
                    print("***Val: cost_in_batch:{},correct_in_batch:{},accuracy:{}".format(mean_cost_in_batch, return_correct_times_in_batch, return_correct_times_in_batch / batch_size))
                if epoch_index % 500 == 0:
                    # Freeze the graph: fold the current variable values into
                    # constants, keeping everything needed for the "output" node.
                    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["output"])
                    with tf.gfile.FastGFile(pb_file_path, mode='wb') as f:
                        f.write(constant_graph.SerializeToString())
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
            coord.join(threads)

def main():
    batch_size = 40
    num_epochs = 5001
    pb_file_path = "./output/autosparepart.pb"
    g = build_network(height=H, width=W, channel=Channels)
    train_network(g, batch_size, num_epochs, pb_file_path)

main()
The batch size is batch_size = 40 and the number of iterations is num_epochs = 5001. Strictly speaking, each "epoch" here is really a training step: a true epoch is one complete pass over the entire training set.
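For example (with a made-up dataset size, since the article does not give one), the conversion from steps to true epochs is simple arithmetic:

dataset_size = 2000                            # hypothetical, not from the article
batch_size = 40
steps_per_epoch = dataset_size // batch_size   # 50 steps per full pass
true_epochs = 5001 / steps_per_epoch           # about 100 passes over the data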
Training results:
Testing
The testing step is simple, just a few lines of code:
'''
Created on 2017-09-09
@author: admin
'''
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import PIL.Image as Image
from skimage import transform

W = 200
H = 150

def recognize(jpg_path, pb_file_path):
    with tf.Graph().as_default():
        output_graph_def = tf.GraphDef()
        # Load the frozen graph and import it into the default graph.
        with open(pb_file_path, "rb") as f:
            output_graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(output_graph_def, name="")
        with tf.Session() as sess:
            input_x = sess.graph.get_tensor_by_name("input:0")
            print(input_x)
            out_softmax = sess.graph.get_tensor_by_name("softmax:0")
            print(out_softmax)
            out_label = sess.graph.get_tensor_by_name("output:0")
            print(out_label)
            # The network was trained on 3-channel images, so keep RGB here.
            img = np.array(Image.open(jpg_path).convert('RGB'))
            # skimage's resize already returns floats scaled to [0, 1].
            img = transform.resize(img, (H, W, 3))
            plt.figure("fig1")
            plt.imshow(img)
            # Match the training-time preprocessing: approximately
            # per-image standardization (zero mean, unit variance).
            img = (img - img.mean()) / max(img.std(), 1e-6)
            img_out_softmax = sess.run(out_softmax, feed_dict={input_x: np.reshape(img, [-1, H, W, 3])})
            print("img_out_softmax:", img_out_softmax)
            prediction_labels = np.argmax(img_out_softmax, axis=1)
            print("prediction_labels:", prediction_labels)
            plt.show()

recognize("D:/AutoSparePart/ToFinall_Data/0/crop_or_pad010.jpg", "./output/autosparepart.pb")
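If the tensor names are ever in doubt, the node names of the frozen graph can be listed directly; a minimal sketch:

import tensorflow as tf

graph_def = tf.GraphDef()
with open("./output/autosparepart.pb", "rb") as f:
    graph_def.ParseFromString(f.read())
for node in graph_def.node:
    print(node.name)   # look for "input", "softmax" and "output"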
Test results:
A part with a defect:
A part without a defect: