Freezing a trained TensorFlow model (baking the weights into the graph as constants) and using it for prediction (the tf.graph_util.convert_variables_to_constants function)
阿新 · Published: 2019-02-17
We often need to save a TensorFlow model as a .pb file, and the tf.graph_util.convert_variables_to_constants function makes this very convenient: it replaces every Variable in the graph with a Const node holding its trained value, so the resulting GraphDef is self-contained and can be run without a checkpoint.
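Before walking through the full example, here is a minimal sketch of the freeze pattern itself, assuming TensorFlow 1.x; the toy graph, the tensor names 'x'/'y', and the output path tiny_frozen.pb are made up purely for illustration:

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[None, 2], name='x')
    w = tf.Variable(tf.ones([2, 1]), name='w')
    y = tf.matmul(x, w, name='y')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Replace every Variable reachable from 'y' with a Const node
        # holding its current value; the result needs no checkpoint.
        frozen = tf.graph_util.convert_variables_to_constants(
            sess, graph.as_graph_def(), ['y'])
        with tf.gfile.GFile('tiny_frozen.pb', 'wb') as f:
            f.write(frozen.SerializeToString())

The full MNIST example below follows exactly this pattern: train and checkpoint a network, rebuild the graph, restore the weights, freeze, and finally run predictions from the .pb file alone.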
1. Training the network: fully_conected.py
import argparse
import os
import time
import tensorflow as tf
import datasets_mnist
# Basic model parameters as external flags.
FLAGS = None
NUM_CLASSES = 10
# The MNIST images are always 28x28 pixels.
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
def placeholder_inputs(batch_size):
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,IMAGE_PIXELS))
    labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size,))
return images_placeholder, labels_placeholder
def fill_feed_dict(data_set, images_pl, labels_pl):
images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,FLAGS.fake_data)
feed_dict_value = {
images_pl: images_feed,
labels_pl: labels_feed,
}
return feed_dict_value
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME' )
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def inference(images):
"""Build the MNIST model up to where it may be used for inference.
Args:
images: Images placeholder, from inputs().
hidden1_units: Size of the first hidden layer.
hidden2_units: Size of the second hidden layer.
Returns:
softmax_linear: Output tensor with the computed logits.
"""
W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32],stddev=0.1))
    # note: tf.Variable's second positional arg is 'trainable', not a shape,
    # so tf.Variable(0.0, [32]) would create a scalar bias
    b_conv1 = tf.Variable(tf.constant(0.0, shape=[32]))
x_image = tf.reshape(images, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64],stddev=0.1))
    b_conv2 = tf.Variable(tf.constant(0.0, shape=[64]))
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024],stddev=0.1))
    b_fc1 = tf.Variable(tf.constant(0.0, shape=[1024]))
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # keep_prob = tf.placeholder("float")
    # NOTE: keep_prob is hard-coded below, so dropout also fires at inference
    # time (including in the frozen graph), which lowers test accuracy;
    # feeding it through the placeholder above would be cleaner.
    h_fc1_drop = tf.nn.dropout(h_fc1, 0.5)
W_fc2 = tf.Variable(tf.truncated_normal([1024, 10],stddev=0.1))
    b_fc2 = tf.Variable(tf.constant(0.0, shape=[10]))
logits=tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return logits
def loss(logits, labels):
labels = tf.to_int64(labels)
# labels = tf.to_float(labels)
# labels= tf.one_hot(labels, 10)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=labels, name='xentropy')
# y_conv = tf.nn.softmax(logits)
# cross_entropy = -tf.reduce_sum(labels*tf.log(y_conv))
return tf.reduce_mean(cross_entropy, name='xentropy_mean')
def training(loss):
optimizer = tf.train.AdamOptimizer()
train_op = optimizer.minimize(loss)
return train_op
def evaluation(logits, labels):
labels1=tf.one_hot(labels,10)
correct = tf.nn.in_top_k(logits, labels, 1)
correct1 = tf.equal(tf.argmax(logits,1), tf.argmax(labels1,1))
return tf.reduce_mean(tf.cast(correct, tf.float32)) ,tf.reduce_mean(tf.cast(correct1, tf.float32))
def do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_set):
true_count = 0 # Counts the number of correct predictions.
steps_per_epoch = data_set.num_examples // FLAGS.batch_size
num_examples = steps_per_epoch * FLAGS.batch_size
for step in range(steps_per_epoch):
feed_dict_value = fill_feed_dict(data_set,
images_placeholder,
labels_placeholder)
true_count += sess.run(eval_correct, feed_dict=feed_dict_value)
precision = float(true_count) / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(num_examples, true_count, precision))
def run_training(logits,labels_placeholder):
if tf.gfile.Exists(FLAGS.log_dir):
tf.gfile.DeleteRecursively(FLAGS.log_dir)
tf.gfile.MakeDirs(FLAGS.log_dir)
loss_value= loss(logits, labels_placeholder)
tf.summary.scalar('loss', loss_value)
train_op = training(loss_value)
eval_correct,eval_correct1 = evaluation(logits, labels_placeholder)
tf.summary.scalar('precision', eval_correct)
summary = tf.summary.merge_all()
init = tf.global_variables_initializer()
sess = tf.Session()
summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
sess.run(init)
for step in range(FLAGS.max_steps):
start_time = time.time()
feed_dict_value = fill_feed_dict(train,
images_placeholder,
labels_placeholder)
        _, loss_value1, eval_correct_value, eval_correct_value1 = sess.run(
            [train_op, loss_value, eval_correct, eval_correct1],
            feed_dict=feed_dict_value)
duration = time.time() - start_time
if step % 100 == 0:
print('Step %d: loss = %.2f (%.3f sec),precision=%.3f,%.3f' % (step, loss_value1, duration,eval_correct_value,eval_correct_value1))
summary_str = sess.run(summary, feed_dict=feed_dict_value)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
saver.save(sess, checkpoint_file, global_step=step)
# Evaluate against the training set.
print('Training Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
train)
# Evaluate against the validation set.
print('Validation Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
validation)
# Evaluate against the test set.
print('Test Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
test)
def run_testing():
sess=tf.Session()
    saver.restore(sess, tf.train.latest_checkpoint('log'))  # checkpoints are saved under 'log', not 'ckpt'
feed_dict_value=fill_feed_dict(test,images_placeholder,labels_placeholder)
    accuracy_op, _ = evaluation(logits, labels_placeholder)
    accuracy_ = sess.run(accuracy_op, feed_dict=feed_dict_value)
    print('accuracy is %f' % accuracy_)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--max_steps',
type=int,
default=500,
help='Number of steps to run trainer.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--input_data_dir',
type=str,
default=os.path.join('datasets'),
help='Directory to put the input data.'
)
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join('log'),
help='Directory to put the log data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
    # NOTE: argparse's type=bool turns any non-empty string into True, so
    # '--train False' would still parse as True; both flags default to True.
    parser.add_argument('--train', type=bool, default=True)
    parser.add_argument('--test', type=bool, default=True)
FLAGS, unparsed = parser.parse_known_args()
checkpoint_file = os.path.join('log', 'model.ckpt')
    train, validation, test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
# Generate placeholders for the images and labels.
images_placeholder, labels_placeholder = placeholder_inputs(
FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = inference(images_placeholder)
saver = tf.train.Saver()
if FLAGS.train:
run_training(logits,labels_placeholder)
# exit('Training finished')
# run_testing()
2. Exporting the .pb file: export.py
import fully_conected as model
import tensorflow as tf
def export_graph(model_name):
graph = tf.Graph()
with graph.as_default():
input_image = tf.placeholder(tf.float32, shape=[None,28*28], name='inputdata')
logits = model.inference(input_image)
y_conv = tf.nn.softmax(logits,name='outputdata')
restore_saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
latest_ckpt = tf.train.latest_checkpoint('log')
restore_saver.restore(sess, latest_ckpt)
        # Replace every Variable reachable from 'outputdata' with a Const
        # node holding its restored value; the returned GraphDef needs no
        # checkpoint to run.
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess, graph.as_graph_def(), ['outputdata'])
# tf.train.write_graph(output_graph_def, 'log', model_name, as_text=False)
        with tf.gfile.GFile('log/' + model_name, "wb") as f:  # use the model_name argument instead of a hard-coded path
f.write(output_graph_def.SerializeToString())
export_graph('mnist.pb')
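If you are unsure which node names survived the freeze, a quick sanity check (a sketch, assuming the log/mnist.pb written above exists) is to parse the frozen GraphDef and print its nodes:

import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('log/mnist.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())
for node in graph_def.node:
    print(node.op, node.name)  # expect to see 'inputdata' and 'outputdata'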
3. Testing: test.py
from __future__ import absolute_import, unicode_literals
from datasets_mnist import read_data_sets
import tensorflow as tf
train, validation, test = read_data_sets("datasets/", one_hot=True)
with tf.Graph().as_default():
output_graph_def = tf.GraphDef()
output_graph_path = 'log/mnist.pb'
# sess.graph.add_to_collection("input", mnist.test.images)
with open(output_graph_path, "rb") as f:
output_graph_def.ParseFromString(f.read())
tf.import_graph_def(output_graph_def, name="")
with tf.Session() as sess:
        # A frozen graph contains no variables, so no initialization is needed.
input_x = sess.graph.get_tensor_by_name("inputdata:0")
output = sess.graph.get_tensor_by_name("outputdata:0")
y_conv_2 = sess.run(output,{input_x:test.images})
print( "y_conv_2", y_conv_2)
# Test trained model
#y__2 = tf.placeholder("float", [None, 10])
y__2 = test.labels
correct_prediction_2 = tf.equal(tf.argmax(y_conv_2, 1), tf.argmax(y__2, 1))
print ("correct_prediction_2", correct_prediction_2 )
accuracy_2 = tf.reduce_mean(tf.cast(correct_prediction_2, "float"))
print ("accuracy_2", accuracy_2)
print ("check accuracy %g" % accuracy_2.eval())
4. The data used here is the MNIST dataset; the loader code is datasets_mnist.py
import gzip
import os
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(f):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth].
Args:
f: A file object that can be passed into a gzip reader.
Returns:
data: A 4D uint8 numpy array [index, y, x, depth].
Raises:
ValueError: If the bytestream does not start with 2051.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST image file: %s' %
(magic, f.name))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
#
def extract_labels(f, one_hot=False, num_classes=10):
"""Extract the labels into a 1D uint8 numpy array [index].
Args:
f: A file object that can be passed into a gzip reader.
one_hot: Does one hot encoding for the result.
num_classes: Number of classes for the one hot encoding.
Returns:
labels: a 1D uint8 numpy array.
Raises:
    ValueError: If the bytestream doesn't start with 2049.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST label file: %s' %
(magic, f.name))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels, num_classes)
return labels
#
class DataSet(object):
def __init__(self,
images,
labels,
fake_data=False,
one_hot=False,
dtype=dtypes.float32,
reshape=True,
seed=None):
"""Construct a DataSet.
one_hot arg is used only if fake_data is true. `dtype` can be either
`uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
`[0, 1]`. Seed arg provides for convenient deterministic testing.
"""
seed1, seed2 = random_seed.get_seed(seed)
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seed1 if seed is None else seed2)
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
dtype)
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False, shuffle=True):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1] * 784
if self.one_hot:
fake_label = [1] + [0] * 9
else:
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)
]
start = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
perm0 = numpy.arange(self._num_examples)
numpy.random.shuffle(perm0)
self._images = self.images[perm0]
self._labels = self.labels[perm0]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
rest_num_examples = self._num_examples - start
images_rest_part = self._images[start:self._num_examples]
labels_rest_part = self._labels[start:self._num_examples]
# Shuffle the data
if shuffle:
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self.images[perm]
self._labels = self.labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
images_new_part = self._images[start:end]
labels_new_part = self._labels[start:end]
return numpy.concatenate((images_rest_part, images_new_part), axis=0) , numpy.concatenate((labels_rest_part, labels_new_part), axis=0)
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir,
fake_data=False,
one_hot=False,
dtype=dtypes.float32,
reshape=True,
validation_size=5000,
seed=None):
if fake_data:
def fake():
return DataSet(
[], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
train = fake()
validation = fake()
test = fake()
return base.Datasets(train=train, validation=validation, test=test)
# TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
# TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
# TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
# TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
local_file = os.path.join('datasets','train-images-idx3-ubyte.gz')
with open(local_file, 'rb') as f:
train_images = extract_images(f)
local_file = os.path.join('datasets','train-labels-idx1-ubyte.gz')
with open(local_file, 'rb') as f:
train_labels = extract_labels(f, one_hot=one_hot)
local_file = os.path.join('datasets','t10k-images-idx3-ubyte.gz')
with open(local_file, 'rb') as f:
test_images = extract_images(f)
local_file =os.path.join('datasets','t10k-labels-idx1-ubyte.gz')
with open(local_file, 'rb') as f:
test_labels = extract_labels(f, one_hot=one_hot)
if not 0 <= validation_size <= len(train_images):
raise ValueError(
'Validation size should be between 0 and {}. Received: {}.'
.format(len(train_images), validation_size))
validation_images = train_images[:validation_size]
validation_labels = train_labels[:validation_size]
train_images = train_images[validation_size:]
train_labels = train_labels[validation_size:]
train = DataSet(
train_images, train_labels, dtype=dtype, reshape=reshape, seed=seed)
validation = DataSet(
validation_images,
validation_labels,
dtype=dtype,
reshape=reshape,
seed=seed)
test = DataSet(
test_images, test_labels, dtype=dtype, reshape=reshape, seed=seed)
return train,validation,test