
Transfer learning for cat vs. dog classification with VGG19 in TensorFlow

This is my first blog post, so please bear with the rough writing. I'm a college sophomore; feedback and private messages are welcome. I've been learning TensorFlow for a while now, and recently tried transfer learning with the VGG19 network. The VGG structure is as follows:

(figure: the VGG19 architecture, sixteen convolution layers in five blocks of 2, 2, 4, 4, and 4 layers, each block followed by 2×2 max pooling, then three fully connected layers)

First, download the .mat file with the pretrained VGG19 weights, then preprocess the data (I borrowed someone else's preprocessing code for this). The processed batches are fed straight into a VGG network that we unroll layer by layer in advance. Note that the convolution weights are created as tf.constant here: the pretrained weights are taken as-is and never updated, and only the final fully connected layers are replaced and trained. A quick look at the .mat layout first, then the full code.
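The deep indexing in the net() function below (weights[i][0][0][0][0]) comes from how MATLAB structs are unpacked by scipy.io.loadmat. A minimal inspection sketch, assuming the standard MatConvNet imagenet-vgg-verydeep-19.mat file (the path is just an example):

import scipy.io as scio
import numpy as np

data = scio.loadmat("D:\\imagenet-vgg-verydeep-19.mat")  # example path
print(data.keys())  # includes 'layers' (per-layer structs) and 'normalization'
mean = data['normalization'][0][0][0]             # mean image used during pretraining
print(np.mean(mean, axis=(0, 1)))                 # per-channel mean pixel
kernels, bias = data['layers'][0][0][0][0][0][0]  # weights of the first conv layer
print(kernels.shape, bias.shape)                  # roughly (3, 3, 3, 64) and (1, 64)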

# Transfer learning: cat vs. dog classification
import os
import sys
import numpy as np
import scipy.io as scio
import tensorflow as tf

def get_files(file_dir):
    cats = []
    label_cats = []
    dogs = []
    label_dogs = []
    for file in os.listdir(file_dir):
        name = file.split(sep='.')
        if 'cat' in name[0]:
            cats.append(file_dir + "\\" + file)
            label_cats.append(0)
        elif 'dog' in name[0]:
            dogs.append(file_dir + "\\" + file)
            label_dogs.append(1)
    image_list = np.hstack((cats, dogs))
    label_list = np.hstack((label_cats, label_dogs))
    # print('There are %d cats\nThere are %d dogs' % (len(cats), len(dogs)))
    # Put paths and labels into one temp array, shuffle it, then split them back out
    temp = np.array([image_list, label_list])
    temp = temp.transpose()
    np.random.shuffle(temp)
    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(i) for i in label_list]
    return image_list, label_list

# Quick test for get_files:
# imgs, label = get_files('/Users/yangyibo/GitWork/pythonLean/AI/貓狗識別/testImg/')
# for i in imgs: print("img:", i)
# for i in label: print('label:', i)

# image_W, image_H: target image size; batch_size: images per batch;
# capacity: maximum number of elements held in the queue
def get_batch(image, label, image_W, image_H, batch_size, capacity):
    # Convert to types TensorFlow can work with
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)
    # Put images and labels into an input queue
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    # Read the raw file bytes
    image_contents = tf.read_file(input_queue[0])
    # Decode the JPEG; channels=3 for color images (1 would be grayscale)
    image = tf.image.decode_jpeg(image_contents, channels=3)
    # Crop or pad around the center to exactly image_W x image_H
    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    # Standardize each image: subtract its mean, divide by its standard deviation
    image = tf.image.per_image_standardization(image)
    # Assemble batches; tune num_threads to your machine. The file list was
    # already shuffled in get_files, so plain tf.train.batch is enough here
    # (tf.train.shuffle_batch would shuffle again).
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,
                                              capacity=capacity)
    label_batch = tf.reshape(label_batch, [batch_size])
    image_batch = tf.cast(image_batch, tf.float32)
    return image_batch, label_batch

def _conv_layer(input, weights, bias):
    # The pretrained kernels are wrapped in tf.constant, so they are never trained
    conv = tf.nn.conv2d(input, tf.constant(weights), strides=[1, 1, 1, 1], padding="SAME")
    return tf.nn.bias_add(conv, bias)

def _pool_layer(input):
    return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding="SAME")

def net(data_path, input_image):
    layers = ('conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
              'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
              'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
              'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
              'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4')
    data = scio.loadmat(data_path)
    mean = data['normalization'][0][0][0]
    # mean_pixel is returned for completeness; this script standardizes per
    # image in get_batch instead of subtracting the dataset mean
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = data['layers'][0]
    net = {}
    current = input_image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # MatConvNet stores kernels as [width, height, in, out]; swap to [height, width, in, out]
            kernels = np.transpose(kernels, [1, 0, 2, 3])
            bias = bias.reshape(-1)
            current = _conv_layer(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = _pool_layer(current)
        net[name] = current
    assert len(net) == len(layers)
    return net, mean_pixel, layers

VGG_PATH = "D:\\imagenet-vgg-verydeep-19.mat"
train_dir = 'E:\\BaiduNetdiskDownload\\Dogs vs Cats Redux Kernels Edition\\aaa'  # My dir--20170727-csq
# Collect image paths and labels
train, train_label = get_files(train_dir)
# Build batches: 224x224 images, 32 per batch, queue capacity 256
train_batch, train_label_batch = get_batch(train, train_label, 224, 224, 32, 256)
# Run the batches through the pretrained convolutional stack
nets, mean_pixel, all_layers = net(VGG_PATH, train_batch)

# New, trainable fully connected head; relu5_4 is 14x14x512 for 224x224 input
with tf.variable_scope("dense1"):
    image = tf.reshape(nets["relu5_4"], [32, -1])
    weights = tf.Variable(tf.random_normal(shape=[14 * 14 * 512, 1024], stddev=0.1))
    bias = tf.Variable(tf.zeros(shape=[1024]) + 0.1)
    dense1 = tf.nn.tanh(tf.matmul(image, weights) + bias)
with tf.variable_scope("out"):
    weights = tf.Variable(tf.random_normal(shape=[1024, 2], stddev=0.1))
    bias = tf.Variable(tf.zeros(shape=[2]) + 0.1)
    out = tf.matmul(dense1, weights) + bias

loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=out, labels=train_label_batch))
op = tf.train.AdamOptimizer(0.0001).minimize(loss)
correct = tf.nn.in_top_k(out, train_label_batch, 1)
correct = tf.cast(correct, tf.float16)
accuracy = tf.reduce_mean(correct)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Start the queue-runner threads that feed the input pipeline
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in np.arange(100):
            if coord.should_stop():
                print("stopping")
                sys.exit(0)
            _, tra_loss, tra_acc = sess.run([op, loss, accuracy])
            if step % 1 == 0:
                print("step", step, "loss", tra_loss, "acc", tra_acc)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
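Because the convolutional kernels enter the graph as tf.constant, the optimizer never sees them; only the two new fully connected layers are trained. A quick sanity check, a sketch to run right after the graph above is built:

# Only the dense1/ and out/ variables should be listed; the VGG
# convolution weights are constants and contribute no gradients.
for v in tf.trainable_variables():
    print(v.name, v.shape)
# Expected output, roughly:
#   dense1/Variable:0 (100352, 1024)
#   dense1/Variable_1:0 (1024,)
#   out/Variable:0 (1024, 2)
#   out/Variable_1:0 (2,)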