Dogs vs. Cats with Transfer Learning on a Pretrained VGG Model
阿新 • Published 2020-08-01
import numpy as np
import matplotlib.pyplot as plt
import os
import torch
import torch.nn as nn
import torchvision
from torchvision import models, transforms, datasets
import time
import json

# Check whether a GPU device is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Using gpu: %s ' % torch.cuda.is_available())

# 1. Download the data
#! wget http://fenggao-image.stor.sinaapp.com/dogscats.zip
#! unzip dogscats.zip

# 2. Data preprocessing
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
vgg_format = transforms.Compose([
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

data_dir = './dogscats'

dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format)
         for x in ['train', 'valid']}
dset_sizes = {x: len(dsets[x]) for x in ['train', 'valid']}
dset_classes = dsets['train'].classes

# The code below inspects a few attributes of dsets
print(dsets['train'].classes)
print(dsets['train'].class_to_idx)
print(dsets['train'].imgs[:5])
print('dset_sizes: ', dset_sizes)

loader_train = torch.utils.data.DataLoader(dsets['train'], batch_size=64, shuffle=True, num_workers=6)
loader_valid = torch.utils.data.DataLoader(dsets['valid'], batch_size=5, shuffle=False, num_workers=6)

# The valid set contains 2000 images and each batch holds 5, so the loop below counts
# up to 400. The first batch is also kept in inputs_try / labels_try for inspection.
count = 1
for data in loader_valid:
    print(count, end='\n')
    if count == 1:
        inputs_try, labels_try = data
    count += 1

print(labels_try)
print(inputs_try.shape)

# Small helper for displaying a tensor as an image
def imshow(inp, title=None):
    # Imshow for Tensor.
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = np.clip(std * inp + mean, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated

# Show the 5 images of labels_try, i.e. the first batch of the valid set
out = torchvision.utils.make_grid(inputs_try)
imshow(out, title=[dset_classes[x] for x in labels_try])

# 3. Build the VGG model
!wget https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json

model_vgg = models.vgg16(pretrained=True)

with open('./imagenet_class_index.json') as f:
    class_dict = json.load(f)
dic_imagenet = [class_dict[str(i)][1] for i in range(len(class_dict))]

inputs_try, labels_try = inputs_try.to(device), labels_try.to(device)
model_vgg = model_vgg.to(device)

outputs_try = model_vgg(inputs_try)
print(outputs_try)
print(outputs_try.shape)

# To turn the raw VGG outputs into per-class prediction probabilities,
# feed them through a Softmax
m_softm = nn.Softmax(dim=1)
probs = m_softm(outputs_try)
vals_try, pred_try = torch.max(probs, dim=1)

print('prob sum: ', torch.sum(probs, 1))
print('vals_try: ', vals_try)
print('pred_try: ', pred_try)

print([dic_imagenet[i] for i in pred_try.data])

imshow(torchvision.utils.make_grid(inputs_try.data.cpu()),
       title=[dset_classes[x] for x in labels_try.data.cpu()])

# 4. Replace the last layer
print(model_vgg)

model_vgg_new = model_vgg

# Freeze all pretrained weights, then swap the last classifier layer for a
# 2-class output followed by LogSoftmax
for param in model_vgg_new.parameters():
    param.requires_grad = False
model_vgg_new.classifier._modules['6'] = nn.Linear(4096, 2)
model_vgg_new.classifier._modules['7'] = torch.nn.LogSoftmax(dim=1)

model_vgg_new = model_vgg_new.to(device)

print(model_vgg_new.classifier)

# 5. Train and test
# Step 1: create the loss function and the optimizer
criterion = nn.NLLLoss()
# learning rate
lr = 0.001
# stochastic gradient descent over the parameters of the new output layer only
optimizer_vgg = torch.optim.SGD(model_vgg_new.classifier[6].parameters(), lr=lr)
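Before training, it is worth confirming that the freezing really leaves only the replaced output layer trainable. The following is a minimal sanity-check sketch (an illustrative addition, reusing the model_vgg_new and optimizer_vgg defined above):

# Sketch: verify that only the new classifier[6] layer will be updated.
trainable = [name for name, p in model_vgg_new.named_parameters() if p.requires_grad]
frozen = sum(p.numel() for p in model_vgg_new.parameters() if not p.requires_grad)
print('trainable tensors:', trainable)   # expected: classifier.6.weight, classifier.6.bias
print('frozen parameters:', frozen)
print('tensors given to the optimizer:',
      sum(len(group['params']) for group in optimizer_vgg.param_groups))  # expected: 2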
# Step 2: train the model
def train_model(model, dataloader, size, epochs=1, optimizer=None):
    model.train()
    for epoch in range(epochs):
        running_loss = 0.0
        running_corrects = 0
        count = 0
        for inputs, classes in dataloader:
            inputs = inputs.to(device)
            classes = classes.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, classes)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            _, preds = torch.max(outputs.data, 1)
            # statistics
            running_loss += loss.data.item()
            running_corrects += torch.sum(preds == classes.data)
            count += len(inputs)
            print('Training: No. ', count, ' process ... total: ', size)
        epoch_loss = running_loss / size
        epoch_acc = running_corrects.data.item() / size
        print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))

# Run the training
train_model(model_vgg_new, loader_train, size=dset_sizes['train'], epochs=1, optimizer=optimizer_vgg)

def test_model(model, dataloader, size):
    model.eval()
    predictions = np.zeros(size)
    all_classes = np.zeros(size)
    all_proba = np.zeros((size, 2))
    i = 0
    running_loss = 0.0
    running_corrects = 0
    for inputs, classes in dataloader:
        inputs = inputs.to(device)
        classes = classes.to(device)
        outputs = model(inputs)
        loss = criterion(outputs, classes)
        _, preds = torch.max(outputs.data, 1)
        # statistics
        running_loss += loss.data.item()
        running_corrects += torch.sum(preds == classes.data)
        predictions[i:i + len(classes)] = preds.to('cpu').numpy()
        all_classes[i:i + len(classes)] = classes.to('cpu').numpy()
        all_proba[i:i + len(classes), :] = outputs.data.to('cpu').numpy()
        i += len(classes)
        print('Testing: No. ', i, ' process ... total: ', size)
    epoch_loss = running_loss / size
    epoch_acc = running_corrects.data.item() / size
    print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
    return predictions, all_proba, all_classes

predictions, all_proba, all_classes = test_model(model_vgg_new, loader_valid, size=dset_sizes['valid'])

# 6. Visualization
# Number of images shown in one visualization
n_view = 8
correct = np.where(predictions == all_classes)[0]
from numpy.random import random, permutation
idx = permutation(correct)[:n_view]
print('random correct idx: ', idx)
loader_correct = torch.utils.data.DataLoader([dsets['valid'][x] for x in idx],
                                             batch_size=n_view, shuffle=True)

for data in loader_correct:
    inputs_cor, labels_cor = data

# Make a grid from batch
out = torchvision.utils.make_grid(inputs_cor)
imshow(out, title=[l.item() for l in labels_cor])
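Once training is done, any new image has to go through the same vgg_format preprocessing before a forward pass. The sketch below is an illustrative addition that reuses model_vgg_new, vgg_format, device and dset_classes from above; the image path is a placeholder, not a file from the dataset.

from PIL import Image

# Sketch: classify a single image with the fine-tuned model.
# 'my_photo.jpg' is a hypothetical path; the image should be RGB and at least
# 224x224 pixels, since vgg_format only center-crops without resizing.
img = Image.open('my_photo.jpg').convert('RGB')
x = vgg_format(img).unsqueeze(0).to(device)   # shape [1, 3, 224, 224]

model_vgg_new.eval()
with torch.no_grad():
    log_probs = model_vgg_new(x)              # LogSoftmax output, shape [1, 2]

pred = log_probs.argmax(dim=1).item()
print('predicted:', dset_classes[pred],
      '(probability %.3f)' % log_probs.exp()[0, pred].item())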