
Finetuning a Pretrained Network with PyTorch

Continuing to get familiar with PyTorch, which really is easier to use than TensorFlow. The script below is a heavy rework of the official finetuning tutorial; the main goal was to get comfortable with PyTorch's built-in Dataset and DataLoader classes.
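Before the full script, here is a minimal sketch of the Dataset/DataLoader contract it builds on: a custom Dataset only has to implement __getitem__ and __len__, and DataLoader takes care of batching, shuffling and collation. (The PairDataset name and the toy data are illustrative, not part of the original script.)

import torch
from torch.utils.data import DataLoader, Dataset

class PairDataset(Dataset):
    """Wraps parallel lists of samples and labels."""
    def __init__(self, samples, labels):
        self.samples = samples
        self.labels = labels

    def __getitem__(self, index):
        # DataLoader calls this once per sample, then collates a batch
        return self.samples[index], self.labels[index]

    def __len__(self):
        return len(self.labels)

toy = PairDataset([torch.randn(3) for _ in range(8)], list(range(8)))
for xb, yb in DataLoader(toy, batch_size=4, shuffle=True):
    print(xb.shape, yb)  # torch.Size([4, 3]) and a LongTensor of 4 labels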

# -*- coding: utf-8 -*-

import os, torch, glob, time, copy
import numpy as np
from PIL import Image  
from torchvision import models, transforms
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torch.optim as optim
from torch.optim import lr_scheduler

train_data_dir = './hymenoptera_data/train'
test_data_dir = './hymenoptera_data/val'

def get_lists(data_dir):
    """Walk data_dir and return parallel lists of image paths and labels
    (0 for ants, 1 for bees)."""
    files_list = []
    labels_list = []
    sub_dirs = [x[0] for x in os.walk(data_dir)]
    sub_dirs = sub_dirs[1:]  # skip the root directory itself

    # note: on case-insensitive file systems the upper-case patterns
    # may match the same files twice
    extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']

    for sub_dir in sub_dirs:
        if os.path.basename(sub_dir) == 'ants':
            label = [0]
        elif os.path.basename(sub_dir) == 'bees':
            label = [1]
        else:
            continue  # ignore directories other than the two classes

        for extension in extensions:
            file_glob = os.path.join(sub_dir, '*.' + extension)
            new_files = glob.glob(file_glob)
            files_list.extend(new_files)
            labels_list = labels_list + label * len(new_files)
    return files_list, labels_list

class myDataset(Dataset):
    # A custom Dataset subclass overrides __init__, __getitem__ and __len__,
    # wrapping the file and label lists into a dataset: indexing returns one
    # (image, label) pair, and len() returns the dataset size.

    def __init__(self, files_list, labels_list, transform):
        self.files_list = files_list
        self.labels_list = labels_list
        self.transform = transform

    def __getitem__(self, index):
        # convert to RGB so grayscale/RGBA files don't break the 3-channel model
        img = Image.open(self.files_list[index]).convert('RGB')
        img = self.transform(img)
        return img, self.labels_list[index]

    def __len__(self):
        return len(self.labels_list)
    
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since1 = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        since2 = time.time()
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()   # set model to training mode
            else:
                model.eval()    # set model to evaluation mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:

                # move the batch to the GPU if one is available
                # (the old Variable wrapper is no longer needed)
                if use_gpu:
                    inputs = inputs.cuda()
                    labels = labels.cuda()

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward; track gradient history only in the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics (loss.item() replaces the old loss.data[0])
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels).item()

            if phase == 'train':
                scheduler.step()  # decay the LR once per epoch, after the optimizer updates

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                
        print('Epoch time: {:.1f}s'.format(time.time() - since2))


    time_elapsed = time.time() - since1
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model

if __name__ == '__main__':
    BATCH_SIZE = 4
    use_gpu = torch.cuda.is_available()

    # resize, crop and normalize with the ImageNet statistics that the
    # pretrained ResNet expects (transforms.Scale was renamed to Resize)
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    train_images, train_labels = get_lists(train_data_dir)
    test_images, test_labels = get_lists(test_data_dir)

    train_dataset = myDataset(train_images, train_labels, transform)
    test_dataset = myDataset(test_images, test_labels, transform)
    train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE,
                              shuffle=True, num_workers=0)
    test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE,
                             shuffle=False, num_workers=0)  # no need to shuffle validation data
    dataloaders = {}
    dataloaders['train'] = train_loader
    dataloaders['val'] = test_loader
    
    dataset_sizes = {}
    dataset_sizes['train'] = len(train_labels)
    dataset_sizes['val'] = len(test_labels)
    
    # load an ImageNet-pretrained ResNet-18 and replace its final fully
    # connected layer with a new 2-class head (ants vs. bees); newer
    # torchvision versions use the weights= argument instead of pretrained=True
    model_ft = models.resnet18(pretrained=True)
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, 2)
    
    if use_gpu:
        model_ft = model_ft.cuda()
    
    criterion = nn.CrossEntropyLoss()
    
    # Observe that all parameters are being optimized
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
    
    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
    model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                           num_epochs=25)
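For comparison, the same data pipeline can be built without get_lists and myDataset by using torchvision's ImageFolder, which infers the class labels from the subdirectory names. A minimal sketch, assuming the same hymenoptera_data layout as above:

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

# each subdirectory (ants/, bees/) becomes one class index
train_set = datasets.ImageFolder('./hymenoptera_data/train', transform)
val_set = datasets.ImageFolder('./hymenoptera_data/val', transform)
print(train_set.class_to_idx)  # {'ants': 0, 'bees': 1}

dataloaders = {
    'train': DataLoader(train_set, batch_size=4, shuffle=True, num_workers=0),
    'val': DataLoader(val_set, batch_size=4, shuffle=False, num_workers=0),
}
dataset_sizes = {'train': len(train_set), 'val': len(val_set)}

The official tutorial also shows a second variant that treats the network as a fixed feature extractor: freeze all pretrained weights and train only the new fc layer, which is often faster on a small dataset. A sketch (model_conv mirrors the tutorial's naming):

import torch.nn as nn
import torch.optim as optim
from torchvision import models

model_conv = models.resnet18(pretrained=True)
for param in model_conv.parameters():
    param.requires_grad = False  # freeze the pretrained backbone

# a freshly constructed layer has requires_grad=True by default
model_conv.fc = nn.Linear(model_conv.fc.in_features, 2)

# optimize only the parameters of the new head
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)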