
AI Art Appreciation Challenge - Guess the Painter: a Code Walkthrough

AI Art Appreciation Challenge

AI藝術鑑賞-3rd-solution, from 今天沒吃飯

Basic framework: train models based on ResNeXt50 and EfficientNet-B3 (eff-b3) at image sizes 448, 512 and 600, then take the 4 highest-scoring sets of results and vote.

  1. data_count.py

    Splits the dataset; classes with fewer than 30 images do not get a validation split.

    class_cnt maps each label to the indices of the images belonging to that class:

    class_cnt:
        label -> [idx, idx, ...]

    Files are read and written using with open(...) as f: blocks.
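
    A minimal sketch of what this split could look like (the file names train.csv / train_split.csv / val_split.csv, the column names filename / label, and the 10% hold-out ratio are assumptions, not taken from the original repo):

    import pandas as pd

    # hypothetical input: one row per training image, columns filename / label
    df = pd.read_csv('train.csv')

    train_idx, val_idx = [], []
    for label, group in df.groupby('label'):
        idxs = group.index.tolist()
        if len(idxs) < 30:
            # rare classes keep all their images for training; no validation split
            train_idx += idxs
        else:
            # hold out roughly 10% of each sufficiently large class
            n_val = max(1, len(idxs) // 10)
            val_idx += idxs[:n_val]
            train_idx += idxs[n_val:]

    df.loc[train_idx].to_csv('train_split.csv', index=False)
    df.loc[val_idx].to_csv('val_split.csv', index=False)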

  2. dataload.py

    size: 448 / 512 / 600

    torchvision is PyTorch's computer-vision library: it serves the PyTorch deep learning framework and is mainly used to build computer-vision models. torchvision.transforms covers common image transformations. torchvision consists of:

    1. torchvision.datasets: data-loading functions and interfaces to common datasets;
    2. torchvision.models: common model architectures (with pretrained weights), e.g. AlexNet, VGG, ResNet;
    3. torchvision.transforms: common image transforms such as cropping and rotation;
    4. torchvision.utils: other useful utilities.

    torchvision.transforms.Compose() chains multiple image transforms together; here it is used for data augmentation:

    trans = {
            'train':
                transforms.Compose([
                    # horizontally flip the given PIL image with probability 0.5
                    transforms.RandomHorizontalFlip(),
                    # resize so the size x size crop below covers 87.5% per side (the standard ImageNet eval ratio)
                    transforms.Resize((int(size / 0.875), int(size / 0.875))),
                    # crop at a random position
                    transforms.RandomCrop((size, size)),
                    # convert a PIL Image or numpy.ndarray to tensor
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                    # randomly select a rectangular region in the image and erase its pixels
                    transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3))
                ]),
            'val':
                transforms.Compose([
                    transforms.Resize((int(size / 0.875), int(size / 0.875))),
                    transforms.CenterCrop((size, size)),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                ])
            }
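
    train.py imports a Dataset class from this file that is not reproduced in the post. A minimal sketch of what it plausibly looks like, assuming the split CSVs from data_count.py and the trans dict above (file and column names are assumptions):

    from PIL import Image
    import pandas as pd
    import torch.utils.data as data

    class Dataset(data.Dataset):
        def __init__(self, mode='train'):
            # assumed: split files written by data_count.py with columns filename / label
            self.df = pd.read_csv('%s_split.csv' % mode)
            self.transform = trans[mode]  # the Compose pipelines defined above

        def __getitem__(self, index):
            row = self.df.iloc[index]
            img = Image.open(row['filename']).convert('RGB')
            return self.transform(img), row['label']

        def __len__(self):
            return len(self.df)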
    
  3. train.py

    Loads the data, trains the model, and records the best result.

    (At this point the code failed on Colab with RuntimeError: CUDA out of memory.)
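
    The usual workarounds are lowering --batch_size or the input size; if neither is acceptable, gradient accumulation keeps the effective batch size while using less memory. A minimal sketch reusing the names from train.py below (not part of the original code):

    accum_steps = 4  # effective batch size = batch_size * accum_steps

    optimizer.zero_grad()
    for idx, (data, labels) in enumerate(trainloader):
        data, labels = data.to(device), labels.long().to(device)
        out = model(data)
        # scale the loss so the accumulated gradient matches a full batch
        loss = criterion(out, labels) / accum_steps
        loss.backward()
        if (idx + 1) % accum_steps == 0:
            optimizer.step()
            optimizer.zero_grad()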

    from torch.utils.data import DataLoader
    from ArtModel import BaseModel
    import time
    import numpy as np
    import random
    from torch.optim import lr_scheduler
    from torch.backends import cudnn
    import argparse
    import os
    import torch
    import torch.nn as nn
    from dataload import Dataset
    
    # command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='resnext50', type=str)
    parser.add_argument('--savepath', default='./Art/', type=str)
    parser.add_argument('--loss', default='ce', type=str)
    parser.add_argument('--num_classes', default=49, type=int)
    parser.add_argument('--pool_type', default='avg', type=str)
    parser.add_argument('--metric', default='linear', type=str)
    parser.add_argument('--down', default=0, type=int)
    parser.add_argument('--lr', default=0.01, type=float)
    parser.add_argument('--weight_decay', default=5e-4, type=float)
    parser.add_argument('--momentum', default=0.9, type=float)
    parser.add_argument('--scheduler', default='cos', type=str)
    parser.add_argument('--resume', default=None, type=str)
    parser.add_argument('--lr_step', default=25, type=int)
    parser.add_argument('--lr_gamma', default=0.1, type=float)
    parser.add_argument('--total_epoch', default=60, type=int)
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--num_workers', default=8, type=int)
    parser.add_argument('--multi-gpus', default=0, type=int)
    parser.add_argument('--gpu', default=0, type=int)
    parser.add_argument('--seed', default=2020, type=int)
    parser.add_argument('--pretrained', default=1, type=int)
    parser.add_argument('--gray', default=0, type=int)
    
    args = parser.parse_args()
    
    
    def train():
        model.train()
    
        epoch_loss = 0
        correct = 0.
        total = 0.
        t1 = time.time()
        for idx, (data, labels) in enumerate(trainloader):
            data, labels = data.to(device), labels.long().to(device)
    
            # note: BaseModel.forward in ArtModel.py returns only the logits
            out = model(data)
           
            loss = criterion(out, labels)
            optimizer.zero_grad()
            
            loss.backward()
            optimizer.step()
    
            epoch_loss += loss.item() * data.size(0)
            total += data.size(0)
            _, pred = torch.max(out, 1)
            correct += pred.eq(labels).sum().item()
    
        acc = correct / total
        loss = epoch_loss / total
    
        print(f'loss:{loss:.4f} acc@1:{acc:.4f} time:{time.time() - t1:.2f}s', end=' --> ')
        
        with open(os.path.join(savepath, 'log.txt'), 'a+') as f:
            f.write('loss:{:.4f}, acc:{:.4f} ->'.format(loss, acc))
        
        return {'loss': loss, 'acc': acc}
    
    
    def test(epoch):
        model.eval()
    
        epoch_loss = 0
        correct = 0.
        total = 0.
        with torch.no_grad():
            for idx, (data, labels) in enumerate(valloader):
                data, labels = data.to(device), labels.long().to(device)
                
                out = model(data)
    
                loss = criterion(out, labels)
    
                epoch_loss += loss.item() * data.size(0)
                total += data.size(0)
                _, pred = torch.max(out, 1)
                correct += pred.eq(labels).sum().item()
    
            acc = correct / total
            loss = epoch_loss / total
    
            print(f'test loss:{loss:.4f} acc@1:{acc:.4f}', end=' ')
    
        global best_acc, best_epoch
    
        state = {
            'net': model.state_dict(),
            'acc': acc,
            'epoch': epoch
        }
    
        if acc > best_acc:
            best_acc = acc
            best_epoch = epoch
    
            torch.save(state, os.path.join(savepath, 'best.pth'))
            print('*')
        else:
            print()
    
        torch.save(state, os.path.join(savepath, 'last.pth'))
    
    
        with open(os.path.join(savepath, 'log.txt'), 'a+') as f:
            f.write('epoch:{}, loss:{:.4f}, acc:{:.4f}\n'.format(epoch, loss, acc))
    
        return {'loss': loss, 'acc': acc}
    
    
    def plot(d, mode='train', best_acc_=None):
        import matplotlib.pyplot as plt
        plt.figure(figsize=(10, 4))
        plt.suptitle('%s_curve' % mode)
        plt.subplots_adjust(wspace=0.2, hspace=0.2)
        epochs = len(d['acc'])
    
        plt.subplot(1, 2, 1)
        plt.plot(np.arange(epochs), d['loss'], label='loss')
        plt.xlabel('epoch')
        plt.ylabel('loss')
        plt.legend(loc='upper left')
    
        plt.subplot(1, 2, 2)
        plt.plot(np.arange(epochs), d['acc'], label='acc')
        if best_acc_ is not None:
            plt.scatter(best_acc_[0], best_acc_[1], c='r')
        plt.xlabel('epoch')
        plt.ylabel('acc')
        plt.legend(loc='upper left')
    
        plt.savefig(os.path.join(savepath, '%s.jpg' % mode), bbox_inches='tight')
        plt.close()
    
    
    if __name__ == '__main__':
        best_epoch = 0
        best_acc = 0.
        use_gpu = False
    
        if args.seed is not None:
            print('use random seed:', args.seed)
            torch.manual_seed(args.seed)
            torch.cuda.manual_seed(args.seed)
            torch.cuda.manual_seed_all(args.seed)
            np.random.seed(args.seed)
            random.seed(args.seed)
            # note: full reproducibility would also need cudnn.deterministic = True;
            # the original keeps it False (and enables cudnn.benchmark below) for speed
            cudnn.deterministic = False
    
        if torch.cuda.is_available():
            use_gpu = True
            cudnn.benchmark = True
    
        # cross-entropy loss
        criterion = nn.CrossEntropyLoss()
        # dataloader
        trainset = Dataset(mode='train')
        valset = Dataset(mode='val')
        
        trainloader = DataLoader(dataset=trainset, batch_size=args.batch_size, shuffle=True, \
                                 num_workers=args.num_workers, pin_memory=True, drop_last=True)
        
        valloader = DataLoader(dataset=valset, batch_size=128, shuffle=False, num_workers=args.num_workers, \
                               pin_memory=True)
    
        # model
        model = BaseModel(model_name=args.model_name, num_classes=args.num_classes, pretrained=args.pretrained, pool_type=args.pool_type, down=args.down, metric=args.metric)
        if args.resume:
            state = torch.load(args.resume)
            print('best_epoch:{}, best_acc:{}'.format(state['epoch'], state['acc']))
            model.load_state_dict(state['net'])
    
        if torch.cuda.device_count() > 1 and args.multi_gpus:
            print('use multi-gpus...')
            os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            torch.distributed.init_process_group(backend="nccl", init_method='tcp://localhost:23456', rank=0, world_size=1)
            model = model.to(device)
            model = nn.parallel.DistributedDataParallel(model)
        else:
            device = ('cuda:%d'%args.gpu if torch.cuda.is_available() else 'cpu')
            model = model.to(device)
        print('device:', device)
    
        # optimizer
        optimizer = torch.optim.SGD(
                [{'params': filter(lambda p: p.requires_grad, model.parameters()), 'lr': args.lr}],
                weight_decay=args.weight_decay, momentum=args.momentum)
    
        print('init_lr={}, weight_decay={}, momentum={}'.format(args.lr, args.weight_decay, args.momentum))
    
        if args.scheduler == 'step':
            # step decay: multiply the LR by lr_gamma every lr_step epochs
            scheduler = lr_scheduler.StepLR(optimizer, step_size=args.lr_step, gamma=args.lr_gamma, last_epoch=-1)
        elif args.scheduler == 'multi':
            scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[150, 225], gamma=args.lr_gamma, last_epoch=-1)
        elif args.scheduler == 'cos':
            # linear warm-up for the first 10 epochs, then cosine annealing towards 0
            warm_up_step = 10
            lambda_ = lambda epoch: (epoch + 1) / warm_up_step if epoch < warm_up_step else 0.5 * (
                        np.cos((epoch - warm_up_step) / (args.total_epoch - warm_up_step) * np.pi) + 1)
            scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda_)
        
        # savepath
        savepath = os.path.join(args.savepath, args.model_name+args.pool_type+args.metric+'_'+str(args.down))
    
        print('savepath:', savepath)
    
        if not os.path.exists(savepath):
            os.makedirs(savepath)
    
        with open(os.path.join(savepath, 'setting.txt'), 'w') as f:
            for k, v in vars(args).items():
                f.write('{}:{}\n'.format(k, v))
    
        # truncate/initialise the log file
        f = open(os.path.join(savepath, 'log.txt'), 'w')
        f.close()
    
        total = args.total_epoch
        start = time.time()
    
        train_info = {'loss': [], 'acc': []}
        test_info = {'loss': [], 'acc': []}
    
        for epoch in range(total):
            print('epoch[{:>3}/{:>3}]'.format(epoch, total), end=' ')
            d_train = train()
            scheduler.step()
            d_test = test(epoch)
    
            for k in train_info.keys():
                train_info[k].append(d_train[k])
                test_info[k].append(d_test[k])
    
            plot(train_info, mode='train')
            plot(test_info, mode='test', best_acc_=[best_epoch, best_acc])
    
        end = time.time()
        print('total time:{}m{:.2f}s'.format((end - start) // 60, (end - start) % 60))
        print('best_epoch:', best_epoch)
        print('best_acc:', best_acc)
        # record the best result
        with open(os.path.join(savepath, 'log.txt'), 'a+') as f:
            f.write('# best_acc:{:.4f}, best_epoch:{}'.format(best_acc, best_epoch))
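
    To see what the default cos schedule does, the LR multiplier can be evaluated standalone (a quick check with the default total_epoch=60):

    import numpy as np

    total_epoch, warm_up_step = 60, 10
    lambda_ = lambda epoch: (epoch + 1) / warm_up_step if epoch < warm_up_step else 0.5 * (
                np.cos((epoch - warm_up_step) / (total_epoch - warm_up_step) * np.pi) + 1)

    for epoch in [0, 5, 9, 10, 35, 59]:
        print(epoch, round(lambda_(epoch), 3))
    # 0 -> 0.1, 9 -> 1.0 (warm-up ends), 35 -> 0.5, 59 -> ~0.001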
    
    
  4. test.py

    Runs prediction and records the results.

    import torch
    from ArtModel import BaseModel
    import os
    import pandas as pd
    from PIL import Image
    from torchvision import transforms
    import numpy as np
    import argparse
    
    
    def get_setting(path):
        args = {}
        with open(os.path.join(path, 'setting.txt'), 'r') as f:
            for i in f.readlines():
                k, v = i.strip().split(':')
                args[k] = v
        return args
    
    
    def load_pretrained_model(path, model, mode='best'):
        print('load pretrained model...')
        state = torch.load(os.path.join(path, '%s.pth' % mode))
        print('best_epoch:{}, best_acc:{}'.format(state['epoch'], state['acc']))
        model.load_state_dict(state['net'])
    
    
    if __name__ == '__main__':
        mode = 'best'
        
        parser = argparse.ArgumentParser()
        parser.add_argument('--savepath', default='./Base224L2/eff-b3', type=str)
        parser.add_argument('--last', action='store_true')
        args = parser.parse_args()
        
        path = args.savepath
        if args.last:
            mode = 'last'
        
        args = get_setting(path)
        # print(args)
    
        # model
        model = BaseModel(model_name=args['model_name'], num_classes=int(args['num_classes']), \
            pretrained=int(args['pretrained']), pool_type=args['pool_type'], down=int(args['down']), metric=args['metric'])
    
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # device = torch.device('cpu')
        model = model.to(device)
        # load the trained weights
        load_pretrained_model(path, model, mode=mode)
    
        size = 255
        trans = transforms.Compose([
            transforms.Resize((int(size / 0.875), int(size / 0.875))),
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop((size, size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
    
        submit = {'uuid': [], 'label': []}
        TTA_times = 7
    
        model.eval()
        with torch.no_grad():
            for i in range(0, 800):
                img_path = 'test/%d.jpg' % i
                raw_img = Image.open(img_path).convert('RGB')
                results = np.zeros(49)
    
                for j in range(TTA_times):
                    img = trans(raw_img)
                    img = img.unsqueeze(0).to(device)
                    out = model(img)
                    out = torch.softmax(out, dim=1)
                    _, pred = torch.max(out.cpu(), dim=1)
    
                    results[pred] += 1
                pred = np.argmax(results)
                print(i, ',', pred)
                submit['uuid'].append(i)
                submit['label'].append(pred)
    
        # save the results
        df = pd.DataFrame(submit)
        df.to_csv(os.path.join(path, 'result.csv'), encoding='utf-8', index=False, header=False)
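
    The loop above takes a hard vote over 7 random-crop/flip passes per image. A common alternative is to average the softmax probabilities instead of counting argmax votes; a sketch using the same names as above (not in the original):

    probs = np.zeros(49)
    for j in range(TTA_times):
        img = trans(raw_img).unsqueeze(0).to(device)
        out = torch.softmax(model(img), dim=1)
        probs += out.squeeze(0).cpu().numpy()
    pred = int(np.argmax(probs / TTA_times))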
    
  5. ArtModel.py

    Classification model: resnext50 / eff-b3.

    Builds the model skeleton.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    from torchvision import models
    import math
    import numpy as np
    from efficientnet_pytorch import EfficientNet
    import random
    
    
    class SELayer(nn.Module):
        def __init__(self, channel, reduction=16):
            super(SELayer, self).__init__()
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
            self.fc = nn.Sequential(
                nn.Linear(channel, channel // reduction, bias=False),
                nn.ReLU(inplace=True),
                nn.Linear(channel // reduction, channel, bias=False),
                nn.Sigmoid()
            )
    
        def forward(self, x):
            b, c, _, _ = x.size()
            y = self.avg_pool(x).view(b, c)
            y = self.fc(y).view(b, c, 1, 1)
            return y
    
    
    class AdaptiveConcatPool2d(nn.Module):
        def __init__(self, sz=(1,1)):
            super().__init__()
            self.ap = nn.AdaptiveAvgPool2d(sz)
            self.mp = nn.AdaptiveMaxPool2d(sz)
            
        def forward(self, x):
            return torch.cat([self.mp(x), self.ap(x)], 1)
    
    
    class GeneralizedMeanPooling(nn.Module):
        def __init__(self, norm=3, output_size=1, eps=1e-6):
            super().__init__()
            assert norm > 0
            self.p = float(norm)
            self.output_size = output_size
            self.eps = eps
    
        def forward(self, x):
            x = x.clamp(min=self.eps).pow(self.p)
            
            return torch.nn.functional.adaptive_avg_pool2d(x, self.output_size).pow(1. / self.p)
    
        def __repr__(self):
            return self.__class__.__name__ + '(' \
                   + str(self.p) + ', ' \
                   + 'output_size=' + str(self.output_size) + ')'
    
    
    # model skeleton
    class BaseModel(nn.Module):
        def __init__(self, model_name, num_classes=2, pretrained=True, pool_type='max', down=True, metric='linear'):
            super().__init__()
            self.model_name = model_name
            
            #eff-b3/resnext50
            if model_name == 'eff-b3':
                backbone = EfficientNet.from_pretrained('efficientnet-b3')
                plane = 1536
            elif model_name == 'resnext50':
                backbone = nn.Sequential(*list(models.resnext50_32x4d(pretrained=pretrained).children())[:-2])
                plane = 2048
            else:
                backbone = None
                plane = None
    
            self.backbone = backbone
            
            #pool
            if pool_type == 'avg':
                self.pool = nn.AdaptiveAvgPool2d((1, 1))
            elif pool_type == 'cat':
                self.pool = AdaptiveConcatPool2d()
                down = 1
            elif pool_type == 'max':
                self.pool = nn.AdaptiveMaxPool2d((1, 1))
            elif pool_type == 'gem':
                self.pool = GeneralizedMeanPooling()
            else:
                self.pool = None
            
            if down:
                if pool_type == 'cat':
                    self.down = nn.Sequential(
                        nn.Linear(plane * 2, plane),
                        nn.BatchNorm1d(plane),
                        nn.Dropout(0.2),
                        nn.ReLU(True)
                        )
                else:
                    self.down = nn.Sequential(
                        nn.Linear(plane, plane),
                        nn.BatchNorm1d(plane),
                        nn.Dropout(0.2),
                        nn.ReLU(True)
                    )
            else:
                self.down = nn.Identity()
            
            self.se = SELayer(plane)
            self.hidden = nn.Linear(plane, plane)
            self.relu = nn.ReLU(True)
            
            if metric == 'linear':
                self.metric = nn.Linear(plane, num_classes)
            elif metric == 'am':
                # AddMarginProduct (AM-Softmax head) is referenced but not defined or imported in this file
                self.metric = AddMarginProduct(plane, num_classes)
            else:
                self.metric = None
    
        def forward(self, x):
            if self.model_name == 'eff-b3':
                feat = self.backbone.extract_features(x)
            else:
                feat = self.backbone(x)
            
            feat = self.pool(feat)
            se = self.se(feat).view(feat.size(0), -1)
            feat_flat = feat.view(feat.size(0), -1)
            feat_flat = self.relu(self.hidden(feat_flat) * se)
    
            out = self.metric(feat_flat)
            return out
    
    
    if __name__ == '__main__':
        model = BaseModel(model_name='eff-b3').eval()
        x = torch.randn((1, 3, 224, 224))
        out = model(x)
        print(out.size())
        print(model)
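
    AddMarginProduct is never defined in the file. For reference, a minimal sketch of the usual AM-Softmax head it presumably refers to, adapted so it can also be called with features only (as self.metric(feat_flat) does); the margin is applied only when labels are passed:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class AddMarginProduct(nn.Module):
        """cos(theta) - m margin head (AM-Softmax); a sketch, not the author's code."""
        def __init__(self, in_features, out_features, s=30.0, m=0.35):
            super().__init__()
            self.s, self.m = s, m
            self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features))
            nn.init.xavier_uniform_(self.weight)

        def forward(self, x, label=None):
            # cosine similarity between L2-normalised features and class weights
            cosine = F.linear(F.normalize(x), F.normalize(self.weight))
            if label is None:
                # inference: plain scaled cosine logits
                return self.s * cosine
            one_hot = torch.zeros_like(cosine).scatter_(1, label.view(-1, 1), 1.0)
            return self.s * (cosine - one_hot * self.m)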
    
  6. vote.py

    Votes to produce the final result.
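
    The script itself is not shown; a minimal majority-vote sketch over the 4 result files (the file names are assumptions; note that test.py writes result.csv without a header):

    import numpy as np
    import pandas as pd

    # hypothetical per-model result files produced by test.py
    files = ['result_448.csv', 'result_512.csv', 'result_600_a.csv', 'result_600_b.csv']
    preds = np.stack([pd.read_csv(f, header=None, names=['uuid', 'label'])['label'].values
                      for f in files], axis=1)  # shape: (num_images, num_models)

    # majority vote per image; ties fall to the smallest label via bincount/argmax
    final = [np.bincount(row, minlength=49).argmax() for row in preds]

    pd.DataFrame({'uuid': range(len(final)), 'label': final}) \
      .to_csv('vote_result.csv', index=False, header=False)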

classification——art, from 000wangbo

Backbone: resnest200 with input size 448; take the 5 best sets of results under different losses and vote on them to get the final score. Best single model: 93.75.

This code appears to be incomplete (orz); I could not get it to run.

import os
import math
import copy
import shutil
import time
import random
import pickle
import pandas as pd
import numpy as np
from PIL import Image
from tqdm import tqdm
from collections import OrderedDict, namedtuple
from sklearn.metrics import roc_auc_score, average_precision_score
#import se_resnext101_32x4d
from pretrainedmodels import se_resnext101_32x4d
from efficientnet_pytorch import EfficientNet
# the data_augmentation module is not included with the code
from data_augmentation import FixedRotation
from inceptionv4 import inceptionv4
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data as data
import torchvision.datasets as datasets
import torchvision.models as models
from torchvision.models import resnet101,resnet50,resnet152,resnet34
import torchvision.transforms as transforms
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
from resnest.torch import resnest200,resnest269,resnest101
from torch.utils.data import Dataset, DataLoader
import warnings
warnings.filterwarnings("ignore")
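
Since the data_augmentation module is missing, FixedRotation presumably rotates the image by an angle picked at random from the fixed list passed to it. A plausible minimal implementation (an assumption, not the original code):

import random
import torchvision.transforms.functional as TF

class FixedRotation(object):
    """Rotate a PIL image by an angle chosen uniformly from a fixed list (sketch)."""
    def __init__(self, angles):
        self.angles = angles

    def __call__(self, img):
        return TF.rotate(img, random.choice(self.angles))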

# reference: https://www.cnblogs.com/chentianwei/p/11912463.html
def main(index):
    np.random.seed(359)
    torch.manual_seed(359)
    torch.cuda.manual_seed_all(359)
    random.seed(359)

    os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2,3,4,5,6,7'
    batch_size = 48
    workers = 16

    # stage_epochs = [8, 8, 8, 6, 5, 4, 3, 2]
    # stage_epochs = [12, 6, 5, 3, 4]
    lr = 5e-4
    lr_decay = 10
    weight_decay = 1e-4

    # hyperparameters
    stage = 0
    start_epoch = 0
    # total_epochs = sum(stage_epochs)
    total_epochs = 200
    patience = 4
    no_improved_times = 0
    total_stages = 3
    best_score = 0
    samples_num = 54

    print_freq = 20
    train_ratio = 0.9  # others for validation
    momentum = 0.9
    pre_model = 'senet'
    pre_trained = True
    evaluate = False
    use_pre_model = False
    # file_name = os.path.basename(__file__).split('.')[0]

    file_name = "resnest200_448_all_{}".format(index)
    img_size = 448

    resumeflg = False
    resume = ''

    # create folders for saving the model and results
    if not os.path.exists('./model/%s' % file_name):
        os.makedirs('./model/%s' % file_name)
    if not os.path.exists('./result/%s' % file_name):
        os.makedirs('./result/%s' % file_name)

    if not os.path.exists('./result/%s.txt' % file_name):
        txt_mode = 'w'
    else:
        txt_mode = 'a'
    with open('./result/%s.txt' % file_name, txt_mode) as acc_file:
        acc_file.write('\n%s %s\n' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())), file_name))

    # build a model
    model = resnest200(pretrained=True)
    model.avgpool = torch.nn.AdaptiveAvgPool2d(output_size=1)
    model.fc = torch.nn.Linear(model.fc.in_features,49)
    # model = se_resnext101_32x4d.se_resnext101(num_classes=3)
    # model = EfficientNet.from_pretrained('efficientnet-b4',num_classes=2)
    # model = inceptionv4(pretrained='imagenet')
    # model.last_linear  = torch.nn.Linear(model.last_linear.in_features,2)
    model = torch.nn.DataParallel(model).cuda()

    def load_pre_cloth_model_dict(self, state_dict):
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                continue
            if 'fc' in name:
                continue
            if isinstance(param, nn.Parameter):
                # backwards compatibility for serialized parameters
                param = param.data
            own_state[name].copy_(param)

    if use_pre_model:
        print('using pre model')
        pre_model_path = ''
        load_pre_cloth_model_dict(model, torch.load(pre_model_path)['state_dict'])

    # optionally resume from a checkpoint
    if resume:
        if os.path.isfile(resume):
            print("=> loading checkpoint '{}'".format(resume))
            checkpoint = torch.load(resume)
            start_epoch = checkpoint['epoch']
            best_score = checkpoint['best_score']
            stage = checkpoint['stage']
            lr = checkpoint['lr']
            model.load_state_dict(checkpoint['state_dict'])
            no_improved_times = checkpoint['no_improved_times']
            if no_improved_times == 0:
                model.load_state_dict(torch.load('./model/%s/model_best.pth.tar' % file_name)['state_dict'])
            print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(resume))

    def default_loader(root_dir,path):
        final_path = os.path.join(root_dir,str(path))
        return Image.open(final_path+".jpg").convert('RGB')
        # return Image.open(path)

    class TrainDataset(Dataset):
        def __init__(self, label_list, transform=None, target_transform=None, loader=default_loader):
            imgs = []
            for index, row in label_list.iterrows():
                imgs.append((row['filename'], row['label']))
            self.imgs = imgs
            self.transform = transform
            self.target_transform = target_transform
            self.loader = loader

        def __getitem__(self, index):
            filename, label = self.imgs[index]
            img = self.loader('../train/', filename)

            if self.transform is not None:
                img = self.transform(img)

            return img, label

        def __len__(self):
            return len(self.imgs)

    class ValDataset(Dataset):
        def __init__(self, label_list, transform=None, target_transform=None, loader=default_loader):
            imgs = []
            for index, row in label_list.iterrows():
                imgs.append((row['filename'], row['label']))
            self.imgs = imgs
            self.transform = transform
            self.target_transform = target_transform
            self.loader = loader

        def __getitem__(self, index):
            filename, label = self.imgs[index]
            img = self.loader('../train/', filename)
            if self.transform is not None:
                img = self.transform(img)
            return img, label, filename

        def __len__(self):
            return len(self.imgs)

    class TestDataset(Dataset):
        def __init__(self, label_list, transform=None, target_transform=None, loader=default_loader):
            imgs = []
            for index, row in label_list.iterrows():
                imgs.append((row['filename'], row['label']))
            self.imgs = imgs
            self.transform = transform
            self.target_transform = target_transform
            self.loader = loader

        def __getitem__(self, index):
            filename, label = self.imgs[index]
            img = self.loader('../test/',filename)
            if self.transform is not None:
                img = self.transform(img)
            return img, filename

        def __len__(self):
            return len(self.imgs)

    train_data_list = pd.read_csv("data/train_{}.csv".format(index), sep=",")
    val_data_list = pd.read_csv("data/test_{}.csv".format(index), sep=",")
    test_data_list = pd.read_csv("../test.csv",sep=",")

    train_data_list = train_data_list.fillna(0)

    # candidate random-crop sizes for normal training samples (defined here but not used below)
    random_crop = [transforms.RandomCrop(640), transforms.RandomCrop(768), transforms.RandomCrop(896)]



    smax = nn.Softmax(dim=1)  # softmax over classes (the original omitted dim, which is deprecated)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    # data preprocessing
    train_data = TrainDataset(train_data_list,
                              transform=transforms.Compose([
                                  transforms.Resize((img_size, img_size)),
                                  transforms.ColorJitter(0.3, 0.3, 0.3, 0.15),
                                  # transforms.RandomRotation(30),
                                  transforms.RandomHorizontalFlip(),
#                                   transforms.RandomVerticalFlip(),
#                                   transforms.RandomGrayscale(),
                                  FixedRotation([-16,-14,-12,-10,-8,-6,-4,-2,0,2,4,6,8,10,12,14,16]),
                                  transforms.ToTensor(),
                                  normalize,
                              ]))

    val_data = ValDataset(val_data_list,
                          transform=transforms.Compose([
                              transforms.Resize((img_size, img_size)),
                              # transforms.CenterCrop((500, 500)),
                              transforms.ToTensor(),
                              normalize,
                          ]))

    test_data = TestDataset(test_data_list,
                            transform=transforms.Compose([
                                transforms.Resize((img_size, img_size)),
                                # transforms.CenterCrop((500, 500)),
                                transforms.ToTensor(),
                                normalize,
                                # transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
                                # transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])),
                            ]))

    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=workers,drop_last=True)
    val_loader = DataLoader(val_data, batch_size=batch_size * 2, shuffle=False, pin_memory=False, num_workers=workers,drop_last=True)
    test_loader = DataLoader(test_data, batch_size=batch_size * 2, shuffle=False, pin_memory=False, num_workers=workers)

    test_data_hflip = TestDataset(test_data_list,
                            transform=transforms.Compose([
                                transforms.Resize((img_size, img_size)),
                                transforms.RandomHorizontalFlip(p=1.0),  # always flip (original used p=2)
                                # transforms.CenterCrop((500, 500)),
                                transforms.ToTensor(),
                                normalize,
                            ]))


    test_loader_hflip = DataLoader(test_data_hflip, batch_size=batch_size * 2, shuffle=False, pin_memory=False, num_workers=workers)

    test_data_vflip = TestDataset(test_data_list,
                                  transform=transforms.Compose([
                                      transforms.Resize((336, 336)),
                                      transforms.RandomVerticalFlip(p=1.0),  # always flip (original used p=2)
                                      # transforms.CenterCrop((500, 500)),
                                      transforms.ToTensor(),
                                      normalize,
                                  ]))

    test_loader_vflip = DataLoader(test_data_vflip, batch_size=batch_size * 2, shuffle=False, pin_memory=False,
                                   num_workers=workers)

    test_data_vhflip = TestDataset(test_data_list,
                                  transform=transforms.Compose([
                                      transforms.Resize((336, 336)),
                                      transforms.RandomHorizontalFlip(p=1.0),  # always flip
                                      transforms.RandomVerticalFlip(p=1.0),    # always flip
                                      # transforms.CenterCrop((500, 500)),
                                      transforms.ToTensor(),
                                      normalize,
                                  ]))

    test_loader_vhflip = DataLoader(test_data_vhflip, batch_size=batch_size * 2, shuffle=False, pin_memory=False,
                                   num_workers=workers)


    #train
    def train(train_loader, model, criterion, optimizer, epoch):
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        acc = AverageMeter()

        # switch to train mode
        model.train()

        end = time.time()
        for i, (images, target) in enumerate(train_loader):
            # measure data loading
            # if len(target) % workers == 1:
            #     images = images[:-1]
            #     target = target[:-1]

            data_time.update(time.time() - end)
            image_var = torch.tensor(images, requires_grad=False).cuda(non_blocking=True)
            # print(image_var)
            label = torch.tensor(target).cuda(non_blocking=True)
            # compute y_pred
            y_pred = model(image_var)
            loss = criterion(y_pred, label)

            # measure accuracy and record loss
            prec, PRED_COUNT = accuracy(y_pred.data, target, topk=(1, 1))
            losses.update(loss.item(), images.size(0))
            acc.update(prec, PRED_COUNT)

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                    epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, acc=acc))

    def validate(val_loader, model, criterion):
        batch_time = AverageMeter()
        # losses = AverageMeter()
        # acc = AverageMeter()

        # switch to evaluate mode
        model.eval()

        # store the softmax probabilities for evaluation
        val_imgs, val_preds, val_labels, = [], [], []

        end = time.time()
        for i, (images, labels, img_path) in enumerate(val_loader):
            # if len(labels) % workers == 1:
            #     images = images[:-1]
            #     labels = labels[:-1]
            image_var = torch.tensor(images, requires_grad=False).cuda(non_blocking=True)  # for pytorch 0.4
            # label_var = torch.tensor(labels, requires_grad=False).cuda(async=True)  # for pytorch 0.4
            target = torch.tensor(labels).cuda(non_blocking=True)

            # compute y_pred
            with torch.no_grad():
                y_pred = model(image_var)
                loss = criterion(y_pred, target)

            # measure accuracy and record loss
            # prec, PRED_COUNT = accuracy(y_pred.data, labels, topk=(1, 1))
            # losses.update(loss.item(), images.size(0))
            # acc.update(prec, PRED_COUNT)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % (print_freq * 5) == 0:
                print('TrainVal: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(i, len(val_loader),
                                                                                  batch_time=batch_time))

            # store the softmax probabilities for evaluation
            smax_out = smax(y_pred)
            val_imgs.extend(img_path)
            val_preds.extend([i.tolist() for i in smax_out])
            val_labels.extend([i.item() for i in labels])
        val_preds = [';'.join([str(j) for j in i]) for i in val_preds]
        val_score = pd.DataFrame({'img_path': val_imgs, 'preds': val_preds, 'label': val_labels,})
        val_score.to_csv('./result/%s/val_score.csv' % file_name, index=False)
        acc, f1  = score(val_score)
        print('acc: %.4f, f1: %.4f' % (acc, f1))
        print(' * Score {final_score:.4f}'.format(final_score=f1), '(Previous Best Score: %.4f)' % best_score)
        return acc, f1

    def test(test_loader, model):
        csv_map = OrderedDict({'FileName': [], 'type': [], 'probability': []})
        # switch to evaluate mode
        model.eval()
        for i, (images, filepath) in enumerate(tqdm(test_loader)):
            # bs, ncrops, c, h, w = images.size()

            filepath = [str(i) for i in filepath]
            image_var = torch.tensor(images, requires_grad=False)  # for pytorch 0.4

            with torch.no_grad():
                y_pred = model(image_var)  # fuse batch size and ncrops
                # y_pred = y_pred.view(bs, ncrops, -1).mean(1) # avg over crops

                # get the index of the max log-probability
                smax = nn.Softmax()
                smax_out = smax(y_pred)
            csv_map['FileName'].extend(filepath)
            for output in smax_out:
                prob = ';'.join([str(i) for i in output.data.tolist()])
                csv_map['probability'].append(prob)
                csv_map['type'].append(np.argmax(output.data.tolist()))
            # print(len(csv_map['filename']), len(csv_map['probability']))

        result = pd.DataFrame(csv_map)
        result.to_csv('./result/%s/submission.csv' % file_name, index=False)
        result[['FileName','type']].to_csv('./result/%s/final_submission.csv' % file_name, index=False)
        return

    def save_checkpoint(state, is_best, filename='./model/%s/checkpoint.pth.tar' % file_name):
        torch.save(state, filename)
        if is_best:
            shutil.copyfile(filename, './model/%s/model_best.pth.tar' % file_name)

    class AverageMeter(object):
        """Computes and stores the average and current value"""

        def __init__(self):
            self.reset()

        def reset(self):
            self.val = 0
            self.avg = 0
            self.sum = 0
            self.count = 0

        def update(self, val, n=1):
            self.val = val
            self.sum += val * n
            self.count += n
            self.avg = self.sum / self.count

    def adjust_learning_rate():
        # divide the learning rate by lr_decay and rebuild the Adam optimizer
        nonlocal lr
        lr = lr / lr_decay
        return optim.Adam(model.parameters(), lr, weight_decay=weight_decay, amsgrad=True)

    def accuracy(y_pred, y_actual, topk=(1,)):
        """Computes the precision@k for the specified values of k"""
        final_acc = 0
        maxk = max(topk)
        # for prob_threshold in np.arange(0, 1, 0.01):
        PRED_COUNT = y_actual.size(0)
        PRED_CORRECT_COUNT = 0

        prob, pred = y_pred.topk(maxk, 1, True, True)
        # prob = np.where(prob > prob_threshold, prob, 0)


        for j in range(pred.size(0)):
            if int(y_actual[j]) == int(pred[j]):
                PRED_CORRECT_COUNT += 1
        if PRED_COUNT == 0:
            final_acc = 0
        else:
            final_acc = PRED_CORRECT_COUNT / PRED_COUNT
        return final_acc * 100, PRED_COUNT

    def softmax(x):
        return np.exp(x) / np.sum(np.exp(x), axis=0)

    def doitf(tp, fp, fn):
        if (tp + fp == 0):
            return 0
        if (tp + fn == 0):
            return 0
        pre = float(1.0 * float(tp) / float(tp + fp))
        rec = float(1.0 * float(tp) / float(tp + fn))
        if (pre + rec == 0):
            return 0
        return (2 * pre * rec) / (pre + rec)

    # the samples_num parameter indicates how many samples to average over
    def score(val_score):
        val_score['preds'] = val_score['preds'].map(lambda x: [float(i) for i in x.split(';')])
        acc = 0
        tp = np.zeros(49)
        fp = np.zeros(49)
        fn = np.zeros(49)
        f1 = np.zeros(49)
        f1_tot = 0

        print(val_score.head(10))

        val_score['preds_label'] = val_score['preds'].apply(lambda x: np.argmax(x))
        for i in range(val_score.shape[0]):
            preds = val_score['preds_label'].iloc[i]
            label = val_score['label'].iloc[i]
            if (preds == label):
                acc = acc + 1
                tp[label] = tp[label] + 1
            else:
                fp[preds] = fp[preds] + 1
                fn[label] = fn[label] + 1
        
        for classes in range(49):
            f1[classes] = doitf(tp[classes], fp[classes], fn[classes])
            f1_tot = f1_tot + f1[classes]
        acc = acc / val_score.shape[0]
        f1_tot = f1_tot / 49

        return acc, f1_tot

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    # optimizer = optim.Adam(model.module.last_linear.parameters(), lr, weight_decay=weight_decay, amsgrad=True)
    optimizer = optim.Adam(model.parameters(), lr, weight_decay=weight_decay, amsgrad=True)

    if evaluate:
        validate(val_loader, model, criterion)
    else:
        for epoch in range(start_epoch, total_epochs):
            if stage >= total_stages - 1:
                break
            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch)
            # evaluate on validation set
            if epoch >= 0:
                acc , f1 = validate(val_loader, model, criterion)

                with open('./result/%s.txt' % file_name, 'a') as acc_file:
                    acc_file.write('Epoch: %2d, acc: %.8f, f1: %.8f\n' % (epoch, acc, f1))

                # remember best Accuracy and save checkpoint
                is_best = acc > best_score
                best_score = max(acc, best_score)

                # if (epoch + 1) in np.cumsum(stage_epochs)[:-1]:
                #     stage += 1
                #     optimizer = adjust_learning_rate()

                if is_best:
                    no_improved_times = 0
                else:
                    no_improved_times += 1

                print('stage: %d, no_improved_times: %d' % (stage, no_improved_times))

                if no_improved_times >= patience:
                    stage += 1
                    optimizer = adjust_learning_rate()

                state = {
                    'epoch': epoch + 1,
                    'arch': pre_model,
                    'state_dict': model.state_dict(),
                    'best_score': best_score,
                    'no_improved_times': no_improved_times,
                    'stage': stage,
                    'lr': lr,
                }
                save_checkpoint(state, is_best)

                # if (epoch + 1) in np.cumsum(stage_epochs)[:-1]:
                if no_improved_times >= patience:
                    no_improved_times = 0
                    model.load_state_dict(torch.load('./model/%s/model_best.pth.tar' % file_name)['state_dict'])
                    print('Step into next stage')
                    with open('./result/%s.txt' % file_name, 'a') as acc_file:
                        acc_file.write('---------------------Step into next stage---------------------\n')

    with open('./result/%s.txt' % file_name, 'a') as acc_file:
        acc_file.write('* best acc: %.8f  %s\n' % (best_score, os.path.basename(__file__)))
    with open('./result/best_acc.txt', 'a') as acc_file:
        acc_file.write('%s  * best acc: %.8f  %s\n' % (
        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())), best_score, os.path.basename(__file__)))

    # test
    best_model = torch.load('model/{}/model_best.pth.tar'.format(file_name))
    model.load_state_dict(best_model['state_dict'])
    test(test_loader=test_loader, model=model)

    torch.cuda.empty_cache()
    # resume = False


if __name__ == '__main__':
    for index in range(1,6):
        main(index)