
Training and Testing on Your Own Dataset with MXNet

Using MXNet to train an image classifier
1. Prepare the data:
(1) Create a root directory, then create one subfolder per class and put each class's images into the corresponding subfolder:
--root:
----class1
----class2
......
----classn
First, generate the training and validation lists with the following command:

python ~/mxnet/tools/im2rec.py --list True --recursive True --train-ratio 0.9 myData /home/xxx/root/

--list: must be set to True when you want to generate .lst files; by default the tool generates .rec files;

--recursive: traverse the whole dataset directory recursively; set it to True;

--train-ratio: splits the full dataset into a training set (train) and a validation set (val); this parameter decides how much of the data goes to each;

--test-ratio: same idea, but splits off a separate test set instead;

--exts: the file extensions of your data (image data here); by default only two image formats are picked up: jpg and jpeg.
After this command finishes you will find two new files: myData_train.lst and myData_val.lst.
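Each line of a generated .lst file is tab-separated: an integer index, the numeric label im2rec assigned to the class subfolder (written as a float), and the image path relative to the root. Two illustrative lines (the file names here are made up):

0	0.000000	class1/img_001.jpg
1	2.000000	classn/img_107.jpg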
(2) Generate the .rec files:

python ~/mxnet/tools/im2rec.py --num-thread 4 --pass-through 1 myData /home/xxx/root/

--pass-through: set it to 1 to skip the image transform step; otherwise you may hit an "unknown array type" error;

myData is the prefix of the .lst files generated in the first step; it is reused here to build the .rec files.

After this command finishes you will see two more files: myData_train.rec and myData_val.rec.
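If your source images are large or vary a lot in size, an alternative to --pass-through is to let im2rec resize the shorter edge and re-encode while packing, which keeps the .rec files compact; a hedged variant of the command above (same myData prefix and root path):

python ~/mxnet/tools/im2rec.py --num-thread 4 --resize 256 --quality 95 myData /home/xxx/root/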

2. Load the training and validation datasets:

import mxnet as mx

def get_iterators(batch_size, data_shape=(3, 224, 224)):
    train = mx.io.ImageRecordIter(
        path_imgrec         = './myData_train.rec',   # training .rec file generated in step 1 (adjust the path if needed)
        data_name           = 'data',
        label_name          = 'softmax_label',
        batch_size          = batch_size,
        data_shape          = data_shape,
        shuffle             = True,
        rand_crop           = True,
        rand_mirror         = True)
    val = mx.io.ImageRecordIter(
        path_imgrec         = './myData_val.rec',     # validation .rec file generated in step 1
        data_name           = 'data',
        label_name          = 'softmax_label',
        batch_size          = batch_size,
        data_shape          = data_shape,
        rand_crop           = False,
        rand_mirror         = False)
    return (train, val)

train, val = get_iterators(128, (3, 128, 128))  # choose the batch size and the image size
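Before training, it is worth pulling one batch from the iterator to confirm that the shapes match what the network will expect; a minimal sanity check, assuming the code above has already run:

batch = train.next()                # fetch one mini-batch from the training iterator
print(batch.data[0].shape)          # expected: (128, 3, 128, 128)
print(batch.label[0].shape)         # expected: (128,)
train.reset()                       # rewind so training starts again from the first batch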

3. Define the network structure:
Here we use ResNet as an example:

'''
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu

Implemented the following paper:

Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
'''
import mxnet as mx
import numpy as np

def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
    """Return ResNet Unit symbol for building ResNet
    Parameters
    ----------
    data : str
        Input data
    num_filter : int
        Number of output channels
    bnf : int
        Bottle neck channels factor with regard to num_filter
    stride : tuple
        Stride used in convolution
    dim_match : Boolean
        True means the number of input and output channels is the same; otherwise they differ
    name : str
        Base name of the operators
    workspace : int
        Workspace used in convolution operator
    """
    if bottle_neck:
        # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        weight = mx.symbol.Variable(name=name + '_conv1_weight', dtype=np.float32)
        weight = mx.symbol.Cast(data=weight, dtype=np.float16)
        conv1 = mx.sym.Convolution(data=act1, weight=weight, num_filter=int(num_filter*0.25), kernel=(1,1), stride=(1,1), pad=(0,0),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        weight = mx.symbol.Variable(name=name + '_conv2_weight', dtype=np.float32)
        weight = mx.symbol.Cast(data=weight, dtype=np.float16)
        conv2 = mx.sym.Convolution(data=act2, weight=weight, num_filter=int(num_filter*0.25), kernel=(3,3), stride=stride, pad=(1,1),
                                   no_bias=True, workspace=workspace, name=name + '_conv2')
        bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
        act3 = mx.sym.Activation(data=bn3, act_type='relu', name=name + '_relu3')
        weight = mx.symbol.Variable(name=name + '_conv3_weight', dtype=np.float32)
        weight = mx.symbol.Cast(data=weight, dtype=np.float16)
        conv3 = mx.sym.Convolution(data=act3, weight=weight, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
                                   workspace=workspace, name=name + '_conv3')
        if dim_match:
            shortcut = data
        else:
            weight = mx.symbol.Variable(name=name + '_sc_weight', dtype=np.float32)
            weight = mx.symbol.Cast(data=weight, dtype=np.float16)
            shortcut = mx.sym.Convolution(data=act1, weight=weight, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                                            workspace=workspace, name=name+'_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return conv3 + shortcut
    else:
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        weight = mx.symbol.Variable(name=name + '_conv1_weight', dtype=np.float32)
        weight = mx.symbol.Cast(data=weight, dtype=np.float16)
        conv1 = mx.sym.Convolution(data=act1, weight=weight, num_filter=num_filter, kernel=(3,3), stride=stride, pad=(1,1),
                                      no_bias=True, workspace=workspace, name=name + '_conv1')
        bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        weight = mx.symbol.Variable(name=name + '_conv2_weight', dtype=np.float32)
        weight = mx.symbol.Cast(data=weight, dtype=np.float16)
        conv2 = mx.sym.Convolution(data=act2, weight=weight, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
                                      no_bias=True, workspace=workspace, name=name + '_conv2')
        if dim_match:
            shortcut = data
        else:
            weight = mx.symbol.Variable(name=name + '_sc_weight', dtype=np.float32)
            weight = mx.symbol.Cast(data=weight, dtype=np.float16)
            shortcut = mx.sym.Convolution(data=act1, weight=weight, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
                                            workspace=workspace, name=name+'_sc')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        return conv2 + shortcut

def resnet(units, num_stages, filter_list, num_classes, image_shape, bottle_neck=True, bn_mom=0.9, workspace=256, memonger=False):
    """Return a ResNet symbol.
    Parameters
    ----------
    units : list
        Number of units in each stage
    num_stages : int
        Number of stages
    filter_list : list
        Channel size of each stage
    num_classes : int
        Output size of the symbol
    dataset : str
        Dataset type; only cifar10 and imagenet are supported
    workspace : int
        Workspace used in convolution operator
    """
    num_unit = len(units)
    assert(num_unit == num_stages)
    data = mx.sym.Variable(name='data')
    data = mx.symbol.Cast(data=data, dtype=np.float16)
    data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
    (nchannel, height, width) = image_shape
    weight = mx.symbol.Variable(name='conv0_weight', dtype=np.float32)
    weight = mx.symbol.Cast(data=weight, dtype=np.float16)
    if height <= 32:            # such as cifar10
        body = mx.sym.Convolution(data=data, weight=weight, num_filter=filter_list[0], kernel=(3, 3), stride=(1,1), pad=(1, 1),
                                  no_bias=True, name="conv0", workspace=workspace)
    else:                       # often expected to be 224 such as imagenet
        body = mx.sym.Convolution(data=data, weight=weight, num_filter=filter_list[0], kernel=(7, 7), stride=(2,2), pad=(3, 3),
                                  no_bias=True, name="conv0", workspace=workspace)
        body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
        body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
        body = mx.symbol.Pooling(data=body, kernel=(3, 3), stride=(2,2), pad=(1,1), pool_type='max')

    for i in range(num_stages):
        body = residual_unit(body, filter_list[i+1], (1 if i==0 else 2, 1 if i==0 else 2), False,
                             name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace,
                             memonger=memonger)
        for j in range(units[i]-1):
            body = residual_unit(body, filter_list[i+1], (1,1), True, name='stage%d_unit%d' % (i + 1, j + 2),
                                 bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)
    bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn1')
    relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
    # Although kernel is not used here when global_pool=True, we should put one
    pool1 = mx.symbol.Pooling(data=relu1, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
    flat = mx.symbol.Flatten(data=pool1)
    weight = mx.symbol.Variable(name='fc1_weight', dtype=np.float32)
    bias = mx.symbol.Variable(name='fc1_bias', dtype=np.float32)
    weight = mx.symbol.Cast(data=weight, dtype=np.float16)
    bias = mx.symbol.Cast(data=bias, dtype=np.float16)
    fc1 = mx.symbol.FullyConnected(data=flat, weight=weight, bias=bias, num_hidden=num_classes, name='fc1')
    fc1 = mx.symbol.Cast(data=fc1, dtype=np.float32)
    return mx.symbol.SoftmaxOutput(data=fc1, name='softmax')

def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, **kwargs):
    """
    Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
    Original author Wei Wu
    """
    image_shape = [int(l) for l in image_shape.split(',')]
    (nchannel, height, width) = image_shape
    if height <= 28:
        num_stages = 3
        if (num_layers-2) % 9 == 0 and num_layers >= 164:
            per_unit = [(num_layers-2)//9]
            filter_list = [16, 64, 128, 256]
            bottle_neck = True
        elif (num_layers-2) % 6 == 0 and num_layers < 164:
            per_unit = [(num_layers-2)//6]
            filter_list = [16, 16, 32, 64]
            bottle_neck = False
        else:
            raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
        units = per_unit * num_stages
    else:
        if num_layers >= 50:
            filter_list = [64, 256, 512, 1024, 2048]
            bottle_neck = True
        else:
            filter_list = [64, 64, 128, 256, 512]
            bottle_neck = False
        num_stages = 4
        if num_layers == 18:
            units = [2, 2, 2, 2]
        elif num_layers == 34:
            units = [3, 4, 6, 3]
        elif num_layers == 50:
            units = [3, 4, 6, 3]
        elif num_layers == 101:
            units = [3, 4, 23, 3]
        elif num_layers == 152:
            units = [3, 8, 36, 3]
        elif num_layers == 200:
            units = [3, 24, 36, 3]
        elif num_layers == 269:
            units = [3, 30, 48, 8]
        else:
            raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))

    return resnet(units       = units,
                  num_stages  = num_stages,
                  filter_list = filter_list,
                  num_classes = num_classes,
                  image_shape = image_shape,
                  bottle_neck = bottle_neck,
                  workspace   = conv_workspace)
resnet18 = get_symbol(2, 18, '3,128,128')  # 2 classes, ResNet-18, 3x128x128 input
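To confirm the symbol accepts the chosen input size, you can ask MXNet to infer its output shape; a small check, assuming resnet18 was built as above:

arg_shapes, out_shapes, aux_shapes = resnet18.infer_shape(data=(1, 3, 128, 128))
print(out_shapes)   # expected: [(1, 2)] for the 2-class softmax output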

4. Train and save the model:

# construct a callback function to save checkpoints
model_prefix = 'resnet18'
checkpoint = mx.callback.do_checkpoint(model_prefix)

mod = mx.mod.Module(symbol=resnet18, context=mx.gpu(0))
mod.fit(train,
        eval_data=val,
        optimizer='sgd',
        optimizer_params={'learning_rate':0.001, 'momentum': 0.9},
        eval_metric='acc',
        num_epoch=30, 
        epoch_end_callback=checkpoint)
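mod.fit reports progress through Python's logging module, so if no accuracy lines show up during training, raise the logging level before calling fit; a minimal sketch:

import logging
logging.getLogger().setLevel(logging.INFO)   # lets mod.fit print per-epoch training/validation accuracy
# Optionally pass batch_end_callback=mx.callback.Speedometer(128, 50) to fit
# to also log throughput every 50 batches (50 is an arbitrary choice).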

5. Load the model for testing:

sym, arg_params, aux_params = mx.model.load_checkpoint('resnet18', 30)
mod = mx.mod.Module(symbol=sym, context=mx.gpu(0), label_names=None)
mod.bind(for_training=False, data_shapes=[('data', (1,3,128,128))], 
         label_shapes=mod._label_shapes)
mod.set_params(arg_params, aux_params, allow_missing=True)
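The checkpoint callback writes resnet18-symbol.json once plus one resnet18-XXXX.params file per epoch, so load_checkpoint('resnet18', 30) expects resnet18-symbol.json and resnet18-0030.params to be in the working directory; a quick check:

import os
print(os.path.exists('resnet18-symbol.json'), os.path.exists('resnet18-0030.params'))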

6. Run predictions on test data:

%matplotlib inline
import matplotlib.pyplot as plt
import cv2
import numpy as np
# define a simple data batch
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])

def get_image(path, show=False):
    # load the image from disk and optionally display it
    img = cv2.imread(path)
    if img is None:
        return None
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if show:
        plt.imshow(img)
        plt.axis('off')
    # convert into (batch, channel, height, width) format
    img = cv2.resize(img, (128, 128))
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)
    img = img[np.newaxis, :]
    return img

def predict(path):
    img = get_image(path, show=True)
    # compute the predicted probabilities
    mod.forward(Batch([mx.nd.array(img)]))
    prob = mod.get_outputs()[0].asnumpy()
    # print the top-5 class indices with their probabilities
    prob = np.squeeze(prob)
    a = np.argsort(prob)[::-1]
    for i in a[0:5]:
        print(i, prob[i])
predict('/home/test_images/cat.jpg')
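The index printed next to each probability is the numeric label im2rec assigned to a subfolder (subfolders are typically enumerated in sorted order), so mapping it back to a readable class name only requires a list kept in the same order; a hypothetical sketch for two classes named class1 and class2:

classes = ['class1', 'class2']      # hypothetical names; order must match the labels in the .lst file
img = get_image('/home/test_images/cat.jpg')
mod.forward(Batch([mx.nd.array(img)]))
prob = np.squeeze(mod.get_outputs()[0].asnumpy())
for i in np.argsort(prob)[::-1]:
    print('%s: %.4f' % (classes[i], prob[i]))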