
Week 1: Deep Learning and PyTorch Basics

(Note: the video lectures for this week have been completed; notes omitted.)

Progress on the Cats vs. Dogs (貓狗大戰) assignment

I. Experiment procedure on the "AI研習社" platform

1. Upload the downloaded dataset to the Colab platform
  • First, mount Google Drive:

import os
from google.colab import drive

# Mount Google Drive so the dataset stored there is visible to the notebook
drive.mount('/content/drive')

path = "/content/drive/My Drive"
os.chdir(path)
os.listdir(path)
  • After uploading the dataset to Drive, running the code produced an error:

  • Fix attempt: (this fix turned out to be wrong; the real cause is related to ImageFolder, see step 2 for details):

2. Split the training and validation sets into per-class folders
import os
import shutil

# Remove the extra subfolder left under test/
shutil.rmtree('/content/drive/My Drive/cat_dog/test/1')
print('delete finished')

# Create one folder per class, as required by torchvision's ImageFolder
os.mkdir('/content/drive/My Drive/cat_dog/val/cat')
os.mkdir('/content/drive/My Drive/cat_dog/val/dog')

path = r'/content/drive/My Drive/cat_dog/val'
newcat = '/content/drive/My Drive/cat_dog/val/cat'
newdog = '/content/drive/My Drive/cat_dog/val/dog'

# Walk the original validation folder and copy each image into cat/ or dog/
# according to its file name
fns = [os.path.join(root, fn) for root, dirs, files in os.walk(path) for fn in files]
for f in fns:
    name1 = str(f)
    if 'cat_dog/val/3/cat' in name1:
        shutil.copy(f, newcat)
    else:
        shutil.copy(f, newdog)

print(len(fns))

After the split, inspecting the dataset showed that the directory also contains a hidden .ipynb_checkpoints folder:

['.ipynb_checkpoints', 'cat', 'dog']
{'.ipynb_checkpoints': 0, 'cat': 1, 'dog': 2}

Fix:

%cd '/content/drive/My Drive/cat_dog/train'

!rm -rf .ipynb_checkpoints
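
This is also why the earlier error appeared: torchvision's ImageFolder treats every subfolder, including .ipynb_checkpoints, as a class. The loaders loader_train and loader_valid and the dset_sizes dictionary used in step 5 are never shown in this post; a minimal sketch of how they can be built with ImageFolder, assuming the vgg_format preprocessing below (a standard ImageNet transform mirroring the course notebook) and an assumed batch size of 64:

import os
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# Assumed preprocessing (standard ImageNet statistics); the course notebook defines
# vgg_format along these lines
vgg_format = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

data_dir = '/content/drive/My Drive/cat_dog'
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), vgg_format)
         for x in ['train', 'val']}
dset_sizes = {x: len(dsets[x]) for x in ['train', 'val']}

# Batch size of 64 is an assumption
loader_train = DataLoader(dsets['train'], batch_size=64, shuffle=True)
loader_valid = DataLoader(dsets['val'], batch_size=64, shuffle=False)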
3. Upload the test data and define a custom test dataset testData
import os
import numpy as np
import torch
from PIL import Image

test_data_dir = '/content/drive/My Drive/cat_dog/test'

class TestDS(torch.utils.data.Dataset):
    def __init__(self, transform=None):
        # Keep the full path of every test image (the file names are needed again in step 7)
        self.test_data = [os.path.join(test_data_dir, f) for f in os.listdir(test_data_dir)]
        # The test set has no labels; use 2000 placeholder zeros
        self.test_label = np.zeros(2000, dtype=np.int64)
        self.transform = transform
    def __getitem__(self, index):
        # Return the image and its (placeholder) label for the given index
        image = Image.open(self.test_data[index]).convert('RGB')
        image = self.transform(image)
        return image, self.test_label[index]
    def __len__(self):
        # Return the number of test images
        return len(self.test_data)

# Build the test dataset
testData = TestDS(transform=vgg_format)
print(len(testData))
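
loader_test, used in step 6, is also not defined in the post; a minimal sketch, assuming a batch size of 64 and shuffle=False so the prediction order matches testData.test_data:

from torch.utils.data import DataLoader

# Keep shuffle=False so predictions line up with testData.test_data
loader_test = DataLoader(testData, batch_size=64, shuffle=False)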
4. Load the model:
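The names lr, device, and criterion are used in this step and in step 5 but never defined in the post; a minimal sketch with assumed values (lr = 0.001 is an assumption; NLLLoss is the natural pairing because the classifier below ends with LogSoftmax):

import torch
import torch.nn as nn

lr = 0.001        # assumed learning rate, not given in the post
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
criterion = nn.NLLLoss()   # matches the LogSoftmax output layer added below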
import torch
import torch.nn as nn
from torchvision import models

# Load the pretrained VGG16 model
model_vgg_new = models.vgg16(pretrained=True)
# Freeze the pretrained parameters
for param in model_vgg_new.parameters():
    param.requires_grad = False
# Replace the last classifier layer with a 2-class output
model_vgg_new.classifier._modules['6'] = nn.Linear(4096, 2)
# nn.CrossEntropyLoss = log_softmax() + NLLLoss(), so append LogSoftmax and pair it with NLLLoss
model_vgg_new.classifier._modules['7'] = torch.nn.LogSoftmax(dim=1)
# Use Adam as the optimizer, updating only the new final layer
optimizer_vgg = torch.optim.Adam(model_vgg_new.classifier[6].parameters(), lr=lr)
model_vgg_new = model_vgg_new.to(device)
5. Train the model and check its performance on the validation set
# Train the model
def train_model(model,dataloader,size,epochs=1,optimizer=None):
    model.train()
    for epoch in range(epochs):
        running_loss = 0.0
        running_corrects = 0
        count = 0
        for inputs,classes in dataloader:
            inputs = inputs.to(device)
            classes = classes.to(device)
            outputs = model(inputs)
            loss = criterion(outputs,classes)     
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            _,preds = torch.max(outputs.data,1)
            # statistics
            running_loss += loss.data.item()
            running_corrects += torch.sum(preds == classes.data)
            count += len(inputs)
            # print('Training: No. ', count, ' process ... total: ', size)
        epoch_loss = running_loss / size
        epoch_acc = running_corrects.data.item() / size
        print('Loss: {:.4f} Acc: {:.4f}'.format(
                     epoch_loss, epoch_acc))    
# Run training
train_model(model_vgg_new, loader_train, size=dset_sizes['train'], epochs=1,
            optimizer=optimizer_vgg)

# Validate the model on the validation set
def test_model(model,dataloader,size):
    model.eval()
    predictions = np.zeros(size)
    all_classes = np.zeros(size)
    all_proba = np.zeros((size,2))
    i = 0
    running_loss = 0.0
    running_corrects = 0
    for inputs,classes in dataloader:
        inputs = inputs.to(device)
        classes = classes.to(device)
        outputs = model(inputs)
        loss = criterion(outputs,classes)           
        _,preds = torch.max(outputs.data,1)
        # statistics
        running_loss += loss.data.item()
        running_corrects += torch.sum(preds == classes.data)
        predictions[i:i+len(classes)] = preds.to('cpu').numpy()
        all_classes[i:i+len(classes)] = classes.to('cpu').numpy()
        all_proba[i:i+len(classes),:] = outputs.data.to('cpu').numpy()
        i += len(classes)
        # print('Testing: No. ', i, ' process ... total: ', size)        
    epoch_loss = running_loss / size
    epoch_acc = running_corrects.data.item() / size
    print('Loss: {:.4f} Acc: {:.4f}'.format(
                     epoch_loss, epoch_acc))
    return predictions, all_proba, all_classes
  
predictions, all_proba, all_classes = test_model(model_vgg_new,loader_valid,size=dset_sizes['val'])
6. Write the inference code for the test set:
# Inference on the unlabeled test set: the labels in testData are placeholders,
# so only the predictions are collected here
def test_model(model, dataloader, size):
    model.eval()
    predictions = np.zeros(size)
    i = 0
    with torch.no_grad():
        for inputs, classes in dataloader:
            inputs = inputs.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, 1)
            predictions[i:i+len(classes)] = preds.to('cpu').numpy()
            i += len(classes)
    return predictions

results = test_model(model_vgg_new, loader_test, size=2000)
print(results)
7. Save the test-set predictions:
# Extract the numeric image id from each test file name so each prediction
# can be matched to its image
name = []
for i in testData.test_data:
    j = os.path.splitext(os.path.basename(i))[0]
    name.append(int(j))
print(results)
print(name)

import pandas as pd
# The dict keys become the CSV column names
dataframe = pd.DataFrame({'name': name, 'results': results})
# Save the DataFrame as CSV; index controls whether the row index is written (default True)
dataframe.to_csv("/content/sample_data/test.csv", index=False, sep=',')
8. Adjust the CSV file to the platform's required format and check the result

II. Improving the model

1. Increase the number of training epochs to 3: no real benefit; too many epochs are unnecessary and may even cause overfitting.

2. Use a pretrained resnet152 model
  • Replace the model and its final fully connected layer:

model_resnet = models.resnet152(pretrained=True)
# Replace the final fully connected layer with a 2-class output (2048 input features)
model_resnet.fc = nn.Linear(2048, 2)
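
Unlike the VGG16 setup in step 4, the backbone here is not frozen; since the optimizer below only receives model_resnet.fc.parameters(), the backbone weights are not updated anyway, but freezing them avoids computing unneeded gradients. A sketch of the optional freeze (an addition, not part of the original run):

# Optional (not in the original run): freeze the pretrained backbone, as in step 4
for name, param in model_resnet.named_parameters():
    if not name.startswith('fc'):
        param.requires_grad = False
model_resnet = model_resnet.to(device)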
  • Loss function:

criterion = nn.CrossEntropyLoss()
  • Adjust the optimizer's learning rate, lowering it every 7 epochs (an equivalent built-in scheduler is sketched after this code):

lr = 0.001
epochs = 30      # the final run uses 30 epochs (see below)
optimizer_resnet = torch.optim.Adam(model_resnet.fc.parameters(), lr=lr)

def adjust_learning_rate(optimizer, lr):
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


for epoch in range(epochs):
    # Check the larger thresholds first, otherwise only the first branch ever fires
    if epoch > 28:
        lr = 0.0000001
    elif epoch > 21:
        lr = 0.000001
    elif epoch > 14:
        lr = 0.00001
    elif epoch > 7:
        lr = 0.0001
    adjust_learning_rate(optimizer_resnet, lr)
    # ... one epoch of training goes here
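
The same decay can also be written with PyTorch's built-in scheduler; a minimal sketch using torch.optim.lr_scheduler.StepLR (a 10x decay every 7 epochs, close to the manual schedule above):

from torch.optim import lr_scheduler

# Decay the learning rate by a factor of 10 every 7 epochs
scheduler = lr_scheduler.StepLR(optimizer_resnet, step_size=7, gamma=0.1)

for epoch in range(epochs):
    # ... run one epoch of training here ...
    scheduler.step()    # update the learning rate at the end of each epoch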
  • Set the number of epochs to 30 and train; validation-set results:

  • Uploading to the platform showed no improvement: (several combinations of epochs and lr were tried; accuracy stayed around 98% and never reached 99%)

3. Tried building an SENet, but it has too many parameters and training caused Colab to run out of memory (a sketch of the core SE block follows).
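
For reference, the core idea of SENet is the squeeze-and-excitation block, which reweights channels with a small gating MLP; a minimal sketch (illustrative only, not the network used in this experiment):

import torch
import torch.nn as nn

class SEBlock(nn.Module):
    def __init__(self, channels, reduction=16):
        super(SEBlock, self).__init__()
        # Squeeze: global average pooling to one value per channel
        self.pool = nn.AdaptiveAvgPool2d(1)
        # Excitation: a small bottleneck MLP producing per-channel weights in (0, 1)
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.shape
        w = self.pool(x).view(b, c)      # squeeze
        w = self.fc(w).view(b, c, 1, 1)  # excitation
        return x * w                     # rescale each channel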
4. Borrowed the network structure from https://www.cnblogs.com/ansang/p/9126427.html and re-implemented it in PyTorch; it also failed to reach 99% accuracy:
import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=(3, 3), stride=1, padding=1)
        self.max_pooling2d = nn.MaxPool2d(2, stride=2)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=1, padding=1)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=(3, 3), stride=1, padding=1)
        self.conv4 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=1, padding=1)
        # 224x224 input halved by three poolings -> 28x28 feature maps with 64 channels
        self.fc1 = nn.Linear(28*28*64, 1024)
        self.fc2 = nn.Linear(1024, 2)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(0.4)

    def forward(self, x):
        x = self.relu(self.conv1(x))
        x = self.max_pooling2d(x)
        x = self.relu(self.conv2(x))
        x = self.max_pooling2d(x)
        x = self.relu(self.conv3(x))
        x = self.relu(self.conv4(x))
        x = self.max_pooling2d(x)
        x = x.view(-1, x.shape[1]*x.shape[2]*x.shape[3])
        x = self.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x
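
A quick sanity check of the flatten size (the 28*28*64 in fc1 assumes 224x224 inputs, which matches the vgg_format resize used earlier):

net = Net()
x = torch.randn(1, 3, 224, 224)   # one dummy 224x224 RGB image
print(net(x).shape)               # expected: torch.Size([1, 2])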