
Usage of the SoftMax cross-entropy loss and gradients in PyTorch

In PyTorch it is easy to verify the computation of the SoftMax cross-entropy loss and of its gradient with respect to the input.

For the derivation of the softmax_cross_entropy gradient, see HERE.
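In short: given logits z, softmax output s = softmax(z) and a one-hot label p, the per-sample loss is loss = -sum_j( p_j * log(s_j) ), and its gradient with respect to the logits is d(loss)/d(z_i) = s_i - p_i (divided by the batch size when the loss is averaged over a batch). The examples below verify this numerically.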

Example

# -*- coding: utf-8 -*-
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import numpy as np

# gradients will be computed with respect to data, for backpropagation
data = Variable(torch.FloatTensor([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]), requires_grad=True)

# multi-class labels, one-hot format
label = Variable(torch.zeros((3,3)))
label[0,2] = 1
label[1,1] = 1
label[2,0] = 1
print(label)

# for batch loss = mean( -sum(Pj*logSj) )
# for one : loss = -sum(Pj*logSj)
loss = torch.mean(-torch.sum(label * torch.log(F.softmax(data,dim=1)),dim=1))

loss.backward()
print(loss,data.grad)

Output:

tensor([[ 0.,  0.,  1.],
        [ 0.,  1.,  0.],
        [ 1.,  0.,  0.]])
# loss and the input's grad (the gradient w.r.t. the input)
tensor(1.4076) tensor([[ 0.0300,  0.0816, -0.1116],
        [ 0.0300, -0.2518,  0.2217],
        [-0.3033,  0.0816,  0.2217]])
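As a quick check (a minimal sketch, assuming the snippet above has just run and a PyTorch version that provides torch.allclose), the gradient computed by autograd should equal (softmax - label) / batch_size:

# manual gradient of the mean cross-entropy loss: (softmax - one-hot label) / batch_size
manual_grad = (F.softmax(data, dim=1) - label).detach() / data.size(0)
print(torch.allclose(data.grad, manual_grad))  # True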

Note

Loss and grad for a single input:

data = Variable(torch.FloatTensor([[1.0, 2.0, 3.0]]), requires_grad=True)


label = Variable(torch.zeros((1,3)))
# set the label to 1 at a different index position each time
label[0,0] = 1
# label[0,1] = 1
# label[0,2] = 1
print("label: ", label)

# for batch loss = mean( -sum(Pj*logSj) )
# for one : loss = -sum(Pj*logSj)
loss = torch.mean(-torch.sum(label * torch.log(F.softmax(data, dim=1)), dim=1))
loss.backward()
print("loss: ", loss, "\ngrad: ", data.grad)

The output:

# Group 1:
label: tensor([[ 1.,  0.,  0.]])
loss: tensor(2.4076) 
grad: tensor([[-0.9100,  0.2447,  0.6652]])

# Group 2:
label: tensor([[ 0.,  1.,  0.]])
loss: tensor(1.4076) 
grad: tensor([[ 0.0900, -0.7553,  0.6652]])

# Group 3:
label: tensor([[ 0.,  0.,  1.]])
loss: tensor(0.4076) 
grad: tensor([[ 0.0900,  0.2447, -0.3348]])

"""
解釋:
對於輸入資料 tensor([[ 1.,2.,3.]]) softmax之後的結果如下
tensor([[ 0.0900,0.6652]])
交叉熵求解梯度推導公式可知 s[0,0]-1,s[0,1]-1,2]-1 是上面三組label對應的輸入資料梯度
"""

The relation between PyTorch's softmax and log_softmax

# the softmax implementation provided by PyTorch
In[2]: import torch
 ...: import torch.autograd as autograd
 ...: from torch.autograd import Variable
 ...: import torch.nn.functional as F
 ...: import torch.nn as nn
 ...: import numpy as np
In[3]: data = Variable(torch.FloatTensor([[1.0, 2.0, 3.0]]), requires_grad=True)
In[4]: data
Out[4]: tensor([[ 1.,  2.,  3.]])
In[5]: e = torch.exp(data)
In[6]: e
Out[6]: tensor([[ 2.7183,7.3891,20.0855]])
In[7]: s = torch.sum(e,dim=1)
In[8]: s
Out[8]: tensor([ 30.1929])
In[9]: softmax = e/s
In[10]: softmax
Out[10]: tensor([[ 0.0900,  0.2447,  0.6652]])
In[11]: # equivalent to the softmax provided by PyTorch
In[12]: org_softmax = F.softmax(data,dim=1)
In[13]: org_softmax
Out[13]: tensor([[ 0.0900,  0.2447,  0.6652]])
In[14]: org_softmax == softmax # identical results
Out[14]: tensor([[ 1,1,1]],dtype=torch.uint8)

# relation to log_softmax
# log_softmax = log(softmax)
In[15]: _log_softmax = torch.log(org_softmax) 
In[16]: _log_softmax
Out[16]: tensor([[-2.4076,-1.4076,-0.4076]])
In[17]: log_softmax = F.log_softmax(data,dim=1)
In[18]: log_softmax
Out[18]: tensor([[-2.4076, -1.4076, -0.4076]])
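A practical note (not part of the original session, but a known property of PyTorch): F.log_softmax is computed in a numerically stable fused form, whereas torch.log(F.softmax(...)) returns -inf once the softmax underflows to zero:

big = torch.FloatTensor([[-1000.0, 0.0, 1000.0]])
print(torch.log(F.softmax(big, dim=1)))  # tensor([[-inf, -inf, 0.]]): exp underflows, then log(0)
print(F.log_softmax(big, dim=1))         # tensor([[-2000., -1000., 0.]]): stays finite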

Cross-entropy results using the loss functions provided by PyTorch

# -*- coding: utf-8 -*-
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import numpy as np

data = Variable(torch.FloatTensor([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]), requires_grad=True)
log_softmax = F.log_softmax(data,dim=1)

label = Variable(torch.zeros((3, 3)))
label[0, 2] = 1
label[1, 1] = 1
label[2, 0] = 1
print("lable: ",label)

# one way to compute the cross-entropy
loss_fn = torch.nn.NLLLoss() # with the default reduce=True: loss = sum/batch, grad is averaged over the batch
# NLLLoss takes log_softmax as input; the target is a label in class-index (non-one-hot) format
loss = loss_fn(log_softmax,torch.argmax(label,dim=1))
loss.backward()
print("loss: ",loss,"\ngrad: ",data.grad)

"""
# 交叉熵計算方式二
loss_fn = torch.nn.CrossEntropyLoss() # the target label is NOT an one-hotted
#CrossEntropyLoss適用於分類問題的損失函式
#input:沒有softmax過的nn.output,target是非one-hot格式label
loss = loss_fn(data,data.grad)
"""
"""

Output

label: tensor([[ 0.,  0.,  1.],
        [ 0.,  1.,  0.],
        [ 1.,  0.,  0.]])
loss: tensor(1.4076) 
grad: tensor([[ 0.0300,  0.0816, -0.1116],
        [ 0.0300, -0.2518,  0.2217],
        [-0.3033,  0.0816,  0.2217]])

Comparing this with the output of the first example shows that the two results are identical.
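For completeness, the CrossEntropyLoss path from the commented-out block can be run directly (a minimal sketch using the current tensor API; CrossEntropyLoss combines log_softmax and NLLLoss, so it takes raw logits and class-index targets):

import torch
import torch.nn as nn

data = torch.FloatTensor([[1.0, 2.0, 3.0]] * 3).requires_grad_(True)
target = torch.LongTensor([2, 1, 0])        # class indices, not one-hot
loss = nn.CrossEntropyLoss()(data, target)  # applies log_softmax internally
loss.backward()
print(loss, data.grad)  # tensor(1.4076) and the same gradient as in the examples above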
