PyTorch Model-Building Methods
import torch
import torch.nn as nn
import torch.nn.functional as F
Basic ways to build a model in PyTorch:
Method 1: The general-purpose approach
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # in_channel, out_channel, kernel_size, stride, padding, bias are placeholder hyperparameters
        self.conv = nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding, bias=bias)
        self.dense = nn.Linear(32 * 3 * 3, 128)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv(x)), 2)   # pool with a 2x2 window
        x = x.view(x.size(0), -1)                   # flatten to (batch, features)
        x = self.dense(x)
        x = torch.sigmoid(x)                        # F.sigmoid is deprecated
        return x
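To actually run this skeleton, the placeholder hyperparameters must be bound to concrete values first. The values below are my own assumptions, chosen only so that the flattened feature map matches Linear(32 * 3 * 3, 128):

# Assumed placeholder values so Net() above can be instantiated (32 channels on a 3x3 pooled map)
in_channel, out_channel, kernel_size, stride, padding, bias = 3, 32, 3, 1, 1, True

net = Net()
dummy = torch.randn(4, 3, 6, 6)   # batch of four 3x6x6 images; pooling leaves a 3x3 map
print(net(dummy).shape)           # torch.Size([4, 128])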
Method 2: Wrapping layers in nn.Sequential
1. Basic usage
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # in_channel, out_channel, W, H, n_hidden are placeholders for the actual sizes
        self.conv = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding, bias=bias),
            nn.ReLU(),
            nn.BatchNorm2d(out_channel),
            nn.MaxPool2d(2),
        )
        self.dense = nn.Sequential(
            nn.Linear(out_channel * W * H, n_hidden),   # nn.Linear needs both in_features and out_features
            nn.ReLU(),
        )

    def forward(self, x):
        x = self.conv(x)
        x = x.view(x.size(0), -1)
        x = self.dense(x)
        return x
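For reference, a minimal runnable sketch of the same pattern with assumed sizes (3 to 16 channels on an 8x8 input, so the pooled map is 4x4); it also shows that layers inside a plain Sequential are only addressable by index:

conv = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),
    nn.ReLU(),
    nn.BatchNorm2d(16),
    nn.MaxPool2d(2),
)
dense = nn.Sequential(
    nn.Linear(16 * 4 * 4, 10),
    nn.ReLU(),
)
x = torch.randn(2, 3, 8, 8)
y = dense(conv(x).view(x.size(0), -1))
print(y.shape)    # torch.Size([2, 10])
print(conv[0])    # without names, sub-layers are referenced by position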
2. Naming each layer with add_module
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # same placeholders as above; each add_module call needs a unique name
        self.conv = nn.Sequential()
        self.conv.add_module('conv1', nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding, bias=bias))
        self.conv.add_module('relu1', nn.ReLU())
        self.conv.add_module('bn1', nn.BatchNorm2d(out_channel))
        self.conv.add_module('pool1', nn.MaxPool2d(2))
        self.dense = nn.Sequential()
        self.dense.add_module('dense1', nn.Linear(out_channel * W * H, n_hidden))
        self.dense.add_module('relu2', nn.ReLU())

    def forward(self, x):
        x = self.conv(x)
        x = x.view(x.size(0), -1)
        x = self.dense(x)
        return x
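A small sketch (again with assumed sizes) of what the names buy you: every layer registered through add_module becomes an attribute of the container and shows up in named_children():

block = nn.Sequential()
block.add_module('conv1', nn.Conv2d(3, 16, 3, 1, 1))
block.add_module('relu1', nn.ReLU())
block.add_module('pool1', nn.MaxPool2d(2))

print(block.conv1)                 # the registered name doubles as an attribute
for name, layer in block.named_children():
    print(name, '->', layer.__class__.__name__)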
3. Naming layers with an OrderedDict
from collections import OrderedDict

class Net4(torch.nn.Module):
    def __init__(self):
        super(Net4, self).__init__()
        # same placeholders as above; keys must be unique, or later entries overwrite earlier ones
        self.conv = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding, bias=bias)),
            ('relu1', nn.ReLU()),
            ('pool1', nn.MaxPool2d(2)),
        ]))
        self.dense = torch.nn.Sequential(OrderedDict([
            ('dense1', nn.Linear(out_channel * W * H, n_hidden)),   # nn.Linear only takes in/out features (and bias)
            ('relu2', nn.ReLU()),
        ]))

    def forward(self, x):
        conv_out = self.conv(x)
        res = conv_out.view(conv_out.size(0), -1)
        out = self.dense(res)
        return out
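A runnable sketch with assumed concrete sizes; printing the container shows the chosen names instead of numeric indices, and the named and indexed views refer to the same layers:

from collections import OrderedDict

conv = nn.Sequential(OrderedDict([
    ('conv1', nn.Conv2d(3, 16, 3, 1, 1)),
    ('relu1', nn.ReLU()),
    ('pool1', nn.MaxPool2d(2)),
]))
print(conv)                    # layers listed under 'conv1', 'relu1', 'pool1'
print(conv.conv1 is conv[0])   # True: name and index refer to the same module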
Method 3: Building with nn.ModuleList()
Unlike Sequential, a ModuleList does not fix the call order: forward() decides which of the stored layers to call, and in what order.
class Net(nn.Module):
    def __init__(self, nf, t, activation='prelu', dropout=False, bn='batch', bias=False):
        super(Net, self).__init__()
        self.t = t
        self.down_convs = nn.ModuleList()
        for i in range(t):
            # ConvBlock is the author's custom block; for t > 1 the in/out channels of
            # consecutive blocks have to line up for the chained forward pass below
            self.down_convs.append(
                ConvBlock(nf, nf * 2, kernel_size=3, stride=2, padding=1,
                          activation=activation, dropout=dropout, norm=bn, bias=bias)
            )

    def forward(self, x):
        feature_layer = x
        for i in range(self.t):
            feature_layer = self.down_convs[i](feature_layer)
        return feature_layer
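ConvBlock above comes from the author's own codebase and is not defined in this post, so here is a self-contained sketch of the same pattern built from plain Conv2d layers; the ModuleList registers the layers' parameters, while forward() stays free to index them in whatever order it likes:

class DownNet(nn.Module):
    def __init__(self, nf=16, t=3):
        super().__init__()
        self.t = t
        # ModuleList (unlike a plain Python list) registers each layer's parameters with the module
        self.down_convs = nn.ModuleList(
            [nn.Conv2d(nf, nf, kernel_size=3, stride=2, padding=1) for _ in range(t)]
        )

    def forward(self, x):
        for i in range(self.t):
            x = self.down_convs[i](x)   # indexing is explicit, so the call order is up to forward()
        return x

net = DownNet()
print(net(torch.randn(1, 16, 32, 32)).shape)   # torch.Size([1, 16, 4, 4]) after three stride-2 convs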