Python torch.nn.Sequential() Examples
The following are 18 code examples showing how to use torch.nn.Sequential(). They are extracted from open-source projects; the project, author, source file, and license are listed above each example. You may also want to check out the other functions and classes available in the torch.nn module.
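Before the examples, a minimal sketch of the API itself may help: nn.Sequential chains modules so that each module's output feeds the next module's input, and the chain is itself an nn.Module. The layer sizes below are arbitrary, for illustration only.

import torch
import torch.nn as nn

# Modules run in the order they are passed; each output feeds the next input.
model = nn.Sequential(
    nn.Linear(10, 32),
    nn.ReLU(),
    nn.Linear(32, 2),
)

x = torch.randn(4, 10)   # a batch of 4 input vectors
y = model(x)             # shape: (4, 2)

# Submodules are indexable and iterable like a list.
first_layer = model[0]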
Example 1
Project: hgraph2graph Author: wengong-jin File: encoder.py License: MIT License
def __init__(self, rnn_type, input_size, node_fdim, hidden_size, depth, dropout):
    super(MPNEncoder, self).__init__()
    self.hidden_size = hidden_size
    self.input_size = input_size
    self.depth = depth
    self.W_o = nn.Sequential(
        nn.Linear(node_fdim + hidden_size, hidden_size),
        nn.ReLU(),
        nn.Dropout(dropout)
    )

    if rnn_type == 'GRU':
        self.rnn = GRU(input_size, hidden_size, depth)
    elif rnn_type == 'LSTM':
        self.rnn = LSTM(input_size, hidden_size, depth)
    else:
        raise ValueError('unsupported rnn cell type ' + rnn_type)
Example 2
Project: DDPAE-video-prediction Author: jthsieh File: decoder.py License: MIT License
def __init__(self, input_size, n_channels, ngf, n_layers, activation='tanh'):
    super(ImageDecoder, self).__init__()

    ngf = ngf * (2 ** (n_layers - 2))
    layers = [nn.ConvTranspose2d(input_size, ngf, 4, 1, 0, bias=False),
              nn.BatchNorm2d(ngf),
              nn.ReLU(True)]

    for i in range(1, n_layers - 1):
        layers += [nn.ConvTranspose2d(ngf, ngf // 2, 4, 2, 1, bias=False),
                   nn.BatchNorm2d(ngf // 2),
                   nn.ReLU(True)]
        ngf = ngf // 2

    layers += [nn.ConvTranspose2d(ngf, n_channels, 4, 2, 1, bias=False)]
    if activation == 'tanh':
        layers += [nn.Tanh()]
    elif activation == 'sigmoid':
        layers += [nn.Sigmoid()]
    else:
        raise NotImplementedError

    self.main = nn.Sequential(*layers)
Example 3
Project: cat-bbs Author: aleju File: model.py License: MIT License
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(self.inplanes, planes * block.expansion,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion),
        )

    layers = []
    layers.append(block(self.inplanes, planes, stride, 1, downsample))
    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
        # here with dilation
        layers.append(block(self.inplanes, planes, dilation=dilation))

    return nn.Sequential(*layers)
Example 4
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection Author: Sunarker File: resnet_v1.py License: MIT License
def _make_layer(self, block, planes, blocks, stride=1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(self.inplanes, planes * block.expansion,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * block.expansion),
        )

    layers = []
    layers.append(block(self.inplanes, planes, stride, downsample))
    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(block(self.inplanes, planes))

    return nn.Sequential(*layers)
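Examples 3 and 4 both return a stage of residual blocks as a single nn.Sequential, installing a 1x1-convolution downsample only when the spatial size or channel count changes. For reference, here is a minimal sketch of how a torchvision-style block typically consumes that downsample module; the class below is illustrative and not either project's exact code.

import torch.nn as nn

class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # The nn.Sequential built in _make_layer, or None for an identity shortcut.
        self.downsample = downsample

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            # Match the residual's stride/channels before the addition.
            identity = self.downsample(x)
        return self.relu(out + identity)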
Example 5
Project: controllable-text-attribute-transfer Author: Nrgeup File: model2.py License: Apache License 2.0
def make_model(d_vocab, N, d_model, d_ff=1024, h=4, dropout=0.1):
    """Helper: Construct a model from hyperparameters."""
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        nn.GRU(d_model, d_model, 1),
        nn.Sequential(Embeddings(d_model, d_vocab), c(position)),
        nn.Sequential(Embeddings(d_model, d_vocab), c(position)),
        Generator(d_model, d_vocab),
        d_model
    )

    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
Example 6
Project: controllable-text-attribute-transfer Author: Nrgeup File: model.py License: Apache License 2.0
def make_model(d_vocab, N, d_model, latent_size, d_ff=1024, h=4, dropout=0.1):
    """Helper: Construct a model from hyperparameters."""
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    share_embedding = Embeddings(d_model, d_vocab)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        # nn.Sequential(Embeddings(d_model, d_vocab), c(position)),
        # nn.Sequential(Embeddings(d_model, d_vocab), c(position)),
        nn.Sequential(share_embedding, c(position)),
        nn.Sequential(share_embedding, c(position)),
        Generator(d_model, d_vocab),
        c(position),
        d_model,
        latent_size,
    )

    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
Example 7
Project: mmdetection Author: open-mmlab File: builder.py License: Apache License 2.0
def build(cfg, registry, default_args=None):
    """Build a module.

    Args:
        cfg (dict, list[dict]): The config of modules; it is either a dict
            or a list of configs.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the
            module. Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        return nn.Sequential(*modules)
    else:
        return build_from_cfg(cfg, registry, default_args)
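The list branch is where nn.Sequential enters: a list of configs becomes a pipeline of built modules. Below is a self-contained toy analogue of this behavior; the dictionary-based build_from_cfg stand-in is hypothetical and far simpler than mmcv's real registry machinery.

import torch.nn as nn

def build_from_cfg(cfg):
    # Toy stand-in for mmcv's build_from_cfg: look the class up by name.
    cfg = dict(cfg)  # avoid mutating the caller's dict
    module_cls = {'Linear': nn.Linear, 'ReLU': nn.ReLU}[cfg.pop('type')]
    return module_cls(**cfg)

def build(cfg):
    if isinstance(cfg, list):
        return nn.Sequential(*[build_from_cfg(c) for c in cfg])
    return build_from_cfg(cfg)

pipeline = build([dict(type='Linear', in_features=8, out_features=4),
                  dict(type='ReLU')])   # -> nn.Sequential of two modules
single = build(dict(type='ReLU'))       # -> a single nn.Module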
Example 8
Project: mmdetection Author: open-mmlab File: single_stage.py License: Apache License 2.0
def init_weights(self, pretrained=None):
    """Initialize the weights in detector.

    Args:
        pretrained (str, optional): Path to pre-trained weights.
            Defaults to None.
    """
    super(SingleStageDetector, self).init_weights(pretrained)
    self.backbone.init_weights(pretrained=pretrained)
    if self.with_neck:
        if isinstance(self.neck, nn.Sequential):
            for m in self.neck:
                m.init_weights()
        else:
            self.neck.init_weights()
    self.bbox_head.init_weights()
Example 9
Project: mmdetection Author: open-mmlab File: two_stage.py License: Apache License 2.0
def init_weights(self, pretrained=None):
    """Initialize the weights in detector.

    Args:
        pretrained (str, optional): Path to pre-trained weights.
            Defaults to None.
    """
    super(TwoStageDetector, self).init_weights(pretrained)
    self.backbone.init_weights(pretrained=pretrained)
    if self.with_neck:
        if isinstance(self.neck, nn.Sequential):
            for m in self.neck:
                m.init_weights()
        else:
            self.neck.init_weights()
    if self.with_rpn:
        self.rpn_head.init_weights()
    if self.with_roi_head:
        self.roi_head.init_weights(pretrained)
Example 10
Project: mmdetection Author: open-mmlab File: ssd_vgg.py License: Apache License 2.0
def _make_extra_layers(self, outplanes):
    layers = []
    kernel_sizes = (1, 3)
    num_layers = 0
    outplane = None
    for i in range(len(outplanes)):
        if self.inplanes == 'S':
            self.inplanes = outplane
            continue
        k = kernel_sizes[num_layers % 2]
        if outplanes[i] == 'S':
            outplane = outplanes[i + 1]
            conv = nn.Conv2d(
                self.inplanes, outplane, k, stride=2, padding=1)
        else:
            outplane = outplanes[i]
            conv = nn.Conv2d(
                self.inplanes, outplane, k, stride=1, padding=0)
        layers.append(conv)
        self.inplanes = outplanes[i]
        num_layers += 1
    if self.input_size == 512:
        layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))

    return nn.Sequential(*layers)
Example 11
Project: neural-fingerprinting Author: StephanZheng File: resnext.py License: BSD 3-Clause "New" or "Revised" License
def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
    super(Block, self).__init__()
    group_width = cardinality * bottleneck_width
    self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
    self.bn1 = nn.BatchNorm2d(group_width)
    self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3,
                           stride=stride, padding=1, groups=cardinality, bias=False)
    self.bn2 = nn.BatchNorm2d(group_width)
    self.conv3 = nn.Conv2d(group_width, self.expansion * group_width,
                           kernel_size=1, bias=False)
    self.bn3 = nn.BatchNorm2d(self.expansion * group_width)

    self.shortcut = nn.Sequential()
    if stride != 1 or in_planes != self.expansion * group_width:
        self.shortcut = nn.Sequential(
            nn.Conv2d(in_planes, self.expansion * group_width,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(self.expansion * group_width)
        )
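Note the empty nn.Sequential(): a Sequential with no submodules returns its input unchanged, so the shortcut defaults to an identity mapping and is replaced by a 1x1 projection only when the shape changes. The matching forward method in this family of CIFAR implementations typically reads as follows (a sketch, shown as a method of the same Block class):

import torch.nn.functional as F

def forward(self, x):
    out = F.relu(self.bn1(self.conv1(x)))
    out = F.relu(self.bn2(self.conv2(out)))
    out = self.bn3(self.conv3(out))
    # An empty nn.Sequential() is an identity, so this line is valid
    # whether or not a 1x1 projection shortcut was installed.
    out += self.shortcut(x)
    return F.relu(out)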
Example 12
Project: neural-fingerprinting Author: StephanZheng File: dpn.py License: BSD 3-Clause "New" or "Revised" License
def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
    super(Bottleneck, self).__init__()
    self.out_planes = out_planes
    self.dense_depth = dense_depth

    self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
    self.bn1 = nn.BatchNorm2d(in_planes)
    self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3,
                           stride=stride, padding=1, groups=32, bias=False)
    self.bn2 = nn.BatchNorm2d(in_planes)
    self.conv3 = nn.Conv2d(in_planes, out_planes + dense_depth, kernel_size=1, bias=False)
    self.bn3 = nn.BatchNorm2d(out_planes + dense_depth)

    self.shortcut = nn.Sequential()
    if first_layer:
        self.shortcut = nn.Sequential(
            nn.Conv2d(last_planes, out_planes + dense_depth,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(out_planes + dense_depth)
        )
Example 13
Project: neural-fingerprinting Author: StephanZheng File: vgg.py License: BSD 3-Clause "New" or "Revised" License
def _make_layers(self, cfg):
    layers = []
    in_channels = 3
    for x in cfg:
        if x == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                       nn.BatchNorm2d(x),
                       nn.ReLU(inplace=True)]
            in_channels = x
    layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
    return nn.Sequential(*layers)

# net = VGG('VGG11')
# x = torch.randn(2,3,32,32)
# print(net(Variable(x)).size())
Example 14
Project: neural-fingerprinting Author: StephanZheng File: googlenet.py License: BSD 3-Clause "New" or "Revised" License
def __init__(self):
    super(GoogLeNet, self).__init__()
    self.pre_layers = nn.Sequential(
        nn.Conv2d(3, 192, kernel_size=3, padding=1),
        nn.BatchNorm2d(192),
        nn.ReLU(True),
    )

    self.a3 = Inception(192,  64,  96, 128, 16,  32,  32)
    self.b3 = Inception(256, 128, 128, 192, 32,  96,  64)

    self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

    self.a4 = Inception(480, 192,  96, 208, 16,  48,  64)
    self.b4 = Inception(512, 160, 112, 224, 24,  64,  64)
    self.c4 = Inception(512, 128, 128, 256, 24,  64,  64)
    self.d4 = Inception(512, 112, 144, 288, 32,  64,  64)
    self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

    self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
    self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

    self.avgpool = nn.AvgPool2d(8, stride=1)
    self.linear = nn.Linear(1024, 10)
Example 15
Project: neural-fingerprinting Author: StephanZheng File: shufflenet.py License: BSD 3-Clause "New" or "Revised" License
def __init__(self, in_planes, out_planes, stride, groups):
    super(Bottleneck, self).__init__()
    self.stride = stride

    # Integer division: channel counts must be ints (out_planes/4 would
    # yield a float under Python 3 and break nn.Conv2d).
    mid_planes = out_planes // 4
    g = 1 if in_planes == 24 else groups
    self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
    self.bn1 = nn.BatchNorm2d(mid_planes)
    self.shuffle1 = ShuffleBlock(groups=g)
    self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3,
                           stride=stride, padding=1, groups=mid_planes, bias=False)
    self.bn2 = nn.BatchNorm2d(mid_planes)
    self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
    self.bn3 = nn.BatchNorm2d(out_planes)

    self.shortcut = nn.Sequential()
    if stride == 2:
        self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))
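Here the stride-2 shortcut is an average pool rather than an identity, because at stride 2 the residual and shortcut are concatenated along the channel dimension instead of added. A sketch of the corresponding forward, assuming the structure above (omitting a ReLU after the depthwise conv2, as in the ShuffleNet paper):

import torch
import torch.nn.functional as F

def forward(self, x):
    out = F.relu(self.bn1(self.conv1(x)))
    out = self.shuffle1(out)
    out = self.bn2(self.conv2(out))  # depthwise conv; no ReLU here
    out = self.bn3(self.conv3(out))
    res = self.shortcut(x)
    # Stride 2: concatenate channels; stride 1: ordinary residual addition.
    return F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out + res)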
Example 16
Project: neural-fingerprinting Author: StephanZheng File: senet.py License: BSD 3-Clause "New" or "Revised" License
def __init__(self, in_planes, planes, stride=1):
    super(BasicBlock, self).__init__()
    self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
    self.bn1 = nn.BatchNorm2d(planes)
    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
    self.bn2 = nn.BatchNorm2d(planes)

    self.shortcut = nn.Sequential()
    if stride != 1 or in_planes != planes:
        self.shortcut = nn.Sequential(
            nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes)
        )

    # SE layers
    self.fc1 = nn.Conv2d(planes, planes // 16, kernel_size=1)  # Use nn.Conv2d instead of nn.Linear
    self.fc2 = nn.Conv2d(planes // 16, planes, kernel_size=1)
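The fc1/fc2 pair implements the squeeze-and-excitation gate with 1x1 convolutions. A sketch of how a forward pass typically applies them in this style of implementation (torch.sigmoid stands in for the deprecated F.sigmoid):

import torch
import torch.nn.functional as F

def forward(self, x):
    out = F.relu(self.bn1(self.conv1(x)))
    out = self.bn2(self.conv2(out))
    # Squeeze: global average pool to 1x1; excite: two 1x1 convs and a sigmoid gate.
    w = F.avg_pool2d(out, out.size(2))
    w = torch.sigmoid(self.fc2(F.relu(self.fc1(w))))
    out = out * w
    out += self.shortcut(x)
    return F.relu(out)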
Example 17
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection Author: Sunarker File: mobilenet_v1.py License: MIT License
def _init_head_tail(self):
    self.mobilenet = mobilenet_v1_base()

    # Fix blocks
    assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
    for m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:
        for p in m.parameters():
            p.requires_grad = False

    def set_bn_fix(m):
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
            for p in m.parameters():
                p.requires_grad = False

    self.mobilenet.apply(set_bn_fix)

    # Add weight decay
    def l2_regularizer(m, wd):
        if m.__class__.__name__.find('Conv') != -1:
            # Use the wd argument, so that passing 0 actually disables decay.
            m.weight.weight_decay = wd

    if cfg.MOBILENET.REGU_DEPTH:
        self.mobilenet.apply(lambda x: l2_regularizer(x, cfg.MOBILENET.WEIGHT_DECAY))
    else:
        self.mobilenet.apply(lambda x: l2_regularizer(x, 0))
    # Always set weight decay on the first conv layer.
    list(self.mobilenet.children())[0].apply(
        lambda x: l2_regularizer(x, cfg.MOBILENET.WEIGHT_DECAY))

    # Build mobilenet.
    self._layers['head'] = nn.Sequential(*list(self.mobilenet.children())[:12])
    self._layers['tail'] = nn.Sequential(*list(self.mobilenet.children())[12:])
Example 18
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection Author: Sunarker File: vgg16.py License: MIT License
def _init_head_tail(self):
    self.vgg = models.vgg16()
    # Remove fc8
    self.vgg.classifier = nn.Sequential(
        *list(self.vgg.classifier._modules.values())[:-1])

    # Fix the layers before conv3:
    for layer in range(10):
        for p in self.vgg.features[layer].parameters():
            p.requires_grad = False

    # Not using the last maxpool layer
    self._layers['head'] = nn.Sequential(
        *list(self.vgg.features._modules.values())[:-1])
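As a side note, nn.Sequential also supports slice indexing and returns a new nn.Sequential, so the rebuilds above can be written more compactly (a sketch, assuming a reasonably recent PyTorch):

# Slicing an nn.Sequential yields a new nn.Sequential of the selected modules.
self.vgg.classifier = self.vgg.classifier[:-1]   # drop fc8
self._layers['head'] = self.vgg.features[:-1]    # drop the last maxpool layer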