Python torch.nn.Module() Examples
The following are 30 code examples of torch.nn.Module().
You may also want to check out all available functions/classes of the module torch.nn, or try the search function.
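For orientation, here is a minimal sketch of the subclassing pattern most of the examples below build on (the layer sizes are arbitrary):

import torch
import torch.nn as nn

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        # Submodules assigned as attributes are registered automatically,
        # so their parameters appear in self.parameters() and state_dict().
        self.fc = nn.Linear(4, 2)

    def forward(self, x):
        return torch.relu(self.fc(x))

net = TinyNet()
out = net(torch.randn(1, 4))  # calling the module dispatches to forward()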

Example #1
Source File: hooks.py From mmdetection with Apache License 2.0

def wrap_fp16_model(model):
    """Wrap the FP32 model to FP16.

    1. Convert the FP32 model to FP16.
    2. Keep some necessary layers in FP32, e.g., normalization layers.

    Args:
        model (nn.Module): Model in FP32.
    """
    # convert model to fp16
    model.half()
    # patch the normalization layers to make them work in fp32 mode
    patch_norm_fp32(model)
    # set `fp16_enabled` flag
    for m in model.modules():
        if hasattr(m, 'fp16_enabled'):
            m.fp16_enabled = True
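patch_norm_fp32 is defined elsewhere in mmdetection. As a rough standalone sketch of the same idea — keep normalization layers in FP32 and cast their inputs up and their outputs back down — it might look like this (not mmdetection's exact implementation):

import torch.nn as nn

def patch_norm_fp32_sketch(module):
    # Minimal sketch of the norm-patching idea, assuming FP16 activations.
    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
        module.float()  # keep norm parameters and running stats in FP32
        old_forward = module.forward
        # Cast FP16 activations up for the norm, then back down.
        module.forward = lambda x: old_forward(x.float()).half()
    for child in module.children():
        patch_norm_fp32_sketch(child)
    return module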
Example #2
Source File: modules.py From ScenarioMeta with MIT License

def __init__(self, input_size, query_size, value_size, head_num, dropout=0.0,
             concatenate=True, configurable=False, use_dot=True):
    nn.Module.__init__(self)
    self.use_dot = use_dot
    if use_dot:
        self.query_heads = nn.Linear(input_size, head_num * query_size, bias=True)
    else:
        self.query_heads = nn.Linear(query_size + input_size, head_num, bias=False)
    self.head_num = head_num
    self.concatenate = concatenate
    self.input_size = input_size
    self.value_size = value_size
    if concatenate:
        self.value_proj = nn.Linear(value_size, input_size)
    else:
        self.value_proj = nn.Linear(value_size, input_size * head_num)
    if configurable:
        self.param_divide(self.query_heads, with_query=True)
        self.param_divide(self.value_proj, with_query=True)
    if dropout > 0.0:
        self.attn_dropout = nn.Dropout(dropout)
    else:
        self.attn_dropout = None
    self.attn = None
Example #3
Source File: modules.py From ScenarioMeta with MIT License

def __init__(self, useritem_embeds, source_ratings, item_padding_idx, input_size, hidden_layers):
    nn.Module.__init__(self)
    self.useritem_embeds = useritem_embeds
    self.source_ratings = source_ratings
    self.item_padding_idx = item_padding_idx
    last_size = input_size * 2
    layers1, layers2, transfer_layers = [], [], []
    for hidden_size in hidden_layers:
        layers1.append(nn.Linear(last_size, hidden_size))
        layers2.append(nn.Linear(last_size, hidden_size))
        transfer_layers.append(nn.Linear(last_size, hidden_size))
        last_size = hidden_size
    self.target_layers = nn.ModuleList(layers1)
    self.auxiliary_layers = nn.ModuleList(layers2)
    self.transfer_layers = nn.ModuleList(transfer_layers)
    self.target_output = nn.Linear(last_size, 1)
    self.auxiliary_output = nn.Linear(last_size, 1)
Example #4
Source File: base.py From fast-MPN-COV with MIT License

def _reconstruct_inception(self, basemodel):
    model = nn.Module()
    model.features = nn.Sequential(basemodel.Conv2d_1a_3x3,
                                   basemodel.Conv2d_2a_3x3,
                                   basemodel.Conv2d_2b_3x3,
                                   nn.MaxPool2d(kernel_size=3, stride=2),
                                   basemodel.Conv2d_3b_1x1,
                                   basemodel.Conv2d_4a_3x3,
                                   nn.MaxPool2d(kernel_size=3, stride=2),
                                   basemodel.Mixed_5b,
                                   basemodel.Mixed_5c,
                                   basemodel.Mixed_5d,
                                   basemodel.Mixed_6a,
                                   basemodel.Mixed_6b,
                                   basemodel.Mixed_6c,
                                   basemodel.Mixed_6d,
                                   basemodel.Mixed_6e,
                                   basemodel.Mixed_7a,
                                   basemodel.Mixed_7b,
                                   basemodel.Mixed_7c)
    model.representation = nn.AdaptiveAvgPool2d((1, 1))
    model.classifier = basemodel.fc
    model.representation_dim = basemodel.fc.weight.size(1)
    return model
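As this example shows, nn.Module() instantiated directly serves as a plain container: any submodule assigned to it is registered, so state_dict() and parameters() traverse it like a hand-written module. A small illustration:

import torch.nn as nn

m = nn.Module()
m.features = nn.Linear(8, 4)
m.classifier = nn.Linear(4, 2)
print(sorted(m.state_dict().keys()))
# ['classifier.bias', 'classifier.weight', 'features.bias', 'features.weight']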
Example #5
Source File: albunet.py From neural-pipeline with MIT License

def __init__(self, base_model: torch.nn.Module, num_classes: int, weights_url: str = None):
    super().__init__()
    if not hasattr(self, 'decoder_block'):
        self.decoder_block = UnetDecoderBlock
    if not hasattr(self, 'bottleneck_type'):
        self.bottleneck_type = ConvBottleneck

    if weights_url is not None:
        print("Model weights inited by url")
        pretrained_weights = model_zoo.load_url(weights_url)
        model_state_dict = base_model.state_dict()
        pretrained_weights = {k: v for k, v in pretrained_weights.items()
                              if k in model_state_dict}
        base_model.load_state_dict(pretrained_weights)

    filters = [64, 64, 128, 256, 512]

    self.bottlenecks = nn.ModuleList([self.bottleneck_type(f * 2, f)
                                      for f in reversed(filters[:-1])])
    self.decoder_stages = nn.ModuleList([self.get_decoder(filters, idx)
                                         for idx in range(1, len(filters))])
    self.encoder_stages = nn.ModuleList([self.get_encoder(base_model, idx)
                                         for idx in range(len(filters))])

    self.last_upsample = self.decoder_block(filters[0], filters[0])
    self.final = self.make_final_classifier(filters[0], num_classes)
Example #6
Source File: meta.py From ScenarioMeta with MIT License

def __init__(self, hidden_size, layer_norm=False, input_gate=True, forget_gate=True):
    nn.Module.__init__(self)
    self.hidden_size = hidden_size
    # input features: gradient (2), param (2), loss
    self.lstm = nn.LSTMCell(input_size=5, hidden_size=hidden_size)
    if layer_norm:
        self.layer_norm = nn.LayerNorm(hidden_size)
    else:
        self.layer_norm = None
    self.input_gate = input_gate
    self.forget_gate = forget_gate
    if self.input_gate:
        self.lr_layer = nn.Linear(hidden_size, 1)
        self.lrs = []
    else:
        self.output_layer = nn.Linear(hidden_size, 1)
        self.dets = []
    if forget_gate:
        self.fg_layer = nn.Linear(hidden_size, 1)
        self.fgs = []
    self.h_0 = nn.Parameter(torch.randn((hidden_size,), requires_grad=True))
    self.c_0 = nn.Parameter(torch.randn((hidden_size,), requires_grad=True))
Example #7
Source File: pytorch_utils.py From H3DNet with MIT License

def __init__(self, model, bn_lambda, last_epoch=-1, setter=set_bn_momentum_default):
    if not isinstance(model, nn.Module):
        raise RuntimeError(
            "Class '{}' is not a PyTorch nn Module".format(type(model).__name__)
        )

    self.model = model
    self.setter = setter
    self.lmbd = bn_lambda

    self.step(last_epoch + 1)
    self.last_epoch = last_epoch
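The default setter referenced above is defined elsewhere in the same file; reconstructed from how it is used here, it is presumably a closure along these lines:

import torch.nn as nn

def set_bn_momentum_default(bn_momentum):
    def fn(m):
        # update the momentum of every BatchNorm layer in the model
        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            m.momentum = bn_momentum
    return fn

# The scheduler's step() would then apply it via model.apply(setter(momentum)).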
Example #8
Source File: utils.py From deep-learning-note with MIT License

def evaluate_accuracy(data_iter, net,
                      device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval()  # evaluation mode, which disables dropout
                acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                net.train()  # switch back to training mode
            else:  # a custom model rather than an nn.Module
                if 'is_training' in net.__code__.co_varnames:
                    acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
                else:
                    acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n
Example #9
Source File: nnet.py From nmp_qc with MIT License

def num_flat_features(self, x):
    size = x.size()[1:]  # all dimensions except the batch dimension
    num_features = 1
    for s in size:
        num_features *= s
    return num_features

# class NNetM(nn.Module):
#
#     def __init__(self, n_in, n_out):
#         super(NNetM, self).__init__()
#
#         self.fc1 = nn.Linear(n_in, 120)
#         self.fc2 = nn.Linear(120, 84)
#         self.fc3 = nn.Linear(84, n_out[0]*n_out[1])
#
#     def forward(self, x):
#         x = F.relu(self.fc1(x))
#         x = F.relu(self.fc2(x))
#         x = self.fc3(x)
#         return x
Example #10
Source File: models.py From A2C with MIT License

def forward(self, conv_in):
    """Module forward pass

    Args:
        conv_in (Variable): convolutional input, shaped [N x 4 x 84 x 84]

    Returns:
        pi (Variable): action probability logits, shaped [N x self.num_actions]
        v (Variable): value predictions, shaped [N x 1]
    """
    N = conv_in.size()[0]
    conv_out = self.conv(conv_in).view(N, 64 * 7 * 7)
    fc_out = self.fc(conv_out)
    pi_out = self.pi(fc_out)
    v_out = self.v(fc_out)
    return pi_out, v_out
Example #11
Source File: bn.py From pytorch-segmentation-toolbox with MIT License

def __init__(self, num_features, activation=nn.ReLU(inplace=True), **kwargs):
    """Creates an Activated Batch Normalization module

    Parameters
    ----------
    num_features : int
        Number of feature channels in the input and output.
    activation : nn.Module
        Module used as an activation function.
    kwargs
        All other arguments are forwarded to the `BatchNorm2d` constructor.
    """
    super(ABN, self).__init__(OrderedDict([
        ("bn", nn.BatchNorm2d(num_features, **kwargs)),
        ("act", activation)
    ]))
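Since the constructor forwards an OrderedDict of named children to its parent class, ABN is evidently built on nn.Sequential. A hedged usage sketch, assuming ABN from the same file is in scope:

import torch

abn = ABN(64)  # BatchNorm2d(64) followed by ReLU(inplace=True)
y = abn(torch.randn(2, 64, 8, 8))  # normalized and activated, same shape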
Example #12
Source File: resnet101.py From easy-faster-rcnn.pytorch with MIT License

def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
    resnet101 = torchvision.models.resnet101(pretrained=self._pretrained)

    # list(resnet101.children()) consists of the following modules:
    #   [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU, [3] = MaxPool2d,
    #   [4] = Sequential(Bottleneck...), [5] = Sequential(Bottleneck...),
    #   [6] = Sequential(Bottleneck...), [7] = Sequential(Bottleneck...),
    #   [8] = AvgPool2d, [9] = Linear
    children = list(resnet101.children())
    features = children[:-3]
    num_features_out = 1024

    hidden = children[-3]
    num_hidden_out = 2048

    for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:
        for parameter in parameters:
            parameter.requires_grad = False

    features = nn.Sequential(*features)

    return features, hidden, num_features_out, num_hidden_out
Example #13
Source File: checkpoint.py From Res2Net-maskrcnn with MIT License

def create_complex_model(self):
    m = nn.Module()
    m.block1 = nn.Module()
    m.block1.layer1 = nn.Linear(2, 3)
    m.layer2 = nn.Linear(3, 2)
    m.res = nn.Module()
    m.res.layer2 = nn.Linear(3, 2)

    state_dict = OrderedDict()
    state_dict["layer1.weight"] = torch.rand(3, 2)
    state_dict["layer1.bias"] = torch.rand(3)
    state_dict["layer2.weight"] = torch.rand(2, 3)
    state_dict["layer2.bias"] = torch.rand(2)
    state_dict["res.layer2.weight"] = torch.rand(2, 3)
    state_dict["res.layer2.bias"] = torch.rand(2)

    return m, state_dict
Example #14
Source File: resnet18.py From easy-faster-rcnn.pytorch with MIT License

def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
    resnet18 = torchvision.models.resnet18(pretrained=self._pretrained)

    # list(resnet18.children()) consists of the following modules
    # (resnet18 uses BasicBlock rather than Bottleneck):
    #   [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU, [3] = MaxPool2d,
    #   [4] = Sequential(BasicBlock...), [5] = Sequential(BasicBlock...),
    #   [6] = Sequential(BasicBlock...), [7] = Sequential(BasicBlock...),
    #   [8] = AvgPool2d, [9] = Linear
    children = list(resnet18.children())
    features = children[:-3]
    num_features_out = 256

    hidden = children[-3]
    num_hidden_out = 512

    for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:
        for parameter in parameters:
            parameter.requires_grad = False

    features = nn.Sequential(*features)

    return features, hidden, num_features_out, num_hidden_out
Example #15
Source File: resnet50.py From easy-faster-rcnn.pytorch with MIT License

def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
    resnet50 = torchvision.models.resnet50(pretrained=self._pretrained)

    # list(resnet50.children()) consists of the following modules:
    #   [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU, [3] = MaxPool2d,
    #   [4] = Sequential(Bottleneck...), [5] = Sequential(Bottleneck...),
    #   [6] = Sequential(Bottleneck...), [7] = Sequential(Bottleneck...),
    #   [8] = AvgPool2d, [9] = Linear
    children = list(resnet50.children())
    features = children[:-3]
    num_features_out = 1024

    hidden = children[-3]
    num_hidden_out = 2048

    for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:
        for parameter in parameters:
            parameter.requires_grad = False

    features = nn.Sequential(*features)

    return features, hidden, num_features_out, num_hidden_out
Example #16
Source File: builder.py From mmdetection with Apache License 2.0

def build(cfg, registry, default_args=None):
    """Build a module.

    Args:
        cfg (dict, list[dict]): The config of modules; it is either a dict
            or a list of configs.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the
            module. Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if isinstance(cfg, list):
        modules = [build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg]
        return nn.Sequential(*modules)
    else:
        return build_from_cfg(cfg, registry, default_args)
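build_from_cfg comes from mmdetection's registry machinery. A minimal sketch of what it does — look up the class registered under cfg['type'] and instantiate it with the remaining keys — not the exact mmdetection implementation:

def build_from_cfg_sketch(cfg, registry, default_args=None):
    args = dict(cfg)
    obj_cls = registry.get(args.pop('type'))  # class registered under this name
    if default_args is not None:
        for name, value in default_args.items():
            args.setdefault(name, value)
    return obj_cls(**args)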
Example #17
Source File: resnet_v1.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License

def train(self, mode=True):
    # Override train so that the training mode is set as we want
    nn.Module.train(self, mode)
    if mode:
        # Set fixed blocks to be in eval mode (not really doing anything)
        self.resnet.eval()
        if cfg.RESNET.FIXED_BLOCKS <= 3:
            self.resnet.layer4.train()
        if cfg.RESNET.FIXED_BLOCKS <= 2:
            self.resnet.layer3.train()
        if cfg.RESNET.FIXED_BLOCKS <= 1:
            self.resnet.layer2.train()
        if cfg.RESNET.FIXED_BLOCKS == 0:
            self.resnet.layer1.train()

        # Set batchnorm always in eval mode during training
        def set_bn_eval(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                m.eval()

        self.resnet.apply(set_bn_eval)
Example #18
Source File: maml.py From metalearn-leap with Apache License 2.0

def maml_inner_step(input, output, model, optimizer, criterion, create_graph):
    """Create a computation graph through the gradient operation.

    Arguments:
        input (torch.Tensor): input tensor.
        output (torch.Tensor): target tensor.
        model (torch.nn.Module): task learner.
        optimizer (maml.optim): optimizer for inner loop.
        criterion (func): loss criterion.
        create_graph (bool): create graph through gradient step.
    """
    new_parameters = None
    prediction = model(input)
    loss = criterion(prediction, output)
    loss.backward(create_graph=create_graph, retain_graph=create_graph)

    if create_graph:
        _, new_parameters = optimizer.step(retain_graph=create_graph)
    else:
        optimizer.step(retain_graph=create_graph)

    return loss, prediction, new_parameters
Example #19
Source File: concept_embedding.py From NSCL-PyTorch-Release with MIT License

def __init__(self, attribute_agnostic):
    super().__init__()
    self.attribute_agnostic = attribute_agnostic
    self.all_attributes = list()
    self.all_concepts = list()
    self.attribute_operators = nn.Module()
    self.concept_embeddings = nn.Module()
Example #20
Source File: fpn.py From Res2Net-maskrcnn with MIT License

def __init__(self, in_channels_list, out_channels, conv_block, top_blocks=None):
    """
    Arguments:
        in_channels_list (list[int]): number of channels for each feature map
            that will be fed
        out_channels (int): number of channels of the FPN representation
        top_blocks (nn.Module or None): if provided, an extra operation will
            be performed on the output of the last (smallest resolution) FPN
            output, and the result will extend the result list
    """
    super(FPN, self).__init__()
    self.inner_blocks = []
    self.layer_blocks = []
    for idx, in_channels in enumerate(in_channels_list, 1):
        inner_block = "fpn_inner{}".format(idx)
        layer_block = "fpn_layer{}".format(idx)

        if in_channels == 0:
            continue
        inner_block_module = conv_block(in_channels, out_channels, 1)
        layer_block_module = conv_block(out_channels, out_channels, 3, 1)
        self.add_module(inner_block, inner_block_module)
        self.add_module(layer_block, layer_block_module)
        self.inner_blocks.append(inner_block)
        self.layer_blocks.append(layer_block)
    self.top_blocks = top_blocks
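The string-keyed add_module calls let the FPN skip zero-channel entries while still giving each block a stable name built at runtime, which plain attribute assignment cannot do. A small illustration:

import torch.nn as nn

m = nn.Module()
for idx in range(1, 3):
    # register a child under a name constructed at runtime
    m.add_module("fpn_inner{}".format(idx), nn.Conv2d(256, 64, 1))
print([name for name, _ in m.named_children()])  # ['fpn_inner1', 'fpn_inner2']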
Example #21
Source File: recurrent.py From Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch with MIT License

def forward(self, inputs, hidden):
    def zoneout(h, next_h, prob):
        if isinstance(h, tuple):
            num_h = len(h)
            if not isinstance(prob, tuple):
                prob = tuple([prob] * num_h)
            return tuple([zoneout(h[i], next_h[i], prob[i]) for i in range(num_h)])
        # sample a per-element update mask of the same shape as h
        mask = h.new_empty(h.size()).bernoulli_(prob)
        return mask * next_h + (1 - mask) * h

    next_hidden = self.cell(inputs, hidden)
    next_hidden = zoneout(hidden, next_hidden, self.zoneout_prob)
    return next_hidden

# class DropoutHiddenCell(nn.Module):
#
#     def __init__(self, cell, dropout_hidden=0, dropout_all_states=True):
#         super(DropoutHiddenCell, self).__init__()
#         self.cell = cell
#         self.hidden_size = cell.hidden_size
#         self.dropout_all_states = dropout_all_states
#         self.dropout_hidden = nn.Dropout(dropout_hidden)
#
#     def forward(self, inputs, hidden):
#         next_hidden = self.cell(inputs, hidden)
#         if isinstance(next_hidden, tuple):
#             if self.dropout_all_states:
#                 next_hidden = tuple([self.dropout_hidden(h_i) for h_i in next_hidden])
#         else:
#             next_hidden = self.dropout_hidden(next_hidden)
#         return next_hidden
Example #22
Source File: summarizable_module.py From L3C-PyTorch with GNU General Public License v3.0

def test_submodules():
    class _T(nn.Module):
        def __init__(self):
            super(_T, self).__init__()
            self.foo = []

        def register_foo(self, f):
            self.foo.append(f)

        def get_all(self):
            for m_ in iter_modules_of_class(self, _T):
                yield from m_.foo

    class _SomethingWithTs(nn.Module):
        def __init__(self):
            super(_SomethingWithTs, self).__init__()
            self.a_t = _T()

    class _M(_T):  # first T
        def __init__(self):
            super(_M, self).__init__()
            self.conv = nn.Conv2d(1, 2, 3)
            self.t = _T()  # here
            self.t.register_foo(1)
            self.list = nn.ModuleList([nn.Conv2d(1, 2, 3), _T()])  # here
            inner = _T()
            self.seq = nn.Sequential(
                nn.Conv2d(1, 2, 3),
                inner,               # here
                _SomethingWithTs())  # here
            inner.register_foo(2)

    m = _M()
    all_ts = list(iter_modules_of_class(m, _T))
    assert len(all_ts) == 5, all_ts
    assert list(m.get_all()) == [1, 2]
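iter_modules_of_class is defined elsewhere in L3C-PyTorch; judging by this test (five _T instances found, including _M itself), it presumably reduces to filtering Module.modules() by class:

def iter_modules_of_class(root, cls):
    # Hedged reconstruction: yield every submodule of `root`
    # (root included) that is an instance of `cls`.
    for m in root.modules():
        if isinstance(m, cls):
            yield m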
Example #23
Source File: loss.py From ScenarioMeta with MIT License

def __init__(self, margin=0.0):
    nn.Module.__init__(self)
    self.m = nn.MarginRankingLoss(margin=margin)
Example #24
Source File: loss.py From ScenarioMeta with MIT License

def __init__(self):
    nn.Module.__init__(self)
    self.m = nn.LogSigmoid()
Example #25
Source File: meta_optimizers.py From L2T-ww with MIT License

def test_metaSGD():
    v1 = torch.nn.Parameter(torch.Tensor([1., 3.]))
    v2 = torch.nn.Parameter(torch.Tensor([[-1., -2.], [1., 0.]]))
    module = nn.Module()
    module.v1 = v1
    module.v2 = v2
    lmbd = torch.nn.Parameter(torch.zeros(2, 2))
    sgd = MetaSGD([v1, v2], [module], lr=0.1, momentum=0.9, weight_decay=0.01)

    def inner_objective():
        return v1.pow(2).mean() + (lmbd * (v2 ** 2)).sum()

    def outer_objective():
        return (v1 * v2).mean()

    for _ in range(10):
        sgd.zero_grad()
        sgd.step(inner_objective)

    sgd.zero_grad()
    lmbd.grad.zero_()
    outer_objective().backward()
    sgd.meta_backward()
    print(lmbd.grad)
Example #26
Source File: SemBranch.py From Semantic-Aware-Scene-Recognition with MIT License

def forward(self, x):
    out = self.conv(x)
    out = self.bn(out)

    # Channel Attention Module
    out = self.ca(out) * out

    out = self.relu(out)
    return out
Example #27
Source File: SASceneNet.py From Semantic-Aware-Scene-Recognition with MIT License

def forward(self, x):
    out = self.conv(x)
    out = self.bn(out)

    # Channel Attention Module
    out = self.ca(out) * out

    out = self.relu(out)
    return out
Example #28
Source File: mlp.py From ConvLab with MIT License

def __init__(self, net_spec, in_dim, out_dim):
    nn.Module.__init__(self)
    Net.__init__(self, net_spec, in_dim, out_dim)
    # set defaults
    util.set_attr(self, dict(
        init_fn=None,
        clip_grad_val=None,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_scheduler_spec=None,
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'shared',
        'hid_layers',
        'hid_layers_activation',
        'init_fn',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_scheduler_spec',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    # Guard against inappropriate algorithms and environments
    # Build model body
    dims = [self.in_dim] + self.hid_layers
    self.model_body = net_util.build_fc_model(dims, self.hid_layers_activation)
    # output layers
    self.v = nn.Linear(dims[-1], 1)  # state value
    self.adv = nn.Linear(dims[-1], out_dim)  # action-dependent raw advantage
    net_util.init_layers(self, self.init_fn)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.to(self.device)
Example #29
Source File: common_utils.py From interpret-text with MIT License

def _common_pytorch_generator(numCols, numClasses=None):
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # Apply layer normalization for stability and performance on a
            # wide variety of datasets: https://arxiv.org/pdf/1607.06450.pdf
            self.norm = nn.LayerNorm(numCols)
            self.fc1 = nn.Linear(numCols, 100)
            self.fc2 = nn.Dropout(p=0.2)
            if numClasses is None:
                self.fc3 = nn.Linear(100, 3)
                self.output = nn.Linear(3, 1)
            elif numClasses == 2:
                self.fc3 = nn.Linear(100, 2)
                self.output = nn.Sigmoid()
            else:
                self.fc3 = nn.Linear(100, numClasses)
                # explicit dim avoids the implicit-dimension warning
                self.output = nn.Softmax(dim=1)

        def forward(self, X):
            X = self.norm(X)
            X = F.relu(self.fc1(X))
            X = self.fc2(X)
            X = self.fc3(X)
            X = self.output(X)
            return X

    return Net()
Example #30
Source File: network.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License

def load_state_dict(self, state_dict):
    """
    Because we removed the definition of the fc layer in resnet, loading a
    model trained before that change would fail. To provide backward
    compatibility, we overwrite load_state_dict.
    """
    nn.Module.load_state_dict(
        self, {k: state_dict[k] for k in list(self.state_dict())})
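A related filtered-load idiom appears in Example #5 above; shown standalone, the common pattern is to load only the keys the checkpoint and the current model share:

import torch
import torch.nn as nn

model = nn.Linear(3, 2)
checkpoint = {"weight": torch.zeros(2, 3), "fc.bias": torch.zeros(2)}  # one stale key
filtered = {k: v for k, v in checkpoint.items() if k in model.state_dict()}
model.load_state_dict(filtered, strict=False)  # loads 'weight', ignores the rest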