Python torch.nn.Module() Examples

The following are 30 code examples showing how to use torch.nn.Module(). They are extracted from open source projects; where available, the project, author, file, and license are listed above each example.

You may also want to check out all the available functions and classes of the module torch.nn.

Example 1
def train(self, mode=True):
    # Override train so that the training mode is set as we want
    nn.Module.train(self, mode)
    if mode:
      # Set fixed blocks to be in eval mode (not really doing anything)
      self.resnet.eval()
      if cfg.RESNET.FIXED_BLOCKS <= 3:
        self.resnet.layer4.train()
      if cfg.RESNET.FIXED_BLOCKS <= 2:
        self.resnet.layer3.train()
      if cfg.RESNET.FIXED_BLOCKS <= 1:
        self.resnet.layer2.train()
      if cfg.RESNET.FIXED_BLOCKS == 0:
        self.resnet.layer1.train()

      # Set batchnorm always in eval mode during training
      def set_bn_eval(m):
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
          m.eval()

      self.resnet.apply(set_bn_eval) 
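
Because nn.Module.train() recursively flips the training flag on every submodule, this override re-applies eval mode to the frozen ResNet blocks and to all BatchNorm layers each time the network is switched to training. A quick sanity check of that behavior (a hypothetical usage, assuming a model built with cfg.RESNET.FIXED_BLOCKS = 1):

net.train()                             # triggers the override above
assert not net.resnet.layer1.training   # fixed block stays in eval mode
assert net.resnet.layer2.training       # unfixed block is trainable again
# every BatchNorm layer was forced back into eval mode by set_bn_eval
assert all(not m.training for m in net.resnet.modules()
           if 'BatchNorm' in m.__class__.__name__)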
Example 2
Project: mmdetection   Author: open-mmlab   File: builder.py    License: Apache License 2.0
def build(cfg, registry, default_args=None):
    """Build a module.

    Args:
        cfg (dict, list[dict]): The config of the module(s); it is either
            a config dict or a list of config dicts.
        registry (:obj:`Registry`): A registry the module belongs to.
        default_args (dict, optional): Default arguments to build the module.
            Defaults to None.

    Returns:
        nn.Module: A built nn module.
    """
    if isinstance(cfg, list):
        modules = [
            build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
        ]
        return nn.Sequential(*modules)
    else:
        return build_from_cfg(cfg, registry, default_args) 
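
At a call site, cfg is a dict whose type key names a class registered in registry (build_from_cfg is mmcv's helper that instantiates it); a list of configs yields an nn.Sequential of the built modules. A hypothetical usage, where the BACKBONES registry and the config entries are illustrative:

# Single config dict -> a single module.
backbone = build(dict(type='ResNet', depth=50), BACKBONES)

# List of config dicts -> nn.Sequential of the built modules.
convs = build([dict(type='ConvBlock', channels=256),
               dict(type='ConvBlock', channels=256)], BACKBONES)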
Example 3
Project: mmdetection   Author: open-mmlab   File: hooks.py    License: Apache License 2.0
def wrap_fp16_model(model):
    """Wrap the FP32 model to FP16.

    1. Convert FP32 model to FP16.
    2. Keep some necessary layers in FP32, e.g., normalization layers.

    Args:
        model (nn.Module): Model in FP32.
    """
    # convert model to fp16
    model.half()
    # patch the normalization layers so they still run in FP32
    patch_norm_fp32(model)
    # set `fp16_enabled` flag
    for m in model.modules():
        if hasattr(m, 'fp16_enabled'):
            m.fp16_enabled = True 
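
patch_norm_fp32 is defined elsewhere in the project; the idea is to convert normalization layers back to FP32 and cast activations at their boundary, since BatchNorm statistics are numerically fragile in half precision. A simplified sketch of that idea (not the project's exact code):

import torch.nn as nn

def patch_norm_fp32(module):
    if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
        module.float()  # keep parameters and buffers in FP32
        orig_forward = module.forward
        def forward_fp32(x, _orig=orig_forward):
            # cast the FP16 input up, run the norm in FP32, cast back down
            return _orig(x.float()).half()
        module.forward = forward_fp32
    for child in module.children():
        patch_norm_fp32(child)
    return module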
Example 4
Project: nmp_qc   Author: priba   File: nnet.py    License: MIT License
def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

# class NNetM(nn.Module):
#
#     def __init__(self, n_in, n_out):
#         super(NNetM, self).__init__()
#
#         self.fc1 = nn.Linear(n_in, 120)
#         self.fc2 = nn.Linear(120, 84)
#         self.fc3 = nn.Linear(84, n_out[0]*n_out[1])
#
#     def forward(self, x):
#
#         x = F.relu(self.fc1(x))
#         x = F.relu(self.fc2(x))
#         x = self.fc3(x)
#         return x 
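
num_flat_features returns the number of elements per sample, so a convolutional feature map can be reshaped before a fully connected layer. A typical forward pass using it might look like this (conv1 and fc1 are illustrative; on current PyTorch, torch.flatten(x, 1) does the same reshape):

def forward(self, x):
    x = F.relu(self.conv1(x))
    x = x.view(-1, self.num_flat_features(x))  # flatten all but the batch dim
    x = self.fc1(x)
    return x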
Example 5
Project: deep-learning-note   Author: wdxtub   File: utils.py    License: MIT License
def evaluate_accuracy(data_iter, net,
                      device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval() # evaluation mode: turns off dropout
                acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                net.train() # switch back to training mode
            else:
                # custom model (a plain function, not an nn.Module)
                if 'is_training' in net.__code__.co_varnames:
                    acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
                else:
                    acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n 
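
A hypothetical call site, assuming test_iter is a standard DataLoader over labeled batches:

test_acc = evaluate_accuracy(test_iter, net)
print('test accuracy: %.4f' % test_acc)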
Example 6
Project: neural-pipeline   Author: toodef   File: albunet.py    License: MIT License
def __init__(self, base_model: torch.nn.Module, num_classes: int, weights_url: str = None):
        super().__init__()
        if not hasattr(self, 'decoder_block'):
            self.decoder_block = UnetDecoderBlock
        if not hasattr(self, 'bottleneck_type'):
            self.bottleneck_type = ConvBottleneck

        if weights_url is not None:
            print("Model weights inited by url")

            pretrained_weights = model_zoo.load_url(weights_url)
            model_state_dict = base_model.state_dict()
            pretrained_weights = {k: v for k, v in pretrained_weights.items() if k in model_state_dict}
            base_model.load_state_dict(pretrained_weights)

        filters = [64, 64, 128, 256, 512]

        self.bottlenecks = nn.ModuleList([self.bottleneck_type(f * 2, f) for f in reversed(filters[:-1])])
        self.decoder_stages = nn.ModuleList([self.get_decoder(filters, idx) for idx in range(1, len(filters))])

        self.encoder_stages = nn.ModuleList([self.get_encoder(base_model, idx) for idx in range(len(filters))])

        self.last_upsample = self.decoder_block(filters[0], filters[0])
        self.final = self.make_final_classifier(filters[0], num_classes) 
Example 7
Project: fast-MPN-COV   Author: jiangtaoxie   File: base.py    License: MIT License
def _reconstruct_inception(self, basemodel):
        model = nn.Module()
        model.features = nn.Sequential(basemodel.Conv2d_1a_3x3,
                                       basemodel.Conv2d_2a_3x3,
                                       basemodel.Conv2d_2b_3x3,
                                       nn.MaxPool2d(kernel_size=3, stride=2),
                                       basemodel.Conv2d_3b_1x1,
                                       basemodel.Conv2d_4a_3x3,
                                       nn.MaxPool2d(kernel_size=3, stride=2),
                                       basemodel.Mixed_5b,
                                       basemodel.Mixed_5c,
                                       basemodel.Mixed_5d,
                                       basemodel.Mixed_6a,
                                       basemodel.Mixed_6b,
                                       basemodel.Mixed_6c,
                                       basemodel.Mixed_6d,
                                       basemodel.Mixed_6e,
                                       basemodel.Mixed_7a,
                                       basemodel.Mixed_7b,
                                       basemodel.Mixed_7c)
        model.representation = nn.AdaptiveAvgPool2d((1, 1))
        model.classifier = basemodel.fc
        model.representation_dim = basemodel.fc.weight.size(1)
        return model 
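
This works because a bare nn.Module() registers any module assigned to one of its attributes as a child, so it can serve as a lightweight, ad hoc container. A minimal self-contained demonstration:

import torch.nn as nn

container = nn.Module()
container.features = nn.Linear(4, 8)    # registered as child 'features'
container.classifier = nn.Linear(8, 2)  # registered as child 'classifier'
print([name for name, _ in container.named_children()])
# ['features', 'classifier']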
Example 8
Project: A2C   Author: lnpalmer   File: models.py    License: MIT License
def forward(self, conv_in):
        """ Module forward pass

        Args:
            conv_in (Variable): convolutional input, shaped [N x 4 x 84 x 84]

        Returns:
            pi (Variable): action probability logits, shaped [N x self.num_actions]
            v (Variable): value predictions, shaped [N x 1]
        """
        N = conv_in.size()[0]

        conv_out = self.conv(conv_in).view(N, 64 * 7 * 7)

        fc_out = self.fc(conv_out)

        pi_out = self.pi(fc_out)
        v_out = self.v(fc_out)

        return pi_out, v_out 
Example 9
Project: easy-faster-rcnn.pytorch   Author: potterhsu   File: resnet101.py    License: MIT License
def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
        resnet101 = torchvision.models.resnet101(pretrained=self._pretrained)

        # list(resnet101.children()) consists of the following modules
        #   [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU,
        #   [3] = MaxPool2d, [4] = Sequential(Bottleneck...),
        #   [5] = Sequential(Bottleneck...),
        #   [6] = Sequential(Bottleneck...),
        #   [7] = Sequential(Bottleneck...),
        #   [8] = AvgPool2d, [9] = Linear
        children = list(resnet101.children())
        features = children[:-3]
        num_features_out = 1024

        hidden = children[-3]
        num_hidden_out = 2048

        for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:
            for parameter in parameters:
                parameter.requires_grad = False

        features = nn.Sequential(*features)

        return features, hidden, num_features_out, num_hidden_out 
Example 10
Project: easy-faster-rcnn.pytorch   Author: potterhsu   File: resnet18.py    License: MIT License
def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
        resnet18 = torchvision.models.resnet18(pretrained=self._pretrained)

        # list(resnet18.children()) consists of the following modules
        #   [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU,
        #   [3] = MaxPool2d, [4] = Sequential(Bottleneck...),
        #   [5] = Sequential(Bottleneck...),
        #   [6] = Sequential(Bottleneck...),
        #   [7] = Sequential(Bottleneck...),
        #   [8] = AvgPool2d, [9] = Linear
        children = list(resnet18.children())
        features = children[:-3]
        num_features_out = 256

        hidden = children[-3]
        num_hidden_out = 512

        for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:
            for parameter in parameters:
                parameter.requires_grad = False

        features = nn.Sequential(*features)

        return features, hidden, num_features_out, num_hidden_out 
Example 11
Project: easy-faster-rcnn.pytorch   Author: potterhsu   File: resnet50.py    License: MIT License
def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
        resnet50 = torchvision.models.resnet50(pretrained=self._pretrained)

        # list(resnet50.children()) consists of the following modules
        #   [0] = Conv2d, [1] = BatchNorm2d, [2] = ReLU,
        #   [3] = MaxPool2d, [4] = Sequential(Bottleneck...),
        #   [5] = Sequential(Bottleneck...),
        #   [6] = Sequential(Bottleneck...),
        #   [7] = Sequential(Bottleneck...),
        #   [8] = AvgPool2d, [9] = Linear
        children = list(resnet50.children())
        features = children[:-3]
        num_features_out = 1024

        hidden = children[-3]
        num_hidden_out = 2048

        for parameters in [feature.parameters() for i, feature in enumerate(features) if i <= 4]:
            for parameter in parameters:
                parameter.requires_grad = False

        features = nn.Sequential(*features)

        return features, hidden, num_features_out, num_hidden_out 
Example 12
Project: pytorch-segmentation-toolbox   Author: speedinghzl   File: bn.py    License: MIT License
def __init__(self, num_features, activation=nn.ReLU(inplace=True), **kwargs):
        """Creates an Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        activation : nn.Module
            Module used as an activation function.
        kwargs
            All other arguments are forwarded to the `BatchNorm2d` constructor.
        """
        super(ABN, self).__init__(OrderedDict([
            ("bn", nn.BatchNorm2d(num_features, **kwargs)),
            ("act", activation)
        ])) 
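
The super().__init__ call indicates that ABN extends nn.Sequential, and passing an OrderedDict to the nn.Sequential constructor names the children, so they are reachable as abn.bn and abn.act. The same naming pattern in isolation:

from collections import OrderedDict
import torch.nn as nn

block = nn.Sequential(OrderedDict([
    ('bn', nn.BatchNorm2d(64)),
    ('act', nn.ReLU(inplace=True)),
]))
print(block.bn)  # children are addressable by the names from the dict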
Example 13
Project: H3DNet   Author: zaiweizhang   File: pytorch_utils.py    License: MIT License
def __init__(
            self, model, bn_lambda, last_epoch=-1,
            setter=set_bn_momentum_default
    ):
        if not isinstance(model, nn.Module):
            raise RuntimeError(
                "Class '{}' is not a PyTorch nn Module".format(
                    type(model).__name__
                )
            )

        self.model = model
        self.setter = setter
        self.lmbd = bn_lambda

        self.step(last_epoch + 1)
        self.last_epoch = last_epoch 
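
The default setter set_bn_momentum_default is defined alongside this scheduler; its job is to return a callable that model.apply can use to update BatchNorm momentum for a given epoch. A sketch of that shape (an assumption about the helper, not its verbatim source):

import torch.nn as nn

def set_bn_momentum_default(bn_momentum):
    def fn(m):
        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            m.momentum = bn_momentum
    return fn

# The scheduler's step then presumably does something like:
#   self.model.apply(self.setter(self.lmbd(epoch)))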
Example 14
Project: ScenarioMeta   Author: THUDM   File: modules.py    License: MIT License
def __init__(self, input_size, query_size, value_size, head_num, dropout=0.0, concatenate=True, configurable=False,
                 use_dot=True):
        nn.Module.__init__(self)
        self.use_dot = use_dot
        if use_dot is True:
            self.query_heads = nn.Linear(input_size, head_num * query_size, bias=True)
        else:
            self.query_heads = nn.Linear(query_size + input_size, head_num, bias=False)
        self.head_num = head_num
        self.concatenate = concatenate
        self.input_size = input_size
        self.value_size = value_size
        if concatenate:
            self.value_proj = nn.Linear(value_size, input_size)
        else:
            self.value_proj = nn.Linear(value_size, input_size * head_num)
        if configurable:
            self.param_divide(self.query_heads, with_query=True)
            self.param_divide(self.value_proj, with_query=True)
        if dropout > 0.0:
            self.attn_dropout = nn.Dropout(dropout)
        else:
            self.attn_dropout = None
        self.attn = None 
Example 15
Project: ScenarioMeta   Author: THUDM   File: modules.py    License: MIT License
def __init__(self, useritem_embeds, source_ratings, item_padding_idx, input_size, hidden_layers):
        nn.Module.__init__(self)
        self.useritem_embeds = useritem_embeds
        self.source_ratings = source_ratings
        self.item_padding_idx = item_padding_idx
        last_size = input_size * 2
        layers1, layers2, transfer_layers = [], [], []
        for hidden_size in hidden_layers:
            layers1.append(nn.Linear(last_size, hidden_size))
            layers2.append(nn.Linear(last_size, hidden_size))
            transfer_layers.append(nn.Linear(last_size, hidden_size))
            last_size = hidden_size
        self.target_layers = nn.ModuleList(layers1)
        self.auxiliary_layers = nn.ModuleList(layers2)
        self.transfer_layers = nn.ModuleList(transfer_layers)
        self.target_output = nn.Linear(last_size, 1)
        self.auxiliary_output = nn.Linear(last_size, 1) 
Example 16
Project: ScenarioMeta   Author: THUDM   File: meta.py    License: MIT License
def __init__(self, hidden_size, layer_norm=False, input_gate=True, forget_gate=True):
            nn.Module.__init__(self)
            self.hidden_size = hidden_size
            # gradient(2), param(2), loss
            self.lstm = nn.LSTMCell(input_size=5, hidden_size=hidden_size)
            if layer_norm:
                self.layer_norm = nn.LayerNorm(hidden_size)
            else:
                self.layer_norm = None
            self.input_gate = input_gate
            self.forget_gate = forget_gate
            if self.input_gate:
                self.lr_layer = nn.Linear(hidden_size, 1)
                self.lrs = []
            else:
                self.output_layer = nn.Linear(hidden_size, 1)
                self.dets = []
            if forget_gate:
                self.fg_layer = nn.Linear(hidden_size, 1)
                self.fgs = []
            self.h_0 = nn.Parameter(torch.randn((hidden_size,), requires_grad=True))
            self.c_0 = nn.Parameter(torch.randn((hidden_size,), requires_grad=True)) 
Example 17
Project: Res2Net-maskrcnn   Author: Res2Net   File: checkpoint.py    License: MIT License
def create_complex_model(self):
        m = nn.Module()
        m.block1 = nn.Module()
        m.block1.layer1 = nn.Linear(2, 3)
        m.layer2 = nn.Linear(3, 2)
        m.res = nn.Module()
        m.res.layer2 = nn.Linear(3, 2)

        state_dict = OrderedDict()
        state_dict["layer1.weight"] = torch.rand(3, 2)
        state_dict["layer1.bias"] = torch.rand(3)
        state_dict["layer2.weight"] = torch.rand(2, 3)
        state_dict["layer2.bias"] = torch.rand(2)
        state_dict["res.layer2.weight"] = torch.rand(2, 3)
        state_dict["res.layer2.bias"] = torch.rand(2)

        return m, state_dict 
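
Note how nested submodules produce dotted state_dict keys, while the hand-built dict above deliberately uses a mismatched layout (no block1. prefix) to exercise the checkpoint loader. Printing the model's own keys makes the mismatch visible:

print(list(m.state_dict().keys()))
# ['block1.layer1.weight', 'block1.layer1.bias',
#  'layer2.weight', 'layer2.bias',
#  'res.layer2.weight', 'res.layer2.bias']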
Example 18
Project: metalearn-leap   Author: amzn   File: maml.py    License: Apache License 2.0
def maml_inner_step(input, output, model, optimizer, criterion, create_graph):
    """Create a computation graph through the gradient operation

    Arguments:
        input (torch.Tensor): input tensor.
        output (torch.Tensor): target tensor.
        model (torch.nn.Module): task learner.
        optimizer (maml.optim): optimizer for inner loop.
        criterion (func): loss criterion.
        create_graph (bool): create graph through gradient step.
    """
    new_parameters = None

    prediction = model(input)
    loss = criterion(prediction, output)
    loss.backward(create_graph=create_graph, retain_graph=create_graph)

    if create_graph:
        _, new_parameters = optimizer.step(retain_graph=create_graph)
    else:
        optimizer.step(retain_graph=create_graph)

    return loss, prediction, new_parameters 
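
create_graph=True makes the backward pass itself part of the autograd graph, which is what allows MAML's outer loss to backpropagate through the inner update. The same mechanism with plain autograd, independent of the maml.optim optimizer used above:

import torch

w = torch.randn(3, requires_grad=True)
x, y = torch.randn(3), torch.tensor(1.0)

inner_loss = ((w * x).sum() - y) ** 2
(g,) = torch.autograd.grad(inner_loss, w, create_graph=True)  # differentiable grad
w_adapted = w - 0.1 * g                  # one differentiable SGD step

outer_loss = ((w_adapted * x).sum() - y) ** 2
outer_loss.backward()                    # second-order gradients reach w
print(w.grad)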
Example 19
def __init__(self):
    nn.Module.__init__(self)
    self._predictions = {}
    self._losses = {}
    self._anchor_targets = {}
    self._proposal_targets = {}
    self._layers = {}
    self._gt_image = None
    self._act_summaries = {}
    self._score_summaries = {}
    self._event_summaries = {}
    self._image_gt_summaries = {}
    self._variables_to_fix = {} 
Example 20
def load_state_dict(self, state_dict):
    """
    Because the fc layer definition has been removed from this ResNet, loading
    models trained before that change would fail.
    To provide backward compatibility, we override load_state_dict.
    """
    nn.Module.load_state_dict(self, {k: state_dict[k] for k in list(self.state_dict())}) 
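
Filtering the incoming dict down to the keys this model still defines silently drops the removed fc entries. On modern PyTorch, strict=False is a close alternative that also reports the mismatches (illustrative, not the project's code):

result = nn.Module.load_state_dict(self, state_dict, strict=False)
print(result.missing_keys, result.unexpected_keys)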
Example 21
Project: mmdetection   Author: open-mmlab   File: resnet.py    License: Apache License 2.0
@property
def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name) 
Example 22
Project: mmdetection   Author: open-mmlab   File: resnet.py    License: Apache License 2.0
@property
def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name) 
Example 23
Project: mmdetection   Author: open-mmlab   File: resnet.py    License: Apache License 2.0
@property
def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name) 
Example 24
Project: mmdetection   Author: open-mmlab   File: resnet.py    License: Apache License 2.0
@property
def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name) 
Example 25
Project: mmdetection   Author: open-mmlab   File: resnet.py    License: Apache License 2.0
@property
def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name) 
Example 26
Project: mmdetection   Author: open-mmlab   File: hrnet.py    License: Apache License 2.0
@property
def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name) 
Example 27
Project: mmdetection   Author: open-mmlab   File: hrnet.py    License: Apache License 2.0
@property
def norm2(self):
        """nn.Module: the normalization layer named "norm2" """
        return getattr(self, self.norm2_name) 
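
These accessors are @property getters: the backbone builds its normalization layer with a configurable type, registers it under a generated name (e.g. 'bn1' or 'gn1') with add_module, and exposes it through a stable attribute. Roughly (a sketch; build_norm_layer is mmcv's helper and returns a (name, layer) pair):

import torch.nn as nn
from mmcv.cnn import build_norm_layer

class Block(nn.Module):
    def __init__(self, channels, norm_cfg=dict(type='BN')):
        super().__init__()
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, channels, postfix=1)
        self.add_module(self.norm1_name, norm1)  # registered as e.g. 'bn1'

    @property
    def norm1(self):
        return getattr(self, self.norm1_name)  # always reachable as self.norm1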
Example 28
Project: mmdetection   Author: open-mmlab   File: accuracy.py    License: Apache License 2.0
def __init__(self, topk=(1, ), thresh=None):
        """Module to calculate the accuracy.

        Args:
            topk (tuple, optional): The criterion used to calculate the
                accuracy. Defaults to (1,).
            thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Defaults to None.
        """
        super().__init__()
        self.topk = topk
        self.thresh = thresh 
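
The class's forward pass (not shown) computes top-k accuracy over (N, C) scores. A self-contained sketch of that computation, with thresh applied to the winning scores (illustrative, not mmdetection's exact code):

import torch

def topk_accuracy(pred, target, topk=(1,), thresh=None):
    scores, labels = pred.topk(max(topk), dim=1)   # (N, maxk)
    correct = labels.eq(target.unsqueeze(1))       # (N, maxk) boolean hits
    if thresh is not None:
        correct &= scores > thresh                 # discard low-confidence hits
    return [correct[:, :k].any(dim=1).float().mean().item() * 100 for k in topk]

acc1, = topk_accuracy(torch.randn(8, 10), torch.randint(0, 10, (8,)))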
Example 29
Project: neural-pipeline   Author: toodef   File: train_config.py    License: MIT License
def __init__(self, model: Module, train_stages: list, loss: Module, optimizer: Optimizer):
        self._train_stages = train_stages
        self._loss = loss
        self._optimizer = optimizer
        self._model = model 
Example 30
Project: neural-pipeline   Author: toodef   File: train_config.py    License: MIT License
def loss(self) -> Module:
        """
        Get loss object

        :return: loss object
        """
        return self._loss