Python torch.nn.__dict__ Examples

The following are 9 code examples of torch.nn.__dict__, the attribute dictionary of the torch.nn module, which maps class and function names to the objects they name. The project and source file for each example are listed above it, so you can follow those references back to the original code. You may also want to check out all available functions/classes of the module torch.nn, or try the search function.
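Before the examples, here is a minimal sketch of the pattern they all share: torch.nn.__dict__ maps names such as "Conv2d" or "ReLU" to the corresponding classes, so a layer can be chosen from a string that is only known at runtime. The layer and channel sizes below are illustrative only, and nn.__dict__[name] behaves the same as getattr(nn, name).

import torch
import torch.nn as nn

# Resolve a layer class by name; equivalent to getattr(nn, "Conv2d").
conv_cls = nn.__dict__["Conv2d"]
conv = conv_cls(in_channels=3, out_channels=16, kernel_size=3)

x = torch.randn(1, 3, 32, 32)
print(conv(x).shape)  # torch.Size([1, 16, 30, 30])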
Example #1
Source File: attention.py    From torchsupport with MIT License
def __init__(self, N, branches, in_channels, preprocess=None, activation=func.tanh):
    """Pixel-wise branch selection layer using attention.

    Args:
      N (int): dimensionality of convolutions.
      branches (iterable nn.Module): neural network branches to choose from.
      in_channels (int): number of input channels.
      preprocess (nn.Module): module performing feature preprocessing for attention.
      activation (callable): activation function for attention computation.
    """
    super(AttentionBranch, self).__init__()
    self.is_module = False
    if isinstance(branches, nn.Module):
      self.branches = branches
      self.is_module = True
    else:
      self.branches = nn.ModuleList(branches)
    branch_size = len(self.branches)
    self.attention_preprocess = preprocess
    if self.attention_preprocess is None:
      self.attention_preprocess = nn.__dict__[f"Conv{N}d"](in_channels, in_channels, 3)
    self.attention_activation = activation
    self.attention_calculation = nn.__dict__[f"Conv{N}d"](in_channels, branch_size, 1) 
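The layers above are built with nn.__dict__[f"Conv{N}d"] so that the same branch-selection code works for 1-D, 2-D, and 3-D inputs. A standalone sketch of that dimension-generic pattern (the helper name and channel counts are illustrative, not part of torchsupport):

import torch.nn as nn

def make_conv(N, in_channels, out_channels, kernel_size):
    # Resolve nn.Conv1d / nn.Conv2d / nn.Conv3d from the dimensionality N.
    return nn.__dict__[f"Conv{N}d"](in_channels, out_channels, kernel_size)

conv2d = make_conv(2, 16, 16, 3)  # same as nn.Conv2d(16, 16, 3)
conv3d = make_conv(3, 16, 4, 1)   # same as nn.Conv3d(16, 4, 1)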
Example #2
Source File: attention.py    From torchsupport with MIT License
def __init__(self, N, in_channels, out_channels, hidden=32,
               inner_activation=func.relu, outer_activation=func.tanh,
               reduce=True):
    """Pixel-wise attention gated by a guide image.

    Args:
      N (int): dimensionality of convolutions.
      in_channels (int): number of input channels.
      out_channels (int): number of attention heads.
      hidden (int): number of hidden channels.
      inner_activation (callable): activation on guide and input sum.
      outer_activation (callable): activation on attention.
      reduce (bool): reduce or concatenate the results of the attention heads.
    """
    super(GuidedAttention, self).__init__()
    self.input_embedding = nn.__dict__[f"Conv{N}d"](in_channels, hidden, 1)
    self.guide_embedding = nn.__dict__[f"Conv{N}d"](in_channels, hidden, 1)
    self.attention_computation = nn.__dict__[f"Conv{N}d"](hidden, out_channels, 1)
    self.inner_activation = inner_activation
    self.outer_activation = outer_activation
    self.reduce = reduce 
Example #3
Source File: utils.py    From actor-observer with GNU General Public License v3.0
def generic_load(arch, pretrained, weights, args):
    if arch in tmodels.__dict__:  # torchvision models
        if pretrained:
            print("=> using pre-trained model '{}'".format(arch))
            model = tmodels.__dict__[arch](pretrained=True)
            model = model.cuda()
        else:
            print("=> creating model '{}'".format(arch))
            model = tmodels.__dict__[arch]()
    else:  # defined as script in this directory
        model = importlib.import_module('.' + arch, package='models')
        model = model.__dict__[arch](args)

    if weights != '':
        print('loading pretrained-weights from {}'.format(weights))
        chkpoint = torch.load(weights)
        if isinstance(chkpoint, dict) and 'state_dict' in chkpoint:
            chkpoint = chkpoint['state_dict']
        load_partial_state(model, chkpoint)
    return model 
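The torchvision branch above resolves an architecture from its string name. A hedged standalone sketch of that lookup (the architecture name is illustrative, and the model is built with default, randomly initialized weights to stay version-agnostic):

import torchvision.models as tmodels

arch = "resnet18"
if arch in tmodels.__dict__:
    # Same callable as tmodels.resnet18; default (random) weights.
    model = tmodels.__dict__[arch]()
    print(type(model).__name__)  # ResNet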
Example #4
Source File: net_util.py    From ConvLab with MIT License
def get_nn_name(uncased_name):
    '''Helper to get the proper name in PyTorch nn given a case-insensitive name'''
    for nn_name in nn.__dict__:
        if uncased_name.lower() == nn_name.lower():
            return nn_name
    raise ValueError(f'Name {uncased_name} not found in {nn.__dict__}') 
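A short usage sketch of the helper above; the lookup is case-insensitive, so a lower-case name resolves to the proper nn class name (the example name is illustrative):

import torch.nn as nn

name = get_nn_name("relu")   # 'ReLU'
layer = getattr(nn, name)()  # same as nn.__dict__[name]()
print(name, layer)           # ReLU ReLU()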
Example #5
Source File: compact.py    From torchsupport with MIT License
def __init__(self, width, stride, input, kernels, kernels11,
               activation=func.leaky_relu,
               activation_1x1=func.leaky_relu,
               dim=2):
    super(Conv1x1, self).__init__()
    assert dim in [1, 2, 3]
    self.conv_op = nn.__dict__[f"Conv{dim}d"]
    self.bn_op = nn.__dict__[f"BatchNorm{dim}d"]
    self.conv = self.conv_op(input, kernels, width, stride, 1)
    self.bn = self.bn_op(kernels)
    self.x11 = self.conv_op(kernels, kernels11, 1, 1)
    self.bn11 = self.bn_op(kernels11)
    self.activation = activation
    self.activation_1x1 = activation_1x1 
Example #6
Source File: compact.py    From torchsupport with MIT License
def __init__(self, width, stride, input, kernels, kernels11,
               activation=func.leaky_relu,
               activation_1x1=func.leaky_relu,
               dim=2):
    super(UpConv1x1, self).__init__()
    assert dim in [1, 2, 3]
    # Note: torch.nn defines UpsamplingBilinear2d but no 1d or 3d variant,
    # so this lookup only succeeds for dim=2.
    self.upsampling = nn.__dict__[f"UpsamplingBilinear{dim}d"]
    self.conv_op = nn.__dict__[f"Conv{dim}d"]
    self.bn_op = nn.__dict__[f"BatchNorm{dim}d"]
    self.conv = self.conv_op(input, kernels, width, stride, 1)
    self.bn = self.bn_op(kernels)
    self.x11 = self.conv_op(kernels, kernels11, 1, 1)
    self.bn11 = self.bn_op(kernels11)
    self.activation = activation
    self.activation_1x1 = activation_1x1 
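A hedged standalone sketch of the upsampling lookup used above, shown for the 2-D case that torch.nn actually provides (tensor sizes are illustrative):

import torch
import torch.nn as nn

up = nn.__dict__["UpsamplingBilinear2d"](scale_factor=2)
x = torch.randn(1, 8, 16, 16)
print(up(x).shape)  # torch.Size([1, 8, 32, 32])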
Example #7
Source File: utils.py    From actor-observer with GNU General Public License v3.0
def load_criterion(args):
    if hasattr(nn, args.loss):
        criterion = nn.__dict__[args.loss]().cuda()
    else:
        criterion = importlib.import_module('models.layers.' + args.loss)
        criterion = criterion.__dict__[args.loss](args).cuda()
    return criterion 
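A hedged sketch of the same criterion lookup without the project-specific args object and without moving to the GPU (the loss name is illustrative):

import torch.nn as nn

loss_name = "CrossEntropyLoss"
if hasattr(nn, loss_name):
    criterion = nn.__dict__[loss_name]()  # same as nn.CrossEntropyLoss()
print(criterion)  # CrossEntropyLoss()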
Example #8
Source File: abn.py    From catalyst with Apache License 2.0
def __init__(
        self,
        num_features: int,
        activation: str = "leaky_relu",
        batchnorm_params: Dict = None,
        activation_params: Dict = None,
        use_batchnorm: bool = True,
    ):
        """
        Args:
            num_features (int): number of feature channels
                in the input and output
            activation (str): name of the activation functions, one of:
                ``'leaky_relu'``, ``'elu'`` or ``'none'``.
            batchnorm_params (dict): additional ``nn.BatchNorm2d`` params
            activation_params (dict): additional params for activation function
            use_batchnorm (bool): @TODO: Docs. Contribution is welcome
        """
        super().__init__()
        batchnorm_params = batchnorm_params or {}
        activation_params = activation_params or {}

        layers = []
        if use_batchnorm:
            layers.append(
                nn.BatchNorm2d(num_features=num_features, **batchnorm_params)
            )
        if activation is not None and activation.lower() != "none":
            # The name is looked up verbatim in nn.__dict__, so it must match
            # an nn class name such as "LeakyReLU" or "ELU".
            layers.append(
                nn.__dict__[activation](inplace=True, **activation_params)
            )

        self.net = nn.Sequential(*layers) 
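A minimal standalone sketch of the same by-name construction (the feature count and activation parameters are illustrative; note that the activation key is the nn class name):

import torch.nn as nn

num_features = 64
activation = "LeakyReLU"  # must be an nn class name, not "leaky_relu"
net = nn.Sequential(
    nn.BatchNorm2d(num_features=num_features),
    nn.__dict__[activation](negative_slope=0.01, inplace=True),
)
print(net)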
Example #9
Source File: net_util.py    From SLM-Lab with MIT License
def get_nn_name(uncased_name):
    '''Helper to get the proper name in PyTorch nn given a case-insensitive name'''
    for nn_name in nn.__dict__:
        if uncased_name.lower() == nn_name.lower():
            return nn_name
    raise ValueError(f'Name {uncased_name} not found in {nn.__dict__}')