Python torch.nn.SELU Examples

The following are 30 code examples of torch.nn.SELU(), collected from open-source projects; the project and source file for each example are noted above its code. You may also want to check out the other available functions and classes of the torch.nn module.
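Before the project examples, a minimal sketch of what nn.SELU does on its own may help: it applies SELU(x) = scale * (max(0, x) + min(0, alpha * (exp(x) - 1))) elementwise, with the fixed constants alpha ≈ 1.6733 and scale ≈ 1.0507 that give SELU its self-normalizing behavior.

import torch
import torch.nn as nn

x = torch.randn(4)
selu = nn.SELU()        # pass inplace=True to overwrite the input tensor in place
y = selu(x)             # same result as torch.nn.functional.selu(x)
print(y)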
Example #1
Source File: supervised_topic_model.py    From causal-text-embeddings with MIT License
def get_activation(self, act):
        if act == 'tanh':
            act = nn.Tanh()
        elif act == 'relu':
            act = nn.ReLU()
        elif act == 'softplus':
            act = nn.Softplus()
        elif act == 'rrelu':
            act = nn.RReLU()
        elif act == 'leakyrelu':
            act = nn.LeakyReLU()
        elif act == 'elu':
            act = nn.ELU()
        elif act == 'selu':
            act = nn.SELU()
        elif act == 'glu':
            act = nn.GLU()
        else:
            print('Defaulting to tanh activations...')
            act = nn.Tanh()
        return act 
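A brief sketch of how a factory like get_activation above is typically used when assembling a model (the wrapping module below is hypothetical and uses a simplified dispatch, not the original project's code):

import torch
import torch.nn as nn

class TinyEncoder(nn.Module):
    # hypothetical module reusing the string-to-activation dispatch idea from Example #1
    def __init__(self, in_dim=16, hidden=32, act='selu'):
        super().__init__()
        activation = nn.SELU() if act == 'selu' else nn.Tanh()  # simplified dispatch
        self.net = nn.Sequential(nn.Linear(in_dim, hidden), activation,
                                 nn.Linear(hidden, hidden), activation)

    def forward(self, x):
        return self.net(x)

out = TinyEncoder()(torch.randn(4, 16))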
Example #2
Source File: utils.py    From pnn.pytorch.update with MIT License
def act_fn(act):
    if act == 'relu':
        act_ = nn.ReLU(inplace=False)
    elif act == 'lrelu':
        act_ = nn.LeakyReLU(inplace=True)
    elif act == 'prelu':
        act_ = nn.PReLU()
    elif act == 'rrelu':
        act_ = nn.RReLU(inplace=True)
    elif act == 'elu':
        act_ = nn.ELU(inplace=True)
    elif act == 'selu':
        act_ = nn.SELU(inplace=True)
    elif act == 'tanh':
        act_ = nn.Tanh()
    elif act == 'sigmoid':
        act_ = nn.Sigmoid()
    else:
        print('\n\nActivation function {} is not supported/understood\n\n'.format(act))
        act_ = None
    return act_ 
Example #3
Source File: layers.py    From USIP with GNU General Public License v3.0
def __init__(self, in_features, out_features, activation=None, normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyLinear, self).__init__()
        self.activation = activation
        self.normalization = normalization

        self.linear = nn.Linear(in_features, out_features, bias=True)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True, momentum_decay_step=bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif 'elu' == activation:
            self.act = nn.ELU(alpha=1.0)
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.01)
        elif 'selu' == self.activation:
            self.act = nn.SELU()

        self.weight_init() 
Example #4
Source File: layers.py    From USIP with GNU General Public License v3.0
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, activation=None, normalization=None, momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
        super(MyConv2d, self).__init__()
        self.activation = activation
        self.normalization = normalization

        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
        if self.normalization == 'batch':
            self.norm = MyBatchNorm2d(out_channels, momentum=momentum, affine=True, momentum_decay_step=bn_momentum_decay_step, momentum_decay=bn_momentum_decay)
        elif self.normalization == 'instance':
            self.norm = nn.InstanceNorm2d(out_channels, momentum=momentum, affine=True)
        if self.activation == 'relu':
            self.act = nn.ReLU()
        elif self.activation == 'elu':
            self.act = nn.ELU(alpha=1.0)
        elif 'swish' == self.activation:
            self.act = Swish()
        elif 'leakyrelu' == self.activation:
            self.act = nn.LeakyReLU(0.01)
        elif 'selu' == self.activation:
            self.act = nn.SELU()

        self.weight_init() 
Example #5
Source File: etm.py    From ETM with MIT License
def get_activation(self, act):
        if act == 'tanh':
            act = nn.Tanh()
        elif act == 'relu':
            act = nn.ReLU()
        elif act == 'softplus':
            act = nn.Softplus()
        elif act == 'rrelu':
            act = nn.RReLU()
        elif act == 'leakyrelu':
            act = nn.LeakyReLU()
        elif act == 'elu':
            act = nn.ELU()
        elif act == 'selu':
            act = nn.SELU()
        elif act == 'glu':
            act = nn.GLU()
        else:
            print('Defaulting to tanh activations...')
            act = nn.Tanh()
        return act 
Example #6
Source File: MobileNetV2.py    From Text_Segmentation_Image_Inpainting with GNU General Public License v3.0
def __init__(self, num_class, width_mult=2, add_sece=False):
        super(MobileNetV2Classifier, self).__init__()
        self.num_class = num_class
        self.act_fn = nn.LeakyReLU(0.3, inplace=True)  # nn.SELU(inplace=True)
        self.encoder = DilatedMobileNetV2(width_mult=width_mult, activation=self.act_fn,
                                          bias=False, add_sece=add_sece, add_partial=False)

        # if width multiple is 1.4, then there are 944 channels
        cat_feat_num = sum([i[0].out_channels for i in self.encoder.features[3:]])
        # self.conv_classifier = self.make_conv_classifier(cat_feat_num, num_class)
        self.feature_conv = InvertedResidual(cat_feat_num, num_class, stride=1, expand_ratio=1, dilation=1,
                                             activation=self.act_fn, bias=False,
                                             add_sece=True)
        self.global_avg = nn.AdaptiveAvgPool2d(1)
        lstm_hidden = 256
        self.lstm = nn.LSTM(num_class, lstm_hidden, num_layers=1, batch_first=True)
        self.lstm_linear_z = nn.Sequential(nn.Linear(lstm_hidden, lstm_hidden // 4), self.act_fn)
        self.lstm_linear_score = nn.Linear(lstm_hidden, num_class)
        self.st_theta_linear = nn.Sequential(nn.Linear(lstm_hidden // 4, 2 * 3))
        self.anchor_box = FloatTensor([(0, 0), (0.4, 0.4), (0.4, -0.4), (-0.4, -0.4), (-0.4, 0.4)])
Example #7
Source File: classifiers.py    From swagaf with MIT License
def __init__(self, input_dim=5, hidden_dim=1024):
        """
        Averaged embeddings of ending -> label
        :param embed_dim: dimension to use
        """
        super(LMFeatsModel, self).__init__()
        self.mapping = nn.Sequential(
            nn.Linear(input_dim, hidden_dim, bias=True),
            nn.SELU(),
            nn.AlphaDropout(p=0.2),
        )
        self.prediction = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim, bias=True),
            nn.SELU(),
            nn.AlphaDropout(p=0.2),
            nn.Linear(hidden_dim, 1, bias=False),
        ) 
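The SELU → AlphaDropout pairing in this example is deliberate: nn.AlphaDropout is the dropout variant designed for self-normalizing networks, so it keeps activations at roughly zero mean and unit variance where ordinary nn.Dropout would not. A small, self-contained check (not part of the project; the LeCun-normal initialization is what makes the statistics hold):

import math
import torch
import torch.nn as nn

torch.manual_seed(0)
lin = nn.Linear(512, 512)
# LeCun-normal weights (std = 1/sqrt(fan_in)) keep the SELU stack self-normalizing
nn.init.normal_(lin.weight, std=1.0 / math.sqrt(lin.in_features))
nn.init.zeros_(lin.bias)

block = nn.Sequential(lin, nn.SELU(), nn.AlphaDropout(p=0.2))
block.train()
y = block(torch.randn(1024, 512))
print(y.mean().item(), y.std().item())  # stays close to 0 and 1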
Example #8
Source File: classifiers.py    From swagaf with MIT License
def __init__(self, vocab):
        super(Ensemble, self).__init__()

        self.fasttext_model = BoWModel(vocab, use_mean=True, embed_dim=100)
        self.mlp_model = LMFeatsModel(input_dim=8, hidden_dim=1024)
        self.lstm_pos_model = BLSTMModel(vocab, use_postags_only=True, maxpool=True)
        # self.lstm_lex_model = BLSTMModel(vocab, use_postags_only=False, maxpool=True)
        self.cnn_model = CNNModel(vocab)

        self.mlp = nn.Sequential(
            nn.Linear(100 + 1024 + 400 + 4 * 128, 2048, bias=True),
            # nn.SELU(),
            # nn.AlphaDropout(p=0.2),
            # nn.Linear(2048, 2048, bias=True),
            nn.SELU(),
            nn.AlphaDropout(p=0.2),
            nn.Linear(2048, 1, bias=False),
        ) 
Example #9
Source File: ba_tracknet_mirror_b.py    From sanet_relocal_demo with GNU General Public License v3.0
def __init__(self, in_1_channels, in_2_channels, out_channels):
        super(AggregateUnit, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=(in_1_channels + in_2_channels), out_channels=128,
                      kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(128),
            nn.SELU(inplace=True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(128),
            nn.SELU(inplace=True)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(out_channels),
            nn.SELU(inplace=True)
        ) 
Example #10
Source File: ba_tracknet.py    From sanet_relocal_demo with GNU General Public License v3.0
def __init__(self, in_1_channels, in_2_channels, out_channels):
        super(AggregateUnit, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=(in_1_channels + in_2_channels), out_channels=128,
                      kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(128),
            nn.SELU(inplace=True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(128),
            nn.SELU(inplace=True)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(out_channels),
            nn.SELU(inplace=True)
        ) 
Example #11
Source File: fc.py    From Attention-on-Attention-for-VQA with MIT License
def get_act(act):
    if act == 'ReLU':
        act_layer = nn.ReLU
    elif act == 'LeakyReLU':
        act_layer = nn.LeakyReLU
    elif act == 'PReLU':
        act_layer = nn.PReLU
    elif act == 'RReLU':
        act_layer = nn.RReLU
    elif act == 'ELU':
        act_layer = nn.ELU
    elif act == 'SELU':
        act_layer = nn.SELU
    elif act == 'Tanh':
        act_layer = nn.Tanh
    elif act == 'Hardtanh':
        act_layer = nn.Hardtanh
    elif act == 'Sigmoid':
        act_layer = nn.Sigmoid
    else:
        print("Invalid activation function")
        raise Exception("Invalid activation function")
    return act_layer 
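Note that, unlike most of the examples here, this factory returns the activation class rather than an instance, so callers instantiate it themselves. A brief usage sketch of get_act above (layer sizes are illustrative):

import torch.nn as nn

act_layer = get_act('SELU')                               # returns the class nn.SELU
block = nn.Sequential(nn.Linear(128, 128), act_layer())   # instantiate when building the block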
Example #12
Source File: nn_utils.py    From chemprop with MIT License
def get_activation_function(activation: str) -> nn.Module:
    """
    Gets an activation function module given the name of the activation.

    :param activation: The name of the activation function.
    :return: The activation function module.
    """
    if activation == 'ReLU':
        return nn.ReLU()
    elif activation == 'LeakyReLU':
        return nn.LeakyReLU(0.1)
    elif activation == 'PReLU':
        return nn.PReLU()
    elif activation == 'tanh':
        return nn.Tanh()
    elif activation == 'SELU':
        return nn.SELU()
    else:
        raise ValueError(f'Activation "{activation}" not supported.') 
Example #13
Source File: activations.py    From lumin with Apache License 2.0
def lookup_act(act:str) -> Any:
    r'''
    Map activation name to class
    
    Arguments:
        act: string representation of activation function

    Returns:
        Class implementing requested activation function
    '''

    if act == 'relu':       return nn.ReLU()
    if act == 'prelu':      return nn.PReLU()
    if act == 'selu':       return nn.SELU()
    if act == 'sigmoid':    return nn.Sigmoid()
    if act == 'logsoftmax': return nn.LogSoftmax(1)
    if act == 'softmax':    return nn.Softmax(1)
    if act == 'linear':     return lambda x: x
    if 'swish' in act:      return Swish()
    raise ValueError("Activation not implemented") 
Example #14
Source File: selu.py    From onnx2keras with MIT License
def __init__(self):
        super(LayerSELUTest, self).__init__()
        self.selu = nn.SELU() 
Example #15
Source File: networks_pono.py    From PONO with MIT License
def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
        super(LinearBlock, self).__init__()
        use_bias = True
        # initialize fully connected layer
        if norm == 'sn':
            self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias))
        else:
            self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)

        # initialize normalization
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm1d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm1d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'none' or norm == 'sn':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation) 
Example #16
Source File: Common.py    From Waifu2x with GNU General Public License v3.0
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, dilation=1,
                 groups=1, activation=nn.SELU(), conv=nn.Conv2d):
        super(ResidualFixBlock, self).__init__()
        self.act_fn = activation
        self.m = nn.Sequential(
            conv(in_channels, out_channels, kernel_size, padding=padding, dilation=dilation, groups=groups),
            activation,
            # conv(out_channels, out_channels, kernel_size, padding=(kernel_size - 1) // 2, dilation=1, groups=groups),
            conv(in_channels, out_channels, kernel_size, padding=padding, dilation=dilation, groups=groups),
        ) 
Example #17
Source File: Common.py    From Waifu2x with GNU General Public License v3.0
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, dilation=1, groups=1,
                 activation=nn.SELU(), conv=nn.Conv2d):
        super(ConvBlock, self).__init__()
        self.m = nn.Sequential(conv(in_channels, out_channels, kernel_size, padding=padding,
                                    dilation=dilation, groups=groups),
                               activation) 
Example #18
Source File: test_operators.py    From onnx-fb-universe with MIT License
def test_selu(self):
        x = Variable(torch.randn(1, 2, 3, 4), requires_grad=True)
        self.assertONNX(nn.SELU(), x) 
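assertONNX is a helper of that test suite; a minimal sketch of the equivalent export through the public torch.onnx API (the file and tensor names here are illustrative):

import torch
import torch.nn as nn

model = nn.SELU()
dummy = torch.randn(1, 2, 3, 4)
# ONNX has a native Selu operator, so this exports as a single node
torch.onnx.export(model, dummy, "selu.onnx", input_names=["x"], output_names=["y"])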
Example #19
Source File: attention.py    From KOBE with MIT License
def __init__(self, hidden_size, emb_size, prob=0.1):
        super(luong_gate_attention, self).__init__()
        self.hidden_size, self.emb_size = hidden_size, emb_size
        self.linear_enc = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob),
                                        nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob))
        self.linear_in = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob),
                                       nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob))
        self.linear_out = nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob),
                                        nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob))
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(p=prob)
        self.sigmoid = nn.Sigmoid() 
Example #20
Source File: rebalance_dataset_mlp.py    From swagaf with MIT License
def __init__(self):
        super(MLPModel, self).__init__()
        # self.mapping = nn.Linear(train_data.feats.shape[2], 1, bias=False)

        self.mapping = nn.Sequential(
            nn.Linear(all_data.shape[-1], 2048, bias=True),
            nn.SELU(),
            nn.AlphaDropout(p=0.2),
            nn.Linear(2048, 2048, bias=True),
            nn.SELU(),
            nn.AlphaDropout(p=0.2),
            nn.Linear(2048, 1, bias=False),
        ) 
Example #21
Source File: model.py    From hdr-expandnet with BSD 3-Clause Clear License
def __init__(self):
        super(ExpandNet, self).__init__()

        def layer(nIn, nOut, k, s, p, d=1):
            return nn.Sequential(
                nn.Conv2d(nIn, nOut, k, s, p, d), nn.SELU(inplace=True)
            )

        self.nf = 64
        self.local_net = nn.Sequential(
            layer(3, 64, 3, 1, 1), layer(64, 128, 3, 1, 1)
        )

        self.mid_net = nn.Sequential(
            layer(3, 64, 3, 1, 2, 2),
            layer(64, 64, 3, 1, 2, 2),
            layer(64, 64, 3, 1, 2, 2),
            nn.Conv2d(64, 64, 3, 1, 2, 2),
        )

        self.glob_net = nn.Sequential(
            layer(3, 64, 3, 2, 1),
            layer(64, 64, 3, 2, 1),
            layer(64, 64, 3, 2, 1),
            layer(64, 64, 3, 2, 1),
            layer(64, 64, 3, 2, 1),
            layer(64, 64, 3, 2, 1),
            nn.Conv2d(64, 64, 4, 1, 0),
        )

        self.end_net = nn.Sequential(
            layer(256, 64, 1, 1, 0), nn.Conv2d(64, 3, 1, 1, 0), nn.Sigmoid()
        ) 
Example #22
Source File: partial_convolution.py    From Text_Segmentation_Image_Inpainting with GNU General Public License v3.0
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, BN=False, activation=nn.SELU()):
        super(PartialGatedConv, self).__init__()
        self.feature_conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride,
                                      padding, dilation, groups, bias)
        self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride,
                                   padding, dilation, groups, bias)
        if BN:
            self.bn_act = nn.Sequential(nn.BatchNorm2d(out_channels), activation)
        else:
            self.bn_act = activation
Example #23
Source File: gcn.py    From cogdl with MIT License
def __init__(self, nfeat, nhid, nclass, dropout):
        super(TKipfGCN, self).__init__()

        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout
        # self.nonlinear = nn.SELU() 
Example #24
Source File: models.py    From pydlt with BSD 3-Clause Clear License
def __init__(self, num_hidden, num_chan, num_pix):
        super(DiscriminatorBEGAN, self).__init__()
        self.num_pix = num_pix
        self.num_chan = num_chan
        self.main = nn.Sequential(
            nn.Linear(num_chan*num_pix*num_pix, num_hidden), nn.SELU(),
            nn.Linear(num_hidden, num_hidden), nn.SELU(),
            nn.Linear(num_hidden, num_chan*num_pix*num_pix),
        )
        selu_init(self) 
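selu_init is defined elsewhere in pydlt and is not shown here; a sketch of what SELU-friendly initialization usually looks like, assuming LeCun-normal weights (an illustration, not the project's implementation):

import math
import torch.nn as nn

def lecun_normal_init(module):
    # hypothetical stand-in for selu_init: SELU networks are typically
    # initialized with LeCun-normal weights (std = 1/sqrt(fan_in)) and zero biases
    for m in module.modules():
        if isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, std=1.0 / math.sqrt(m.in_features))
            if m.bias is not None:
                nn.init.zeros_(m.bias)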
Example #25
Source File: models.py    From pydlt with BSD 3-Clause Clear License
def __init__(self, num_hidden, num_chan, num_pix):
        super(Discriminator, self).__init__()
        self.num_pix = num_pix
        self.num_chan = num_chan
        self.main = nn.Sequential(
            nn.Linear(num_chan*num_pix*num_pix, num_hidden),
            nn.SELU(),
            nn.Linear(num_hidden, num_hidden),
            nn.SELU()
        )
        self.last_layer = nn.Linear(num_hidden, 1)
        selu_init(self)

    # The correction term is for WGAN-CT 
Example #26
Source File: models.py    From pydlt with BSD 3-Clause Clear License
def __init__(self, num_hidden, z_dim, num_chan, num_pix):
        super(Generator, self).__init__()
        self.num_pix = num_pix
        self.num_chan = num_chan
        self.main = nn.Sequential(
            nn.Linear(z_dim, num_hidden),
            nn.SELU(),
            nn.Linear(num_hidden, num_hidden),
            nn.SELU(),
            nn.Linear(num_hidden, num_chan*num_pix*num_pix),
            nn.Tanh()
        )
        selu_init(self) 
Example #27
Source File: attention.py    From Global-Encoding with MIT License
def __init__(self, hidden_size, emb_size, prob=0.1):
        super(luong_gate_attention, self).__init__()
        self.hidden_size, self.emb_size = hidden_size, emb_size
        self.linear_enc = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob), 
                                        nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob))
        self.linear_in = nn.Sequential(nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob), 
                                       nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob))
        self.linear_out = nn.Sequential(nn.Linear(2*hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob), 
                                        nn.Linear(hidden_size, hidden_size), nn.SELU(), nn.Dropout(p=prob))
        self.softmax = nn.Softmax(dim=-1) 
Example #28
Source File: unet_models.py    From kaggle_carvana_segmentation with MIT License
def __init__(self, in_: int, out: int, bn=False):
        super().__init__()
        self.conv = conv3x3(in_, out)
        self.bn = nn.BatchNorm2d(out) if bn else None
        self.activation = nn.SELU(inplace=True) 
Example #29
Source File: selu.py    From pytorch2keras with MIT License
def __init__(self):
        super(LayerTest, self).__init__()
        self.selu = nn.SELU() 
Example #30
Source File: utils.py    From hypersearch with MIT License
def str2act(a):
    if a == 'relu':
        return nn.ReLU()
    elif a == 'selu':
        return nn.SELU()
    elif a == 'elu':
        return nn.ELU()
    elif a == 'tanh':
        return nn.Tanh()
    elif a == 'sigmoid':
        return nn.Sigmoid()
    else:
        raise ValueError('[!] Unsupported activation.')
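A brief usage sketch of str2act above (layer sizes are illustrative):

import torch
import torch.nn as nn

mlp = nn.Sequential(nn.Linear(32, 64), str2act('selu'), nn.Linear(64, 10))
out = mlp(torch.randn(8, 32))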