Python torch.nn.ELU Examples
The following are 27 code examples of torch.nn.ELU(), drawn from open-source projects. Each example notes its source file, project, and license. You may also want to check out all available functions and classes of the torch.nn module.
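ELU (Exponential Linear Unit) computes ELU(x) = x for x > 0 and alpha * (exp(x) - 1) for x <= 0, so negative inputs saturate smoothly toward -alpha instead of being clamped to zero as with ReLU. A minimal usage sketch of the module on its own:

import torch
import torch.nn as nn

elu = nn.ELU(alpha=1.0)  # alpha defaults to 1.0; inplace=False by default
x = torch.tensor([-2.0, -0.5, 0.0, 1.5])
print(elu(x))  # negatives map to alpha * (exp(x) - 1); positives pass through
# Functional equivalent: torch.nn.functional.elu(x, alpha=1.0)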
Example #1
Source File: etm.py From ETM with MIT License
def get_activation(self, act):
    if act == 'tanh':
        act = nn.Tanh()
    elif act == 'relu':
        act = nn.ReLU()
    elif act == 'softplus':
        act = nn.Softplus()
    elif act == 'rrelu':
        act = nn.RReLU()
    elif act == 'leakyrelu':
        act = nn.LeakyReLU()
    elif act == 'elu':
        act = nn.ELU()
    elif act == 'selu':
        act = nn.SELU()
    elif act == 'glu':
        act = nn.GLU()
    else:
        print('Defaulting to tanh activations...')
        act = nn.Tanh()
    return act
Example #2
Source File: dan.py From qb with MIT License
def __init__(self, embedding_dim, n_hidden_layers, n_hidden_units, dropout_prob):
    super(DanEncoder, self).__init__()
    encoder_layers = []
    for i in range(n_hidden_layers):
        if i == 0:
            input_dim = embedding_dim
        else:
            input_dim = n_hidden_units
        encoder_layers.extend([
            nn.Linear(input_dim, n_hidden_units),
            nn.BatchNorm1d(n_hidden_units),
            nn.ELU(),
            nn.Dropout(dropout_prob),
        ])
    self.encoder = nn.Sequential(*encoder_layers)
Example #3
Source File: main_pytorch.py From deep_architect with MIT License
def nonlinearity(h_nonlin_name):
    def Nonlinearity(nonlin_name):
        if nonlin_name == 'relu':
            m = nn.ReLU()
        elif nonlin_name == 'tanh':
            m = nn.Tanh()
        elif nonlin_name == 'elu':
            m = nn.ELU()
        else:
            raise ValueError
        return m
    return hpt.siso_pytorch_module_from_pytorch_layer_fn(
        Nonlinearity, {'nonlin_name': h_nonlin_name})
Example #4
Source File: neural_networks.py From pase with MIT License
def act_fun(act_type):
    if act_type == "relu":
        return nn.ReLU()
    if act_type == "tanh":
        return nn.Tanh()
    if act_type == "sigmoid":
        return nn.Sigmoid()
    if act_type == "leaky_relu":
        return nn.LeakyReLU(0.2)
    if act_type == "elu":
        return nn.ELU()
    if act_type == "softmax":
        return nn.LogSoftmax(dim=1)
    if act_type == "linear":
        # Initialized like this, but not used in forward!
        return nn.LeakyReLU(1)
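Note the "linear" case at the end: a LeakyReLU with negative slope 1 passes every input through unchanged, so it doubles as an identity activation.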
Example #5
Source File: scorenet.py From ncsn with GNU General Public License v3.0
def __init__(self, config):
    super().__init__()
    self.config = config
    self.main = nn.Sequential(
        nn.Linear(10 * 10, 1024),
        nn.LayerNorm(1024),
        nn.ELU(),
        nn.Linear(1024, 1024),
        nn.LayerNorm(1024),
        nn.ELU(),
        nn.Linear(1024, 512),
        nn.LayerNorm(512),
        nn.ELU(),
        nn.Linear(512, 100),
        nn.LayerNorm(100)
    )
Example #6
Source File: lwopenpose_cmupan.py From imgclsmob with MIT License
def __init__(self, in_channels, out_channels):
    super(LwopEncoderFinalBlock, self).__init__()
    self.pre_conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        bias=True,
        use_bn=False)
    self.body = nn.Sequential()
    for i in range(3):
        self.body.add_module("block{}".format(i + 1), dwsconv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bn=False,
            dw_activation=(lambda: nn.ELU(inplace=True)),
            pw_activation=(lambda: nn.ELU(inplace=True))))
    self.post_conv = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        bias=True,
        use_bn=False)
Example #7
Source File: supervised_topic_model.py From causal-text-embeddings with MIT License
def get_activation(self, act):
    if act == 'tanh':
        act = nn.Tanh()
    elif act == 'relu':
        act = nn.ReLU()
    elif act == 'softplus':
        act = nn.Softplus()
    elif act == 'rrelu':
        act = nn.RReLU()
    elif act == 'leakyrelu':
        act = nn.LeakyReLU()
    elif act == 'elu':
        act = nn.ELU()
    elif act == 'selu':
        act = nn.SELU()
    elif act == 'glu':
        act = nn.GLU()
    else:
        print('Defaulting to tanh activations...')
        act = nn.Tanh()
    return act
Example #8
Source File: utils.py From pnn.pytorch.update with MIT License
def act_fn(act):
    if act == 'relu':
        act_ = nn.ReLU(inplace=False)
    elif act == 'lrelu':
        act_ = nn.LeakyReLU(inplace=True)
    elif act == 'prelu':
        act_ = nn.PReLU()
    elif act == 'rrelu':
        act_ = nn.RReLU(inplace=True)
    elif act == 'elu':
        act_ = nn.ELU(inplace=True)
    elif act == 'selu':
        act_ = nn.SELU(inplace=True)
    elif act == 'tanh':
        act_ = nn.Tanh()
    elif act == 'sigmoid':
        act_ = nn.Sigmoid()
    else:
        print('\n\nActivation function {} is not supported/understood\n\n'.format(act))
        act_ = None
    return act_
Example #9
Source File: resnet_unet.py From seismic-deeplearning with MIT License
def __init__(self, n_classes=1):
    super(Res34Unetv4, self).__init__()
    self.resnet = torchvision.models.resnet34(True)

    self.conv1 = nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu)

    self.encode2 = nn.Sequential(self.resnet.layer1, SCse(64))
    self.encode3 = nn.Sequential(self.resnet.layer2, SCse(128))
    self.encode4 = nn.Sequential(self.resnet.layer3, SCse(256))
    self.encode5 = nn.Sequential(self.resnet.layer4, SCse(512))

    self.center = nn.Sequential(FPAv2(512, 256), nn.MaxPool2d(2, 2))

    self.decode5 = Decoderv2(256, 512, 64)
    self.decode4 = Decoderv2(64, 256, 64)
    self.decode3 = Decoderv2(64, 128, 64)
    self.decode2 = Decoderv2(64, 64, 64)
    self.decode1 = Decoder(64, 32, 64)

    self.logit = nn.Sequential(
        nn.Conv2d(320, 64, kernel_size=3, padding=1),
        nn.ELU(True),
        nn.Conv2d(64, n_classes, kernel_size=1, bias=False),
    )
Example #10
Source File: compute_flops.py From TreeFilter-Torch with MIT License
def compute_flops(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_flops(module, inp, out), 'Conv2d'
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_flops(module, inp, out), 'BatchNorm2d'
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d,
                             nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_flops(module, inp, out), 'Pool2d'
    elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU,
                             nn.ELU, nn.LeakyReLU, nn.Sigmoid)):
        return compute_ReLU_flops(module, inp, out), 'Activation'
    elif isinstance(module, nn.Upsample):
        return compute_Upsample_flops(module, inp, out), 'Upsample'
    elif isinstance(module, nn.Linear):
        return compute_Linear_flops(module, inp, out), 'Linear'
    else:
        print("[Flops]: {} is not supported!".format(type(module).__name__))
        return 0, -1
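The compute_*_flops helpers this function dispatches to are defined elsewhere in the same source file. A dispatcher like this is usually driven from forward hooks; the sketch below is a hypothetical driver (not part of TreeFilter-Torch) that assumes compute_flops and its helpers are in scope:

import torch

def count_flops(model, sample_input):
    # Accumulate per-category FLOP counts returned by compute_flops.
    totals = {}

    def hook(module, inputs, output):
        flops, kind = compute_flops(module, inputs[0], output)
        if kind != -1:  # skip unsupported modules
            totals[kind] = totals.get(kind, 0) + flops

    # Hook only leaf modules so containers are not double-counted.
    handles = [m.register_forward_hook(hook)
               for m in model.modules()
               if len(list(m.children())) == 0]
    with torch.no_grad():
        model(sample_input)
    for h in handles:
        h.remove()
    return totals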
Example #11
Source File: enc_resnet_v2.py From vae-lagging-encoder with MIT License
def __init__(self, inplanes, planes, stride=1):
    super(ResNetBlock, self).__init__()
    self.conv1 = conv3x3(inplanes, planes, stride)
    self.bn1 = nn.BatchNorm2d(planes)
    self.activation = nn.ELU()
    self.conv2 = conv3x3(planes, planes)
    self.bn2 = nn.BatchNorm2d(planes)
    downsample = None
    if stride != 1 or inplanes != planes:
        downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes),
        )
    self.downsample = downsample
    self.stride = stride
    self.reset_parameters()
Example #12
Source File: resnet_unet.py From seismic-deeplearning with MIT License
def __init__(self):
    super(Res34Unetv5, self).__init__()
    self.resnet = torchvision.models.resnet34(True)

    self.conv1 = nn.Sequential(
        nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
        self.resnet.bn1,
        self.resnet.relu,
    )

    self.encode2 = nn.Sequential(self.resnet.layer1, SCse(64))
    self.encode3 = nn.Sequential(self.resnet.layer2, SCse(128))
    self.encode4 = nn.Sequential(self.resnet.layer3, SCse(256))
    self.encode5 = nn.Sequential(self.resnet.layer4, SCse(512))

    self.center = nn.Sequential(FPAv2(512, 256), nn.MaxPool2d(2, 2))

    self.decode5 = Decoderv2(256, 512, 64)
    self.decode4 = Decoderv2(64, 256, 64)
    self.decode3 = Decoderv2(64, 128, 64)
    self.decode2 = Decoderv2(64, 64, 64)

    self.logit = nn.Sequential(
        nn.Conv2d(256, 32, kernel_size=3, padding=1),
        nn.ELU(True),
        nn.Conv2d(32, 1, kernel_size=1, bias=False),
    )
Example #13
Source File: submodules.py From DeeperInverseCompositionalAlgorithm with MIT License
def convLayer(batchNorm, in_planes, out_planes, kernel_size=3, stride=1, dilation=1, bias=False):
    """ A wrapper of a convolution-batchnorm-ELU module """
    if batchNorm:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                      padding=(kernel_size - 1) // 2 + dilation - 1,
                      dilation=dilation, bias=bias),
            nn.BatchNorm2d(out_planes),
            # nn.LeakyReLU(0.1, inplace=True)  # deprecated
            nn.ELU(inplace=True)
        )
    else:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                      padding=(kernel_size - 1) // 2 + dilation - 1,
                      dilation=dilation, bias=True),
            # nn.LeakyReLU(0.1, inplace=True)  # deprecated
            nn.ELU(inplace=True)
        )
Example #14
Source File: HardNet.py From affnet with MIT License
def __init__(self, sm):
    super(HardNetNarELU, self).__init__()
    self.features = nn.Sequential(
        nn.Conv2d(1, 16, kernel_size=3, padding=1),
        nn.ELU(),
        nn.Conv2d(16, 16, kernel_size=3, padding=1),
        nn.ELU(),
        nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),
        nn.ELU(),
        nn.Conv2d(32, 32, kernel_size=3, padding=1),
        nn.ELU(),
        nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),
        nn.ELU(),
        nn.Conv2d(64, 64, kernel_size=3, padding=1),
        nn.ELU()
    )
    self.classifier = nn.Sequential(
        nn.Dropout(0.1),
        nn.Conv2d(64, 128, kernel_size=8),
        nn.BatchNorm2d(128, affine=False))
    self.SIFT = sm
Example #15
Source File: layers01.py From packnet-sfm with MIT License
def __init__(self, in_channels, out_channels, stride, dropout=None):
    """
    Initializes a ResidualConv object.

    Parameters
    ----------
    in_channels : int
        Number of input channels
    out_channels : int
        Number of output channels
    stride : int
        Stride
    dropout : float
        Dropout value
    """
    super().__init__()
    self.conv1 = Conv2D(in_channels, out_channels, 3, stride)
    self.conv2 = Conv2D(out_channels, out_channels, 3, 1)
    self.conv3 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
    self.normalize = torch.nn.GroupNorm(16, out_channels)
    self.activ = nn.ELU(inplace=True)
    if dropout:
        self.conv3 = nn.Sequential(self.conv3, nn.Dropout2d(dropout))
Example #16
Source File: flows.py From torchkit with MIT License
def __init__(self, dim, hid_dim, context_dim, num_layers,
             activation=nn.ELU(), fixed_order=False,
             num_ds_dim=4, num_ds_layers=1, num_ds_multiplier=3):
    super(IAF_DSF, self).__init__()
    self.dim = dim
    self.context_dim = context_dim
    self.num_ds_dim = num_ds_dim
    self.num_ds_layers = num_ds_layers
    if type(dim) is int:
        self.mdl = iaf_modules.cMADE(
            dim, hid_dim, context_dim, num_layers,
            num_ds_multiplier * (hid_dim // dim) * num_ds_layers,
            activation, fixed_order)
        self.out_to_dsparams = nn.Conv1d(
            num_ds_multiplier * (hid_dim // dim) * num_ds_layers,
            3 * num_ds_layers * num_ds_dim, 1)
        self.reset_parameters()

    self.sf = SigmoidFlow(num_ds_dim)
Example #17
Source File: autoencoders.py From torchkit with MIT License
def __init__(self, dimz, dimc, act=nn.ELU()):
    super(MNISTConvDec, self).__init__()
    self.dec = nn.Sequential(
        nn_.ResLinear(dimz, dimc),
        act,
        nn_.ResLinear(dimc, 32 * 4 * 4),
        act,
        nn_.Reshape((-1, 32, 4, 4)),
        nn.Upsample(scale_factor=2, mode='bilinear'),
        nn_.ResConv2d(32, 32, 3, 1, padding=1, activation=act),
        act,
        nn_.ResConv2d(32, 32, 3, 1, padding=1, activation=act),
        act,
        nn_.slicer[:, :, :-1, :-1],
        nn.Upsample(scale_factor=2, mode='bilinear'),
        nn_.ResConv2d(32, 16, 3, 1, padding=1, activation=act),
        act,
        nn_.ResConv2d(16, 16, 3, 1, padding=1, activation=act),
        act,
        nn.Upsample(scale_factor=2, mode='bilinear'),
        nn_.ResConv2d(16, 1, 3, 1, padding=1, activation=act),
    )
Example #18
Source File: autoencoders.py From torchkit with MIT License
def __init__(self, dimc, act=nn.ELU()):
    super(MNISTConvEnc, self).__init__()
    self.enc = nn.Sequential(
        nn_.ResConv2d(1, 16, 3, 2, padding=1, activation=act),
        act,
        nn_.ResConv2d(16, 16, 3, 1, padding=1, activation=act),
        act,
        nn_.ResConv2d(16, 32, 3, 2, padding=1, activation=act),
        act,
        nn_.ResConv2d(32, 32, 3, 1, padding=1, activation=act),
        act,
        nn_.ResConv2d(32, 32, 3, 2, padding=1, activation=act),
        act,
        nn_.Reshape((-1, 32 * 4 * 4)),
        nn_.ResLinear(32 * 4 * 4, dimc),
        act
    )
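In both torchkit examples above, a single nn.ELU() instance is created as a default argument and reused at several points in the network. This is safe because nn.ELU holds no parameters or buffers, so sharing one module instance across layers (and across class instances, via the default argument) has no side effects.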
Example #19
Source File: basenet.py From pytorch-saltnet with MIT License
def replace_bn(bn, act=None):
    slope = 0.01
    if isinstance(act, nn.ReLU):
        activation = 'leaky_relu'  # approximate relu
    elif isinstance(act, nn.LeakyReLU):
        activation = 'leaky_relu'
        slope = act.negative_slope  # use the activation's own negative slope
    elif isinstance(act, nn.ELU):
        activation = 'elu'
    else:
        activation = 'none'
    abn = ActivatedBatchNorm(num_features=bn.num_features,
                             eps=bn.eps,
                             momentum=bn.momentum,
                             affine=bn.affine,
                             track_running_stats=bn.track_running_stats,
                             activation=activation,
                             slope=slope)
    abn.load_state_dict(bn.state_dict())
    return abn
Example #20
Source File: layers.py From USIP with GNU General Public License v3.0
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True,
             activation=None, normalization=None,
             momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
    super(MyConv2d, self).__init__()
    self.activation = activation
    self.normalization = normalization

    self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)

    if self.normalization == 'batch':
        self.norm = MyBatchNorm2d(out_channels, momentum=momentum, affine=True,
                                  momentum_decay_step=bn_momentum_decay_step,
                                  momentum_decay=bn_momentum_decay)
    elif self.normalization == 'instance':
        self.norm = nn.InstanceNorm2d(out_channels, momentum=momentum, affine=True)

    if self.activation == 'relu':
        self.act = nn.ReLU()
    elif self.activation == 'elu':
        self.act = nn.ELU(alpha=1.0)
    elif self.activation == 'swish':
        self.act = Swish()
    elif self.activation == 'leakyrelu':
        self.act = nn.LeakyReLU(0.01)
    elif self.activation == 'selu':
        self.act = nn.SELU()

    self.weight_init()
Example #21
Source File: layers.py From USIP with GNU General Public License v3.0
def __init__(self, in_features, out_features,
             activation=None, normalization=None,
             momentum=0.1, bn_momentum_decay_step=None, bn_momentum_decay=1):
    super(MyLinear, self).__init__()
    self.activation = activation
    self.normalization = normalization

    self.linear = nn.Linear(in_features, out_features, bias=True)

    if self.normalization == 'batch':
        self.norm = MyBatchNorm1d(out_features, momentum=momentum, affine=True,
                                  momentum_decay_step=bn_momentum_decay_step,
                                  momentum_decay=bn_momentum_decay)
    elif self.normalization == 'instance':
        self.norm = nn.InstanceNorm1d(out_features, momentum=momentum, affine=True)

    if self.activation == 'relu':
        self.act = nn.ReLU()
    elif self.activation == 'elu':
        self.act = nn.ELU(alpha=1.0)
    elif self.activation == 'swish':
        self.act = Swish()
    elif self.activation == 'leakyrelu':
        self.act = nn.LeakyReLU(0.01)
    elif self.activation == 'selu':
        self.act = nn.SELU()

    self.weight_init()
Example #22
Source File: dec_pixelcnn_v2.py From vae-lagging-encoder with MIT License
def __init__(self, in_channels, kernel_size):
    super(PixelCNNBlock, self).__init__()
    self.mask_type = 'B'
    padding = kernel_size // 2
    out_channels = in_channels // 2

    self.main = nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 1, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ELU(),
        MaskedConv2d(self.mask_type, out_channels, out_channels, out_channels,
                     kernel_size, padding=padding, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ELU(),
        nn.Conv2d(out_channels, in_channels, 1, bias=False),
        nn.BatchNorm2d(in_channels),
    )
    self.activation = nn.ELU()
    self.reset_parameters()
Example #23
Source File: compute_memory.py From TreeFilter-Torch with MIT License
def compute_memory(module, inp, out):
    if isinstance(module, (nn.ReLU, nn.ReLU6, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_memory(module, inp, out)
    elif isinstance(module, nn.PReLU):
        return compute_PReLU_memory(module, inp, out)
    elif isinstance(module, nn.Conv2d):
        return compute_Conv2d_memory(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_memory(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_memory(module, inp, out)
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d,
                             nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_memory(module, inp, out)
    else:
        print("[Memory]: {} is not supported!".format(type(module).__name__))
        return 0, 0
Example #24
Source File: helper.py From torchscope with Apache License 2.0
def compute_flops(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_flops(module, inp, out) // 2
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_flops(module, inp, out) // 2
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d)):
        return compute_Pool2d_flops(module, inp, out) // 2
    elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_flops(module, inp, out) // 2
    elif isinstance(module, nn.Upsample):
        return compute_Upsample_flops(module, inp, out) // 2
    elif isinstance(module, nn.Linear):
        return compute_Linear_flops(module, inp, out) // 2
    else:
        return 0
Example #25
Source File: norms.py From JEM with Apache License 2.0
def __init__(self, num_features, num_classes):
    super().__init__()
    del num_classes
    self.num_features = num_features
    self.embed = nn.Sequential(
        nn.Linear(1, 256),
        nn.ELU(inplace=True),
        nn.Linear(256, 256),
        nn.ELU(inplace=True),
        nn.Linear(256, self.num_features * 2),
    )
Example #26
Source File: adaptation_networks.py From cnaps with MIT License
def __init__(self, in_size, out_size):
    super(DenseResidualBlock, self).__init__()
    self.linear1 = nn.Linear(in_size, out_size)
    self.linear2 = nn.Linear(out_size, out_size)
    self.linear3 = nn.Linear(out_size, out_size)
    self.elu = nn.ELU()
Example #27
Source File: helper.py From torchscope with Apache License 2.0
def compute_ReLU_flops(module, inp, out):
    assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU))
    batch_size = inp.size()[0]
    active_elements_count = batch_size
    for s in inp.size()[1:]:
        active_elements_count *= s
    return active_elements_count
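Since the returned count is simply the number of elements in the input tensor, a quick sanity check looks like this (a hypothetical snippet, assuming the function above is in scope):

import torch
import torch.nn as nn

elu = nn.ELU()
x = torch.randn(8, 64, 32, 32)
out = elu(x)
print(compute_ReLU_flops(elu, x, out))  # 8 * 64 * 32 * 32 = 524288, i.e. x.numel()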