Python torch.nn.functional.leaky_relu_() Examples

The following are 16 code examples of torch.nn.functional.leaky_relu_(), the in-place variant of torch.nn.functional.leaky_relu(). The original project, source file, and license are noted above each example. You may also want to check out all available functions/classes of the module torch.nn.functional.
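Before the examples, a minimal sketch of what the trailing underscore means: leaky_relu_ overwrites and returns its own input tensor (the default negative_slope is 0.01; the tensor values below are arbitrary).

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, -0.5, 1.0])
y = F.leaky_relu_(x, negative_slope=0.2)   # in place: x itself is modified
print(y)        # tensor([-0.4000, -0.1000,  1.0000])
print(y is x)   # True -- the same tensor object is returned

The in-place form saves a temporary allocation, but autograd raises an error if the overwritten values are needed for a backward pass elsewhere; the out-of-place leaky_relu is the safe default in that case.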
Example #1
Source File: networks.py    From cycle-consistent-vae with MIT License
def forward(self, style_embeddings, class_embeddings):
        # project each embedding and activate in place
        style_embeddings = F.leaky_relu_(self.style_input(style_embeddings), negative_slope=0.2)
        class_embeddings = F.leaky_relu_(self.class_input(class_embeddings), negative_slope=0.2)

        # concatenate features and reshape into a 128-channel 2x2 map
        x = torch.cat((style_embeddings, class_embeddings), dim=1)
        x = x.view(x.size(0), 128, 2, 2)
        x = self.deconv_model(x)

        return x 
Example #2
Source File: networks.py    From disentangling-factors-of-variation-using-adversarial-training with MIT License
def forward(self, style_embeddings, class_embeddings):
        style_embeddings = F.leaky_relu_(self.style_input(style_embeddings), negative_slope=0.2)
        class_embeddings = F.leaky_relu_(self.class_input(class_embeddings), negative_slope=0.2)

        x = torch.cat((style_embeddings, class_embeddings), dim=1)
        x = x.view(x.size(0), 128, 2, 2)
        x = self.deconv_model(x)

        return x 
Example #3
Source File: modules.py    From source_separation with Apache License 2.0
def forward(self, x):
        # split the channels in half: leaky-ReLU the first (real) half in place,
        # bound the second (imaginary/phase) half to (-pi, pi) with tanh
        real, img = x.chunk(2, 1)
        return torch.cat([F.leaky_relu_(real), torch.tanh(img) * np.pi], dim=1)
Example #4
Source File: models_encoder.py    From generative-graph-transformer with MIT License
def forward(self, x):
        x = F.leaky_relu_(self.conv1(x))
        x = self.conv1_bn(self.pool1(x))
        x = self.conv2_bn(F.leaky_relu_(self.conv2(x)))
        x = x.reshape(x.shape[0], -1)   # flatten for the fully connected layers
        x = self.fc1_bn(F.leaky_relu_(self.fc1(x)))
        x = self.fc2(x)
        return x 
Example #5
Source File: models_encoder.py    From generative-graph-transformer with MIT License
def forward(self, x):
        x = self.fc1_bn(F.leaky_relu_(self.fc1(x)))
        x = self.fc2_bn(F.leaky_relu_(self.fc2(x)))
        x = x.reshape(x.shape[0], 32, 16, 16)   # unflatten into a 32-channel 16x16 map
        x = self.conv1_bn(F.leaky_relu_(self.conv1(x)))
        x = torch.tanh(self.conv2(x))   # squash outputs to (-1, 1)
        
        return x 
Example #6
Source File: models_encoder.py    From generative-graph-transformer with MIT License
def forward(self, x):
        x = F.leaky_relu_(self.conv1(x))
        x = self.conv1_bn(self.pool1(x))
        x = self.conv2_bn(F.leaky_relu_(self.conv2(x)))
        x = self.conv3(x)
        x = x.reshape(x.shape[0], -1)
        return x 
Example #7
Source File: shift_unet.py    From Shift-Net_pytorch with MIT License
def forward(self, input, flip_feat=None):
        # Encoder
        # No norm on the first layer
        e1 = self.e1_c(input)
        e2 = self.e2_norm(self.e2_c(F.leaky_relu_(e1, negative_slope=0.2)))
        e3 = self.e3_norm(self.e3_c(F.leaky_relu_(e2, negative_slope=0.2)))
        e4 = self.e4_norm(self.e4_c(F.leaky_relu_(e3, negative_slope=0.2)))
        e5 = self.e5_norm(self.e5_c(F.leaky_relu_(e4, negative_slope=0.2)))
        e6 = self.e6_norm(self.e6_c(F.leaky_relu_(e5, negative_slope=0.2)))

        e7 = self.e7_norm(self.e7_c(F.leaky_relu_(e6, negative_slope=0.2)))
        # No norm on the innermost layer
        e8 = self.e8_c(F.leaky_relu_(e7, negative_slope=0.2))

        # Decoder
        d1 = self.d1_norm(self.d1_dc(F.relu_(e8)))
        d2 = self.d2_norm(self.d2_dc(F.relu_(self.cat_feat(d1, e7))))
        d3 = self.d3_norm(self.d3_dc(F.relu_(self.cat_feat(d2, e6))))
        d4 = self.d4_norm(self.d4_dc(F.relu_(self.cat_feat(d3, e5))))
        d5 = self.d5_norm(self.d5_dc(F.relu_(self.cat_feat(d4, e4))))
        tmp, innerFeat = self.shift(self.innerCos(F.relu_(self.cat_feat(d5, e3))), flip_feat)
        d6 = self.d6_norm(self.d6_dc(tmp))
        d7 = self.d7_norm(self.d7_dc(F.relu_(self.cat_feat(d6, e2))))
        # No norm on the last layer
        d8 = self.d8_dc(F.relu_(self.cat_feat(d7, e1)))

        d8 = torch.tanh(d8)

        return d8, innerFeat 
Example #8
Source File: unet.py    From Shift-Net_pytorch with MIT License
def forward(self, input):
        # Encoder
        # No norm on the first layer
        e1 = self.e1_c(input)
        e2 = self.e2_norm(self.e2_c(F.leaky_relu_(e1, negative_slope=0.2)))
        e3 = self.e3_norm(self.e3_c(F.leaky_relu_(e2, negative_slope=0.2)))
        e4 = self.e4_norm(self.e4_c(F.leaky_relu_(e3, negative_slope=0.2)))
        e5 = self.e5_norm(self.e5_c(F.leaky_relu_(e4, negative_slope=0.2)))
        e6 = self.e6_norm(self.e6_c(F.leaky_relu_(e5, negative_slope=0.2)))
        e7 = self.e7_norm(self.e7_c(F.leaky_relu_(e6, negative_slope=0.2)))
        # No norm on the innermost layer
        e8 = self.e8_c(F.leaky_relu_(e7, negative_slope=0.2))

        # Decoder
        d1 = self.d1_norm(self.d1_c(F.relu_(e8)))
        d2 = self.d2_norm(self.d2_c(F.relu_(torch.cat([d1, e7], dim=1))))
        d3 = self.d3_norm(self.d3_c(F.relu_(torch.cat([d2, e6], dim=1))))
        d4 = self.d4_norm(self.d4_c(F.relu_(torch.cat([d3, e5], dim=1))))
        d5 = self.d5_norm(self.d5_c(F.relu_(torch.cat([d4, e4], dim=1))))
        d6 = self.d6_norm(self.d6_c(F.relu_(torch.cat([d5, e3], dim=1))))
        d7 = self.d7_norm(self.d7_c(F.relu_(torch.cat([d6, e2], dim=1))))
        # No norm on the last layer
        d8 = self.d8_c(F.relu_(torch.cat([d7, e1], dim=1)))

        d8 = torch.tanh(d8)

        return d8 
Example #9
Source File: activation.py    From torch2trt with MIT License
def aten_leaky_relu_(inputs, attributes, scope):
    # converter for the in-place aten::leaky_relu_ op: emit a TensorRT or TVM
    # leaky-ReLU activation when possible, otherwise fall back to PyTorch
    inp, leak = inputs[:2]
    ctx = current_context()
    net = ctx.network
    if ctx.is_tensorrt and has_trt_tensor(inputs):
        layer = net.add_activation(inp, trt.ActivationType.LEAKY_RELU)
        layer.alpha = leak
        output = layer.get_output(0)
        output.name = scope
        layer.name = scope
        return [output]
    elif ctx.is_tvm and has_tvm_tensor(inputs):
        return [_op.nn.leaky_relu(inputs[0], leak)]

    return [F.leaky_relu_(inp, leak)] 
Example #10
Source File: wideresnet.py    From gluoncv-torch with MIT License
def forward(self, x):
        # functional batch norm using the module's own buffers/parameters,
        # followed by an in-place leaky ReLU on the fresh result
        y = F.batch_norm(
            x, self.running_mean, self.running_var, self.weight, self.bias,
            self.training or not self.track_running_stats,
            self.momentum, self.eps)
        return F.leaky_relu_(y, self.slope) 
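For context, a forward like this typically lives on a subclass of nn.BatchNorm2d that stores the slope. A minimal self-contained sketch under that assumption (the class name and slope default are illustrative, not the project's actual definitions):

import torch
import torch.nn as nn
import torch.nn.functional as F

class BatchNorm2dLeakyReLU(nn.BatchNorm2d):   # hypothetical wrapper
    def __init__(self, num_features, slope=0.1, **kwargs):
        super(BatchNorm2dLeakyReLU, self).__init__(num_features, **kwargs)
        self.slope = slope

    def forward(self, x):
        # normalize, then activate in place; safe because y is freshly allocated
        y = F.batch_norm(
            x, self.running_mean, self.running_var, self.weight, self.bias,
            self.training or not self.track_running_stats,
            self.momentum, self.eps)
        return F.leaky_relu_(y, self.slope)

x = torch.randn(2, 16, 8, 8)
print(BatchNorm2dLeakyReLU(16)(x).shape)   # torch.Size([2, 16, 8, 8])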
Example #11
Source File: test_pyprof_nvtx.py    From apex with BSD 3-Clause "New" or "Revised" License
def test_leaky_relu_(self):
        # exercises the in-place op on a CUDA tensor
        inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
        output = F.leaky_relu_(inp, negative_slope=0.01) 
Example #12
Source File: gnn.py    From GraphNAS with Apache License 2.0
def __init__(self, input_dim, pooling_dim=512, num_fc=1, act=F.leaky_relu_):
        super(MaxPoolingAggregator, self).__init__()
        out_dim = input_dim
        self.fc = nn.ModuleList()
        self.act = act   # defaults to the in-place leaky ReLU
        if num_fc > 0:
            # num_fc - 1 hidden layers of width pooling_dim, then a projection back to input_dim
            for i in range(num_fc - 1):
                self.fc.append(nn.Linear(out_dim, pooling_dim))
                out_dim = pooling_dim
            self.fc.append(nn.Linear(out_dim, input_dim)) 
Example #13
Source File: gnn.py    From GraphNAS with Apache License 2.0
def __init__(self, input_dim, pooling_dim=512, num_fc=1, act=F.leaky_relu_):
        super(MeanPoolingAggregator, self).__init__(input_dim, pooling_dim, num_fc, act) 
Example #14
Source File: gnn.py    From GraphNAS with Apache License 2.0
def __init__(self, input_dim, pooling_dim=512, num_fc=1, act=F.leaky_relu_):
        super(MLPAggregator, self).__init__(input_dim, pooling_dim, num_fc, act) 
Example #15
Source File: util.py    From FlowNetPytorch with MIT License
import torch.nn.functional as F
# requires the spatial-correlation-sampler package
from spatial_correlation_sampler import spatial_correlation_sample


def correlate(input1, input2):
    out_corr = spatial_correlation_sample(input1,
                                          input2,
                                          kernel_size=1,
                                          patch_size=21,
                                          stride=1,
                                          padding=0,
                                          dilation_patch=2)
    # collate dimensions 1 and 2 in order to be treated as a
    # regular 4D tensor
    b, ph, pw, h, w = out_corr.size()
    out_corr = out_corr.view(b, ph * pw, h, w) / input1.size(1)   # normalize by the channel count
    return F.leaky_relu_(out_corr, 0.1)
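A quick shape check for the helper above (sizes arbitrary; assumes the spatial-correlation-sampler package is installed): patch_size=21 gives a 21 x 21 grid of displacements, flattened into 441 correlation channels.

import torch

a = torch.randn(1, 64, 48, 64)
b = torch.randn(1, 64, 48, 64)
print(correlate(a, b).shape)   # torch.Size([1, 441, 48, 64])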
Example #16
Source File: model.py    From PWC-Net_pytorch with MIT License
def forward(self, x):
        args = self.args

        if args.input_norm:
            # subtract the per-image RGB mean and scale by the global max value
            rgb_mean = x.contiguous().view(x.size()[:2] + (-1,)).mean(dim=-1).view(x.size()[:2] + (1, 1, 1))
            x = (x - rgb_mean) / args.rgb_max

        x1_raw = x[:, :, 0, :, :].contiguous()   # first frame
        x2_raw = x[:, :, 1, :, :].contiguous()   # second frame

        # the bottom pyramid level is the original image itself
        x1_pyramid = self.feature_pyramid_extractor(x1_raw) + [x1_raw]
        x2_pyramid = self.feature_pyramid_extractor(x2_raw) + [x2_raw]

        # outputs
        flows = []

        # tensors for summary
        summaries = {
            'x2_warps': [],
        }

        for l, (x1, x2) in enumerate(zip(x1_pyramid, x2_pyramid)):
            # upsample flow and scale the displacement
            if l == 0:
                # start from zero flow at the coarsest level
                shape = list(x1.size())
                shape[1] = 2
                flow = torch.zeros(shape).to(args.device)
            else:
                # F.upsample is deprecated in newer PyTorch; F.interpolate is the equivalent
                flow = F.upsample(flow, scale_factor=2, mode='bilinear') * 2

            x2_warp = self.warping_layer(x2, flow)

            # correlation
            corr = self.corr(x1, x2_warp)
            if args.corr_activation:
                F.leaky_relu_(corr)   # in place: corr is modified, so the return value can be dropped

            # concatenate features and estimate the flow
            # ATTENTION: the `+ flow` term makes the flow estimator learn residual flow
            if args.residual:
                flow_coarse = self.flow_estimators[l](torch.cat([x1, corr, flow], dim=1)) + flow
            else:
                flow_coarse = self.flow_estimators[l](torch.cat([x1, corr, flow], dim=1))

            flow_fine = self.context_networks[l](torch.cat([x1, flow], dim=1))
            flow = flow_coarse + flow_fine
            if l == args.output_level:
                # upsample straight to the final resolution and scale displacements to match
                scale = 2 ** (args.num_levels - args.output_level - 1)
                flow = F.upsample(flow, scale_factor=scale, mode='bilinear') * scale
                flows.append(flow)
                summaries['x2_warps'].append(x2_warp.data)
                break
            else:
                flows.append(flow)
                summaries['x2_warps'].append(x2_warp.data)

        return flows, summaries