Python torch.nn.functional.elu() Examples

The following are 30 code examples of torch.nn.functional.elu(), drawn from the open-source projects credited above each example. You may also want to check out the other functions and classes available in the torch.nn.functional module.
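For reference, torch.nn.functional.elu(input, alpha=1.0, inplace=False) applies the exponential linear unit elementwise: it returns x for positive inputs and alpha * (exp(x) - 1) for non-positive inputs. The minimal sketch below is not taken from any of the projects credited here; it simply compares F.elu against a manual computation:

import torch
import torch.nn.functional as F

# ELU: x for x > 0, alpha * (exp(x) - 1) for x <= 0
x = torch.tensor([-2.0, -0.5, 0.0, 1.5])

y = F.elu(x)              # default alpha=1.0
z = F.elu(x, alpha=0.5)   # smaller negative saturation value

# Manual reference computation for comparison.
manual = torch.where(x > 0, x, 0.5 * (torch.exp(x) - 1.0))
print(torch.allclose(z, manual))  # True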
Example #1
Source File: bn.py    From openseg.pytorch with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an InPlace Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True`, apply a learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) 
Example #2
Source File: gcn.py    From GraphIE with GNU General Public License v3.0
def forward(self, x, adjs, mask=None):
        # x: (batch, N, d_input)
        # adjs: (batch, n_graph, N, N)
        assert len(adjs.size()) == 4
        batch, n_node, _ = x.size()
        assert adjs.size(1) == self.n_graph

        h = x.clone()
        x = x.unsqueeze(1).expand(-1, self.n_graph, -1, -1)
        h_gcn = torch.matmul(adjs, x).transpose(1, 2).contiguous().view(batch, n_node,
                                                                        -1)  # (batch, N, n_graph * d_input)

        # self.linear_gcn.weight.transpose(1,0).data.size():[384, 128]

        h_gcn = self.linear_gcn(h_gcn)
        d = adjs.sum(dim=3).sum(dim=1).unsqueeze(2)
        d = d + d.eq(0).float()
        h = h + h_gcn / d  # (batch, N, d_model)

        h = self.elu(h)
        h = self.norm(h)
        h = self.dropout(h)
        return h, None
        # _h1 = self.sparse_mm(a1, x)
        # _h2 = self.sparse_mm(a2, x) 
Example #3
Source File: bn.py    From ACAN with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an InPlace Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True`, apply a learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) 
Example #4
Source File: misc.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def forward(self, x):
        inv_var = torch.rsqrt(self.running_var + self.eps)
        if self.affine:
            alpha = self.weight * inv_var
            beta = self.bias - self.running_mean * alpha
        else:
            alpha = inv_var
            beta = - self.running_mean * alpha

        x.mul_(alpha.view(self._broadcast_shape(x)))
        x.add_(beta.view(self._broadcast_shape(x)))

        if self.activation == "relu":
            return functional.relu(x, inplace=True)
        elif self.activation == "leaky_relu":
            return functional.leaky_relu(x, negative_slope=self.activation_param, inplace=True)
        elif self.activation == "elu":
            return functional.elu(x, alpha=self.activation_param, inplace=True)
        elif self.activation == "identity":
            return x
        else:
            raise RuntimeError("Unknown activation function {}".format(self.activation)) 
Example #5
Source File: sequence_labeling.py    From GraphIE with GNU General Public License v3.0
def _get_rnn_output(self, input_word_orig, input_word, input_char,
                        mask=None, length=None, hx=None, show_net=False):

        input, length = self._get_word_enc(
            input_word_orig, input_word, input_char, mask=mask, length=length, show_net=show_net)

        output, hn = self._get_rnn_enc(input, length, mask, hx, show_net=show_net)

        if self.tag_space:
            # [batch, length, tag_space]
            output = self.dropout_tag(F.elu(self.lstm_to_tag_space(output)))
            if show_net:
                print("[Net] to_tag")
                show_var(["self.lstm_to_tag_space"])
                show_var(["F.elu"])
                show_var(["self.dropout_tag"])

        return output, hn, mask, length 
Example #6
Source File: gat.py    From DeepInf with MIT License
def forward(self, x, vertices, adj):
        emb = self.embedding(vertices)
        if self.inst_norm:
            emb = self.norm(emb.transpose(1, 2)).transpose(1, 2)
        x = torch.cat((x, emb), dim=2)
        if self.use_vertex_feature:
            vfeature = self.vertex_feature(vertices)
            x = torch.cat((x, vfeature), dim=2)
        bs, n = adj.size()[:2]
        for i, gat_layer in enumerate(self.layer_stack):
            x = gat_layer(x, adj) # bs x n_head x n x f_out
            if i + 1 == self.n_layer:
                x = x.mean(dim=1)
            else:
                x = F.elu(x.transpose(1, 2).contiguous().view(bs, n, -1))
                x = F.dropout(x, self.dropout, training=self.training)
        return F.log_softmax(x, dim=-1) 
Example #7
Source File: residual.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def forward(self, x):
        if hasattr(self, "proj_conv"):
            residual = self.proj_conv(x)
            residual = self.proj_bn(residual)
        else:
            residual = x

        x = self.convs(x) + residual

        if self.convs.bn1.activation == "relu":
            return functional.relu(x, inplace=True)
        elif self.convs.bn1.activation == "leaky_relu":
            return functional.leaky_relu(x, negative_slope=self.convs.bn1.activation_param, inplace=True)
        elif self.convs.bn1.activation == "elu":
            return functional.elu(x, alpha=self.convs.bn1.activation_param, inplace=True)
        elif self.convs.bn1.activation == "identity":
            return x
        else:
            raise RuntimeError("Unknown activation function {}".format(self.convs.bn1.activation)) 
Example #8
Source File: gnn.py    From GraphIE with GNU General Public License v3.0
def forward(self, x, adjs, mask=None):
        # x: (batch, N, input_dim)
        # adjs: (batch, n_graph, N, N)
        if len(adjs.size()) == 3:
            adjs = adjs.unsqueeze(1)
        batch, num_sent, d_input = x.size()
        assert adjs.size(1) == self.n_graph
        h = self.linear(x)
        x = x.unsqueeze(1).expand(-1, self.n_graph, -1, -1)
        h_gcn = torch.matmul(adjs, x).transpose(1, 2).contiguous().view(batch, num_sent, -1)
        h_gcn = self.linear_gcn(h_gcn)
        d = adjs.sum(dim=3).sum(dim=1).unsqueeze(2)
        d = d + d.eq(0).float()
        h = h + h_gcn / d # batch_size * docu_len * dim
        if self.globalnode:
            h = h + self.g_node(x, mask).unsqueeze(1).expand_as(h)
        h = F.elu(h)
        return h 
Example #9
Source File: layers.py    From NLP_Toolkit with Apache License 2.0
def forward(self, input, adj):
        h = torch.mm(input, self.W)
        N = h.size()[0]

        a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)
        e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))

        zero_vec = -9e15*torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)

        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime 
Example #10
Source File: layers.py    From graph-cnn.pytorch with MIT License
def forward(self, input, adj):
        h = torch.mm(input, self.W)
        N = h.size()[0]

        f_1 = torch.matmul(h, self.a1)
        f_2 = torch.matmul(h, self.a2)
        e = self.leakyrelu(f_1 + f_2.transpose(0,1))

        zero_vec = -9e15*torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)

        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime 
Example #11
Source File: generative.py    From torchsupport with MIT License
def __init__(self, in_size, out_size,
               size=None, upsample=2,
               activation=func.elu):
    super(UpsampleBlock, self).__init__()
    self.is_first = False
    self.size = size
    if size is not None:
      self.is_first = True
      total_size = torch.Size(size).numel()
      self.input = nn.Linear(in_size, out_size * total_size)
    self.pixnorm = tsn.PixelNorm()
    self.convs = nn.ModuleList([
      nn.Conv2d(in_size, in_size, 3),
      nn.Conv2d(in_size, out_size, 3)
    ])
    self.activation = activation
    self.upsample = upsample 
Example #12
Source File: test_nn.py    From numpy-ml with GNU General Public License v3.0
def test_elu_activation(N=15):
    from numpy_ml.neural_nets.activations import ELU

    np.random.seed(12345)

    N = np.inf if N is None else N

    i = 0
    while i < N:
        n_dims = np.random.randint(1, 10)
        z = random_tensor((1, n_dims))

        alpha = np.random.uniform(0, 10)

        mine = ELU(alpha)
        gold = lambda z, a: F.elu(torch.from_numpy(z), alpha).numpy()

        assert_almost_equal(mine.fn(z), gold(z, alpha))
        print("PASSED")
        i += 1 
Example #13
Source File: losses.py    From centerpose with MIT License
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.elu(errors_sorted) + 1, Variable(grad))
    return loss 
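A note on the F.elu(errors_sorted) + 1 term in this loss (Example #16 below uses the same expression; Example #17 applies F.elu without the +1 offset): for positive errors it grows linearly as x + 1, and for negative errors it decays to exp(x), which stays strictly positive, so every term keeps a nonzero gradient, unlike a hard relu clamp. A minimal sketch with hypothetical error values, not taken from the repositories above:

import torch
import torch.nn.functional as F

errors = torch.tensor([2.0, 0.3, -0.5, -3.0])

smooth = F.elu(errors) + 1   # smooth, strictly positive surrogate used above
hard = F.relu(errors)        # plain hinge-style clamp for comparison

print(smooth)  # tensor([3.0000, 1.3000, 0.6065, 0.0498])
print(hard)    # tensor([2.0000, 0.3000, 0.0000, 0.0000])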
Example #14
Source File: bn.py    From DeepLab-v3-plus-cityscapes with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an InPlace Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True`, apply a learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) 
Example #15
Source File: models.py    From cerl with Apache License 2.0
def forward(self, input):
        """Method to forward propagate through the actor's graph

            Parameters:
                  input (tensor): states

            Returns:
                  action (tensor): actions


        """
        #Hidden Layer 1
        out = F.elu(self.f1(input))
        out = self.ln1(out)

        #Hidden Layer 2
        out = F.elu(self.f2(out))
        out = self.ln2(out)

        #Out
        return torch.sigmoid(self.w_out(out)) 
Example #16
Source File: lovasz.py    From argus-tgs-salt with MIT License
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.elu(errors_sorted) + 1, Variable(grad))
    return loss 
Example #17
Source File: lovasz_losses.py    From open-solution-salt-identification with MIT License
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * signs)

    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.elu(errors_sorted), grad)
    return loss 
Example #18
Source File: mnist_voxel_grid.py    From pytorch_geometric with MIT License
def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=5, start=0, end=28)
        data.edge_attr = None
        data = max_pool(cluster, data, transform=transform)

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=7, start=0, end=28)
        data.edge_attr = None
        data = max_pool(cluster, data, transform=transform)

        data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=14, start=0, end=27.99)
        x, _ = max_pool_x(cluster, data.x, data.batch, size=4)

        x = x.view(-1, self.fc1.weight.size(1))
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1) 
Example #19
Source File: conditional_mnist_score_classifier.py    From torchsupport with MIT License
def forward(self, inputs, noise):
    out = self.input(inputs)
    cond = torch.zeros(
      inputs.size(0), 10,
      dtype=inputs.dtype,
      device=inputs.device
    )
    offset = (torch.log(noise) / torch.log(torch.tensor(0.60))).long()
    cond[torch.arange(inputs.size(0)), offset.view(-1)] = 1
    connections = []
    for norm, block in zip(self.down_norm, self.down):
      out = func.elu(block(norm(out, cond)))
      connections.append(out)
    features = func.adaptive_avg_pool2d(out, 1)
    logits = self.predict(features.view(features.size(0), -1))
    for norm, block, shortcut in zip(self.up_norm, self.up, reversed(connections)):
      out = func.elu(block(norm(torch.cat((out, shortcut), dim=1), cond)))
    del connections
    return self.output(out), logits 
Example #20
Source File: layers.py    From KG-A2C with MIT License
def forward(self, input, adj):
        h = torch.mm(input, self.W)
        N = h.size()[0]
        a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)

        e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))

        # Non-edges are masked with a tiny positive constant before the softmax
        # (the other GAT examples above use a large negative fill of -9e15 instead).
        zero_vec = torch.zeros_like(e)
        zero_vec = zero_vec.fill_(9e-15)
        attention = torch.where(adj > 0, e, zero_vec)

        attention = F.softmax(attention, dim=1)

        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)

        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime 
Example #21
Source File: gnn.py    From GraphIE with GNU General Public License v3.0
def forward(self, x, adj, mask=None):
        # x: (N, input_dim)
        graph_mask = adj.ne(0) # .data
        h_gcn, attn = self.attention(x, x, x, graph_mask)
        h = self.linear(x) + h_gcn # batch_size * docu_len * dim
        if self.globalnode:
            h = h + self.g_node(x, mask).unsqueeze(1).expand_as(h)
        h = F.elu(h)
        return h 
Example #22
Source File: gnn.py    From GraphIE with GNU General Public License v3.0
def forward(self, x, pos, adj):
        # x: (N, input_dim)
        h_gcn, attn = self.attention(x, pos, adj)
        h = self.linear1(x) + self.linear2(h_gcn) # batch_size * docu_len * dim
        h = F.elu(h)
        return h 
Example #23
Source File: bn.py    From DeepLab-v3-plus-cityscapes with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True`, apply a learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(ABN, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.momentum = momentum
        self.activation = activation
        self.slope = slope
        if self.affine:
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters() 
Example #24
Source File: flowers_consistent_gan.py    From torchsupport with MIT License
def forward(self, sample, prior, restricted_inputs, available, requested):
    prior = func.interpolate(prior, scale_factor=2, mode='bilinear')
    inputs = torch.cat((prior, restricted_inputs, available, requested), dim=1)
    out = self.preprocess(inputs)
    skip = []
    for idx, (block, bn) in enumerate(zip(self.encoder, self.encoder_norm)):
      out = func.elu(block(bn(out))) + out
      skip.append(out)
      if (idx + 1) % 2 == 0:
        out = func.avg_pool2d(out, 2)

    rec = self.bg
    for ridx, (noise, block, bn) in enumerate(zip(self.noise, self.decoder, self.decoder_norm)):
      idx = 6 - ridx
      if idx % 2 == 0:
        rec = func.interpolate(rec, scale_factor=2, mode='bilinear')
      normed = bn(rec + noise * torch.randn_like(rec), sample)
      combined = torch.cat((normed, skip[idx - 1]), dim=1)
      rec = func.elu(block(combined))

    for idx, (noise, block, bn) in enumerate(zip(self.post_noise, self.post, self.post_norm)):
      normed = bn(rec + noise * torch.randn_like(rec), sample)
      rec = func.elu(block(normed)) + rec
    result = self.color(rec).sigmoid()

    return result 
Example #25
Source File: bn.py    From openseg.pytorch with MIT License
def __init__(self, num_features, devices=None, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu",
                 slope=0.01):
        """Creates a synchronized, InPlace Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        devices : list of int or None
            IDs of the GPUs that will run the replicas of this module.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True`, apply a learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(InPlaceABNSync, self).__init__(num_features, eps, momentum, affine, activation, slope)
        self.devices = devices if devices else list(range(torch.cuda.device_count()))

        # Initialize queues
        self.worker_ids = self.devices[1:]
        self.master_queue = Queue(len(self.worker_ids))
        self.worker_queues = [Queue(1) for _ in self.worker_ids] 
Example #26
Source File: selu.py    From verb-attributes with MIT License
def selu(x, inplace=False):
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    temp1 = scale * F.relu(x)
    temp2 = scale * alpha * (F.elu(-1*F.relu(-1*x)))
    return temp1 + temp2 
Example #27
Source File: bn.py    From openseg.pytorch with MIT License
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01):
        """Creates an Activated Batch Normalization module

        Parameters
        ----------
        num_features : int
            Number of feature channels in the input and output.
        eps : float
            Small constant to prevent numerical issues.
        momentum : float
            Momentum factor applied to compute running statistics.
        affine : bool
            If `True`, apply a learned scale and shift transformation after normalization.
        activation : str
            Name of the activation function, one of: `leaky_relu`, `elu` or `none`.
        slope : float
            Negative slope for the `leaky_relu` activation.
        """
        super(ABN, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        self.momentum = momentum
        self.activation = activation
        self.slope = slope
        if self.affine:
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters() 
Example #28
Source File: bn.py    From openseg.pytorch with MIT License
def forward(self, x):
        x = functional.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,
                                  self.training, self.momentum, self.eps)

        if self.activation == ACT_RELU:
            return functional.relu(x, inplace=True)
        elif self.activation == ACT_LEAKY_RELU:
            return functional.leaky_relu(x, negative_slope=self.slope, inplace=True)
        elif self.activation == ACT_ELU:
            return functional.elu(x, inplace=True)
        else:
            return x 
Example #29
Source File: bn.py    From DeepLab-v3-plus-cityscapes with MIT License
def forward(self, x):
        x = functional.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,
                                  self.training, self.momentum, self.eps)

        if self.activation == ACT_RELU:
            return functional.relu(x, inplace=True)
        elif self.activation == ACT_LEAKY_RELU:
            return functional.leaky_relu(x, negative_slope=self.slope, inplace=True)
        elif self.activation == ACT_ELU:
            return functional.elu(x, inplace=True)
        else:
            return x 
Example #30
Source File: pytorch_util.py    From leap with MIT License
def selu(
        x,
        alpha=1.6732632423543772848170429916717,
        scale=1.0507009873554804934193349852946,
):
    """
    Based on https://github.com/dannysdeng/selu/blob/master/selu.py
    """
    return scale * (
        F.relu(x) + alpha * (F.elu(-1 * F.relu(-1 * x)))
    )
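The two SELU helpers above (Examples #26 and #30) build SELU out of F.elu and F.relu. As a quick sanity check (a sketch, assuming current PyTorch semantics), the composed expression should match the built-in torch.nn.functional.selu up to floating-point rounding:

import torch
import torch.nn.functional as F

# Same alpha and scale constants as in Examples #26 and #30.
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946

x = torch.randn(4, 8)
composed = scale * (F.relu(x) + alpha * F.elu(-1 * F.relu(-1 * x)))

print(torch.allclose(composed, F.selu(x)))  # expected: True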