Python torch.nn.functional.logsigmoid() Examples

The following are 29 code examples of torch.nn.functional.logsigmoid(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module torch.nn.functional.
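Before the project examples, a minimal self-contained snippet (not from any project below) shows why F.logsigmoid is preferred over composing torch.log with torch.sigmoid: the fused op stays finite where the naive composition underflows.

import torch
import torch.nn.functional as F

x = torch.tensor([-200.0, 0.0, 200.0])

# Naive composition underflows: sigmoid(-200) rounds to 0 in float32,
# so its log becomes -inf.
print(torch.log(torch.sigmoid(x)))  # tensor([-inf, -0.6931, 0.])

# The fused op evaluates log(1 / (1 + exp(-x))) stably.
print(F.logsigmoid(x))              # tensor([-200.0000, -0.6931, 0.])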
Example #1
Source File: skip_gram.py    From kor2vec with Apache License 2.0
def forward(self, u_pos, v_pos, v_neg):
        batch_size = u_pos.size(0)
        positive_size = v_pos.size(1)
        negative_size = v_neg.size(1)

        embed_u = self.embedding(u_pos)
        embed_v = self.embedding(v_pos)

        score = torch.bmm(embed_v, embed_u.unsqueeze(2)).squeeze(-1)
        score = torch.sum(score, dim=1) / positive_size
        log_target = fnn.logsigmoid(score).squeeze()

        neg_embed_v = self.embedding(v_neg)

        neg_score = torch.bmm(neg_embed_v, embed_u.unsqueeze(2)).squeeze(-1)
        neg_score = torch.sum(neg_score, dim=1) / negative_size
        sum_log_sampled = fnn.logsigmoid(-1 * neg_score).squeeze()

        loss = log_target + sum_log_sampled

        return -1 * loss.sum() / batch_size 
Example #2
Source File: train.py    From bpr with MIT License
def forward(self, u, i, j):
        """Return loss value.
        
        Args:
            u(torch.LongTensor): tensor of user indices. [batch_size,]
            i(torch.LongTensor): tensor of item indices preferred by the user. [batch_size,]
            j(torch.LongTensor): tensor of item indices not preferred by the user. [batch_size,]
        
        Returns:
            torch.FloatTensor
        """
        u = self.W[u, :]
        i = self.H[i, :]
        j = self.H[j, :]
        x_ui = torch.mul(u, i).sum(dim=1)
        x_uj = torch.mul(u, j).sum(dim=1)
        x_uij = x_ui - x_uj
        log_prob = F.logsigmoid(x_uij).sum()
        regularization = self.weight_decay * (u.norm(dim=1).pow(2).sum() + i.norm(dim=1).pow(2).sum() + j.norm(dim=1).pow(2).sum())
        return -log_prob + regularization 
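For context, the BPR objective computed above can be reproduced standalone; in this minimal sketch the tensors W and H stand in for the user and item embedding tables (shapes and names are illustrative, not taken from the bpr repo):

import torch
import torch.nn.functional as F

n_users, n_items, dim = 100, 500, 16
W = torch.randn(n_users, dim, requires_grad=True)  # user factors
H = torch.randn(n_items, dim, requires_grad=True)  # item factors

u = torch.randint(0, n_users, (32,))  # users
i = torch.randint(0, n_items, (32,))  # preferred items
j = torch.randint(0, n_items, (32,))  # non-preferred items

x_uij = (W[u] * H[i]).sum(dim=1) - (W[u] * H[j]).sum(dim=1)
loss = -F.logsigmoid(x_uij).sum()  # maximize P(i preferred over j | u)
loss.backward()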
Example #3
Source File: epi_functional.py    From machina with MIT License
def compute_pseudo_rews(data, rew_giver, state_only=False):
    if isinstance(data, Traj):
        epis = data.current_epis
    else:
        epis = data

    for epi in epis:
        obs = torch.tensor(epi['obs'], dtype=torch.float, device=get_device())
        if state_only:
            logits, _ = rew_giver(obs)
        else:
            acs = torch.tensor(
                epi['acs'], dtype=torch.float, device=get_device())
            logits, _ = rew_giver(obs, acs)
        with torch.no_grad():
            rews = -F.logsigmoid(-logits).cpu().numpy()
        epi['real_rews'] = copy.deepcopy(epi['rews'])
        epi['rews'] = rews

    return data 
Example #4
Source File: dynamic_halters.py    From attn2d with MIT License
def step(self, x, n, total_computes=None, hard_decision=False, **kwargs):
        """
        n is the index of the previous block
        returns the binary decision, the halting signal and the logits
        """
        if self.detach_before_classifier:
            x = x.detach()

        # If adding an embedding of the total computes:
        if self.shift_block_input:
            computes_embed = F.embedding(total_computes, self.input_shifters)
            x = x + computes_embed
        x = self.halting_predictors[n if self.separate_halting_predictors else 0](x)
        if self.use_skewed_sigmoid:
            halt = F.logsigmoid(self.skewness * x)  # the log-p of halting
            halt_logits = torch.cat((halt, halt - self.skewness * x), dim=-1)  # log-p of halting v. computing
        else:
            halt = F.logsigmoid(x)  # the log-p of halting
            halt_logits = torch.cat((halt, halt-x), dim=-1)  # log-p of halting v. computing
        if hard_decision:
            halt = torch.exp(halt.squeeze(-1))
            return halt.ge(self.thresholds[n])
        return halt_logits  # T, B, 2 
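The halt - x term above relies on the identity log(1 - sigmoid(x)) = logsigmoid(x) - x, which follows from 1 - sigmoid(x) = sigmoid(-x). A quick standalone check:

import torch
import torch.nn.functional as F

x = torch.randn(5)
lhs = torch.log(1 - torch.sigmoid(x))
rhs = F.logsigmoid(x) - x
print(torch.allclose(lhs, rhs, atol=1e-6))  # True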
Example #5
Source File: model.py    From pytorch-nlp with MIT License
def forward(self, pos_u, pos_v, neg_u, neg_v):
        losses = []
        emb_u = []
        for i in range(len(pos_u)):
            emb_ui = self.u_embeddings(Variable(torch.LongTensor(pos_u[i])))
            emb_u.append(np.sum(emb_ui.data.numpy(), axis=0).tolist())
        emb_u = Variable(torch.FloatTensor(emb_u))
        emb_v = self.v_embeddings(Variable(torch.LongTensor(pos_v)))
        score = torch.mul(emb_u, emb_v)
        score = torch.sum(score, dim=1)
        score = F.logsigmoid(score)
        losses.append(sum(score))

        neg_emb_u = []
        for i in range(len(neg_u)):
            neg_emb_ui = self.u_embeddings(Variable(torch.LongTensor(neg_u[i])))
            neg_emb_u.append(np.sum(neg_emb_ui.data.numpy(), axis=0).tolist())
        neg_emb_u = Variable(torch.FloatTensor(neg_emb_u))
        neg_emb_v = self.v_embeddings(Variable(torch.LongTensor(neg_v)))
        neg_score = torch.mul(neg_emb_u, neg_emb_v)
        neg_score = torch.sum(neg_score, dim=1)
        neg_score = F.logsigmoid(-1 * neg_score)
        losses.append(sum(neg_score))

        return -1 * sum(losses) 
Example #6
Source File: utils.py    From madminer with MIT License
def get_activation_function(activation):
    if activation == "relu":
        return torch.relu
    elif activation == "tanh":
        return torch.tanh
    elif activation == "sigmoid":
        return torch.sigmoid
    elif activation == "lrelu":
        return F.leaky_relu
    elif activation == "rrelu":
        return torch.rrelu
    elif activation == "prelu":
        return torch.prelu
    elif activation == "elu":
        return F.elu
    elif activation == "selu":
        return torch.selu
    elif activation == "log_sigmoid":
        return F.logsigmoid
    elif activation == "softplus":
        return F.softplus
    else:
        raise ValueError("Activation function %s unknown" % activation)
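A short usage sketch (assuming the function above is in scope):

import torch

act = get_activation_function("log_sigmoid")
print(act(torch.zeros(3)))  # tensor([-0.6931, -0.6931, -0.6931])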
Example #7
Source File: trainer.py    From pykg2vec with MIT License
def train_step_pairwise(self, pos_h, pos_r, pos_t, neg_h, neg_r, neg_t):
        pos_preds = self.model(pos_h, pos_r, pos_t)
        neg_preds = self.model(neg_h, neg_r, neg_t)

        if self.config.sampling == 'adversarial_negative_sampling':
            # RotatE: Adversarial Negative Sampling and alpha is the temperature.
            pos_preds = -pos_preds
            neg_preds = -neg_preds
            pos_preds = F.logsigmoid(pos_preds)
            neg_preds = neg_preds.view((-1, self.config.neg_rate))
            softmax = nn.Softmax(dim=1)(neg_preds*self.config.alpha).detach()
            neg_preds = torch.sum(softmax * (F.logsigmoid(-neg_preds)), dim=-1)
            loss = -neg_preds.mean() - pos_preds.mean()
        else:
            # others that use margin-based & pairwise loss function. (uniform or bern)
            loss = pos_preds + self.config.margin - neg_preds
            loss = torch.max(loss, torch.zeros_like(loss)).sum()
            
        if hasattr(self.model, 'get_reg'):
            # now only NTN uses regularizer,
            # other pairwise based KGE methods use normalization to regularize parameters.
            loss += self.model.get_reg()

        return loss 
Example #8
Source File: model.py    From dgl with Apache License 2.0
def forward(self, pos_u, pos_v, neg_v):
        ''' Forward pass. Returns the positive and negative parts of the loss separately. '''
        emb_u = self.u_embeddings(pos_u)
        emb_v = self.v_embeddings(pos_v)
        emb_neg_v = self.v_embeddings(neg_v)

        score = torch.sum(torch.mul(emb_u, emb_v), dim=1)
        score = torch.clamp(score, max=6, min=-6)
        score = -F.logsigmoid(score)

        neg_score = torch.bmm(emb_neg_v, emb_u.unsqueeze(2)).squeeze()
        neg_score = torch.clamp(neg_score, max=6, min=-6)
        neg_score = -torch.sum(F.logsigmoid(-neg_score), dim=1)

        #return torch.mean(score + neg_score)
        return torch.sum(score), torch.sum(neg_score) 
Example #9
Source File: model.py    From word2vec_pytorch with MIT License
def forward(self, pos_u, pos_v, neg_v):
        """Forward process.

        As PyTorch expects batched input, each argument is a list of word ids.

        Args:
            pos_u: list of center word ids for positive word pairs.
            pos_v: list of neighbor word ids for positive word pairs.
            neg_v: list of neighbor word ids for negative word pairs.

        Returns:
            Loss of this process, a pytorch variable.
        """
        emb_u = self.u_embeddings(pos_u)
        emb_v = self.v_embeddings(pos_v)
        score = torch.mul(emb_u, emb_v).squeeze()
        score = torch.sum(score, dim=1)
        score = F.logsigmoid(score)
        neg_emb_v = self.v_embeddings(neg_v)
        neg_score = torch.bmm(neg_emb_v, emb_u.unsqueeze(2)).squeeze()
        neg_score = F.logsigmoid(-1 * neg_score)
        return -1 * (torch.sum(score)+torch.sum(neg_score)) 
Example #10
Source File: conf_nll_loss.py    From DenseMatchingBenchmark with MIT License
def loss_per_level(self, estConf, gtDisp):
        N, C, H, W = estConf.shape
        scaled_gtDisp = gtDisp
        scale = 1.0
        if gtDisp.shape[-2] != H or gtDisp.shape[-1] != W:
            # compute scale per level and scale gtDisp
            scale = gtDisp.shape[-1] / (W * 1.0)
            scaled_gtDisp = gtDisp / scale
            scaled_gtDisp = self.scale_func(scaled_gtDisp, (H, W))

        # mask for valid disparity
        # gt zero and lt max disparity
        mask = (scaled_gtDisp > self.start_disp) & (scaled_gtDisp < (self.max_disp / scale))
        mask = mask.detach_().type_as(gtDisp)

        # NLL loss
        valid_pixel_number = mask.float().sum()
        if valid_pixel_number < 1.0:
            valid_pixel_number = 1.0
        loss = (-1.0 * F.logsigmoid(estConf) * mask).sum() / valid_pixel_number

        return loss 
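Note that -F.logsigmoid(estConf) equals binary cross-entropy with an all-ones target, i.e. the per-pixel negative log-likelihood of being confident. A standalone equivalence check (not from the repo):

import torch
import torch.nn.functional as F

logits = torch.randn(2, 1, 4, 4)
nll = -F.logsigmoid(logits)
bce = F.binary_cross_entropy_with_logits(
    logits, torch.ones_like(logits), reduction='none')
print(torch.allclose(nll, bce, atol=1e-6))  # True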
Example #11
Source File: losses.py    From segmentation-networks-benchmark with MIT License
def forward(self, outputs: Tensor, targets: Tensor):

        # log(pt): logsigmoid of the signed logits gives the log-probability
        # of the target class in a numerically stable way
        logpt = F.logsigmoid(outputs * (2 * targets.float() - 1))
        pt = torch.exp(logpt)

        # compute the loss
        loss = -((1 - pt).pow(self.gamma)) * logpt

        # averaging (or not) loss
        if self.size_average:
            return loss.mean()
        else:
            return loss.sum() 
Example #12
Source File: loss.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def forward(self, pred, target):
        zt = BF.logits_distribution(pred, target, self.classes)
        return BF.logits_nll_loss(- F.logsigmoid(zt),
                                  target, self.weight, self.reduction) 
Example #13
Source File: model.py    From pytorch_word2vec with MIT License
def forwards(self, pos_u, pos_v, neg_u, neg_v):
        losses = []
        emb_v = []
        for i in range(len(pos_v)):
            emb_v_v = self.u_embeddings(Variable(torch.LongTensor(pos_v[i])))
            emb_v_v_numpy = emb_v_v.data.numpy()
            emb_v_v_numpy = np.sum(emb_v_v_numpy, axis=0)
            emb_v_v_list = emb_v_v_numpy.tolist()
            emb_v.append(emb_v_v_list)
        emb_v = Variable(torch.FloatTensor(emb_v))
        emb_u = self.v_embeddings(Variable(torch.LongTensor(pos_u)))
        score = torch.mul(emb_u, emb_v)
        score = torch.sum(score, dim=1)
        score = F.logsigmoid(score)
        losses.append(sum(score))

        neg_emb_v = []
        for i in range(len(neg_v)):
            neg_emb_v_v = self.u_embeddings(Variable(torch.LongTensor(neg_v[i])))
            neg_emb_v_v_numpy = neg_emb_v_v.data.numpy()
            neg_emb_v_v_numpy = np.sum(neg_emb_v_v_numpy, axis=0)
            neg_emb_v_v_list = neg_emb_v_v_numpy.tolist()
            neg_emb_v.append(neg_emb_v_v_list)
        neg_emb_v = Variable(torch.FloatTensor(neg_emb_v))

        neg_emb_u = self.v_embeddings(Variable(torch.LongTensor(neg_u)))
        neg_score = torch.mul(neg_emb_u, neg_emb_v)
        neg_score = torch.sum(neg_score, dim=1)
        neg_score = F.logsigmoid(-1 * neg_score)
        losses.append(sum(neg_score))
        return -1 * sum(losses) 
Example #14
Source File: log_sigmoid.py    From onnx2keras with MIT License
def forward(self, x):
        from torch.nn import functional as F
        return F.logsigmoid(x) 
Example #15
Source File: test_pyprof_nvtx.py    From apex with BSD 3-Clause "New" or "Revised" License
def test_logsigmoid(self):
        inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
        output = F.logsigmoid(inp) 
Example #16
Source File: dist.py    From beta-tcvae with MIT License
def sample(self, size=None, params=None):
        presigm_ps = self._check_inputs(size, params)
        logp = F.logsigmoid(presigm_ps)
        logq = F.logsigmoid(-presigm_ps)
        l = self._sample_logistic(logp.size()).type_as(presigm_ps)
        z = logp - logq + l
        b = STHeaviside.apply(z)
        return b if self.stgradient else b.detach() 
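The sample above is the logistic reparameterization of a Bernoulli: logp - logq is the logit, logistic noise is added, and a straight-through Heaviside binarizes the result. Assuming _sample_logistic draws standard logistic noise (this sketch is hypothetical, not the repo's code), it could look like:

import torch

def _sample_logistic(shape, eps=1e-8):
    # Standard logistic noise via the inverse CDF of Uniform(0, 1).
    u = torch.rand(shape)
    return torch.log(u + eps) - torch.log(1 - u + eps)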
Example #17
Source File: CBOW.py    From pytorch_word2vec with MIT License
def forward(self, pos_u, pos_v, neg_u, neg_v):
        losses = []
        emb_v = []
        for i in range(len(pos_v)):
            emb_v_v = self.u_embeddings(Variable(torch.LongTensor(pos_v[i])))
            emb_v_v_numpy = emb_v_v.data.numpy()
            emb_v_v_numpy = np.sum(emb_v_v_numpy, axis=0)
            emb_v_v_list = emb_v_v_numpy.tolist()
            emb_v.append(emb_v_v_list)
        emb_v = Variable(torch.FloatTensor(emb_v))
        emb_u = self.v_embeddings(Variable(torch.LongTensor(pos_u)))
        score = torch.mul(emb_u, emb_v)
        score = torch.sum(score, dim=1)
        score = F.logsigmoid(score)
        losses.append(sum(score))

        neg_emb_v = []
        for i in range(len(neg_v)):
            neg_emb_v_v = self.u_embeddings(Variable(torch.LongTensor(neg_v[i])))
            neg_emb_v_v_numpy = neg_emb_v_v.data.numpy()
            neg_emb_v_v_numpy = np.sum(neg_emb_v_v_numpy, axis=0)
            neg_emb_v_v_list = neg_emb_v_v_numpy.tolist()
            neg_emb_v.append(neg_emb_v_v_list)
        neg_emb_v = Variable(torch.FloatTensor(neg_emb_v))

        neg_emb_u = self.v_embeddings(Variable(torch.LongTensor(neg_u)))
        neg_score = torch.mul(neg_emb_u, neg_emb_v)
        neg_score = torch.sum(neg_score, dim=1)
        neg_score = F.logsigmoid(-1 * neg_score)
        losses.append(sum(neg_score))
        return -1 * sum(losses) 
Example #18
Source File: hmm_controls3.py    From attn2d with MIT License
def predict_read_write(self, x):
        """ Returns log(rho), log(1-rho) in B, Tt, Ts, 2 """
        if self.detach:
            x = self.gate(x.detach())
        else:
            x = self.gate(x)
        s = F.logsigmoid(x)
        return torch.cat((s, s-x), dim=-1).float() 
Example #19
Source File: hmm_controls.py    From attn2d with MIT License
def predict_read_write(self, x):
        """ Returns log(rho), log(1-rho) in B, Tt, Ts, 2 """
        if self.detach:
            x = self.gate(x.detach())
        else:
            x = self.gate(x)
        s = F.logsigmoid(x)
        return torch.cat((s, s-x), dim=-1).float() 
Example #20
Source File: model_blocks.py    From AtlasNet with MIT License
def get_activation(argument):
    getter = {
        "relu": F.relu,
        "sigmoid": F.sigmoid,
        "softplus": F.softplus,
        "logsigmoid": F.logsigmoid,
        "softsign": F.softsign,
        "tanh": F.tanh,
    }
    activation = getter.get(argument)
    if activation is None:
        raise ValueError("Invalid activation: %s" % argument)
    return activation
Example #21
Source File: loss_functional.py    From machina with MIT License
def cross_ent(discrim, batch, expert_or_agent, ent_beta):
    obs = batch['obs']
    acs = batch['acs']
    batch_size = obs.shape[0]
    logits, _ = discrim(obs, acs)
    discrim_loss = F.binary_cross_entropy_with_logits(
        logits, torch.ones(batch_size, device=get_device())*expert_or_agent)
    ent = (1 - torch.sigmoid(logits))*logits - F.logsigmoid(logits)
    discrim_loss -= ent_beta * torch.mean(ent)
    return discrim_loss 
Example #22
Source File: model.py    From pytorch-nlp with MIT License
def forwards(self, pos_u, pos_v, neg_u, neg_v):
        losses = []
        emb_v = []
        for i in range(len(pos_v)):
            emb_v_v = self.u_embeddings(Variable(torch.LongTensor(pos_v[i])))
            emb_v_v_numpy = emb_v_v.data.numpy()
            emb_v_v_numpy = np.sum(emb_v_v_numpy, axis=0)
            emb_v_v_list = emb_v_v_numpy.tolist()
            emb_v.append(emb_v_v_list)
        emb_v = Variable(torch.FloatTensor(emb_v))
        emb_u = self.v_embeddings(Variable(torch.LongTensor(pos_u)))
        score = torch.mul(emb_u, emb_v)
        score = torch.sum(score, dim=1)
        score = F.logsigmoid(score)
        losses.append(sum(score))

        neg_emb_v = []
        for i in range(len(neg_v)):
            neg_emb_v_v = self.u_embeddings(Variable(torch.LongTensor(neg_v[i])))
            neg_emb_v_v_numpy = neg_emb_v_v.data.numpy()
            neg_emb_v_v_numpy = np.sum(neg_emb_v_v_numpy, axis=0)
            neg_emb_v_v_list = neg_emb_v_v_numpy.tolist()
            neg_emb_v.append(neg_emb_v_v_list)
        neg_emb_v = Variable(torch.FloatTensor(neg_emb_v))

        neg_emb_u = self.v_embeddings(Variable(torch.LongTensor(neg_u)))
        neg_score = torch.mul(neg_emb_u, neg_emb_v)
        neg_score = torch.sum(neg_score, dim=1)
        neg_score = F.logsigmoid(-1 * neg_score)
        losses.append(sum(neg_score))
        return -1 * sum(losses) 
Example #23
Source File: focal_loss.py    From fvcore with Apache License 2.0
def sigmoid_focal_loss_star(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    alpha: float = -1,
    gamma: float = 1,
    reduction: str = "none",
) -> torch.Tensor:
    """
    FL* described in RetinaNet paper Appendix: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs
                (0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
                positive vs negative examples. Default = -1 (no weighting).
        gamma: Gamma parameter described in FL*. Default = 1 (no weighting).
        reduction: 'none' | 'mean' | 'sum'
                 'none': No reduction will be applied to the output.
                 'mean': The output will be averaged.
                 'sum': The output will be summed.
    Returns:
        Loss tensor with the reduction option applied.
    """
    shifted_inputs = gamma * (inputs * (2 * targets - 1))
    loss = -(F.logsigmoid(shifted_inputs)) / gamma

    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss *= alpha_t

    if reduction == "mean":
        loss = loss.mean()
    elif reduction == "sum":
        loss = loss.sum()

    return loss 
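The key line is the shifted logit: for targets in {0, 1}, inputs * (2 * targets - 1) flips the sign for negatives, so F.logsigmoid yields log(pt) for every element. In particular, with gamma=1 and alpha=-1 the loss reduces to plain BCE-with-logits, which a short sketch can verify (assuming the function above is in scope):

import torch
import torch.nn.functional as F

inputs = torch.randn(8)
targets = (torch.rand(8) > 0.5).float()
loss = sigmoid_focal_loss_star(inputs, targets, gamma=1, reduction="mean")
print(torch.allclose(
    loss, F.binary_cross_entropy_with_logits(inputs, targets)))  # True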
Example #24
Source File: model.py    From pytorch-nlp with MIT License
def forward(self, pos_u, pos_v, neg_u, neg_v):
        losses = []
        emb_u = self.u_embeddings(Variable(torch.LongTensor(pos_u)))
        emb_v = self.v_embeddings(Variable(torch.LongTensor(pos_v)))
        score = torch.mul(emb_u, emb_v)
        score = torch.sum(score, dim=1)
        score = F.logsigmoid(score)
        losses.append(sum(score))
        neg_emb_u = self.u_embeddings(Variable(torch.LongTensor(neg_u)))
        neg_emb_v = self.v_embeddings(Variable(torch.LongTensor(neg_v)))
        neg_score = torch.mul(neg_emb_u, neg_emb_v)
        neg_score = torch.sum(neg_score, dim=1)
        neg_score = F.logsigmoid(-1 * neg_score)
        losses.append(sum(neg_score))
        return -1 * sum(losses) 
Example #25
Source File: Metapath2vec.py    From OpenHINE with MIT License
def forward(self, pos_u, pos_v, neg_v):
        emb_u = self.u_embeddings(pos_u)
        emb_v = self.v_embeddings(pos_v)
        emb_neg_v = self.v_embeddings(neg_v)

        score = torch.sum(torch.mul(emb_u, emb_v), dim=1)
        score = torch.clamp(score, max=10, min=-10)
        score = -F.logsigmoid(score)

        neg_score = torch.bmm(emb_neg_v, emb_u.unsqueeze(2)).squeeze()
        neg_score = torch.clamp(neg_score, max=10, min=-10)
        neg_score = -torch.sum(F.logsigmoid(-neg_score), dim=1)

        return torch.mean(score + neg_score) 
Example #26
Source File: loss_factory.py    From kaggle-hpa with BSD 2-Clause "Simplified" License
def binary_focal_loss(gamma=2, **_):
    def func(input, target):
        assert target.size() == input.size()

        max_val = (-input).clamp(min=0)

        loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
        invprobs = F.logsigmoid(-input * (target * 2 - 1))
        loss = (invprobs * gamma).exp() * loss
        return loss.mean()

    return func 
Example #27
Source File: distributions.py    From integer_discrete_flows with MIT License
def log_logistic(x, mean, logscale):
    """
       pdf = sigma([x - mean] / scale) * [1 - sigma(...)] * 1/scale
    """
    scale = torch.exp(logscale)

    u = (x - mean) / scale

    logp = F.logsigmoid(u) + F.logsigmoid(-u) - logscale

    return logp 
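This works because the logistic density at u factorizes as sigma(u) * (1 - sigma(u)), so its log is logsigmoid(u) + logsigmoid(-u); dividing by the scale subtracts logscale in log space. A quick standalone check of the factorization:

import torch
import torch.nn.functional as F

u = torch.randn(5)
lhs = F.logsigmoid(u) + F.logsigmoid(-u)
rhs = torch.log(torch.sigmoid(u) * (1 - torch.sigmoid(u)))
print(torch.allclose(lhs, rhs, atol=1e-6))  # True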
Example #28
Source File: modeling.py    From VLP with Apache License 2.0
def forward(self, pair_x, pair_y, pair_r, pair_pos_neg_mask):
        # (batch, num_pair, hidden)
        xy = self.R_xy(pair_x, pair_y)
        r = self.rel_emb(pair_r)
        _batch, _num_pair, _hidden = xy.size()
        pair_score = (xy * r).sum(-1)
        # torch.bmm(xy.view(-1, 1, _hidden),r.view(-1, _hidden, 1)).view(_batch, _num_pair)
        # .mul_(-1.0): negate the objective to turn the score into a loss
        return F.logsigmoid(pair_score * pair_pos_neg_mask.type_as(pair_score)).mul_(-1.0) 
Example #29
Source File: distributions.py    From integer_discrete_flows with MIT License
def log_discretized_logistic(x, mean, logscale, inverse_bin_width):
    scale = torch.exp(logscale)

    logp = log_min_exp(
        F.logsigmoid((x + 0.5 / inverse_bin_width - mean) / scale),
        F.logsigmoid((x - 0.5 / inverse_bin_width - mean) / scale))

    return logp
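The log_min_exp helper comes from the same file; a sketch consistent with its use here computes log(exp(a) - exp(b)) stably for a >= b (the epsilon guard is an assumption about the original implementation):

import torch

def log_min_exp(a, b, epsilon=1e-8):
    # log(exp(a) - exp(b)) = a + log(1 - exp(b - a)), stable when a >= b
    return a + torch.log(1 - torch.exp(b - a) + epsilon)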