Python torch.nn.functional.binary_cross_entropy() Examples

The following are 30 code examples of torch.nn.functional.binary_cross_entropy(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other functions and classes available in torch.nn.functional.
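Before the project examples, here is a minimal, self-contained usage sketch (not taken from any of the projects below): the first argument must already be probabilities in [0, 1], the target must be a float tensor of the same shape, and reduction selects how the element-wise losses are combined.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 3)                        # raw model outputs
probs = torch.sigmoid(logits)                     # squash to [0, 1] before calling BCE
targets = torch.randint(0, 2, (4, 3)).float()     # float targets, values 0. or 1.

loss_mean = F.binary_cross_entropy(probs, targets)                    # default reduction='mean'
loss_sum = F.binary_cross_entropy(probs, targets, reduction='sum')    # replaces the deprecated size_average=False
per_elem = F.binary_cross_entropy(probs, targets, reduction='none')   # element-wise losses, same shape as probs

# For raw logits, binary_cross_entropy_with_logits fuses the sigmoid and is
# numerically more stable than sigmoid followed by binary_cross_entropy.
loss_logits = F.binary_cross_entropy_with_logits(logits, targets)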
Example #1
Source File: model.py    From augmented_cyclegan with MIT License
def criterion_GAN(pred, target_is_real, use_sigmoid=True):
    if use_sigmoid:
        if target_is_real:
            # binary_cross_entropy expects a float target, so fill a float tensor with 1s/0s
            target_var = Variable(pred.data.new(pred.size()).fill_(1.))
        else:
            target_var = Variable(pred.data.new(pred.size()).fill_(0.))

        loss = F.binary_cross_entropy(pred, target_var)
    else:
        if target_is_real:
            target_var = Variable(pred.data.new(pred.size()).fill_(1.))
        else:
            target_var = Variable(pred.data.new(pred.size()).fill_(0.))

        loss = F.mse_loss(pred, target_var)

    return loss 
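A usage sketch for criterion_GAN (the tensor shape is illustrative): with use_sigmoid=True the discriminator output must already be a probability; with use_sigmoid=False the raw score is compared against the target with MSE instead.

pred = torch.sigmoid(torch.randn(8, 1))                                    # discriminator output as probabilities
loss_real = criterion_GAN(pred, target_is_real=True)                       # BCE against an all-ones target
loss_fake = criterion_GAN(pred, target_is_real=False, use_sigmoid=False)   # MSE against an all-zeros target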
Example #2
Source File: mrcnn.py    From medicaldetectiontoolkit with Apache License 2.0
def compute_mrcnn_mask_loss(target_masks, pred_masks, target_class_ids):
    """
    :param target_masks: (n_sampled_rois, y, x, (z)) A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    :param pred_masks: (n_sampled_rois, n_classes, y, x, (z)) float32 tensor with values between [0, 1].
    :param target_class_ids: (n_sampled_rois)
    :return: loss: torch 1D tensor.
    """
    if 0 not in torch.nonzero(target_class_ids > 0).size():
        # Only positive ROIs contribute to the loss. And only
        # the class specific mask of each ROI.
        positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]
        positive_class_ids = target_class_ids[positive_ix].long()
        y_true = target_masks[positive_ix, :, :].detach()
        y_pred = pred_masks[positive_ix, positive_class_ids, :, :]
        loss = F.binary_cross_entropy(y_pred, y_true)
    else:
        loss = torch.FloatTensor([0]).cuda()

    return loss


############################################################
#  Helper Layers
############################################################ 
Example #3
Source File: ufrcnn.py    From medicaldetectiontoolkit with Apache License 2.0
def compute_mrcnn_mask_loss(target_masks, pred_masks, target_class_ids):
    """
    :param target_masks: (n_sampled_rois, y, x, (z)) A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    :param pred_masks: (n_sampled_rois, n_classes, y, x, (z)) float32 tensor with values between [0, 1].
    :param target_class_ids: (n_sampled_rois)
    :return: loss: torch 1D tensor.
    """
    if 0 not in torch.nonzero(target_class_ids > 0).size():
        # Only positive ROIs contribute to the loss. And only
        # the class specific mask of each ROI.
        positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]
        positive_class_ids = target_class_ids[positive_ix].long()
        y_true = target_masks[positive_ix, :, :].detach()
        y_pred = pred_masks[positive_ix, positive_class_ids, :, :]
        loss = F.binary_cross_entropy(y_pred, y_true)
    else:
        loss = torch.FloatTensor([0]).cuda()

    return loss


############################################################
#  Helper Layers
############################################################ 
Example #4
Source File: v8_validation.py    From Attentive-Filtering-Network with MIT License
def compute_loss(model, device, data_loader):
    model.eval()
    loss = 0
    scores  = {}

    with torch.no_grad():
        for id_list, X1, X2, target in data_loader:
            X1, X2, target = X1.to(device), X2.to(device), target.to(device)
            target = target.view(-1,1).float()
            y = model(X1, X2)
            loss += F.binary_cross_entropy(y, target, size_average=False)
            for i,id in enumerate(id_list):
                scores[id] = y[i].data.cpu().numpy()

    loss /= len(data_loader.dataset) # average loss

    return loss, scores 
Example #5
Source File: v4_validation.py    From Attentive-Filtering-Network with MIT License
def compute_loss(model, device, data_loader, rnn):
    model.eval()
    loss = 0
    scores  = {}

    with torch.no_grad():
        for id_list, data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            if rnn == True:
                model.hidden = model.init_hidden(data.size()[0]) # clear out the hidden state of the LSTM
            output = model(data) 
            loss += F.binary_cross_entropy(output, target, size_average=False)
            for i,id in enumerate(id_list):
                scores[id] = output[i].data.cpu().numpy()

    loss /= len(data_loader.dataset) # average loss

    return loss, scores 
Example #6
Source File: v1_training.py    From Attentive-Filtering-Network with MIT License
def train(args, model, device, train_loader, optimizer, epoch, rnn=False):
    model.train()
    for batch_idx, (_, data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        target = target.view(-1,1).float()
        optimizer.zero_grad()
        if rnn == True:
            model.hidden = model.init_hidden(data.size()[0]) # clear out the hidden state of the LSTM
        output = model(data)
        loss = F.binary_cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item())) 
Example #7
Source File: v2_validation.py    From Attentive-Filtering-Network with MIT License
def compute_loss(model, device, data_loader, threshold=0.5):
    model.eval()
    loss = 0
    correct = 0
    scores  = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            #output, hidden = model(data, None)
            output = model(data)
            loss += F.binary_cross_entropy(output, target, size_average=False)
            pred = output > threshold
            correct += pred.byte().eq(target.byte()).sum().item() # not really meaningful

            scores.append(output.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per frame

    return loss, scores, correct 
Example #8
Source File: v7_validation.py    From Attentive-Filtering-Network with MIT License
def compute_loss(model, device, data_loader):
    model.eval()
    loss = 0
    scores  = []

    with torch.no_grad():
        for X1, X2, target in data_loader:
            X1, X2, target = X1.to(device), X2.to(device), target.to(device)
            target = target.view(-1,1).float()
            y = model(X1, X2)
            loss += F.binary_cross_entropy(y, target, size_average=False)
            scores.append(y.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per utterance

    return loss, scores 
Example #9
Source File: free_anchor_retina_head.py    From mmdetection with Apache License 2.0
def negative_bag_loss(self, cls_prob, box_prob):
        """Compute negative bag loss.

        :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`.

        :math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples.

        :math:`P_{j}^{bg}`: Classification probability of negative samples.

        Args:
            cls_prob (Tensor): Classification probability, in shape
                (num_img, num_anchors, num_classes).
            box_prob (Tensor): Box probability, in shape
                (num_img, num_anchors, num_classes).

        Returns:
            Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
        """  # noqa: E501, W605
        prob = cls_prob * (1 - box_prob)
        negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
            prob, torch.zeros_like(prob), reduction='none')
        return (1 - self.alpha) * negative_bag_loss 
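The focal-style weighting above can be reproduced standalone; a minimal sketch with illustrative gamma and alpha values (they are not the config defaults):

import torch
import torch.nn.functional as F

gamma, alpha = 2.0, 0.5                           # illustrative hyper-parameters
cls_prob = torch.rand(2, 8, 4)                    # (num_img, num_anchors, num_classes)
box_prob = torch.rand(2, 8, 4)

prob = cls_prob * (1 - box_prob)
bce = F.binary_cross_entropy(prob, torch.zeros_like(prob), reduction='none')
negative_bag_loss = (1 - alpha) * prob**gamma * bce    # same shape as prob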
Example #10
Source File: v3_validation.py    From Attentive-Filtering-Network with MIT License
def compute_loss(model, device, data_loader):
    model.eval()
    loss = 0
    correct = 0
    scores  = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            #output, hidden = model(data, None)
            output = model(data)
            loss += F.binary_cross_entropy(output, target, size_average=False)

            scores.append(output.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per frame

    return loss, scores 
Example #11
Source File: model.py    From commonsense-rc with MIT License
def train(self, train_data):
        self.network.train()
        self.updates = 0
        iter_cnt, num_iter = 0, (len(train_data) + self.batch_size - 1) // self.batch_size
        for batch_input in self._iter_data(train_data):
            feed_input = [x for x in batch_input[:-1]]
            y = batch_input[-1]
            pred_proba = self.network(*feed_input)

            loss = F.binary_cross_entropy(pred_proba, y)
            self.optimizer.zero_grad()
            loss.backward()

            torch.nn.utils.clip_grad_norm(self.network.parameters(), self.args.grad_clipping)

            # Update parameters
            self.optimizer.step()
            self.network.embedding.weight.data[self.finetune_topk:] = self.network.fixed_embedding
            self.updates += 1
            iter_cnt += 1

            if self.updates % 20 == 0:
                print('Iter: %d/%d, Loss: %f' % (iter_cnt, num_iter, loss.data[0]))
        self.scheduler.step()
        print('LR:', self.scheduler.get_lr()[0]) 
Example #12
Source File: LossFunctions.py    From GMVAE with MIT License
def reconstruction_loss(self, real, predicted, rec_type='mse' ):
      """Reconstruction loss between the true and predicted outputs
         mse = (1/n)*Σ(real - predicted)^2
         bce = (1/n) * -Σ(real*log(predicted) + (1 - real)*log(1 - predicted))

      Args:
          real: (array) corresponding array containing the true labels
          predicted: (array) corresponding array containing the predicted labels
 
      Returns:
          output: (array/float) depending on average parameters the result will be the mean
                                of all the sample losses or an array with the losses per sample
      """
      if rec_type == 'mse':
        loss = (real - predicted).pow(2)
      elif rec_type == 'bce':
        loss = F.binary_cross_entropy(predicted, real, reduction='none')
      else:
        raise ValueError("invalid loss function... try bce or mse...")
      return loss.sum(-1).mean() 
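Both branches can be written standalone, which makes the docstring formulas concrete; a sketch with random tensors as stand-ins for the real data:

import torch
import torch.nn.functional as F

real = torch.rand(5, 10)                                  # targets in [0, 1]
predicted = torch.rand(5, 10)                             # predictions in [0, 1]
mse = (real - predicted).pow(2).sum(-1).mean()            # per-sample sum, then batch mean
bce = F.binary_cross_entropy(predicted, real, reduction='none').sum(-1).mean()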
Example #13
Source File: trainer.py    From pykg2vec with MIT License
def train_step_projection(self, h, r, t, hr_t, tr_h):
        if self.model.model_name.lower() == "conve" or self.model.model_name.lower() == "tucker":
            if hasattr(self.config, 'label_smoothing'):
                hr_t = hr_t * (1.0 - self.config.label_smoothing) + 1.0 / self.config.tot_entity
                tr_h = tr_h * (1.0 - self.config.label_smoothing) + 1.0 / self.config.tot_entity

            pred_tails = self.model(h, r, direction="tail")  # (h, r) -> hr_t forward
            pred_heads = self.model(t, r, direction="head")  # (t, r) -> tr_h backward

            loss_tails = torch.mean(F.binary_cross_entropy(pred_tails, hr_t))
            loss_heads = torch.mean(F.binary_cross_entropy(pred_heads, tr_h))

            loss = loss_tails + loss_heads

        else:
            loss_tails = self.model(h, r, hr_t, direction="tail")  # (h, r) -> hr_t forward
            loss_heads = self.model(t, r, tr_h, direction="head")  # (t, r) -> tr_h backward

            loss = loss_tails + loss_heads

            if hasattr(self.model, 'get_reg'):
                # now only complex distmult uses regularizer in algorithms,
                loss += self.model.get_reg()

        return loss 
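The label-smoothing step can be checked with a small numeric example (the label_smoothing and tot_entity values here are illustrative, not the config defaults):

import torch

label_smoothing, tot_entity = 0.1, 100
hr_t = torch.tensor([0., 1., 0.])                         # one row of the multi-hot target
smoothed = hr_t * (1.0 - label_smoothing) + 1.0 / tot_entity
# smoothed -> tensor([0.0100, 0.9100, 0.0100]): positives are pulled below 1 and
# negatives above 0, softening the targets fed to binary_cross_entropy.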
Example #14
Source File: train.py    From torch-light with MIT License
def dev(i):
    mention_pair_score.eval()
    total_loss = corrects = recall = ground_truth = 0

    for doc in tqdm(train_and_test_data.test_docs, mininterval=1, desc='pre-Dev Processing', leave=False):
        with torch.no_grad():
            scores, labels = mention_pair_score(doc, corpus["word2idx"])
            loss = binary_cross_entropy(scores, labels, reduction='mean')
            total_loss += loss.data.item()
            predict = scores.gt(0.5).float()
            corrects += (predict*labels).sum().item()
            recall += predict.sum().item()
            ground_truth += labels.sum().item()

    f1 = 2*corrects/(recall+ground_truth)
    print(f"dev epoch {i+1}/{args.epochs} loss: {total_loss/len(train_and_test_data.test_docs):.4f} corrects: {corrects} recall: {recall} ground_truth: {ground_truth} f1: {f1:.4f}")
    return f1 
Example #15
Source File: models.py    From D-VAE with MIT License
def loss(self, mu, logvar, G_true, beta=0.005):
        # g_true: [batch_size * max_n-1 * xs]
        z = self.reparameterize(mu, logvar)
        type_scores, edge_scores = self._decode(z)
        res = 0
        true_types = torch.LongTensor([[g_true.vs[v_true]['type'] if v_true < g_true.vcount() 
                                      else self.START_TYPE for v_true in range(1, self.max_n)] 
                                      for g_true in G_true]).to(self.get_device())
        res += F.cross_entropy(type_scores.transpose(1, 2), true_types, reduction='sum')
        true_edges = torch.FloatTensor([np.pad(np.array(g_true.get_adjacency().data).transpose()[1:, :-1],
                                        ((0, self.max_n-g_true.vcount()), (0, self.max_n-g_true.vcount())),
                                        mode='constant', constant_values=(0, 0))
                                       for g_true in G_true]).to(self.get_device())
        res += F.binary_cross_entropy(edge_scores, true_edges, reduction='sum')
        kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        return res + beta*kld, res, kld 
Example #16
Source File: train.py    From torch-light with MIT License
def train(i):
    mention_pair_score.train()
    total_loss = corrects = recall = ground_truth = 0
    for doc in tqdm(train_and_test_data.train_docs, mininterval=1, desc='pre-Train Processing', leave=False):
        optimizer.zero_grad()
        scores, labels = mention_pair_score(doc, corpus["word2idx"])
        loss = binary_cross_entropy(scores, labels, reduction='mean')
        loss.backward()
        optimizer.step()
        total_loss += loss.data.item()
        predict = scores.gt(0.5).float()
        corrects += (predict*labels).sum().item()
        recall += predict.sum().item()
        ground_truth += labels.sum().item()

    f1 = 2*corrects/(recall+ground_truth)
    print(f"train epoch {i+1}/{args.epochs} loss: {total_loss/100:.4f} corrects: {corrects} recall: {recall} ground_truth: {ground_truth} f1: {f1:.4f}") 
Example #17
Source File: trainer.py    From AMNRE with MIT License
def fool_dis(self,wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh):
        if dis_lambda==0:
            return
        self.model.D.eval()
        self.model.share_encoder.train()
        x,y=self.get_dis_xy(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
        pred=self.model.D(x)
        loss=F.binary_cross_entropy(pred,1-y)
        loss=dis_lambda*loss
        if (loss!=loss).data.any():
            print("NaN Loss (fooling discriminator)")
            exit()
        self.encoder_optim.zero_grad()
        loss.backward()
        self.encoder_optim.step()
        return loss.data[0] 
Example #18
Source File: trainer.py    From AMNRE with MIT License
def fool_dis(self,wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh):
        if dis_lambda==0:
            return
        self.model.D.eval()
        self.model.share_encoder.train()
        x,y=self.get_dis_xy(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
        pred=self.model.D(x)
        loss=F.binary_cross_entropy(pred,1-y)
        loss=dis_lambda*loss
        if (loss!=loss).data.any():
            print("NaN Loss (fooling discriminator)")
            exit()
        self.encoder_optim.zero_grad()
        loss.backward()
        self.encoder_optim.step()
        return loss.data[0] 
Example #19
Source File: main.py    From ArtificialIntelligenceEngines with MIT License
def loss_function(recon_x, x, mu, logvar):
    # next 2 lines are equivalent
    BCE = -F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    #BCE = -F.binary_cross_entropy(recon_x, x.view(-1, 784), size_average=False) # deprecated
    # for binary_cross_entropy, see https://pytorch.org/docs/stable/nn.html
    
    # KLD is Kullback–Leibler divergence -- how much does one learned
    # distribution deviate from another, in this specific case the
    # learned distribution from the unit Gaussian
    
    # see Appendix B from VAE paper:
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = 0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    
    # JVS: Kingma's repo = https://github.com/dpkingma/examples/blob/master/vae/main.py
    # BCE tries to make our reconstruction as accurate as possible
    # KLD tries to push the distributions as close as possible to unit Gaussian
    
    ELBO = BCE + KLD
    loss = -ELBO
    return loss 
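A sketch of the tensor shapes loss_function expects, assuming the usual 28x28 MNIST setup with a 20-dimensional latent space (these shapes are an assumption, not shown above):

recon_x = torch.rand(16, 784)        # decoder output after a sigmoid, values in [0, 1]
x = torch.rand(16, 1, 28, 28)        # input batch; flattened to (16, 784) inside loss_function
mu = torch.zeros(16, 20)             # encoder means
logvar = torch.zeros(16, 20)         # encoder log-variances (all zeros makes KLD = 0)
loss = loss_function(recon_x, x, mu, logvar)    # scalar tensor, equal to -ELBO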
Example #20
Source File: model.py    From GraphRNN with MIT License
def binary_cross_entropy_weight(y_pred, y,has_weight=False, weight_length=1, weight_max=10):
    '''

    :param y_pred:
    :param y:
    :param weight_length: number of positions at the end of the sequence that receive extra weight
    :param weight_max: maximum magnitude of the extra weight
    :return:
    '''
    if has_weight:
        weight = torch.ones(y.size(0),y.size(1),y.size(2))
        weight_linear = torch.arange(1,weight_length+1)/weight_length*weight_max
        weight_linear = weight_linear.view(1,weight_length,1).repeat(y.size(0),1,y.size(2))
        weight[:,-1*weight_length:,:] = weight_linear
        loss = F.binary_cross_entropy(y_pred, y, weight=weight.cuda())
    else:
        loss = F.binary_cross_entropy(y_pred, y)
    return loss 
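A usage sketch for binary_cross_entropy_weight (shapes are illustrative; the has_weight=True branch moves the weight tensor with .cuda(), so it needs a GPU):

y_pred = torch.rand(2, 16, 8)                     # (batch, seq_len, features), probabilities
y = torch.randint(0, 2, (2, 16, 8)).float()       # binary targets
loss = binary_cross_entropy_weight(y_pred, y)     # unweighted branch, plain BCE on CPU
# loss = binary_cross_entropy_weight(y_pred, y, has_weight=True, weight_length=4)  # weighted branch, CUDA only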
Example #21
Source File: model.py    From graph-generation with MIT License
def binary_cross_entropy_weight(y_pred, y,has_weight=False, weight_length=1, weight_max=10):
    '''

    :param y_pred:
    :param y:
    :param weight_length: number of positions at the end of the sequence that receive extra weight
    :param weight_max: maximum magnitude of the extra weight
    :return:
    '''
    if has_weight:
        weight = torch.ones(y.size(0),y.size(1),y.size(2))
        weight_linear = torch.arange(1,weight_length+1)/weight_length*weight_max
        weight_linear = weight_linear.view(1,weight_length,1).repeat(y.size(0),1,y.size(2))
        weight[:,-1*weight_length:,:] = weight_linear
        loss = F.binary_cross_entropy(y_pred, y, weight=weight.cuda())
    else:
        loss = F.binary_cross_entropy(y_pred, y)
    return loss 
Example #22
Source File: network.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def _add_losses(self, sigma_rpn=3.0): 

    # classification loss
    image_prob = self._predictions["image_prob"]
    
#    assert ((image_prob.data>=0).sum()+(image_prob.data<=1).sum())==image_prob.data.size(1)*2, image_prob
#    assert ((self._labels.data>=0).sum()+(self._labels.data<=1).sum())==self._labels.data.size(1)*2, self._labels

    cross_entropy = F.binary_cross_entropy(image_prob.clamp(0,1),self._labels)
    
    fast_loss = self._add_losses_fast()
    self._losses['wsddn_loss'] = cross_entropy
    self._losses['fast_loss'] = fast_loss
    
    loss = cross_entropy + fast_loss
    self._losses['total_loss'] = loss
    
    for k in self._losses.keys():
      self._event_summaries[k] = self._losses[k]    
    return loss 
Example #23
Source File: nn_torch_models.py    From numpy-ml with GNU General Public License v3.0
def extract_grads(self, X, X_recon, t_mean, t_log_var):
        eps = np.finfo(float).eps
        X = torchify(X, requires_grad=False)
        X_recon = torchify(np.clip(X_recon, eps, 1 - eps))
        t_mean = torchify(t_mean)
        t_log_var = torchify(t_log_var)

        BCE = torch.sum(F.binary_cross_entropy(X_recon, X, reduction="none"), dim=1)

        # see Appendix B from VAE paper:
        # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
        # https://arxiv.org/abs/1312.6114
        # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        KLD = -0.5 * torch.sum(1 + t_log_var - t_mean.pow(2) - t_log_var.exp(), dim=1)

        loss = torch.mean(BCE + KLD)
        loss.backward()

        grads = {
            "loss": loss.detach().numpy(),
            "dX_recon": X_recon.grad.numpy(),
            "dt_mean": t_mean.grad.numpy(),
            "dt_log_var": t_log_var.grad.numpy(),
        }
        return grads 
Example #24
Source File: mrcnn.py    From RegRCNN with Apache License 2.0
def compute_mrcnn_mask_loss(pred_masks, target_masks, target_class_ids):
    """
    :param target_masks: (n_sampled_rois, y, x, (z)) A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    :param pred_masks: (n_sampled_rois, n_classes, y, x, (z)) float32 tensor with values between [0, 1].
    :param target_class_ids: (n_sampled_rois)
    :return: loss: torch 1D tensor.
    """
    #print("targ masks", target_masks.unique(return_counts=True))
    if not 0 in torch.nonzero(target_class_ids > 0).size():
        # Only positive ROIs contribute to the loss. And only
        # the class-specific mask of each ROI.
        positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]
        positive_class_ids = target_class_ids[positive_ix].long()
        y_true = target_masks[positive_ix, :, :].detach()
        y_pred = pred_masks[positive_ix, positive_class_ids, :, :]
        loss = F.binary_cross_entropy(y_pred, y_true)
    else:
        loss = torch.FloatTensor([0]).cuda()

    return loss 
Example #25
Source File: models.py    From gnn-model-explainer with Apache License 2.0
def loss(self, pred, label, type="softmax"):
        # softmax + CE
        if type == "softmax":
            return F.cross_entropy(pred, label, size_average=True)
        elif type == "margin":
            batch_size = pred.size()[0]
            label_onehot = torch.zeros(batch_size, self.label_dim).long().cuda()
            label_onehot.scatter_(1, label.view(-1, 1), 1)
            return torch.nn.MultiLabelMarginLoss()(pred, label_onehot)

        # return F.binary_cross_entropy(F.sigmoid(pred[:,0]), label.float()) 
Example #26
Source File: wikisql_models.py    From sqlova with Apache License 2.0
def Loss_wc(s_wc, g_wc):

    # Construct index matrix
    bS, max_h_len = s_wc.shape
    im = torch.zeros([bS, max_h_len]).to(device)
    for b, g_wc1 in enumerate(g_wc):
        for g_wc11 in g_wc1:
            im[b, g_wc11] = 1.0
    # Construct prob.
    p = F.sigmoid(s_wc)
    loss = F.binary_cross_entropy(p, im)

    return loss 
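A sketch of the inputs Loss_wc expects (the shapes, index values, and the device binding are assumptions for illustration; the project defines device globally):

device = torch.device('cpu')       # stand-in for the project's global device
s_wc = torch.randn(2, 5)           # (batch_size, max_header_length) raw where-column scores
g_wc = [[0, 2], [4]]               # gold where-column indices for each example
loss = Loss_wc(s_wc, g_wc)         # multi-hot targets built from g_wc, then sigmoid + BCE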
Example #27
Source File: NPR.py    From nispat with GNU General Public License v3.0
def np_loss(y_hat, y_hat_84, y, z_all, z_context):
    #PBL = pinball_loss(y, y_hat, 0.05)
    BCE = F.binary_cross_entropy(torch.squeeze(y_hat), torch.mean(y,dim=1), reduction="sum")
    idx1 = (y >= y_hat_84).squeeze()
    idx2 = (y < y_hat_84).squeeze()
    BCE84 = 0.84 * F.binary_cross_entropy(torch.squeeze(y_hat_84[idx1,:]), torch.mean(y[idx1,:],dim=1), reduction="sum") + \
            0.16 * F.binary_cross_entropy(torch.squeeze(y_hat_84[idx2,:]), torch.mean(y[idx2,:],dim=1), reduction="sum")
    KLD = kl_div_gaussians(z_all[0], z_all[1], z_context[0], z_context[1])
    return BCE + KLD + BCE84 
Example #28
Source File: encoders.py    From diffpool with MIT License
def loss(self, pred, label, type='softmax'):
        # softmax + CE
        if type == 'softmax':
            return F.cross_entropy(pred, label, reduction='mean')
        elif type == 'margin':
            batch_size = pred.size()[0]
            label_onehot = torch.zeros(batch_size, self.label_dim).long().cuda()
            label_onehot.scatter_(1, label.view(-1,1), 1)
            return torch.nn.MultiLabelMarginLoss()(pred, label_onehot)
            
        #return F.binary_cross_entropy(F.sigmoid(pred[:,0]), label.float()) 
Example #29
Source File: network.py    From PiCANet-Implementation with MIT License
def forward(self, *input):
        if len(input) == 2:
            x = input[0]
            tar = input[1]
            test_mode = False
        if len(input) == 3:
            x = input[0]
            tar = input[1]
            test_mode = input[2]
        if len(input) == 1:
            x = input[0]
            tar = None
            test_mode = True
        en_out = self.encoder(x)
        dec = None
        pred = []
        for i in range(6):
            # print(En_out[5 - i].size())
            dec, _pred = self.decoder[i](en_out[5 - i], dec)
            pred.append(_pred)
        loss = 0
        if not test_mode:
            for i in range(6):
                loss += F.binary_cross_entropy(pred[5 - i], tar) * self.cfg['loss_ratio'][5 - i]
                # print(float(loss))
                if tar.size()[2] > 28:
                    tar = F.max_pool2d(tar, 2, 2)
        return pred, loss 
Example #30
Source File: model.py    From GraphRNN with MIT License
def adj_recon_loss(self, adj_truth, adj_pred):
        # binary_cross_entropy takes the prediction first and the target second
        return F.binary_cross_entropy(adj_pred, adj_truth)