Python torch.nn.functional.multilabel_soft_margin_loss() Examples

The following are 7 code examples of torch.nn.functional.multilabel_soft_margin_loss(), drawn from open-source projects. The originating project and source file are noted above each example. You may also want to check out all available functions/classes of the module torch.nn.functional, or try the search function.
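Before the project examples, a minimal self-contained sketch of what the function computes: it applies a per-class sigmoid to the raw scores and averages the resulting binary cross-entropy terms, so with the default 'mean' reductions it agrees numerically with binary_cross_entropy_with_logits. The shapes below are illustrative.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 5)                      # [batch, num_classes] raw scores
targets = torch.randint(0, 2, (4, 5)).float()   # multi-hot labels in {0, 1}

loss = F.multilabel_soft_margin_loss(logits, targets)

# per-class sigmoid + binary cross-entropy, averaged over every element,
# gives the same value under the default 'mean' reductions
reference = F.binary_cross_entropy_with_logits(logits, targets)
assert torch.allclose(loss, reference)
print(loss.item())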
Example #1
Source File: utils.py    From VQA2.0-Recent-Approachs-2018.pytorch with MIT License
import torch.nn.functional as F

def calculate_loss(answer, pred, method):
    """
    answer = [batch, 3129]
    pred = [batch, 3129]
    """
    if method == 'binary_cross_entropy_with_logits':
        # config is the project's settings module; max_answers rescales the mean loss
        loss = F.binary_cross_entropy_with_logits(pred, answer) * config.max_answers
    elif method == 'soft_cross_entropy':
        nll = -F.log_softmax(pred, dim=1)
        loss = (nll * answer).sum(dim=1).mean()   # this is worse than binary_cross_entropy_with_logits
    elif method == 'KL_divergence':
        # KL(answer || pred); the epsilons guard against division by zero and log(0)
        pred = F.softmax(pred, dim=1)
        kl = ((answer / (pred + 1e-12)) + 1e-12).log()
        loss = (kl * answer).sum(1).mean()
    elif method == 'multi_label_soft_margin':
        loss = F.multilabel_soft_margin_loss(pred, answer)
    else:
        raise ValueError('Unknown loss method: %s' % method)
    return loss
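A hypothetical invocation of the helper above; the 3129-way answer vocabulary and config.max_answers come from the project's configuration, so the sizes here are placeholders.

import torch

batch, num_answers = 8, 3129                    # placeholder sizes
pred = torch.randn(batch, num_answers)          # raw model scores
answer = torch.rand(batch, num_answers)         # soft VQA answer scores in [0, 1]

loss = calculate_loss(answer, pred, method='multi_label_soft_margin')
print(loss.item())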
Example #2
Source File: train_cls_ser.py    From SSENet-pytorch with MIT License
import torch
import torch.nn.functional as F

def validate(model, data_loader):
    print('\nvalidating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack[1]
            label = pack[2].cuda(non_blocking=True)
            # reshape labels to [N, C, 1, 1] to match the pooled feature map
            label = label.unsqueeze(2).unsqueeze(3)

            x = model(img)
            # global average pooling: [N, C, H, W] -> [N, C, 1, 1]
            x = F.adaptive_avg_pool2d(x, (1, 1))
            loss = F.multilabel_soft_margin_loss(x, label)

            val_loss_meter.add({'loss': loss.item()})

    model.train()

    print('loss:', val_loss_meter.pop('loss'))

    return 
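In this validate loop both the pooled activations and the labels end up with shape [N, C, 1, 1]. Because the loss averages the per-class binary terms before the final mean, this yields the same value as working with plain [N, C] tensors, as the following sketch (toy shapes, not the project's code) checks:

import torch
import torch.nn.functional as F

x = torch.randn(2, 20, 1, 1)                    # pooled logits for 20 classes
label = torch.randint(0, 2, (2, 20)).float()
label_4d = label.unsqueeze(2).unsqueeze(3)      # [2, 20, 1, 1], as in the loop above

loss_4d = F.multilabel_soft_margin_loss(x, label_4d)
loss_2d = F.multilabel_soft_margin_loss(x.view(2, 20), label)
assert torch.allclose(loss_4d, loss_2d)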
Example #3
Source File: train_cls.py    From psa with MIT License
import torch
import torch.nn.functional as F

def validate(model, data_loader):
    print('\nvalidating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack[1]
            label = pack[2].cuda(non_blocking=True)

            x = model(img)
            loss = F.multilabel_soft_margin_loss(x, label)

            val_loss_meter.add({'loss': loss.item()})

    model.train()

    print('loss:', val_loss_meter.pop('loss'))

    return 
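pyutils.AverageMeter in these validate loops is a project helper rather than part of PyTorch. A minimal stand-in, assuming the behavior the loops rely on (add accumulates one value per batch, pop returns the running mean and resets the key):

class AverageMeter:
    """Minimal stand-in for pyutils.AverageMeter (assumed behavior)."""

    def __init__(self, *keys):
        self._sums = {k: 0.0 for k in keys}
        self._counts = {k: 0 for k in keys}

    def add(self, values):
        # values: dict mapping key -> float for one batch
        for k, v in values.items():
            self._sums[k] += v
            self._counts[k] += 1

    def pop(self, key):
        # running mean of everything added since the last pop
        mean = self._sums[key] / max(self._counts[key], 1)
        self._sums[key], self._counts[key] = 0.0, 0
        return mean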
Example #4
Source File: train_cam.py    From irn with MIT License
import torch
import torch.nn.functional as F

def validate(model, data_loader):
    print('validating ... ', flush=True, end='')

    val_loss_meter = pyutils.AverageMeter('loss1', 'loss2')

    model.eval()

    with torch.no_grad():
        for pack in data_loader:
            img = pack['img']

            label = pack['label'].cuda(non_blocking=True)

            x = model(img)
            loss1 = F.multilabel_soft_margin_loss(x, label)

            val_loss_meter.add({'loss1': loss1.item()})

    model.train()

    print('loss: %.4f' % (val_loss_meter.pop('loss1')))

    return 
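Unlike Examples #2 and #3, this loop indexes batches by key. The default collate function in torch.utils.data yields such dict batches whenever the Dataset returns dicts; a toy illustration (all names made up):

import torch
from torch.utils.data import DataLoader, Dataset

class ToyDataset(Dataset):
    def __init__(self, n=8, num_classes=20):
        self.imgs = torch.randn(n, 3, 32, 32)
        self.labels = torch.randint(0, 2, (n, num_classes)).float()

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, i):
        # returning a dict makes the DataLoader yield dict batches
        return {'img': self.imgs[i], 'label': self.labels[i]}

for pack in DataLoader(ToyDataset(), batch_size=4):
    print(pack['img'].shape, pack['label'].shape)   # [4, 3, 32, 32] and [4, 20]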
Example #5
Source File: losses.py    From ultra-thin-PRM with MIT License
from typing import Optional

import torch.nn.functional as F
from torch import Tensor


def multilabel_soft_margin_loss(
        input: Tensor,
        target: Tensor,
        weight: Optional[Tensor] = None,
        size_average: bool = True,
        reduce: bool = True,
        difficult_samples: bool = False) -> Tensor:
    """Multilabel soft margin loss with optional handling of difficult samples.

    Note: ``size_average`` and ``reduce`` follow the deprecated pre-1.0
    PyTorch reduction API and are forwarded to the functional call unchanged.
    """

    if difficult_samples:
        # label 1: positive samples
        # label 0: difficult samples
        # label -1: negative samples
        gt_label = target.clone()
        gt_label[gt_label == 0] = 1
        gt_label[gt_label == -1] = 0
    else:
        gt_label = target
        
    return F.multilabel_soft_margin_loss(input, gt_label, weight, size_average, reduce) 
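The difficult_samples branch remaps a three-valued annotation scheme (1 = positive, 0 = difficult, -1 = negative) onto the {0, 1} targets the underlying loss expects: difficult samples are folded into the positives and negatives become 0. A toy trace of the remapping:

import torch

target = torch.tensor([[1., 0., -1., 1.]])   # positive, difficult, negative, positive
gt_label = target.clone()
gt_label[gt_label == 0] = 1                  # difficult samples count as positives
gt_label[gt_label == -1] = 0                 # negatives become the 0 class
print(gt_label)                              # tensor([[1., 1., 0., 1.]])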
Example #6
Source File: model.py    From SlowFast-Network-pytorch with MIT License
import torch
import torch.nn.functional as F
from torch import Tensor

def loss(self, proposal_classes: Tensor, gt_proposal_classes: Tensor,
         batch_size, batch_indices) -> Tensor:
    # one loss value per image in the batch
    cross_entropies = torch.zeros(batch_size, dtype=torch.float,
                                  device=proposal_classes.device)
    for batch_index in range(batch_size):
        # select the proposals that belong to this image
        selected_indices = (batch_indices == batch_index).nonzero().view(-1)
        input = proposal_classes[selected_indices]
        target = gt_proposal_classes[selected_indices]
        if torch.numel(input) == 0 or torch.numel(target) == 0:
            continue  # no proposals for this image
        assert torch.numel(input) == torch.numel(target)
        # multilabel_soft_margin_loss fuses the sigmoid into the loss, which is
        # more stable than binary_cross_entropy on sigmoid(input)
        cross_entropy = F.multilabel_soft_margin_loss(
            input=input, target=target, reduction="mean")
        cross_entropies[batch_index] = cross_entropy
    return cross_entropies
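A hypothetical call pattern for the method above: proposals from the whole batch are flattened into one tensor and batch_indices records which image each proposal came from, so the loop reduces to per-image means. All names and sizes below are illustrative.

import torch
import torch.nn.functional as F

proposal_classes = torch.randn(6, 4)                          # 6 proposals, 4 classes
gt_proposal_classes = torch.randint(0, 2, (6, 4)).float()     # multi-hot targets
batch_indices = torch.tensor([0, 0, 0, 1, 1, 1])              # image id per proposal

for b in range(2):
    sel = (batch_indices == b).nonzero().view(-1)
    loss_b = F.multilabel_soft_margin_loss(
        proposal_classes[sel], gt_proposal_classes[sel], reduction='mean')
    print(b, loss_b.item())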
Example #7
Source File: batch_metrics.py    From poutyne with GNU Lesser General Public License v3.0
import torch.nn.functional as F

def multilabel_soft_margin(y_pred, y_true):
    return F.multilabel_soft_margin_loss(y_pred, y_true)
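Poutyne treats any callable of (y_pred, y_true) as a batch metric, so the wrapper above can be tracked during training; a sketch, assuming the usual poutyne.Model constructor:

import torch.nn as nn
import torch.nn.functional as F
from poutyne import Model

network = nn.Linear(10, 5)   # toy 5-way multi-label classifier
model = Model(network, 'adam', F.binary_cross_entropy_with_logits,
              batch_metrics=[multilabel_soft_margin])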