Python torch.nn.NLLLoss2d() Examples

The following are 30 code examples of torch.nn.NLLLoss2d(), collected from open-source projects. You can go to the original project or source file by following the links above each example, or browse the other available functions and classes of the torch.nn module. Note that nn.NLLLoss2d was deprecated in PyTorch 0.4 in favor of nn.NLLLoss, which accepts higher-dimensional inputs directly; several examples below (#9, #16, #27) already reflect that migration.
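Before the examples, here is a minimal usage sketch (tensor names and sizes are illustrative, not taken from any project below). The input is an (N, C, H, W) tensor of per-pixel log-probabilities and the target is an (N, H, W) tensor of class indices:

import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(4, 3, 8, 8)            # (N, C, H, W) raw scores
log_probs = F.log_softmax(logits, dim=1)    # log-probabilities over the C classes
target = torch.randint(0, 3, (4, 8, 8))     # (N, H, W) labels in [0, C)

loss_old = nn.NLLLoss2d()(log_probs, target)  # deprecated alias; may warn or be absent on recent releases
loss_new = nn.NLLLoss()(log_probs, target)    # equivalent on 4-D input since PyTorch 0.4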
Example #1
Source File: focal_loss.py    From doom-net-pytorch with MIT License
def test():
    loss_nll = nn.NLLLoss2d()
    # with gamma=0 the focal modulating factor (1 - p)**0 is 1,
    # so the focal loss should reproduce the plain NLL loss below
    loss_focal = FocalLoss(gamma=0)
    # target: (N, H, W) integer class labels in [0, 3)
    target = torch.randint(0, 3, (2, 1, 5))

    # input: (N, C, H, W) raw scores with C = 3 classes
    data = torch.rand(2, 3, 1, 5)
    input1 = data.clone().requires_grad_(True)
    loss1 = loss_nll(F.log_softmax(input1, dim=1), target)
    loss1.backward()
    print(loss1)
    print(input1.grad)

    input2 = data.clone().requires_grad_(True)
    loss2 = loss_focal(F.log_softmax(input2, dim=1), target)
    loss2.backward()
    print(loss2)
    print(input2.grad)


#test() 
Example #2
Source File: misc.py    From ECN with Apache License 2.0
def __init__(self, weight=None, size_average=True, ignore_index=255):
        super(CrossEntropyLoss2d, self).__init__()
        self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index) 
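This wrapper forwards ignore_index=255 to the loss. As a hedged illustration with toy tensors (not from the ECN code), pixels labeled with the ignore value contribute neither to the loss nor to its gradient:

import torch
import torch.nn as nn
import torch.nn.functional as F

log_probs = F.log_softmax(torch.randn(1, 3, 2, 2), dim=1)
target = torch.tensor([[[0, 255], [2, 255]]])            # 255 marks void pixels

loss = nn.NLLLoss(ignore_index=255)(log_probs, target)   # averaged over the two valid pixels only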
Example #3
Source File: utils.py    From GCC-SFCN with MIT License
def __init__(self, weight=None, size_average=True):
        super(CrossEntropyLoss2d, self).__init__()
        self.nll_loss = nn.NLLLoss2d(weight, size_average) 
Example #4
Source File: Criteria.py    From YNet with MIT License
def __init__(self, weight=None):
        super().__init__()

        self.loss = nn.NLLLoss2d(weight) 
Example #5
Source File: Criteria.py    From YNet with MIT License
def __init__(self, weight=None):
        super().__init__()

        self.loss = nn.NLLLoss2d(weight) 
Example #6
Source File: mask_losses.py    From neurips18_hierchical_image_manipulation with MIT License
def __init__(self, use_nll=True): 
        super(MaskReconLoss, self).__init__()
        assert use_nll
        self.criterion = nn.NLLLoss2d(ignore_index=IGNORE_INDEX) 
Example #7
Source File: utils.py    From dlcv_for_beginners with BSD 3-Clause "New" or "Revised" License
def __init__(self, weights, size_average=True):
        super(MSCrossEntropyLoss2D, self).__init__()
        self.nll_loss_2d = nn.NLLLoss2d(size_average=size_average)
        self.weights = weights 
Example #8
Source File: utils.py    From dlcv_for_beginners with BSD 3-Clause "New" or "Revised" License
def __init__(self, size_average=True):
        super(CrossEntropyLoss2D, self).__init__()
        self.nll_loss_2d = nn.NLLLoss2d(size_average=size_average) 
Example #9
Source File: loss.py    From CGNet with MIT License
def __init__(self, weight=None, ignore_label=255):
        '''
        :param weight: 1D weight vector to deal with the class-imbalance
        Obtaining log-probabilities in a neural network is easily achieved by adding a LogSoftmax layer as the last layer of your network.
        You may use CrossEntropyLoss instead, if you prefer not to add an extra layer.
        '''
        super().__init__()

        # self.loss = nn.NLLLoss2d(weight, ignore_index=255)
        self.loss = nn.NLLLoss(weight, ignore_index=ignore_label)
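The docstring's suggestion is easy to check: NLLLoss on log_softmax outputs matches CrossEntropyLoss on raw scores. A minimal sketch with illustrative shapes:

import torch
import torch.nn as nn
import torch.nn.functional as F

scores = torch.randn(2, 4, 6, 6)             # (N, C, H, W) raw network outputs
target = torch.randint(0, 4, (2, 6, 6))      # (N, H, W) labels

a = nn.NLLLoss()(F.log_softmax(scores, dim=1), target)
b = nn.CrossEntropyLoss()(scores, target)
assert torch.allclose(a, b)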
Example #10
Source File: loss.py    From MCD_DA with MIT License
def __init__(self, weight=None, size_average=True):
        super(CrossEntropyLoss2d, self).__init__()
        self.nll_loss = nn.NLLLoss2d(weight, size_average) 
Example #11
Source File: loss.py    From Efficient-Segmentation-Networks with MIT License
def forward(self, output, target):
        """
        Forward pass
        :param output: torch.Tensor of log-probabilities, (N, C) or (N, C, H, W)
        :param target: torch.Tensor of class indices, (N) or (N, H, W)
        :return: scalar loss
        """
        return self.nll_loss(output, target)


# class CrossEntropyLoss2d(nn.Module):
#     '''
#     This file defines a cross entropy loss for 2D images
#     '''
#
#     def __init__(self, weight=None, ignore_label=255):
#         '''
#         :param weight: 1D weight vector to deal with the class-imbalance
#         Obtaining log-probabilities in a neural network is easily achieved by adding a LogSoftmax layer in the last layer of your network.
#         You may use CrossEntropyLoss instead, if you prefer not to add an extra layer.
#         '''
#         super().__init__()
#
#         # self.loss = nn.NLLLoss2d(weight, ignore_index=255)
#         self.loss = nn.NLLLoss(weight, ignore_index=ignore_label)
#
#     def forward(self, outputs, targets):
#         return self.loss(F.log_softmax(outputs, dim=1), targets) 
Example #12
Source File: nnBuildUnits.py    From medSynthesisV1 with MIT License
def __init__(self, weight=None, size_average=True):
        super(CrossEntropyLoss2d, self).__init__()
        self.nll_loss = nn.NLLLoss2d(weight, size_average) 
Example #13
Source File: Criteria.py    From ESPNet with MIT License
def __init__(self, weight=None):
        '''
        :param weight: 1D weight vector to deal with the class-imbalance
        '''
        super().__init__()

        self.loss = nn.NLLLoss2d(weight) 
Example #14
Source File: map_train.py    From doom-net-pytorch with MIT License
def test(model, data_loader):
    model.eval()

    epoch_loss_obj = 0
    epoch_loss_dist = 0
    epoch_accuracy_obj = 0
    epoch_accuracy_dist = 0
    batch = 0
    for batch, (screens, distances, objects) in enumerate(data_loader):
        screens, distances, objects = screens.to(device), distances.to(device), objects.to(device)

        pred_objects, pred_distances = model(screens)
        loss_obj = objects_criterion(pred_objects, objects)
        loss_dist = distances_criterion(pred_distances, distances)

        epoch_loss_obj += loss_obj.item()
        epoch_loss_dist += loss_dist.item()

        _, pred_objects = pred_objects.max(1)
        accuracy = (pred_objects == objects).float().mean()
        epoch_accuracy_obj += accuracy.item()

        _, pred_distances = pred_distances.max(1)
        accuracy = (pred_distances == distances).float().mean()
        epoch_accuracy_dist += accuracy.item()

    batch_num = batch + 1
    epoch_loss_obj /= batch_num
    epoch_loss_dist /= batch_num
    epoch_accuracy_obj /= batch_num
    epoch_accuracy_dist /= batch_num

    model.train()
    return (epoch_loss_obj, epoch_loss_dist), (epoch_accuracy_obj, epoch_accuracy_dist)

#objects_criterion = nn.NLLLoss2d()
#distances_criterion = nn.NLLLoss2d() 
Example #15
Source File: train_deeplab2D.py    From pytorch-mri-segmentation-3D with MIT License
def loss_calc(out, label, gpu0):
    """
    This function returns cross entropy loss for semantic segmentation
    """
    # out shape: batch_size x channels x h x w
    # label comes in as h x w x 1 x batch_size -> reshaped to batch_size x h x w
    label = label[:, :, 0, :].transpose(2, 0, 1)

    label = torch.from_numpy(label).long()
    if useGPU:
        label = Variable(label).cuda(gpu0)
        # huge weights up-weight the rare lesion classes
        if onlyLesions:
            criterion = nn.NLLLoss2d(weight=torch.cuda.FloatTensor([1, 100000]))
        else:
            criterion = nn.NLLLoss2d(weight=torch.cuda.FloatTensor([1, 100000, 100000]))
    else:
        label = Variable(label)

        if onlyLesions:
            criterion = nn.NLLLoss2d(weight=torch.FloatTensor([1, 100000]))
        else:
            criterion = nn.NLLLoss2d(weight=torch.FloatTensor([1, 100000, 100000]))

    m = nn.LogSoftmax(dim=1)
    out = m(out)

    return criterion(out, label)
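The extreme weights (1 vs. 100000) up-weight the rare lesion classes. As a hedged sketch of what the weight vector does (illustrative tensors, using nn.NLLLoss, which behaves identically here): each pixel's negative log-likelihood is multiplied by weight[target], and with averaging the total is normalized by the sum of the applied weights:

import torch
import torch.nn as nn
import torch.nn.functional as F

log_probs = F.log_softmax(torch.randn(1, 2, 4, 4), dim=1)
target = torch.randint(0, 2, (1, 4, 4))
w = torch.tensor([1.0, 100000.0])

loss = nn.NLLLoss(weight=w)(log_probs, target)

# manual equivalent: weighted mean of per-pixel negative log-likelihoods
picked = -log_probs.gather(1, target.unsqueeze(1)).squeeze(1)   # (N, H, W)
manual = (picked * w[target]).sum() / w[target].sum()
assert torch.allclose(loss, manual)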
Example #16
Source File: loss.py    From DABNet with MIT License
def __init__(self, weight=None, ignore_label=255):
        '''
        :param weight: 1D weight vector to deal with the class-imbalance
        Obtaining log-probabilities in a neural network is easily achieved by adding a LogSoftmax layer as the last layer of your network.
        You may use CrossEntropyLoss instead, if you prefer not to add an extra layer.
        '''
        super().__init__()

        # self.loss = nn.NLLLoss2d(weight, ignore_index=255)
        self.loss = nn.NLLLoss(weight, ignore_index=ignore_label) 
Example #17
Source File: utils.py    From visual-pushing-grasping with BSD 2-Clause "Simplified" License
def __init__(self, weight=None, size_average=True):
        super(CrossEntropyLoss2d, self).__init__()
        self.nll_loss = nn.NLLLoss2d(weight, size_average) 
Example #18
Source File: criterion.py    From binseg_pytoch with Apache License 2.0
def __init__(self, weights=None):
        super(CrossEntropyLoss2d, self).__init__()

        self.loss = nn.NLLLoss2d(weight=weights)
        self.loss.cuda() 
Example #19
Source File: losses.py    From DepthAwareCNN with MIT License
def __init__(self, gamma=2., weight=None, size_average=True, ignore_index=255):
        super(FocalLoss2d, self).__init__()
        self.gamma = gamma
        self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index) 
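Only the constructor appears in this snippet; DepthAwareCNN's forward is not shown. As a generic, hedged sketch of how a focal term typically combines with the NLL machinery (illustrative code, not the repository's implementation):

import torch
import torch.nn.functional as F

def focal_nll(inputs, targets, gamma=2.0):
    # inputs: (N, C, H, W) raw scores; targets: (N, H, W) class indices
    log_p = F.log_softmax(inputs, dim=1)
    log_pt = log_p.gather(1, targets.unsqueeze(1)).squeeze(1)   # per-pixel log p_t
    pt = log_pt.exp()
    return (-(1.0 - pt) ** gamma * log_pt).mean()               # gamma=0 recovers plain NLL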
Example #20
Source File: losses.py    From DepthAwareCNN with MIT License
def __init__(self, weight=None, size_average=False, ignore_index=255):
        super(CrossEntropyLoss2d, self).__init__()
        self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index) 
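Here size_average=False makes the loss sum the per-pixel terms rather than average them. The size_average/reduce keywords were later replaced by reduction=; a hedged sketch using the modern spelling:

import torch
import torch.nn.functional as F

log_probs = F.log_softmax(torch.randn(1, 3, 2, 2), dim=1)
target = torch.randint(0, 3, (1, 2, 2))

summed = F.nll_loss(log_probs, target, reduction='sum')    # old size_average=False
mean = F.nll_loss(log_probs, target, reduction='mean')     # old size_average=True
assert torch.allclose(summed / target.numel(), mean)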
Example #21
Source File: train.py    From pytorch-deeplab-resnet with MIT License
def loss_calc(out, label, gpu0):
    """
    This function returns cross entropy loss for semantic segmentation
    """
    # out shape: batch_size x channels x h x w
    # label comes in as h x w x 1 x batch_size -> reshaped to batch_size x h x w
    label = label[:, :, 0, :].transpose(2, 0, 1)
    label = torch.from_numpy(label).long()
    label = Variable(label).cuda(gpu0)
    m = nn.LogSoftmax(dim=1)
    criterion = nn.NLLLoss2d()
    out = m(out)

    return criterion(out, label)
Example #22
Source File: misc.py    From pytorch-semantic-segmentation with MIT License
def __init__(self, gamma=2, weight=None, size_average=True, ignore_index=255):
        super(FocalLoss2d, self).__init__()
        self.gamma = gamma
        self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index) 
Example #23
Source File: misc.py    From pytorch-semantic-segmentation with MIT License
def __init__(self, weight=None, size_average=True, ignore_index=255):
        super(CrossEntropyLoss2d, self).__init__()
        self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index) 
Example #24
Source File: criterion.py    From piwise with BSD 3-Clause "New" or "Revised" License
def __init__(self, weight=None):
        super().__init__()

        self.loss = nn.NLLLoss2d(weight) 
Example #25
Source File: criterion.py    From sscdnet with MIT License
def __init__(self, weight=None):
        super().__init__()
        self.loss = nn.NLLLoss2d(weight) 
Example #26
Source File: criterion.py    From pytorch-semantic-segmentation with MIT License
def __init__(self, weight=None):
        super().__init__()
        self.loss = nn.NLLLoss2d(weight)
Example #27
Source File: Criteria.py    From ext_portrait_segmentation with MIT License
def __init__(self, weight=None, ignore=None):
        '''
        :param weight: 1D weight vector to deal with the class-imbalance
        '''

        super().__init__()
        # NLLLoss2d was folded into NLLLoss in PyTorch 0.4; parse the version
        # robustly instead of indexing a single character of the version string
        major, minor = (int(v) for v in torch.__version__.split('.')[:2])
        if (major, minor) < (0, 4):
            self.loss = nn.NLLLoss2d(weight, ignore_index=ignore)
        else:
            self.loss = nn.NLLLoss(weight, ignore_index=ignore)
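Only the constructor is shown here; mirroring the commented-out class in Example #11, the forward that usually accompanies such a criterion applies log_softmax before the loss (an illustrative sketch, not necessarily this repository's exact code):

    def forward(self, outputs, targets):
        # outputs: (N, C, H, W) raw scores; targets: (N, H, W) labels
        return self.loss(F.log_softmax(outputs, dim=1), targets)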
Example #28
Source File: train_deeplab3D.py    From pytorch-mri-segmentation-3D with MIT License
def loss_calc(out, label, gpu0):
    """
    This function returns cross entropy loss for semantic segmentation
    """
    # out shape: batch_size x channels x h x w
    # label comes in as h x w x 1 x batch_size -> reshaped to batch_size x h x w
    label = label[:, :, 0, :].transpose(2, 0, 1)

    label = torch.from_numpy(label).long()
    if useGPU:
        label = Variable(label).cuda(gpu0)
        # huge weights up-weight the rare lesion classes
        if onlyLesions:
            criterion = nn.NLLLoss2d(weight=torch.cuda.FloatTensor([1, 100000]))
        else:
            criterion = nn.NLLLoss2d(weight=torch.cuda.FloatTensor([1, 100000, 100000]))
    else:
        label = Variable(label)

        if onlyLesions:
            criterion = nn.NLLLoss2d(weight=torch.FloatTensor([1, 100000]))
        else:
            criterion = nn.NLLLoss2d(weight=torch.FloatTensor([1, 100000, 100000]))

    m = nn.LogSoftmax(dim=1)
    out = m(out)

    return criterion(out, label)
Example #29
Source File: Deeplab.py    From DepthAwareCNN with MIT License
def __init__(self, opt, dataset=None, encoder='VGG'):
        BaseModel.initialize(self, opt)
        self.encoder = encoder
        if encoder == 'VGG':
            self.model = Deeplab_VGG(self.opt.label_nc, self.opt.depthconv)

        if self.opt.isTrain:
            self.criterionSeg = torch.nn.CrossEntropyLoss(ignore_index=255).cuda()
            # self.criterionSeg = torch.nn.CrossEntropyLoss(ignore_index=255).cuda()
            # self.criterionSeg = nn.NLLLoss2d(ignore_index=255)#.cuda()

            if encoder == 'VGG':
                self.optimizer = torch.optim.SGD([{'params': self.model.Scale.get_1x_lr_params_NOscale(), 'lr': self.opt.lr},
                                                 {'params': self.model.Scale.get_10x_lr_params(), 'lr': self.opt.lr},
                                                 {'params': self.model.Scale.get_2x_lr_params_NOscale(), 'lr': self.opt.lr, 'weight_decay': 0.},
                                                 {'params': self.model.Scale.get_20x_lr_params(), 'lr': self.opt.lr, 'weight_decay': 0.}
                                                  ],
                                                 lr=self.opt.lr, momentum=self.opt.momentum, weight_decay=self.opt.wd)

            # self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.opt.lr, momentum=self.opt.momentum, weight_decay=self.opt.wd)

            self.old_lr = self.opt.lr
            self.averageloss = []
            # copy scripts
            self.model_path = './models' #os.path.dirname(os.path.realpath(__file__))
            self.data_path = './data' #os.path.dirname(os.path.realpath(__file__))
            shutil.copyfile(os.path.join(self.model_path, 'Deeplab.py'), os.path.join(self.model_dir, 'Deeplab.py'))

            if encoder == 'VGG':
                shutil.copyfile(os.path.join(self.model_path, 'VGG_Deeplab.py'), os.path.join(self.model_dir, 'VGG_Deeplab.py'))
            shutil.copyfile(os.path.join(self.model_path, 'model_utils.py'), os.path.join(self.model_dir, 'model_utils.py'))
            shutil.copyfile(os.path.join(self.data_path, dataset.datafile), os.path.join(self.model_dir, dataset.datafile))
            shutil.copyfile(os.path.join(self.data_path, 'base_dataset.py'), os.path.join(self.model_dir, 'base_dataset.py'))

            self.writer = SummaryWriter(self.tensorborad_dir)
            self.counter = 0

        if not self.isTrain or self.opt.continue_train:
            if self.opt.pretrained_model!='':
                self.load_pretrained_network(self.model, self.opt.pretrained_model, self.opt.which_epoch, strict=False)
                print("Successfully loaded from pretrained model with given path!")
            else:
                self.load()
                print("Successfully loaded model, continue training....!")

        self.model.cuda()
        self.normweightgrad=0.
        # if len(opt.gpu_ids):#opt.isTrain and
        #     self.model = torch.nn.DataParallel(self.model, device_ids=opt.gpu_ids) 
Example #30
Source File: loss.py    From UMNN with BSD 3-Clause "New" or "Revised" License
def nll_loss(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True):
    r"""
    Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes
    instead of only (N, C, d_1, d_2) or (N, C).
    The negative log likelihood loss.
    See :class:`~torch.nn.NLLLoss` for details.
    Args:
        input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
            in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K > 1`
            in the case of K-dimensional loss.
        target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`,
            or :math:`(N, d_1, d_2, ..., d_K)` where :math:`K >= 1` for
            K-dimensional loss.
        weight (Tensor, optional): a manual rescaling weight given to each
            class. If given, has to be a Tensor of size `C`
        size_average (bool, optional): By default, the losses are averaged
            over observations for each minibatch. If size_average
            is False, the losses are summed for each minibatch. Default: ``True``
        ignore_index (int, optional): Specifies a target value that is ignored
            and does not contribute to the input gradient. When size_average is
            True, the loss is averaged over non-ignored targets. Default: -100
    """
    dim = input.dim()
    if dim == 2 or dim == 4:
        # shapes handled natively by F.nll_loss
        return F.nll_loss(
            input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce
        )
    elif dim == 3 or dim > 4:
        n = input.size(0)
        c = input.size(1)
        out_size = (n,) + input.size()[2:]
        if target.size()[1:] != input.size()[2:]:
            raise ValueError('Expected target size {}, got {}'.format(out_size, target.size()))
        # flatten the trailing spatial dims so the 2D kernels can be reused
        input = input.contiguous().view(n, c, 1, -1)
        target = target.contiguous().view(n, 1, -1)
        if reduce:
            _loss = nn.NLLLoss2d(weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce)
            return _loss(input, target)
        out = F.nll_loss(
            input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce
        )
        return out.view(out_size)
    else:
        raise ValueError('Expected 2 or more dimensions (got {})'.format(dim))
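A hedged usage sketch of this helper on a 5-D volumetric input (sizes are illustrative); with reduce=False it returns the per-element loss reshaped back to (N, d_1, d_2, d_3):

import torch
import torch.nn.functional as F

log_probs = F.log_softmax(torch.randn(2, 3, 4, 4, 4), dim=1)   # (N, C, d1, d2, d3)
target = torch.randint(0, 3, (2, 4, 4, 4))                     # (N, d1, d2, d3)

scalar = nll_loss(log_probs, target)                  # reduced (mean) loss
per_voxel = nll_loss(log_probs, target, reduce=False)
assert per_voxel.shape == (2, 4, 4, 4)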