Python torch.nn.MultiLabelMarginLoss() Examples

The following are 5 code examples of torch.nn.MultiLabelMarginLoss(). Each example is taken from an open-source project; the link above each example points to the original project or source file. You may also want to check out all available functions and classes of the module torch.nn.
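As a quick orientation (not taken from any of the projects below), torch.nn.MultiLabelMarginLoss takes scores of shape (N, C) and a LongTensor target of the same shape that lists each sample's gold class indices, padded with -1:

import torch
import torch.nn as nn

loss_fn = nn.MultiLabelMarginLoss()
x = torch.tensor([[0.1, 0.2, 0.4, 0.8]])   # scores for one sample, 4 classes
y = torch.tensor([[3, 0, -1, -1]])         # gold classes 3 and 0, padded with -1
print(loss_fn(x, y))                       # tensor(0.8500)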
Example #1
Source File: test_con.py    From SlowFast-Network-pytorch with MIT License
import torch
import torch.nn as nn
import torch.nn.functional as F


def test_grad():
    # A 3x3 input and a 3x4 weight matrix whose gradient we want to inspect.
    input = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    weight = torch.rand(3, 4, requires_grad=True)
    print(input, weight)
    pre = torch.mm(input, weight)  # predictions, shape (3, 4)

    loss2 = nn.MultiLabelMarginLoss()
    # multilabel_soft_margin_loss expects a float multi-hot target of the
    # same shape as the predictions.
    label1 = torch.tensor([[0, 1, 1, 0], [1, 0, 0, 0], [1, 0, 1, 1]], dtype=torch.float)
    # MultiLabelMarginLoss expects integer class indices padded with -1.
    label2 = torch.tensor([[1, 2, -1, -1], [0, -1, -1, -1], [0, 2, 3, -1]], dtype=torch.long)
    print(pre, label1)

    loss1 = F.multilabel_soft_margin_loss(pre, label1, reduction='sum')
    loss1.backward()
    print('weight.grad.data1:', weight.grad.data)

    # loss2 = loss2(pre, label2)
    # loss2.backward()
    # print('weight.grad.data2:', weight.grad.data)
Example #2
Source File: bamnet.py    From BAMnet with Apache License 2.0
def set_loss_margin(self, scores, gold_mask, margin):
        """Since the pytorch built-in MultiLabelMarginLoss fixes the margin as 1.
        We simply work around this annoying feature by *modifying* the golden scores.
        E.g., if we want margin as 3, we decrease each golden score by 3 - 1 before
        feeding it to the built-in loss.
        """
        new_scores = scores - (margin - 1) * gold_mask
        return new_scores 
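For context, here is a minimal sketch (not from the BAMnet source) of how the shifted scores are combined with the built-in loss. The shapes and values below are illustrative assumptions: gold_mask is 1.0 at gold answer positions, and the MultiLabelMarginLoss target lists gold class indices padded with -1.

import torch
import torch.nn as nn

scores = torch.randn(2, 5)                        # model scores, shape (N, C)
gold_mask = torch.tensor([[1., 0., 0., 1., 0.],   # 1.0 at gold answer positions
                          [0., 1., 0., 0., 0.]])
margin = 3.0
new_scores = scores - (margin - 1) * gold_mask    # same trick as set_loss_margin

target = torch.tensor([[0, 3, -1, -1, -1],        # gold class indices, padded with -1
                       [1, -1, -1, -1, -1]])
loss = nn.MultiLabelMarginLoss()(new_scores, target)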
Example #3
Source File: ensemble_nn4.py    From kaggle-human-protein-atlas-image-classification with Apache License 2.0
def eval_batch(data_all, logit_all, in_train=False):
        out_list = []
        for batch, logit in zip(grouper(data_all, bs), grouper(logit_all, bs)):
            batch = [b if isinstance(b, torch.Tensor) else torch.from_numpy(b) for b in batch if b is not None]
            logit = [b if isinstance(b, torch.Tensor) else torch.from_numpy(b) for b in logit if b is not None]
            out_batch = net(torch.stack(batch, dim=0).cuda(), torch.stack(logit, dim=0).cuda(), in_train)
            out_list.append(out_batch)
        out = torch.cat(out_list, dim=0)
        return out

    # loss_fn = MultiLabelMarginLoss()
    # loss_fn = FocalLoss()
    # loss_fn = BCELoss()
    # loss_fn = BCEWithLogitsLoss() 
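Note that the commented-out alternatives take different target formats: MultiLabelMarginLoss wants integer class indices padded with -1, while BCELoss/BCEWithLogitsLoss (and focal-loss variants built on them) want a float multi-hot mask. A small conversion sketch (illustrative, not from this repository):

import torch

multi_hot = torch.tensor([[0., 1., 1., 0.],      # float mask for BCEWithLogitsLoss
                          [1., 0., 0., 0.]])
# Convert to the index-list format expected by MultiLabelMarginLoss.
idx_target = torch.full(multi_hot.shape, -1, dtype=torch.long)
for row, mask in enumerate(multi_hot):
    idx = mask.nonzero(as_tuple=True)[0]
    idx_target[row, :len(idx)] = idx
# idx_target: [[1, 2, -1, -1], [0, -1, -1, -1]]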
Example #4
Source File: entnet.py    From BAMnet with Apache License 2.0
def __init__(self, opt):
        opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
        if opt['cuda']:
            print('[ Using CUDA ]')
            torch.cuda.set_device(opt['gpu'])
            # It enables benchmark mode in cudnn, which
            # leads to faster runtime when the input sizes do not vary.
            cudnn.benchmark = True

        self.opt = opt
        if self.opt['pre_word2vec']:
            pre_w2v = load_ndarray(self.opt['pre_word2vec'])
        else:
            pre_w2v = None

        self.ent_model = Entnet(opt['vocab_size'], opt['vocab_embed_size'], \
                opt['o_embed_size'], opt['hidden_size'], \
                opt['num_ent_types'], opt['num_relations'], \
                seq_enc_type=opt['seq_enc_type'], \
                word_emb_dropout=opt['word_emb_dropout'], \
                que_enc_dropout=opt['que_enc_dropout'], \
                ent_enc_dropout=opt['ent_enc_dropout'], \
                pre_w2v=pre_w2v, \
                num_hops=opt['num_ent_hops'], \
                att=opt['attention'], \
                use_cuda=opt['cuda'])
        if opt['cuda']:
            self.ent_model.cuda()

        self.loss_fn = MultiLabelMarginLoss()

        optim_params = [p for p in self.ent_model.parameters() if p.requires_grad]
        self.optimizers = {'entnet': optim.Adam(optim_params, lr=opt['learning_rate'])}
        self.scheduler = ReduceLROnPlateau(self.optimizers['entnet'], mode='min', \
                    patience=self.opt['valid_patience'] // 3, verbose=True)

        if opt.get('model_file') and os.path.isfile(opt['model_file']):
            print('Loading existing ent_model parameters from ' + opt['model_file'])
            self.load(opt['model_file'])
        else:
            self.save()
            self.load(opt['model_file'])
        super(EntnetAgent, self).__init__() 
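For reference, a typical update step wiring these pieces together might look like the sketch below. This is illustrative only; the actual training loop lives elsewhere in the BAMnet repository, and the `scores` and `targets` names are assumptions.

# scores:  (N, C) candidate scores produced by self.ent_model
# targets: (N, C) LongTensor of gold indices padded with -1
loss = self.loss_fn(scores, targets)
self.optimizers['entnet'].zero_grad()
loss.backward()
self.optimizers['entnet'].step()
# After each validation pass, step the scheduler on the validation loss:
# self.scheduler.step(valid_loss)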
Example #5
Source File: bamnet.py    From BAMnet with Apache License 2.0
def __init__(self, opt, ctx_stops, vocab2id):
        self.ctx_stops = ctx_stops
        self.vocab2id = vocab2id
        opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
        if opt['cuda']:
            print('[ Using CUDA ]')
            torch.cuda.set_device(opt['gpu'])
            # It enables benchmark mode in cudnn, which
            # leads to faster runtime when the input sizes do not vary.
            cudnn.benchmark = True

        self.opt = opt
        if self.opt['pre_word2vec']:
            pre_w2v = load_ndarray(self.opt['pre_word2vec'])
        else:
            pre_w2v = None

        self.model = BAMnet(opt['vocab_size'], opt['vocab_embed_size'], \
                opt['o_embed_size'], opt['hidden_size'], \
                opt['num_ent_types'], opt['num_relations'], \
                opt['num_query_words'], \
                word_emb_dropout=opt['word_emb_dropout'], \
                que_enc_dropout=opt['que_enc_dropout'], \
                ans_enc_dropout=opt['ans_enc_dropout'], \
                pre_w2v=pre_w2v, \
                num_hops=opt['num_hops'], \
                att=opt['attention'], \
                use_cuda=opt['cuda'])
        if opt['cuda']:
            self.model.cuda()

        # MultiLabelMarginLoss
        # For each sample in the mini-batch:
        # loss(x, y) = sum_ij(max(0, 1 - (x[y[j]] - x[i]))) / x.size(0)
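        # Worked example (illustrative, not part of BAMnet): with scores
        # x = [0.1, 0.2, 0.4, 0.8] and target y = [3, 0, -1, -1] (gold classes
        # 3 and 0, padded with -1), the hinge terms are
        #   j=3: max(0, 1-(0.8-0.2)) + max(0, 1-(0.8-0.4)) = 0.4 + 0.6
        #   j=0: max(0, 1-(0.1-0.2)) + max(0, 1-(0.1-0.4)) = 1.1 + 1.3
        # so the per-sample loss is (0.4 + 0.6 + 1.1 + 1.3) / 4 = 0.85.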
        self.loss_fn = MultiLabelMarginLoss()

        optim_params = [p for p in self.model.parameters() if p.requires_grad]
        self.optimizers = {'bamnet': optim.Adam(optim_params, lr=opt['learning_rate'])}
        self.scheduler = ReduceLROnPlateau(self.optimizers['bamnet'], mode='min', \
                    patience=self.opt['valid_patience'] // 3, verbose=True)

        if opt.get('model_file') and os.path.isfile(opt['model_file']):
            print('Loading existing model parameters from ' + opt['model_file'])
            self.load(opt['model_file'])
        super(BAMnetAgent, self).__init__()