Python torch.nn.MultiMarginLoss() Examples

The following are 8 code examples of torch.nn.MultiMarginLoss(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module torch.nn.
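As a quick orientation before the project examples: MultiMarginLoss is a multi-class hinge (margin) loss. It expects an (N, C) tensor of raw class scores and an (N,) tensor of target class indices. A minimal, self-contained sketch (the tensor values below are made up for illustration):

import torch
import torch.nn as nn

# Scores for a batch of 3 samples over 4 classes (arbitrary values).
scores = torch.tensor([[0.1, 0.8, 0.2, 0.4],
                       [0.9, 0.1, 0.3, 0.2],
                       [0.2, 0.3, 0.7, 0.1]])
targets = torch.tensor([1, 0, 2])  # correct class index per sample

# margin=0.5 penalizes any wrong class scored within 0.5 of the true class.
loss_fn = nn.MultiMarginLoss(margin=0.5)
print(loss_fn(scores, targets))  # scalar, averaged over the batch by default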
Example #1
Source File: test_gnn.py    From coling2018-graph-neural-networks-question-answering with Apache License 2.0
# Note: `wordembeddings`, `fackel`, and the model/encoder classes used below
# are fixtures and imports defined elsewhere in the project's test module.
def test_load_parameters():
    encoder = ConvWordsEncoder(*wordembeddings.shape)
    encoder.load_word_embeddings_from_numpy(wordembeddings)
    net = GNNModel(encoder, hp_dropout=0.2)
    criterion = nn.MultiMarginLoss(margin=0.5)

    container = fackel.TorchContainer(
        torch_model=net,
        batch_size=8,
        max_epochs=5,
        model_checkpoint=False,
        save_to_dir="../trainedmodels/",
        early_stopping=5,
        criterion=criterion,
        init_model_weights=True,
        lr_decay=2
    )
    container.save_model()
    container.reload_from_saved()
    assert container._model._gnn._prop_model._dropout.p == 0.2 
Example #2
Source File: test_gnn.py    From coling2018-graph-neural-networks-question-answering with Apache License 2.0
def test_ggnn():
    encoder = ConvWordsEncoder(*wordembeddings.shape)
    encoder.load_word_embeddings_from_numpy(wordembeddings)
    net = GNNModel(encoder)
    criterion = nn.MultiMarginLoss(margin=0.5)

    container = fackel.TorchContainer(
        torch_model=net,
        batch_size=8,
        max_epochs=5,
        model_checkpoint=False,
        early_stopping=5,
        criterion=criterion,
        init_model_weights=True,
        lr_decay=2
    )

    train_questions = V.encode_batch_questions(training_dataset, word2idx)[..., 0, :]
    train_graphs = V.encode_batch_graph_structure(training_dataset, word2idx)
    targets = np.zeros(len(training_dataset), dtype=np.int32)

    container.train(train=(train_questions, *train_graphs), train_targets=targets) 
Example #3
Source File: test_gnn.py    From coling2018-graph-neural-networks-question-answering with Apache License 2.0
def test_gnn():
    encoder = ConvWordsEncoder(*wordembeddings.shape)
    encoder.load_word_embeddings_from_numpy(wordembeddings)
    net = GNNModel(encoder, hp_gated=False)
    criterion = nn.MultiMarginLoss(margin=0.5)

    container = fackel.TorchContainer(
        torch_model=net,
        batch_size=8,
        max_epochs=5,
        model_checkpoint=False,
        early_stopping=5,
        criterion=criterion,
        init_model_weights=True,
        lr_decay=2
    )

    train_questions = V.encode_batch_questions(training_dataset, word2idx)[..., 0, :]
    train_graphs = V.encode_batch_graph_structure(training_dataset, word2idx)
    targets = np.zeros(len(training_dataset), dtype=np.int32)

    container.train(train=(train_questions, *train_graphs), train_targets=targets) 
Example #4
Source File: test_models.py    From coling2018-graph-neural-networks-question-answering with Apache License 2.0
def test_pool_edges_model():
    encoder = ConvWordsEncoder(*wordembeddings.shape)
    encoder.load_word_embeddings_from_numpy(wordembeddings)
    net = PooledEdgesModel(encoder)
    criterion = nn.MultiMarginLoss()

    container = fackel.TorchContainer(
        torch_model=net,
        batch_size=8,
        max_epochs=5,
        model_checkpoint=False,
        early_stopping=5,
        criterion=criterion
    )

    selected_questions = [s for s in training_dataset if any(scores[2] > 0.0 for g, scores in s.graphs)]
    targets = np.zeros(len(selected_questions), dtype=np.int32)
    for qi, q in enumerate(selected_questions):
        random.shuffle(q.graphs)
        # Index of the highest-scoring graph (argsort descending, take first).
        targets[qi] = np.argsort([g.scores[2] for g in q.graphs])[::-1][0]

    train_questions = V.encode_batch_questions(selected_questions, word2idx)[..., 0, :]
    train_edges = V.encode_batch_graphs(selected_questions, word2idx)[..., 0, :]

    container.train(train=(train_questions, train_edges), train_targets=targets) 
Example #5
Source File: linear.py    From macarico with MIT License
def set_loss(self, loss_fn):
    assert loss_fn in ['multinomial', 'hinge', 'squared', 'huber']
    if loss_fn == 'hinge':
        # size_average=False sums the per-sample losses rather than averaging
        # (spelled reduction='sum' in PyTorch >= 1.0).
        l = nn.MultiMarginLoss(size_average=False)
        self.loss_fn = lambda p, t, _: l(p, Varng(torch.LongTensor([t])))
    elif loss_fn == 'multinomial':
        l = nn.NLLLoss(size_average=False)
        self.loss_fn = lambda p, t, _: l(F.log_softmax(p.unsqueeze(0), dim=1), Varng(torch.LongTensor([t])))
    elif loss_fn in ['squared', 'huber']:
        l = (nn.MSELoss if loss_fn == 'squared' else nn.SmoothL1Loss)(size_average=False)
        self.loss_fn = lambda p, t, sa: self._compute_loss(l, p, 1 - truth_to_vec(t, torch.zeros(self.n_actions)), sa)
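Note: size_average=False comes from pre-1.0 PyTorch; on current releases the equivalent is reduction='sum'. A sketch of the hinge branch with the modern argument (shapes assumed from the code above):

import torch
import torch.nn as nn

l = nn.MultiMarginLoss(reduction='sum')   # modern spelling of size_average=False
p = torch.randn(5)                        # raw scores over 5 actions
t = 2                                     # index of the correct action
loss = l(p.unsqueeze(0), torch.LongTensor([t]))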
Example #6
Source File: trainer.py    From diora with Apache License 2.0
def forward(self, sentences, neg_samples, diora, info):
    batch_size, length = sentences.shape
    input_size = self.embeddings.weight.shape[1]
    size = diora.outside_h.shape[-1]
    k = self.k_neg

    emb_pos = self.embeddings(sentences)
    emb_neg = self.embeddings(neg_samples)

    # Calculate scores.

    ## The predicted vector.
    cell = diora.outside_h[:, :length].view(batch_size, length, 1, -1)

    ## The projected samples.
    proj_pos = torch.matmul(emb_pos, torch.t(self.mat))
    proj_neg = torch.matmul(emb_neg, torch.t(self.mat))

    ## The score.
    xp = torch.einsum('abc,abxc->abx', proj_pos, cell)
    xn = torch.einsum('ec,abxc->abe', proj_neg, cell)
    score = torch.cat([xp, xn], 2)

    # Calculate loss. The positive score sits in column 0 of `score`, so the
    # target class for every position is index 0.
    lossfn = nn.MultiMarginLoss(margin=self.margin)
    inputs = score.view(batch_size * length, k + 1)
    device = torch.cuda.current_device() if self._cuda else None
    outputs = torch.full((inputs.shape[0],), 0, dtype=torch.int64, device=device)

    self.loss_hook(sentences, neg_samples, inputs)

    loss = lossfn(inputs, outputs)

    ret = dict(reconstruction_loss=loss)

    return loss, ret
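The all-zero targets work because xp, the positive score, is concatenated at column 0 of score, so the "correct class" of the margin loss is index 0 for every position. A reduced sketch of the same pattern (the shapes below are made up):

import torch
import torch.nn as nn

pos = torch.randn(6, 1)                       # one positive score per position
neg = torch.randn(6, 4)                       # k = 4 negative scores per position
scores = torch.cat([pos, neg], dim=1)         # (6, k + 1), positive in column 0
targets = torch.zeros(6, dtype=torch.int64)   # class 0 = the positive sample
loss = nn.MultiMarginLoss(margin=1.0)(scores, targets)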
Example #7
Source File: pytorchmodel.py    From starsem2018-entity-linking with Apache License 2.0
def __init__(self, weight):
    super(CustomCombinedLoss, self).__init__()
    self._weight = weight
    self._criterion_choice = nn.MultiMarginLoss(size_average=False, margin=0.5)
Example #8
Source File: test_models.py    From coling2018-graph-neural-networks-question-answering with Apache License 2.0
def test_metrics():
    encoder = ConvWordsEncoder(*wordembeddings.shape)
    encoder.load_word_embeddings_from_numpy(wordembeddings)
    net = PooledEdgesModel(encoder)
    criterion = nn.MultiMarginLoss()

    def metrics(targets, predictions, validation=False):
        _, predicted_targets = torch.topk(predictions, 1, dim=-1)
        # _, targets = torch.topk(targets, 1, dim=-1)
        predicted_targets = predicted_targets.squeeze(1)
        cur_acc = torch.sum(predicted_targets == targets).float()
        cur_acc /= predicted_targets.size(0)
        cur_f1 = 0.0

        if validation:
            for i, q in enumerate(training_dataset):
                if i < predicted_targets.size(0):
                    idx = predicted_targets.data[i]
                    if idx < len(q.graphs):
                        cur_f1 += q.graphs[idx].scores[2]
            cur_f1 /= targets.size(0)
        return {'acc': cur_acc.data[0], 'f1': cur_f1}

    container = fackel.TorchContainer(
        torch_model=net,
        batch_size=8,
        max_epochs=5,
        model_checkpoint=False,
        early_stopping=5,
        criterion=criterion,
        metrics=metrics
    )

    selected_questions = [s for s in training_dataset if any(scores[2] > 0.0 for g, scores in s.graphs)]
    targets = np.zeros((len(selected_questions)), dtype=np.int32)
    for qi, q in enumerate(selected_questions):
        random.shuffle(q.graphs)
        targets[qi] = np.argsort([g.scores[2] for g in q.graphs])[::-1][0]

    train_questions = V.encode_batch_questions(selected_questions, word2idx)[..., 0, :]
    train_edges = V.encode_batch_graphs(selected_questions, word2idx)[..., 0, :]

    container.train(train=(train_questions, train_edges), train_targets=targets,
                    dev=(train_questions, train_edges), dev_targets=targets)
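A side note on the metrics function above: cur_acc.data[0] is the PyTorch 0.3 idiom for extracting a Python number from a zero-dimensional tensor; on PyTorch >= 0.4 the equivalent is .item(). A minimal sketch:

import torch

predicted = torch.tensor([1, 0, 2, 2])
targets = torch.tensor([1, 0, 1, 2])
acc = (predicted == targets).float().mean()
print(acc.item())  # 0.75, the modern replacement for acc.data[0]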