Python torch.nn.CosineEmbeddingLoss() Examples

The following are 8 code examples of torch.nn.CosineEmbeddingLoss(), each drawn from an open-source project; the source file and license are noted above each example.
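Before the project examples, a minimal self-contained sketch of the call signature may help (tensor names and sizes here are illustrative): the criterion takes two batches of embeddings plus a per-pair target of +1 (pull together) or -1 (push apart).

import torch
import torch.nn as nn

# Four hypothetical pairs of 128-dimensional embeddings.
x1 = torch.randn(4, 128)
x2 = torch.randn(4, 128)
# +1 where a pair should be similar, -1 where it should be dissimilar.
target = torch.tensor([1.0, -1.0, 1.0, -1.0])

criterion = nn.CosineEmbeddingLoss(margin=0.0)  # margin only affects the -1 pairs
loss = criterion(x1, x2, target)                # 0-dim tensor, mean over the batch
print(loss.item())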
Example #1
Source File: train.py    From atec-nlp with MIT License
def eval(data_iter, model):
    loss_tot, y_list, y_pred_list = 0, [], []
    model.eval()
    for x1, x2, y in data_iter:
        # if args.cuda:
        #     x1, x2, y = Variable(x1).cuda(), Variable(x2).cuda(), Variable(y).cuda()
        # else:
        #     x1, x2, y = Variable(x1), Variable(x2), Variable(y)
        out1, out2, sim = model(x1, x2)
        # loss = F.cross_entropy(output, y, size_average=False)
        criterion = nn.CosineEmbeddingLoss()  # stateless, so it could be hoisted out of the loop
        loss = criterion(out1, out2, (2 * y - 1).float())  # map {0, 1} labels to the {-1, +1} targets the loss expects
        loss_tot += loss.item()  # loss is a 0-dim scalar tensor
        y_pred = sim.data >= 0.5  # threshold the cosine similarity
        y_pred_list.append(y_pred)
        y_list.append(y)
    y_pred = torch.cat(y_pred_list, 0)
    y = torch.cat(y_list, 0)
    acc, p, r, f1 = metrics(y, y_pred)
    size = len(data_iter.dataset)
    # Each batch loss is already a mean (default reduction='mean'), so this divides
    # a sum of batch means by the dataset size rather than a true per-sample average.
    loss_avg = loss_tot / float(size)
    model.train()
    return loss_avg, acc, p, r, f1 
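The (2*y-1).float() call above is how this example converts {0, 1} similarity labels into the {-1, +1} targets CosineEmbeddingLoss expects; a quick standalone check of that mapping:

import torch

y = torch.tensor([0, 1, 1, 0])
print((2 * y - 1).float())  # tensor([-1.,  1.,  1., -1.])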
Example #2
Source File: trainN.py    From DeepLiDAR with MIT License
def nomal_loss(pred, targetN, mask1):
    valid_mask = (mask1 > 0.0).detach()
    pred_n = pred.permute(0,2,3,1)
    pred_n = pred_n[valid_mask]
    target_n = targetN[valid_mask]

    pred_n = pred_n.contiguous().view(-1,3)
    pred_n = F.normalize(pred_n)
    target_n = target_n.contiguous().view(-1, 3)

    loss_function = nn.CosineEmbeddingLoss()
    # All-ones target: every predicted normal should align with its ground-truth normal.
    loss = loss_function(pred_n, target_n, Variable(torch.Tensor(pred_n.size(0)).cuda().fill_(1.0)))
    return loss 
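The Variable(...).cuda().fill_(1.0) idiom above predates PyTorch 0.4. On current PyTorch, the same all-ones target is usually built with torch.ones on the prediction's device, which keeps the function CPU/GPU agnostic. A sketch of that variant (the function name is ours, not the project's):

import torch
import torch.nn as nn
import torch.nn.functional as F

def cosine_normal_loss(pred_n, target_n):
    # Build the +1 target on the same device/dtype as the predictions.
    target = torch.ones(pred_n.size(0), device=pred_n.device, dtype=pred_n.dtype)
    return nn.CosineEmbeddingLoss()(F.normalize(pred_n, dim=1), target_n, target)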
Example #3
Source File: trainD.py    From DeepLiDAR with MIT License
def nomal_loss(pred, targetN, params, depthI, depthJ):
    depthI = depthI.permute(0, 2, 3, 1)
    depthJ = depthJ.permute(0, 2, 3, 1)

    predN_1 = torch.zeros_like(targetN)
    predN_2 = torch.zeros_like(targetN)

    f = params[:, :, :, 0]
    cx = params[:, :, :, 1]
    cy = params[:, :, :, 2]

    z1 = depthJ - pred
    z1 = torch.squeeze(z1)
    depthJ = torch.squeeze(depthJ)
    # MatI / MatJ are pixel-coordinate grids defined at module level in the original file.
    predN_1[:, :, :, 0] = ((MatJ - cx) * z1 + depthJ) * 1.0 / f
    predN_1[:, :, :, 1] = (MatI - cy) * z1 * 1.0 / f
    predN_1[:, :, :, 2] = z1

    z2 = depthI - pred
    z2 = torch.squeeze(z2)
    depthI = torch.squeeze(depthI)
    predN_2[:, :, :, 0] = (MatJ - cx) * z2 * 1.0 / f
    predN_2[:, :, :, 1] = ((MatI - cy) * z2 + depthI) * 1.0 / f
    predN_2[:, :, :, 2] = z2

    predN = torch.cross(predN_1, predN_2)
    pred_n = F.normalize(predN)
    pred_n = pred_n.contiguous().view(-1, 3)
    target_n = targetN.contiguous().view(-1, 3)

    loss_function = nn.CosineEmbeddingLoss()
    loss = loss_function(pred_n, target_n, Variable(torch.Tensor(pred_n.size(0)).cuda().fill_(1.0)))
    return loss 
Example #4
Source File: train.py    From DeepLiDAR with MIT License
def nomal_loss(pred, targetN, params, depthI, depthJ):
    depthI = depthI.permute(0, 2, 3, 1)
    depthJ = depthJ.permute(0, 2, 3, 1)

    predN_1 = torch.zeros_like(targetN)
    predN_2 = torch.zeros_like(targetN)

    f = params[:, :, :, 0]
    cx = params[:, :, :, 1]
    cy = params[:, :, :, 2]

    z1 = depthJ - pred
    z1 = torch.squeeze(z1)
    depthJ = torch.squeeze(depthJ)
    predN_1[:, :, :, 0] = ((MatJ - cx) * z1 + depthJ) * 1.0 / f
    predN_1[:, :, :, 1] = (MatI - cy) * z1 * 1.0 / f
    predN_1[:, :, :, 2] = z1

    z2 = depthI - pred
    z2 = torch.squeeze(z2)
    depthI = torch.squeeze(depthI)
    predN_2[:, :, :, 0] = (MatJ - cx) * z2 * 1.0 / f
    predN_2[:, :, :, 1] = ((MatI - cy) * z2 + depthI) * 1.0 / f
    predN_2[:, :, :, 2] = z2

    predN = torch.cross(predN_1, predN_2)
    pred_n = F.normalize(predN)
    pred_n = pred_n.contiguous().view(-1, 3)
    target_n = targetN.contiguous().view(-1, 3)

    loss_function = nn.CosineEmbeddingLoss()
    loss = loss_function(pred_n, target_n, Variable(torch.Tensor(pred_n.size(0)).cuda().fill_(1.0)))
    return loss 
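A subtlety shared by Examples #3 and #4: torch.cross called without a dim argument picks the first dimension of size 3 it finds, which resolves to the trailing xyz axis here only because no batch or spatial dimension happens to be 3. Passing the axis explicitly is safer; a small sketch with illustrative shapes:

import torch

a = torch.randn(2, 64, 64, 3)  # per-pixel tangent vectors (batch, H, W, xyz)
b = torch.randn(2, 64, 64, 3)
n = torch.cross(a, b, dim=-1)  # cross product over the xyz component axis
print(n.shape)                 # torch.Size([2, 64, 64, 3])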
Example #5
Source File: emb2emb.py    From DeMa-BWE with BSD 3-Clause "New" or "Revised" License
    def __init__(self, args, src_dict, tgt_dict, src_embedding, tgt_embedding, device):
        super(E2E, self).__init__(args)

        self.args = args
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict

        # src_flow: assume tgt embeddings are transformed from the src mog space
        self.register_buffer('src_embedding', src_embedding)
        self.register_buffer('tgt_embedding', tgt_embedding)

        if args.init_var:
            # initialize with gaussian variance
            self.register_buffer("s2t_s_var", src_dict.var)
            self.register_buffer("s2t_t_var", tgt_dict.var)
            self.register_buffer("t2s_s_var", src_dict.var)
            self.register_buffer("t2s_t_var", tgt_dict.var)
        else:
            self.s2t_s_var = args.s_var
            self.s2t_t_var = args.s2t_t_var
            self.t2s_t_var = args.t_var
            self.t2s_s_var = args.t2s_s_var

        self.register_buffer('src_freqs', torch.tensor(src_dict.freqs, dtype=torch.float))
        self.register_buffer('tgt_freqs', torch.tensor(tgt_dict.freqs, dtype=torch.float))

        # backward: t2s
        self.src_flow = MogFlow_batch(args, self.t2s_s_var)
        # backward: s2t
        self.tgt_flow = MogFlow_batch(args, self.s2t_t_var)
        
        self.s2t_valid_dico = None
        self.t2s_valid_dico = None

        self.device = device
        # use dictionary pairs from the training data (supervise) or identical words (supervise_id) as supervision
        self.supervise = args.supervise_id
        if self.supervise:
            self.load_training_dico()
            if args.sup_obj == 'mse':
                self.sup_loss_func = nn.MSELoss()
            elif args.sup_obj == 'cosine':
                self.sup_loss_func = CosineEmbeddingLoss()

        optim_fn, optim_params = get_optimizer(args.flow_opt_params)
        self.flow_optimizer = optim_fn(list(self.src_flow.parameters()) + list(self.tgt_flow.parameters()), **optim_params)
        self.flow_scheduler = torch.optim.lr_scheduler.ExponentialLR(self.flow_optimizer, gamma=args.lr_decay)

        self.best_valid_metric = 1e-12

        self.sup_sw = args.sup_s_weight
        self.sup_tw = args.sup_t_weight

        self.mse_loss = nn.MSELoss()
        self.cos_loss = CosineEmbeddingLoss()

        # Evaluation on trained model
        if args.load_from_pretrain_s2t != "" or args.load_from_pretrain_t2s != "":
            self.load_from_pretrain() 
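Example #5 dispatches between nn.MSELoss (which also penalizes magnitude differences) and CosineEmbeddingLoss (direction only) based on args.sup_obj. The same dispatch in isolation, as a hedged sketch (the names below are ours, not DeMa-BWE's API):

import torch
import torch.nn as nn

def make_sup_loss(sup_obj):
    if sup_obj == 'mse':
        return nn.MSELoss()
    if sup_obj == 'cosine':
        cos = nn.CosineEmbeddingLoss()
        # CosineEmbeddingLoss needs an explicit +1 target per pair.
        return lambda a, b: cos(a, b, torch.ones(a.size(0), device=a.device))
    raise ValueError('unknown sup_obj: {}'.format(sup_obj))

loss_fn = make_sup_loss('cosine')
a, b = torch.randn(8, 300), torch.randn(8, 300)
print(loss_fn(a, b).item())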
Example #6
Source File: test.py    From im2recipe-Pytorch with MIT License
def main():
   
    model = im2recipe()
    model.visionMLP = torch.nn.DataParallel(model.visionMLP)
    model.to(device)

    # define loss function (criterion) and optimizer
    # cosine similarity between embeddings -> input1, input2, target
    cosine_crit = nn.CosineEmbeddingLoss(0.1).to(device)  # positional argument is margin=0.1
    if opts.semantic_reg:
        weights_class = torch.Tensor(opts.numClasses).fill_(1)
        weights_class[0] = 0 # the background class is set to 0, i.e. ignore
        # CrossEntropyLoss combines LogSoftMax and NLLLoss in one single class
        class_crit = nn.CrossEntropyLoss(weight=weights_class).to(device)
        # we will use two different criteria
        criterion = [cosine_crit, class_crit]
    else:
        criterion = cosine_crit

    print("=> loading checkpoint '{}'".format(opts.model_path))
    if device.type=='cpu':
        checkpoint = torch.load(opts.model_path, encoding='latin1', map_location='cpu')
    else:
        checkpoint = torch.load(opts.model_path, encoding='latin1')
    opts.start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(opts.model_path, checkpoint['epoch']))

    # data preparation, loaders
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    
    # preparing test loader 
    test_loader = torch.utils.data.DataLoader(
        ImagerLoader(opts.img_path,
            transforms.Compose([
                transforms.Scale(256),      # rescale, keeping the original aspect ratio
                transforms.CenterCrop(224), # keep only the center of the rescaled image
                transforms.ToTensor(),
                normalize,
            ]), data_path=opts.data_path, sem_reg=opts.semantic_reg, partition='test'),
        batch_size=opts.batch_size, shuffle=False,
        num_workers=opts.workers, pin_memory=True)
    print('Test loader prepared.')

    # run test
    test(test_loader, model, criterion) 
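nn.CosineEmbeddingLoss(0.1) above sets margin=0.1, which matters only for dissimilar pairs: a target of -1 incurs loss max(0, cos(x1, x2) - margin), so negative pairs whose cosine similarity is already below 0.1 contribute nothing. A small check with hand-picked vectors:

import torch
import torch.nn as nn

crit = nn.CosineEmbeddingLoss(margin=0.1, reduction='none')
x1 = torch.tensor([[1.0, 0.0], [1.0, 0.0]])
x2 = torch.tensor([[1.0, 0.05], [0.0, 1.0]])  # nearly parallel vs. orthogonal
target = torch.tensor([-1.0, -1.0])
print(crit(x1, x2, target))  # first pair penalized (cos > 0.1), second is 0.0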
Example #7
Source File: test.py    From Recipe2ImageGAN with MIT License
def main():
   
    model = im2recipe()
    #model.visionMLP = torch.nn.DataParallel(model.visionMLP, device_ids=[0,1,2,3])
    # barelo:
    model.visionMLP = torch.nn.DataParallel(model.visionMLP, device_ids=[0])
    # model.visionMLP = torch.nn.DataParallel(model.visionMLP, device_ids=[0,1])
    if not opts.no_cuda:
        model.cuda()

    # define loss function (criterion) and optimizer
    # cosine similarity between embeddings -> input1, input2, target
    cosine_crit = nn.CosineEmbeddingLoss(0.1)
    if not opts.no_cuda:
        cosine_crit.cuda()
    # cosine_crit = nn.CosineEmbeddingLoss(0.1)
    if opts.semantic_reg:
        weights_class = torch.Tensor(opts.numClasses).fill_(1)
        weights_class[0] = 0 # the background class is set to 0, i.e. ignore
        # CrossEntropyLoss combines LogSoftMax and NLLLoss in one single class
        class_crit = nn.CrossEntropyLoss(weight=weights_class)
        if not opts.no_cuda:
            class_crit.cuda()
        # class_crit = nn.CrossEntropyLoss(weight=weights_class)
        # we will use two different criteria
        criterion = [cosine_crit, class_crit]
    else:
        criterion = cosine_crit

    print("=> loading checkpoint '{}'".format(opts.model_path))
    checkpoint = torch.load(opts.model_path)
    opts.start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(opts.model_path, checkpoint['epoch']))

    # data preparation, loaders
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    
    # preparing test loader 
    test_loader = torch.utils.data.DataLoader(
        ImagerLoader(opts.img_path,
            transforms.Compose([
                transforms.Scale(256),      # rescale, keeping the original aspect ratio
                transforms.CenterCrop(224), # keep only the center of the rescaled image
                transforms.ToTensor(),
                normalize,
            ]), data_path=opts.data_path, sem_reg=opts.semantic_reg, partition='test'),
        batch_size=opts.batch_size, shuffle=False,
        num_workers=opts.workers, pin_memory=(not opts.no_cuda))
    print('Test loader prepared.')

    # run test
    test(test_loader, model, criterion) 
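The same criterion also exists in functional form as torch.nn.functional.cosine_embedding_loss, which avoids holding a module around when the margin is fixed; a one-line sketch with illustrative shapes:

import torch
import torch.nn.functional as F

x1, x2 = torch.randn(16, 1024), torch.randn(16, 1024)
loss = F.cosine_embedding_loss(x1, x2, torch.ones(16), margin=0.1)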
Example #8
Source File: train_lstm.py    From embeddings with Apache License 2.0
def train(x, y, wv, model, epochs, abstracts_file):
    """
    This method takes the inputs, and the labels
    and trains the LSTM network to predict the
    embeddings based on the input sequnce.

    Arguments
    ---------
    x : List of input descriptions
    y : List of target entity embeddings
    wv : Keyed Word Vectors for pre-trained embeddings
    """
    # epochs = 100
    # embed_size = 100
    # hidden_size = 50
    # seq_len = 1
    # num_layers = 2
    inputs = x
    labels = y
    itr = len(inputs)
    progress = 0.0
    criterion = nn.CosineEmbeddingLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.2)
    count = 0
    itr_p = 0.0
    loss = 0
    losses = np.zeros(epochs)
    logging.info('training on {0} samples'.format(itr))
    for epoch in range(epochs):
        logging.info(
            'starting epoch {0}'.format(epoch + 1))
        flags = Variable(torch.ones(1))  # +1 target: predicted and true embeddings should align
        count = 0
        hidden = model.init_hidden()
        for i, l in zip(inputs, labels):
            count += 1
            progress = (count / itr) * 100
            print('INFO : PROGRESS : {0:.1f} %'.format(progress), end='\r')
            for word in i:
                output, hidden = model(
                    Variable(torch.tensor([[word]])),
                    hidden)
            optimizer.zero_grad()
            loss = criterion(output[0],
                             Variable(torch.tensor([l])),
                             flags)
            loss.backward(retain_graph=True)  # the hidden state is reused across samples, so keep the graph
            optimizer.step()
        logging.info(
            'completed epoch {0}, loss : {1}'.format(epoch + 1, loss.item()))
        losses[epoch] = loss.item()
    logging.info('saving the model to model/description_encoder')
    torch.save(model, 'model/description_encoder')
    validate(model, wv, abstracts_file)
    plt.plot(losses)
    plt.title('Model Loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.show()
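Example #8 invokes the criterion once per sample with a one-element target. When the model can emit a whole batch of outputs, a single batched call is equivalent and typically faster; a sketch under that assumption (outputs/labels are illustrative stand-ins):

import torch
import torch.nn as nn

criterion = nn.CosineEmbeddingLoss()
outputs = torch.randn(32, 100, requires_grad=True)  # predicted embeddings
labels = torch.randn(32, 100)                       # target embeddings
target = torch.ones(32)                             # +1: pull each pair together
loss = criterion(outputs, labels, target)           # mean over the batch
loss.backward()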