Python torch.nn.functional.mse_loss() Examples

The following are 30 code examples of torch.nn.functional.mse_loss(), drawn from open-source projects. The Source File line above each example names the original project and file. You may also want to check out the other functions and classes of the torch.nn.functional module.
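
Before the project examples, here is a minimal self-contained sketch of the function itself; the shapes and values are illustrative, not taken from any project below:

import torch
import torch.nn.functional as F

pred = torch.randn(4, 3)    # model output
target = torch.randn(4, 3)  # ground truth

# Default reduction is 'mean': squared error averaged over all elements.
loss_mean = F.mse_loss(pred, target)

# 'sum' adds the squared errors up; 'none' keeps the per-element losses.
loss_sum = F.mse_loss(pred, target, reduction='sum')
loss_none = F.mse_loss(pred, target, reduction='none')  # shape (4, 3)

assert torch.isclose(loss_mean, loss_sum / pred.numel())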
Example #1
Source File: train_eval.py    From IGMC with MIT License
def train(model, optimizer, loader, device, regression=False, ARR=0):
    model.train()
    total_loss = 0
    for data in loader:
        optimizer.zero_grad()
        data = data.to(device)
        out = model(data)
        if regression:
            loss = F.mse_loss(out, data.y.view(-1))
        else:
            loss = F.nll_loss(out, data.y.view(-1))
        if ARR != 0:
            # ARR (adjacent rating regularization): penalize differences
            # between the weight matrices of adjacent rating relations.
            for gconv in model.convs:
                w = torch.matmul(
                    gconv.att,
                    gconv.basis.view(gconv.num_bases, -1)
                ).view(gconv.num_relations, gconv.in_channels, gconv.out_channels)
                reg_loss = torch.sum((w[1:, :, :] - w[:-1, :, :])**2)
                loss += ARR * reg_loss
        loss.backward()
        total_loss += loss.item() * num_graphs(data)
        optimizer.step()
        torch.cuda.empty_cache()
    return total_loss / len(loader.dataset) 
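
The ARR term above only needs the stacked relation weights; a standalone sketch of the same smoothness penalty, with a random tensor standing in for the reconstructed R-GCN weights (the shape is illustrative):

import torch

num_relations, in_channels, out_channels = 5, 16, 16
w = torch.randn(num_relations, in_channels, out_channels)

# Sum of squared differences between weight matrices of adjacent relations;
# small values mean neighboring rating levels apply similar transforms.
reg_loss = torch.sum((w[1:] - w[:-1]) ** 2)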
Example #2
Source File: mse_loss.py    From mmdetection with Apache License 2.0
def forward(self, pred, target, weight=None, avg_factor=None):
        """Forward function of loss.

        Args:
            pred (torch.Tensor): The prediction.
            target (torch.Tensor): The learning target of the prediction.
            weight (torch.Tensor, optional): Weight of the loss for each
                prediction. Defaults to None.
            avg_factor (int, optional): Average factor that is used to average
                the loss. Defaults to None.

        Returns:
            torch.Tensor: The calculated loss.
        """
        loss = self.loss_weight * mse_loss(
            pred,
            target,
            weight,
            reduction=self.reduction,
            avg_factor=avg_factor)
        return loss 
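
The mse_loss called above is mmdetection's weighted wrapper, not torch.nn.functional.mse_loss directly. A minimal sketch of what such a helper might look like, inferred from the call site rather than copied from mmdetection:

import torch.nn.functional as F

def weighted_mse_loss(pred, target, weight=None, reduction='mean', avg_factor=None):
    # Element-wise squared error, optionally weighted per element.
    loss = F.mse_loss(pred, target, reduction='none')
    if weight is not None:
        loss = loss * weight
    # avg_factor, when given, overrides the reduction's own normalization.
    if avg_factor is not None:
        return loss.sum() / avg_factor
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    return loss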
Example #3
Source File: eTempensv2.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def __init__(self, model, optimizer, device, config):
        print('Tempens-v2 with epoch pseudo labels')
        self.model     = model
        self.optimizer = optimizer
        self.ce_loss   = torch.nn.CrossEntropyLoss(ignore_index=NO_LABEL)
        self.mse_loss  = mse_with_softmax # F.mse_loss 
        self.save_dir  = '{}-{}_{}-{}_{}'.format(config.arch, config.model,
                          config.dataset, config.num_labels,
                          datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
        self.save_dir  = os.path.join(config.save_dir, self.save_dir)
        self.device      = device
        self.usp_weight  = config.usp_weight
        self.ema_decay   = config.ema_decay
        self.rampup      = exp_rampup(config.rampup_length)
        self.save_freq   = config.save_freq
        self.print_freq  = config.print_freq
        self.epoch       = 0
        self.start_epoch = 0 
Example #4
Source File: PIv2.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def __init__(self, model, optimizer, device, config):
        print('PI-v2')
        self.model     = model
        self.optimizer = optimizer
        self.ce_loss   = torch.nn.CrossEntropyLoss(ignore_index=NO_LABEL)
        self.cons_loss = mse_with_softmax #F.mse_loss
        self.save_dir  = '{}-{}_{}-{}_{}'.format(config.arch, config.model,
                          config.dataset, config.num_labels,
                          datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
        self.save_dir  = os.path.join(config.save_dir, self.save_dir)
        self.usp_weight  = config.usp_weight
        self.rampup      = exp_rampup(config.weight_rampup)
        self.save_freq   = config.save_freq
        self.print_freq  = config.print_freq
        self.device      = device
        self.epoch       = 0 
Example #5
Source File: MeanTeacherv1.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def __init__(self, model, ema_model, optimizer, device, config):
        print("MeanTeacher-v1")
        self.model      = model
        self.ema_model  = ema_model
        self.optimizer  = optimizer
        self.ce_loss    = torch.nn.CrossEntropyLoss(ignore_index=NO_LABEL)
        self.cons_loss  = mse_with_softmax #F.mse_loss
        self.save_dir  = '{}-{}_{}-{}_{}'.format(config.arch, config.model,
                          config.dataset, config.num_labels,
                          datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
        self.save_dir  = os.path.join(config.save_dir, self.save_dir)
        self.usp_weight  = config.usp_weight
        self.ema_decay   = config.ema_decay
        self.rampup      = exp_rampup(config.weight_rampup)
        self.save_freq   = config.save_freq
        self.print_freq  = config.print_freq
        self.device      = device
        self.global_step = 0
        self.epoch       = 0 
Example #6
Source File: task.py    From metal with Apache License 2.0
def __init__(
        self,
        name,
        input_module=IdentityModule(),
        middle_module=IdentityModule(),
        head_module=IdentityModule(),
        output_hat_func=(lambda X: X["data"]),
        # Note: no sigmoid (target labels can be in any range)
        loss_hat_func=(lambda X, Y: F.mse_loss(X["data"].view(-1), Y.view(-1))),
        loss_multiplier=1.0,
        scorer=Scorer(standard_metrics=[]),
    ) -> None:

        super(RegressionTask, self).__init__(
            name,
            input_module,
            middle_module,
            head_module,
            output_hat_func,
            loss_hat_func,
            loss_multiplier,
            scorer,
        ) 
Example #7
Source File: eTempensv1.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def __init__(self, model, optimizer, device, config):
        print('Tempens-v1 with epoch pseudo labels')
        self.model     = model
        self.optimizer = optimizer
        self.ce_loss   = torch.nn.CrossEntropyLoss(ignore_index=NO_LABEL)
        self.mse_loss  = mse_with_softmax # F.mse_loss 
        self.save_dir  = '{}-{}_{}-{}_{}'.format(config.arch, config.model,
                          config.dataset, config.num_labels,
                          datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
        self.save_dir  = os.path.join(config.save_dir, self.save_dir)
        self.device      = device
        self.usp_weight  = config.usp_weight
        self.ema_decay   = config.ema_decay
        self.rampup      = exp_rampup(config.rampup_length)
        self.save_freq   = config.save_freq
        self.print_freq  = config.print_freq
        self.epoch       = 0
        self.start_epoch = 0 
Example #8
Source File: nscupa.py    From Hierarchical-Sentiment with MIT License
def test(epoch,net,dataset,cuda,msg="Evaluating"):
    net.eval()
    epoch_loss = 0
    ok_all = 0
    pred = 0
    skipped = 0
    mean_mse = 0
    mean_rmse = 0
    data_tensors = new_tensors(6,cuda,types={0:torch.LongTensor,1:torch.LongTensor,2:torch.LongTensor,3:torch.LongTensor,4:torch.LongTensor,5:torch.LongTensor}) #data-tensors
    
    with tqdm(total=len(dataset),desc=msg) as pbar:
        for iteration, (batch_t,r_t,u_t,i_t,sent_order,ui_indexs,ls,lr,review) in enumerate(dataset):
            data = tuple2var(data_tensors,(batch_t,r_t,u_t,i_t,sent_order,ui_indexs))
            out  = net(data[0],data[2],data[3],data[4],data[5],ls,lr)
            ok, per, val_i = accuracy(out, data[1])
            mseloss = F.mse_loss(val_i, data[1].float())
            # .item() replaces the pre-0.4 `.data[0]` indexing, which errors
            # on 0-dim tensors in current PyTorch.
            mean_rmse += math.sqrt(mseloss.item())
            mean_mse += mseloss.item()
            ok_all += per.item()
            pred+=1
            pbar.update(1)
            pbar.set_postfix({"acc":ok_all/pred, "skipped":skipped,"mseloss":mean_mse/(iteration+1),"rmseloss":mean_rmse/(iteration+1)})

    print("===> {} Complete:  {}% accuracy".format(msg,ok_all/pred)) 
Example #9
Source File: mlp_dropout.py    From pytorch_DGCNN with MIT License
def forward(self, x, y = None):
        h1 = self.h1_weights(x)
        h1 = F.relu(h1)

        if self.with_dropout:
            h1 = F.dropout(h1, training=self.training)
        pred = self.h2_weights(h1)[:, 0]

        if y is not None:
            y = Variable(y)  # Variable is a no-op wrapper in PyTorch >= 0.4
            mse = F.mse_loss(pred, y)
            mae = F.l1_loss(pred, y)
            mae = mae.cpu().detach()
            return pred, mae, mse
        else:
            return pred 
Example #10
Source File: MeanTeacherv2.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def __init__(self, model, ema_model, optimizer, device, config):
        print("MeanTeacher-v2")
        self.model      = model
        self.ema_model  = ema_model
        self.optimizer  = optimizer
        self.ce_loss    = torch.nn.CrossEntropyLoss(ignore_index=NO_LABEL)
        self.cons_loss  = mse_with_softmax #F.mse_loss
        self.save_dir  = '{}-{}_{}-{}_{}'.format(config.arch, config.model,
                          config.dataset, config.num_labels,
                          datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
        self.save_dir  = os.path.join(config.save_dir, self.save_dir)
        self.usp_weight  = config.usp_weight
        self.ema_decay   = config.ema_decay
        self.rampup      = exp_rampup(config.weight_rampup)
        self.save_freq   = config.save_freq
        self.print_freq  = config.print_freq
        self.device      = device
        self.global_step = 0
        self.epoch       = 0 
Example #11
Source File: iTempensv2.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def __init__(self, model, optimizer, device, config):
        print('Tempens-v2 with iteration pseudo labels')
        self.model     = model
        self.optimizer = optimizer
        self.ce_loss   = torch.nn.CrossEntropyLoss(ignore_index=NO_LABEL)
        self.mse_loss  = mse_with_softmax # F.mse_loss 
        self.save_dir  = '{}_{}-{}_{}'.format(config.arch, config.dataset,
                          config.num_labels,
                          datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
        self.save_dir  = os.path.join(config.save_dir, self.save_dir)
        self.save_freq   = config.save_freq
        self.print_freq  = config.print_freq
        self.device      = device
        self.epoch       = 0
        self.start_epoch = 0
        self.usp_weight  = config.usp_weight
        self.ema_decay   = config.ema_decay
        self.rampup      = exp_rampup(config.rampup_length) 
Example #12
Source File: experiments.py    From pytorch-deep-sets with MIT License
def train_1_item(self, item_number: int) -> float:
        x, target = self.train_db.__getitem__(item_number)
        if torch.cuda.is_available():
            x, target = x.cuda(), target.cuda()

        x, target = Variable(x), Variable(target)

        self.optimizer.zero_grad()
        pred = self.model.forward(x)
        the_loss = F.mse_loss(pred, target)

        the_loss.backward()
        self.optimizer.step()

        # .item() replaces the .data -> cpu() -> numpy() round-trip used in
        # older PyTorch and returns a Python float directly.
        the_loss_float = the_loss.item()

        return the_loss_float 
Example #13
Source File: PIv1.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def __init__(self, model, optimizer, device, config):
        print('PI-v1')
        self.model     = model
        self.optimizer = optimizer
        self.ce_loss   = torch.nn.CrossEntropyLoss(ignore_index=NO_LABEL)
        self.cons_loss = mse_with_softmax #F.mse_loss
        self.save_dir  = '{}-{}_{}-{}_{}'.format(config.arch, config.model,
                          config.dataset, config.num_labels,
                          datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
        self.save_dir  = os.path.join(config.save_dir, self.save_dir)
        self.usp_weight  = config.usp_weight
        self.rampup      = exp_rampup(config.weight_rampup)
        self.save_freq   = config.save_freq
        self.print_freq  = config.print_freq
        self.device      = device
        self.epoch       = 0 
Example #14
Source File: sac_ae.py    From pytorch_sac_ae with MIT License
def update_decoder(self, obs, target_obs, L, step):
        h = self.critic.encoder(obs)

        if target_obs.dim() == 4:
            # preprocess images to be in [-0.5, 0.5] range
            target_obs = utils.preprocess_obs(target_obs)
        rec_obs = self.decoder(h)
        rec_loss = F.mse_loss(target_obs, rec_obs)

        # add L2 penalty on latent representation
        # see https://arxiv.org/pdf/1903.12436.pdf
        latent_loss = (0.5 * h.pow(2).sum(1)).mean()

        loss = rec_loss + self.decoder_latent_lambda * latent_loss
        self.encoder_optimizer.zero_grad()
        self.decoder_optimizer.zero_grad()
        loss.backward()

        self.encoder_optimizer.step()
        self.decoder_optimizer.step()
        L.log('train_ae/ae_loss', loss, step)

        self.decoder.log(L, step, log_freq=LOG_FREQ) 
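
The latent term is half the squared L2 norm of each code, averaged over the batch (the RAE-style penalty from the linked paper). A quick check on an illustrative tensor:

import torch

h = torch.randn(8, 32)  # batch of latent codes, illustrative shape

latent_loss = (0.5 * h.pow(2).sum(1)).mean()
alt = (0.5 * torch.norm(h, dim=1) ** 2).mean()  # same penalty via norms
assert torch.isclose(latent_loss, alt, rtol=1e-4)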
Example #15
Source File: sac_ae.py    From pytorch_sac_ae with MIT License
def update_critic(self, obs, action, reward, next_obs, not_done, L, step):
        with torch.no_grad():
            _, policy_action, log_pi, _ = self.actor(next_obs)
            target_Q1, target_Q2 = self.critic_target(next_obs, policy_action)
            target_V = torch.min(target_Q1,
                                 target_Q2) - self.alpha.detach() * log_pi
            target_Q = reward + (not_done * self.discount * target_V)

        # get current Q estimates
        current_Q1, current_Q2 = self.critic(obs, action)
        critic_loss = F.mse_loss(current_Q1,
                                 target_Q) + F.mse_loss(current_Q2, target_Q)
        L.log('train_critic/loss', critic_loss, step)


        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        self.critic.log(L, step) 
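
The target takes the minimum of the two target critics (clipped double-Q) minus the entropy term, and the critic loss is one MSE term per critic. A standalone sketch of the target computation with illustrative numbers:

import torch
import torch.nn.functional as F

reward = torch.tensor([1.0, 0.5])
not_done = torch.tensor([1.0, 1.0])
discount, alpha = 0.99, 0.2  # illustrative hyperparameters

target_Q1 = torch.tensor([10.0, 12.0])
target_Q2 = torch.tensor([9.0, 13.0])
log_pi = torch.tensor([-1.0, -2.0])

# Clipped double-Q: take the smaller of the two target estimates.
target_V = torch.min(target_Q1, target_Q2) - alpha * log_pi
target_Q = reward + not_done * discount * target_V

current_Q1 = torch.tensor([8.0, 11.0])
current_Q2 = torch.tensor([9.5, 12.5])
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)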
Example #16
Source File: train_eval.py    From IGMC with MIT License
def eval_loss(model, loader, device, regression=False, show_progress=False):
    model.eval()
    loss = 0
    if show_progress:
        print('Testing begins...')
        pbar = tqdm(loader)
    else:
        pbar = loader
    for data in pbar:
        data = data.to(device)
        with torch.no_grad():
            out = model(data)
        if regression:
            loss += F.mse_loss(out, data.y.view(-1), reduction='sum').item()
        else:
            loss += F.nll_loss(out, data.y.view(-1), reduction='sum').item()
        torch.cuda.empty_cache()
    return loss / len(loader.dataset) 
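
Summing per-example losses with reduction='sum' and dividing by len(loader.dataset) gives a dataset-level mean that does not depend on the batch size; a quick sketch of the equivalence (shapes are illustrative):

import torch
import torch.nn.functional as F

out, y = torch.randn(100), torch.randn(100)

# Batch-wise 'mean' would weight a smaller final batch differently;
# summing and dividing by the dataset size avoids that.
total = sum(F.mse_loss(out[i:i + 32], y[i:i + 32], reduction='sum').item()
            for i in range(0, 100, 32))
assert abs(total / 100 - F.mse_loss(out, y).item()) < 1e-4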
Example #17
Source File: rl_s2v.py    From DeepRobust with MIT License
def train(self, num_steps=100000, lr=0.001):
        """Train RL agent.
        """

        pbar = tqdm(range(self.burn_in), unit='batch')

        for p in pbar:
            self.run_simulation()

        pbar = tqdm(range(num_steps), unit='steps')
        optimizer = optim.Adam(self.net.parameters(), lr=lr)

        for self.step in pbar:

            self.run_simulation()

            if self.step % 123 == 0:
                # update the params of old_net
                self.take_snapshot()
            if self.step % 500 == 0:
                self.eval()

            cur_time, list_st, list_at, list_rt, list_s_primes, list_term = self.mem_pool.sample(batch_size=self.batch_size)
            list_target = torch.Tensor(list_rt).to(self.device)

            if not list_term[0]:
                target_nodes, _, picked_nodes = zip(*list_s_primes)
                _, q_t_plus_1 = self.old_net(cur_time + 1, list_s_primes, None)
                _, q_rhs = node_greedy_actions(target_nodes, picked_nodes, q_t_plus_1, self.old_net)
                list_target += q_rhs

            # list_target = Variable(list_target.view(-1, 1))
            list_target = list_target.view(-1, 1)
            _, q_sa = self.net(cur_time, list_st, list_at)
            q_sa = torch.cat(q_sa, dim=0)
            loss = F.mse_loss(q_sa, list_target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            pbar.set_description('eps: %.5f, loss: %0.5f, q_val: %.5f' % (self.eps, loss, torch.mean(q_sa)) )
            # print('eps: %.5f, loss: %0.5f, q_val: %.5f' % (self.eps, loss, torch.mean(q_sa)) ) 
Example #18
Source File: clustering.py    From torchsupport with MIT License
def ae_loss(self, predictions, target):
    loss = func.mse_loss(predictions, target)

    self.writer.add_scalar("reconstruction loss", float(loss), self.step_id)

    return loss 
Example #19
Source File: awr.py    From torchsupport with MIT License
def auxiliary_loss(self, value, returns):
    return func.mse_loss(value.view(-1), returns.view(-1)) 
Example #20
Source File: clustering.py    From torchsupport with MIT License
def vae_loss(self, mean, logvar, reconstruction, target, beta=20, c=0.5):
    mse = func.mse_loss(reconstruction, target)
    kld = -0.5 * torch.mean(1 + logvar - mean.pow(2) - logvar.exp())
    return mse + beta * torch.norm(kld - c, 1) 
Example #21
Source File: clustering.py    From torchsupport with MIT License
def vae_loss(self, mean, logvar, reconstruction, target, beta=20, c=0.5):
    mse = func.mse_loss(reconstruction, target)
    kld = -0.5 * torch.mean(1 + logvar - mean.pow(2) - logvar.exp())
    
    self.writer.add_scalar("mse loss", float(mse), self.step_id)
    self.writer.add_scalar("kld loss", float(kld), self.step_id)
    self.writer.add_scalar("kld-c loss", float(torch.norm(kld - c, 2)), self.step_id)

    return mse + beta * torch.norm(kld - c, 2) 
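
The kld term is the closed-form KL divergence between the diagonal Gaussian N(mean, exp(logvar)) and a standard normal, averaged over elements; the norm pulls it toward a capacity target c, in the spirit of controlled-capacity beta-VAE training. A standalone sketch on illustrative tensors:

import torch

mean, logvar = torch.randn(8, 16), torch.randn(8, 16)

# Closed-form KL(N(mean, exp(logvar)) || N(0, I)), averaged over all elements.
kld = -0.5 * torch.mean(1 + logvar - mean.pow(2) - logvar.exp())

beta, c = 20.0, 0.5
penalty = beta * torch.norm(kld - c, 2)  # drive the KLD toward capacity c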
Example #22
Source File: losses.py    From RecNet with MIT License
def local_reconstruction_loss(x, x_recon):
    l2_loss = F.mse_loss(x, x_recon)
    return l2_loss 
Example #23
Source File: eTempensv1.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def train_iteration(self, data_loader, print_freq):
        loop_info = defaultdict(list)
        label_n, unlab_n = 0, 0
        for batch_idx, (data, targets, idxs) in enumerate(data_loader):
            data, targets = data.to(self.device), targets.to(self.device)
            ##=== decode targets ===
            lmask, umask = self.decode_targets(targets)
            lbs, ubs = lmask.float().sum().item(), umask.float().sum().item()

            ##=== forward ===
            outputs = self.model(data)
            loss = self.ce_loss(outputs[lmask], targets[lmask])
            loop_info['lloss'].append(loss.item())

            ##=== Semi-supervised Training Phase ===
            iter_unlab_pslab = self.epoch_pslab[idxs]
            tmp_loss  = self.mse_loss(outputs, iter_unlab_pslab)
            tmp_loss *= self.rampup(self.epoch)*self.usp_weight
            loss  += tmp_loss; loop_info['aTmp'].append(tmp_loss.item())
            ## update pseudo labels
            with torch.no_grad():
                self.epoch_pslab[idxs] = outputs.clone().detach()

            ## backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            ##=== log info ===
            label_n, unlab_n = label_n+lbs, unlab_n+ubs
            lacc = targets[lmask].eq(outputs[lmask].max(1)[1]).float().sum().item()
            uacc = targets[umask].eq(outputs[umask].max(1)[1]).float().sum().item()
            loop_info['lacc'].append(lacc)
            loop_info['uacc'].append(uacc)
            if print_freq>0 and (batch_idx%print_freq)==0:
                print(f"[train][{batch_idx:<3}]", self.gen_info(loop_info, lbs, ubs))
        # temporal ensemble
        self.update_ema_predictions() # update every epoch
        print(">>>[train]", self.gen_info(loop_info, label_n, unlab_n, False))
        return loop_info, label_n 
Example #24
Source File: bdpi.py    From torchsupport with MIT License
def auxiliary_loss(self, value, target):
    return func.mse_loss(value, target) 
Example #25
Source File: mixup.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def mixup_mse_loss_with_softmax(preds, targets_a, targets_b, lam):
    """ mixed categorical mse loss
    """
    mixup_loss_a = F.mse_loss(F.softmax(preds, 1), F.softmax(targets_a, 1))
    mixup_loss_b = F.mse_loss(F.softmax(preds, 1), F.softmax(targets_b, 1))

    mixup_loss = lam * mixup_loss_a + (1 - lam) * mixup_loss_b
    return mixup_loss 
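
A usage sketch, assuming the function above is in scope: lam is typically drawn from a Beta(alpha, alpha) distribution and the second target batch is a random permutation of the first (alpha and the shapes here are illustrative):

import numpy as np
import torch

preds = torch.randn(8, 10)      # outputs on the mixed inputs
targets_a = torch.randn(8, 10)  # teacher logits for the original batch
targets_b = targets_a[torch.randperm(8)]  # logits for the shuffled batch

lam = np.random.beta(1.0, 1.0)  # mixup coefficient
loss = mixup_mse_loss_with_softmax(preds, targets_a, targets_b, lam)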
Example #26
Source File: test_sac.py    From garage with MIT License
def testCriticLoss():
    """Test Sac Critic/QF loss."""
    # pylint: disable=no-member
    policy = DummyActorPolicy()
    sac = SAC(env_spec=None,
              policy=policy,
              qf1=DummyCriticNet(),
              qf2=DummyCriticNet(),
              replay_buffer=None,
              gradient_steps_per_itr=1,
              discount=0.9,
              buffer_batch_size=2,
              target_entropy=3.0,
              max_path_length=10,
              optimizer=MagicMock)

    observations = torch.FloatTensor([[1, 2], [3, 4]])
    actions = torch.FloatTensor([[5], [6]])
    rewards = torch.FloatTensor([10, 20])
    terminals = torch.Tensor([[0.], [0.]])
    next_observations = torch.FloatTensor([[5, 6], [7, 8]])
    samples_data = {
        'observation': observations,
        'action': actions,
        'reward': rewards,
        'terminal': terminals,
        'next_observation': next_observations
    }
    td_targets = [7.3, 19.1]
    pred_td_targets = [7., 10.]

    # Expected critic loss has a factor of 2: one MSE term per twin critic (qf1, qf2).
    expected_loss = 2 * F.mse_loss(torch.Tensor(td_targets),
                                   torch.Tensor(pred_td_targets))
    loss = sac._critic_objective(samples_data)
    assert np.all(np.isclose(np.sum(loss), expected_loss)) 
Example #27
Source File: solver.py    From Beta-VAE with MIT License
def reconstruction_loss(x, x_recon, distribution):
    batch_size = x.size(0)
    assert batch_size != 0

    if distribution == 'bernoulli':
        # reduction='sum' replaces the deprecated size_average=False argument
        recon_loss = F.binary_cross_entropy_with_logits(x_recon, x, reduction='sum').div(batch_size)
    elif distribution == 'gaussian':
        x_recon = torch.sigmoid(x_recon)  # F.sigmoid is deprecated in favor of torch.sigmoid
        recon_loss = F.mse_loss(x_recon, x, reduction='sum').div(batch_size)
    else:
        recon_loss = None

    return recon_loss 
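
Summing over all pixels before dividing by batch_size yields a per-sample rather than per-element loss, which keeps its scale comparable to a per-sample KL term. A quick check, assuming the function above is in scope:

import torch
import torch.nn.functional as F

x = torch.rand(4, 1, 28, 28)         # targets in [0, 1]
x_recon = torch.randn(4, 1, 28, 28)  # raw decoder outputs

per_sample = reconstruction_loss(x, x_recon, 'gaussian')
per_element = F.mse_loss(torch.sigmoid(x_recon), x)  # default 'mean'
# The two differ exactly by the number of elements per sample (1*28*28).
assert torch.isclose(per_sample, per_element * 28 * 28, rtol=1e-4)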
Example #28
Source File: mse_loss.py    From GCNet with Apache License 2.0
def forward(self, pred, target, weight=None, avg_factor=None):
        loss = self.loss_weight * mse_loss(
            pred,
            target,
            weight,
            reduction=self.reduction,
            avg_factor=avg_factor)
        return loss 
Example #29
Source File: models.py    From SteganoGAN with MIT License
def _coding_scores(self, cover, generated, payload, decoded):
        encoder_mse = mse_loss(generated, cover)
        decoder_loss = binary_cross_entropy_with_logits(decoded, payload)
        decoder_acc = (decoded >= 0.0).eq(payload >= 0.5).sum().float() / payload.numel()

        return encoder_mse, decoder_loss, decoder_acc 
Example #30
Source File: loss.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def mse_with_softmax(logit1, logit2):
    assert logit1.size() == logit2.size()
    return F.mse_loss(F.softmax(logit1, 1), F.softmax(logit2, 1))
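
This is the consistency loss bound to self.cons_loss / self.mse_loss in the trainer examples above: it compares the softmax outputs of two predictions. A minimal usage sketch, assuming the definition above is in scope (dropout stands in for the input perturbations the trainers use):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(10, 10), nn.Dropout(0.5), nn.Linear(10, 5))
model.train()  # keep dropout active so the two passes differ

x = torch.randn(8, 10)
logits1, logits2 = model(x), model(x)  # two noisy predictions
consistency = mse_with_softmax(logits1, logits2)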