Python torch.isnan() Examples

The following are 30 code examples of torch.isnan(), extracted from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the torch module, or try the search function.
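Before the project examples, a minimal sketch of the function itself: torch.isnan returns a boolean mask of the same shape as its input, which can be used to filter out or overwrite NaN entries.

import torch

x = torch.tensor([1.0, float('nan'), 2.0])
mask = torch.isnan(x)   # tensor([False,  True, False])
print(x[~mask])         # tensor([1., 2.]) -- drop the NaN entries
x[mask] = 0.0           # ...or overwrite them in place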
Example #1
Source File: metrics.py    From vedaseg with Apache License 2.0
def dice_score(pred, gt, thres_range=np.arange(0.1, 1.0, 0.1)):
    """dice_score
        
        Args:
            pred, n*c*h*w, torch.Tensor
            gt, n*c*h*w, torch.Tensor

        Return:
            dice, nthres * nclasses
    """
    gt = gt.float()

    dices = []
    for thres in thres_range:
        tpred = (pred > thres).float()
        nu = 2 * (tpred * gt).sum(dim=[2, 3])
        de = tpred.sum(dim=[2, 3]) + gt.sum(dim=[2, 3])
        dice = nu / de
        dice[torch.isnan(dice)] = 1
        dices.append(dice.sum(0))
    return torch.stack(dices, 0) 
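A hedged usage sketch (the shapes follow the docstring; the random inputs are purely illustrative, and numpy/torch are assumed imported where dice_score is defined):

import numpy as np
import torch

pred = torch.rand(4, 3, 32, 32)                # n*c*h*w probabilities
gt = (torch.rand(4, 3, 32, 32) > 0.5).float()  # n*c*h*w binary masks
dice = dice_score(pred, gt)                    # shape: 9 thresholds x 3 classes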
Example #2
Source File: discrete_test.py    From nsf with MIT License
def test_sample_and_log_prob_with_context(self):
        num_samples = 10
        context_size = 20
        input_shape = [2, 3, 4]
        context_shape = [2, 3, 4]

        dist = discrete.ConditionalIndependentBernoulli(input_shape)
        context = torch.randn(context_size, *context_shape)
        samples, log_prob = dist.sample_and_log_prob(num_samples, context=context)

        self.assertIsInstance(samples, torch.Tensor)
        self.assertIsInstance(log_prob, torch.Tensor)

        self.assertEqual(samples.shape, torch.Size([context_size, num_samples] + input_shape))
        self.assertEqual(log_prob.shape, torch.Size([context_size, num_samples]))

        self.assertFalse(torch.isnan(log_prob).any())
        self.assertFalse(torch.isinf(log_prob).any())
        self.assert_tensor_less_equal(log_prob, 0.0)

        self.assertFalse(torch.isnan(samples).any())
        self.assertFalse(torch.isinf(samples).any())
        binary = (samples == 1.0) | (samples == 0.0)
        self.assertEqual(binary, torch.ones_like(binary)) 
Example #3
Source File: normal_test.py    From nsf with MIT License
def test_mean(self):
        context_size = 20
        input_shape = [2, 3, 4]
        context_shape = [5, 6]
        dist = normal.StandardNormal(input_shape)
        maybe_context = torch.randn(context_size, *context_shape)
        for context in [None, maybe_context]:
            with self.subTest(context=context):
                means = dist.mean(context=context)
                self.assertIsInstance(means, torch.Tensor)
                self.assertFalse(torch.isnan(means).any())
                self.assertFalse(torch.isinf(means).any())
                self.assertEqual(means, torch.zeros_like(means))
                if context is None:
                    self.assertEqual(means.shape, torch.Size(input_shape))
                else:
                    self.assertEqual(means.shape, torch.Size([context_size] + input_shape)) 
Example #4
Source File: normal_test.py    From nsf with MIT License
def test_sample(self):
        num_samples = 10
        context_size = 20
        input_shape = [2, 3, 4]
        context_shape = [5, 6]
        dist = normal.StandardNormal(input_shape)
        maybe_context = torch.randn(context_size, *context_shape)
        for context in [None, maybe_context]:
            with self.subTest(context=context):
                samples = dist.sample(num_samples, context=context)
                self.assertIsInstance(samples, torch.Tensor)
                self.assertFalse(torch.isnan(samples).any())
                self.assertFalse(torch.isinf(samples).any())
                if context is None:
                    self.assertEqual(samples.shape, torch.Size([num_samples] + input_shape))
                else:
                    self.assertEqual(
                        samples.shape, torch.Size([context_size, num_samples] + input_shape)) 
Example #5
Source File: box_utils.py    From lightDSFD with MIT License
def get_centerness_targets(loc_t, priors, variances, IoU=False):
    """generate the targets of centerness branch, like FCOS"""
    if not IoU:
        loc_t = decode(loc_t, priors, variances=variances)

    # left/top/right/bottom distances from the prior center to the decoded box sides
    l = priors[:, 0] - loc_t[:, 0]
    t = priors[:, 1] - loc_t[:, 1]
    r = loc_t[:, 2] - priors[:, 0]
    b = loc_t[:, 3] - priors[:, 1]

    left_right = torch.stack([l, r], dim=1)
    top_bottom = torch.stack([t, b], dim=1)

    centerness = torch.sqrt(
        left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0] * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
    )
    
    assert not torch.isnan(centerness).any()
    return centerness 
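The assert guards against NaN, which appears when a decoded box ends up on the wrong side of the prior center: the min/max ratio goes negative and its square root is NaN. A minimal illustration of the failure mode (made-up values):

import torch

left_right = torch.tensor([[-1.0, 2.0]])  # prior center lies outside the box
ratio = left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]
print(torch.sqrt(ratio))                  # tensor([nan])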
Example #6
Source File: mlp_test.py    From nsf with MIT License
def test_forward(self):
        batch_size = 10
        in_shape = [2, 3, 4]
        out_shape = [5, 6]
        inputs = torch.randn(batch_size, *in_shape)

        for hidden_sizes in [[20], [20, 30], [20, 30, 40]]:
            with self.subTest(hidden_sizes=hidden_sizes):
                model = mlp.MLP(
                    in_shape=in_shape,
                    out_shape=out_shape,
                    hidden_sizes=hidden_sizes,
                )
                outputs = model(inputs)
                self.assertIsInstance(outputs, torch.Tensor)
                self.assertEqual(outputs.shape, torch.Size([batch_size] + out_shape))
                self.assertFalse(torch.isnan(outputs).any())
                self.assertFalse(torch.isinf(outputs).any())

        with self.assertRaises(Exception):
            mlp.MLP(
                in_shape=in_shape,
                out_shape=out_shape,
                hidden_sizes=[],
            ) 
Example #7
Source File: adjust_smooth_l1_loss.py    From Parsing-R-CNN with MIT License
def forward(self, inputs, target, size_average=True):

        n = torch.abs(inputs - target)
        with torch.no_grad():
            if torch.isnan(n.var(dim=0)).sum().item() == 0:
                self.running_mean = self.running_mean.to(n.device)
                self.running_mean *= (1 - self.momentum)
                self.running_mean += (self.momentum * n.mean(dim=0))
                self.running_var = self.running_var.to(n.device)
                self.running_var *= (1 - self.momentum)
                self.running_var += (self.momentum * n.var(dim=0))


        beta = (self.running_mean - self.running_var)
        beta = beta.clamp(max=self.beta, min=1e-3)

        beta = beta.view(-1, self.num_features).to(n.device)
        cond = n < beta.expand_as(n)
        loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
        if size_average:
            return loss.mean()
        return loss.sum() 
Example #8
Source File: pose.py    From photometric-mesh-optim with MIT License
def rotation_matrix_to_quaternion(R): # [B,3,3]
	row0,row1,row2 = torch.unbind(R,dim=-2)
	R00,R01,R02 = torch.unbind(row0,dim=-1)
	R10,R11,R12 = torch.unbind(row1,dim=-1)
	R20,R21,R22 = torch.unbind(row2,dim=-1)
	t = R[...,0,0]+R[...,1,1]+R[...,2,2]
	r = (1+t).sqrt()
	qa = 0.5*r
	qb = (R21-R12).sign()*0.5*(1+R00-R11-R22).sqrt()
	qc = (R02-R20).sign()*0.5*(1-R00+R11-R22).sqrt()
	qd = (R10-R01).sign()*0.5*(1-R00-R11+R22).sqrt()
	q = torch.stack([qa,qb,qc,qd],dim=-1)
	for i,qi in enumerate(q):
		if torch.isnan(qi).any():
			print(i)
			K = torch.stack([torch.stack([R00-R11-R22,R10+R01,R20+R02,R12-R21],dim=-1),
							 torch.stack([R10+R01,R11-R00-R22,R21+R12,R20-R02],dim=-1),
							 torch.stack([R20+R02,R21+R12,R22-R00-R11,R01-R10],dim=-1),
							 torch.stack([R12-R21,R20-R02,R01-R10,R00+R11+R22],dim=-1)],dim=-2)/3.0
			K = K[i]
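			# n.b.: Tensor.eig was removed in newer PyTorch; torch.linalg.eigh(K) is the modern route for this symmetric K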
			eigval,eigvec = K.eig(eigenvectors=True)
			idx = eigval[:,0].argmax()
			V = eigvec[:,idx]
			q[i] = torch.stack([V[3],V[0],V[1],V[2]])
	return q 
Example #9
Source File: entity_ranking.py    From kge with MIT License
def _get_ranks_and_num_ties(
        scores: torch.Tensor, true_scores: torch.Tensor
    ) -> (torch.Tensor, torch.Tensor):
        """Returns rank and number of ties of each true score in scores.

        :param scores: batch_size x entities tensor of scores

        :param true_scores: batch_size x 1 tensor containing the actual scores of the batch

        :return: batch_size x 1 tensors rank and num_ties
        """
        # process NaN values
        scores = scores.clone()
        scores[torch.isnan(scores)] = float("-Inf")
        true_scores = true_scores.clone()
        true_scores[torch.isnan(true_scores)] = float("-Inf")

        # Determine how many scores are greater than / equal to each true answer (in its
        # corresponding row of scores)
        rank = torch.sum(scores > true_scores.view(-1, 1), dim=1, dtype=torch.long)
        num_ties = torch.sum(scores == true_scores.view(-1, 1), dim=1, dtype=torch.long)
        return rank, num_ties 
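A minimal sketch of what the NaN mapping buys (illustrative values): after mapping NaN to -Inf, a NaN score sorts below every real score and can never outrank the true answer.

import torch

scores = torch.tensor([[0.9, float('nan'), 0.2]])  # batch_size x entities
true_scores = torch.tensor([[0.2]])                # batch_size x 1
scores[torch.isnan(scores)] = float("-Inf")
rank = torch.sum(scores > true_scores.view(-1, 1), dim=1)  # tensor([1])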
Example #10
Source File: base_task.py    From Doc2EDAG with MIT License
def set_optimizer_params_grad(named_params_optimizer, named_params_model, test_nan=False):
    """
        Utility function for optimize_on_cpu and 16-bit training.
        Copy the gradients of the GPU parameters to the CPU/RAM copy of the model.
    """
    is_nan = False
    for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer, named_params_model):
        if name_opti != name_model:
            logger.error("name_opti != name_model: {} {}".format(name_opti, name_model))
            raise ValueError
        if param_model.grad is not None:
            if test_nan and torch.isnan(param_model.grad).sum() > 0:
                is_nan = True
            if param_opti.grad is None:
                param_opti.grad = torch.nn.Parameter(param_opti.data.new().resize_(*param_opti.data.size()))
            param_opti.grad.data.copy_(param_model.grad.data)
        else:
            param_opti.grad = None
    return is_nan 
Example #11
Source File: base.py    From torch-kalman with MIT License
def _validate(self):
        if self.means.dim() != 2:
            raise ValueError("means should be 2D (first dimension batch-size)")
        if self.covs.dim() != 3:
            raise ValueError("covs should be 3D (first dimension batch-size)")
        if torch.isinf(self.means).any():
            raise ValueError("Infs in `means`.")
        if torch.isinf(self.covs).any():
            raise ValueError("Infs in `covs`.")
        if torch.isnan(self.means).any():
            raise ValueError("nans in `means`.")
        if torch.isnan(self.covs).any():
            raise ValueError("nans in `covs`.")
        if self.covs.shape[0] != self.means.shape[0]:
            raise ValueError("The batch-size (1st dimension) of cov doesn't match that of mean.")
        if self.covs.shape[1] != self.covs.shape[2]:
            raise ValueError("The cov should be symmetric in the last two dimensions.")
        if self.covs.shape[1] != self.means.shape[1]:
            raise ValueError("The state-size (2nd/3rd dimension) of cov doesn't match that of mean.")
        if self.last_measured.shape[0] != self.num_groups or self.last_measured.dim() != 1:
            raise ValueError(f"`last_measured` should be 1D tensor w/length of {self.num_groups:,}.") 
Example #12
Source File: base_test.py    From nsf with MIT License
def test_stochastic_elbo(self):
        batch_size = 10
        input_shape = [2, 3, 4]
        latent_shape = [5, 6]

        prior = distributions.StandardNormal(latent_shape)
        approximate_posterior = distributions.StandardNormal(latent_shape)
        likelihood = distributions.StandardNormal(input_shape)
        vae = base.VariationalAutoencoder(prior, approximate_posterior, likelihood)

        inputs = torch.randn(batch_size, *input_shape)
        for num_samples in [1, 10, 100]:
            with self.subTest(num_samples=num_samples):
                elbo = vae.stochastic_elbo(inputs, num_samples)
                self.assertIsInstance(elbo, torch.Tensor)
                self.assertFalse(torch.isnan(elbo).any())
                self.assertFalse(torch.isinf(elbo).any())
                self.assertEqual(elbo.shape, torch.Size([batch_size])) 
Example #13
Source File: base_test.py    From nsf with MIT License
def test_sample(self):
        num_samples = 10
        input_shape = [2, 3, 4]
        latent_shape = [5, 6]

        prior = distributions.StandardNormal(latent_shape)
        approximate_posterior = distributions.StandardNormal(latent_shape)
        likelihood = distributions.StandardNormal(input_shape)
        vae = base.VariationalAutoencoder(prior, approximate_posterior, likelihood)

        for mean in [True, False]:
            with self.subTest(mean=mean):
                samples = vae.sample(num_samples, mean=mean)
                self.assertIsInstance(samples, torch.Tensor)
                self.assertFalse(torch.isnan(samples).any())
                self.assertFalse(torch.isinf(samples).any())
                self.assertEqual(samples.shape, torch.Size([num_samples] + input_shape)) 
Example #14
Source File: base_test.py    From nsf with MIT License
def test_encode(self):
        batch_size = 20
        input_shape = [2, 3, 4]
        latent_shape = [5, 6]
        inputs = torch.randn(batch_size, *input_shape)

        prior = distributions.StandardNormal(latent_shape)
        approximate_posterior = distributions.StandardNormal(latent_shape)
        likelihood = distributions.StandardNormal(input_shape)
        vae = base.VariationalAutoencoder(prior, approximate_posterior, likelihood)

        for num_samples in [None, 1, 10]:
            with self.subTest(num_samples=num_samples):
                encodings = vae.encode(inputs, num_samples)
                self.assertIsInstance(encodings, torch.Tensor)
                self.assertFalse(torch.isnan(encodings).any())
                self.assertFalse(torch.isinf(encodings).any())
                if num_samples is None:
                    self.assertEqual(encodings.shape, torch.Size([batch_size] + latent_shape))
                else:
                    self.assertEqual(
                        encodings.shape, torch.Size([batch_size, num_samples] + latent_shape)) 
Example #15
Source File: base_test.py    From nsf with MIT License
def test_reconstruct(self):
        batch_size = 20
        input_shape = [2, 3, 4]
        latent_shape = [5, 6]
        inputs = torch.randn(batch_size, *input_shape)

        prior = distributions.StandardNormal(latent_shape)
        approximate_posterior = distributions.StandardNormal(latent_shape)
        likelihood = distributions.StandardNormal(input_shape)
        vae = base.VariationalAutoencoder(prior, approximate_posterior, likelihood)

        for mean in [True, False]:
            for num_samples in [None, 1, 10]:
                with self.subTest(mean=mean, num_samples=num_samples):
                    recons = vae.reconstruct(inputs, num_samples=num_samples, mean=mean)
                    self.assertIsInstance(recons, torch.Tensor)
                    self.assertFalse(torch.isnan(recons).any())
                    self.assertFalse(torch.isinf(recons).any())
                    if num_samples is None:
                        self.assertEqual(recons.shape, torch.Size([batch_size] + input_shape))
                    else:
                        self.assertEqual(
                            recons.shape, torch.Size([batch_size, num_samples] + input_shape)) 
Example #16
Source File: mixed_lipschitz.py    From residual-flows with MIT License
def normalize_u(u, codomain, out=None):
    if not torch.is_tensor(codomain) and codomain == 2:
        u = F.normalize(u, p=2, dim=0, out=out)
    elif codomain == float('inf'):
        u = projmax_(u)
    else:
        uabs = torch.abs(u)
        uph = u / uabs
        uph[torch.isnan(uph)] = 1
        uabs = uabs / torch.max(uabs)
        uabs = uabs**(codomain - 1)
        if codomain == 1:
            u = uph * uabs / vector_norm(uabs, float('inf'))
        else:
            u = uph * uabs / vector_norm(uabs, codomain / (codomain - 1))
    return u 
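The uph line computes the elementwise sign (phase) of u; wherever u is exactly zero, 0/0 yields NaN, which the isnan guard replaces with 1. A minimal illustration:

import torch

u = torch.tensor([-2.0, 0.0, 3.0])
uph = u / u.abs()            # tensor([-1., nan, 1.])
uph[torch.isnan(uph)] = 1    # tensor([-1., 1., 1.])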
Example #17
Source File: metric.py    From pytorch_geometric with MIT License
def precision(pred, target, num_classes):
    r"""Computes the precision
    :math:`\frac{\mathrm{TP}}{\mathrm{TP}+\mathrm{FP}}` of predictions.

    Args:
        pred (Tensor): The predictions.
        target (Tensor): The targets.
        num_classes (int): The number of classes.

    :rtype: :class:`Tensor`
    """
    tp = true_positive(pred, target, num_classes).to(torch.float)
    fp = false_positive(pred, target, num_classes).to(torch.float)

    out = tp / (tp + fp)
    out[torch.isnan(out)] = 0

    return out 
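The NaN guard handles classes that never occur in the predictions: tp and fp are then both zero, so the ratio is 0/0 = NaN, and the guard scores such classes as 0. The same pattern appears in recall and f1_score below. A minimal illustration:

import torch

tp = torch.tensor([3.0, 0.0])
fp = torch.tensor([1.0, 0.0])
out = tp / (tp + fp)       # tensor([0.7500, nan])
out[torch.isnan(out)] = 0  # tensor([0.7500, 0.])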
Example #18
Source File: metric.py    From pytorch_geometric with MIT License
def recall(pred, target, num_classes):
    r"""Computes the recall
    :math:`\frac{\mathrm{TP}}{\mathrm{TP}+\mathrm{FN}}` of predictions.

    Args:
        pred (Tensor): The predictions.
        target (Tensor): The targets.
        num_classes (int): The number of classes.

    :rtype: :class:`Tensor`
    """
    tp = true_positive(pred, target, num_classes).to(torch.float)
    fn = false_negative(pred, target, num_classes).to(torch.float)

    out = tp / (tp + fn)
    out[torch.isnan(out)] = 0

    return out 
Example #19
Source File: metric.py    From pytorch_geometric with MIT License
def f1_score(pred, target, num_classes):
    r"""Computes the :math:`F_1` score
    :math:`2 \cdot \frac{\mathrm{precision} \cdot \mathrm{recall}}
    {\mathrm{precision}+\mathrm{recall}}` of predictions.

    Args:
        pred (Tensor): The predictions.
        target (Tensor): The targets.
        num_classes (int): The number of classes.

    :rtype: :class:`Tensor`
    """
    prec = precision(pred, target, num_classes)
    rec = recall(pred, target, num_classes)

    score = 2 * (prec * rec) / (prec + rec)
    score[torch.isnan(score)] = 0

    return score 
Example #20
Source File: metric.py    From pytorch_geometric with MIT License
def mean_iou(pred, target, num_classes, batch=None):
    r"""Computes the mean intersection over union score of predictions.

    Args:
        pred (LongTensor): The predictions.
        target (LongTensor): The targets.
        num_classes (int): The number of classes.
        batch (LongTensor): The assignment vector which maps each pred-target
            pair to an example.

    :rtype: :class:`Tensor`
    """
    i, u = intersection_and_union(pred, target, num_classes, batch)
    iou = i.to(torch.float) / u.to(torch.float)
    iou[torch.isnan(iou)] = 1
    iou = iou.mean(dim=-1)
    return iou 
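Note that the convention here differs from precision and recall above: a class absent from both prediction and target gives an intersection and union of zero, hence 0/0 = NaN, and is scored as a perfect IoU of 1 rather than 0, so empty classes do not drag down the mean.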
Example #21
Source File: pointnet2_segmentation.py    From pytorch_geometric with MIT License
def test(loader):
    model.eval()

    y_mask = loader.dataset.y_mask
    ious = [[] for _ in range(len(loader.dataset.categories))]

    for data in loader:
        data = data.to(device)
        pred = model(data).argmax(dim=1)

        i, u = i_and_u(pred, data.y, loader.dataset.num_classes, data.batch)
        iou = i.cpu().to(torch.float) / u.cpu().to(torch.float)
        iou[torch.isnan(iou)] = 1

        # Find and filter the relevant classes for each category.
        for iou, category in zip(iou.unbind(), data.category.unbind()):
            ious[category.item()].append(iou[y_mask[category]])

    # Compute mean IoU.
    ious = [torch.stack(iou).mean(0).mean(0) for iou in ious]
    return torch.tensor(ious).mean().item() 
Example #22
Source File: datasets.py    From vaeac with MIT License
def compute_normalization(data, one_hot_max_sizes):
    """
    Compute the normalization parameters (i.e. the mean to subtract and the std
    to divide by) for each feature of the dataset.
    For categorical features the mean is zero and the std is one.
    The i-th feature is treated as categorical if one_hot_max_sizes[i] >= 2.
    Returns two vectors: means and stds.
    """
    norm_vector_mean = torch.zeros(len(one_hot_max_sizes))
    norm_vector_std = torch.ones(len(one_hot_max_sizes))
    for i, size in enumerate(one_hot_max_sizes):
        if size >= 2:
            continue
        v = data[:, i]
        v = v[~torch.isnan(v)]  # keep only the non-NaN entries
        vmin, vmax = v.min(), v.max()
        vmean = v.mean()
        vstd = v.std()
        norm_vector_mean[i] = vmean
        norm_vector_std[i] = vstd
    return norm_vector_mean, norm_vector_std 
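A hedged usage sketch (the tiny dataset and feature sizes are purely illustrative):

import torch

data = torch.tensor([[1.0, 0.0],
                     [float('nan'), 1.0],
                     [3.0, 0.0]])
one_hot_max_sizes = [1, 3]  # feature 0 numeric, feature 1 categorical
means, stds = compute_normalization(data, one_hot_max_sizes)
# means -> tensor([2., 0.]); stds -> tensor([1.4142, 1.])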
Example #23
Source File: dgcnn_segmentation.py    From pytorch_geometric with MIT License
def test(loader):
    model.eval()

    y_mask = loader.dataset.y_mask
    ious = [[] for _ in range(len(loader.dataset.categories))]

    for data in loader:
        data = data.to(device)
        pred = model(data).argmax(dim=1)

        i, u = i_and_u(pred, data.y, loader.dataset.num_classes, data.batch)
        iou = i.cpu().to(torch.float) / u.cpu().to(torch.float)
        iou[torch.isnan(iou)] = 1

        # Find and filter the relevant classes for each category.
        for iou, category in zip(iou.unbind(), data.category.unbind()):
            ious[category.item()].append(iou[y_mask[category]])

    # Compute mean IoU.
    ious = [torch.stack(iou).mean(0).mean(0) for iou in ious]
    return torch.tensor(ious).mean().item() 
Example #24
Source File: trainer.py    From scVI with MIT License
def check_training_status(self):
        """Checks if loss is admissible.

        If not, training is stopped after max_nans consecutive inadmissible loss
        loss corresponds to the training loss of the model.

        `max_nans` is the maximum number of consecutive NaNs after which a ValueError will be
        """
        loss_is_nan = torch.isnan(self.current_loss).item()
        if loss_is_nan:
            logger.warning("Model training loss was NaN")
            self.nan_counter += 1
            self.previous_loss_was_nan = True
        else:
            self.nan_counter = 0
            self.previous_loss_was_nan = False

        if self.nan_counter >= self.max_nans:
            raise ValueError(
                "Loss was NaN {} consecutive times: the model is not training properly. "
                "Consider using a lower learning rate.".format(self.max_nans)
            ) 
Example #25
Source File: local_optimizers.py    From rlgraph with Apache License 2.0
def _graph_fn_step(self, variables, loss, loss_per_item, time_percentage, *inputs):
        # TODO: n.b. PyTorch does not go through the API functions here because its optimization semantics differ.
        if get_backend() == "tf":
            grads_and_vars = self._graph_fn_calculate_gradients(variables, loss, time_percentage)
            step_op = self._graph_fn_apply_gradients(grads_and_vars)
            return step_op
        elif get_backend() == "pytorch":
            # Instantiate optimizer with variables.
            if self.optimizer_obj is None:
                # self.optimizer is a lambda creating the respective optimizer
                # with params pre-filled.
                parameters = variables.values()
                self.optimizer_obj = self.optimizer(parameters)
            # Reset gradients.
            self.optimizer_obj.zero_grad()
            if not torch.isnan(loss):
                loss.backward()
            # Adjust learning rate via time-dependent parameter if not a constant.
            if not isinstance(self.learning_rate, Constant):
                lr = self.learning_rate.get(time_percentage)
                for param_group in self.optimizer_obj.param_groups:
                    param_group["lr"] = lr
            # Do the optimizer step.
            return self.optimizer_obj.step() 
Example #26
Source File: test_utils.py    From few-shot with MIT License
def test_no_nans_on_zero_vectors(self):
        """Cosine distance calculation involves a divide-through by vector magnitude which
        can divide by zeros to occur.
        """
        # Create some dummy data with easily verifiable distances
        q = 1  # 1 query per class
        k = 3  # 3 way classification
        d = 2  # embedding dimension of two
        query = torch.zeros([q * k, d], dtype=torch.double)
        query[0] = torch.Tensor([0, 0])  # First query sample is all zeros
        query[1] = torch.Tensor([0, 1])
        query[2] = torch.Tensor([1, 1])
        support = torch.zeros([k, d], dtype=torch.double)
        support[0] = torch.Tensor([1, 1])
        support[1] = torch.Tensor([-1, -1])
        support[2] = torch.Tensor([0, 0])  # Third support sample is all zeros

        distances = pairwise_distances(query, support, 'cosine')

        self.assertTrue(torch.isnan(distances).sum() == 0, 'Cosine distances between 0-vectors should not be nan') 
Example #27
Source File: torchtest.py    From torchtest with GNU General Public License v3.0
def assert_never_nan(tensor):
  """Make sure there are no NaN values in the given tensor.

  Parameters
  ----------
  tensor : torch.Tensor
    input tensor

  Raises
  ------
  NaNTensorException
    If one or more NaN values occur in the given tensor
  """

  try:
    assert not torch.isnan(tensor).byte().any()
  except AssertionError:
    raise NaNTensorException("There was a NaN value in tensor") 
Example #28
Source File: atss_head.py    From mmdetection with Apache License 2.0
def centerness_target(self, anchors, bbox_targets):
        # only compute centerness targets for positive anchors, otherwise NaNs may appear
        gts = self.bbox_coder.decode(anchors, bbox_targets)
        anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
        anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
        l_ = anchors_cx - gts[:, 0]
        t_ = anchors_cy - gts[:, 1]
        r_ = gts[:, 2] - anchors_cx
        b_ = gts[:, 3] - anchors_cy

        left_right = torch.stack([l_, r_], dim=1)
        top_bottom = torch.stack([t_, b_], dim=1)
        centerness = torch.sqrt(
            (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
            (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
        assert not torch.isnan(centerness).any()
        return centerness 
Example #29
Source File: seal_data.py    From funsor with Apache License 2.0
def prepare_fake(sizes, random_effects):
    """
    Generate fake datasets of varying size. Used for evaluating computational performance.
    """
    obs_keys = ["step", "angle", "omega"]
    # data format for z1, z2:
    # single tensor with shape (individual, group, time, coords)
    observations = torch.randn((
        sizes["individual"], sizes["group"], sizes["timesteps"], len(obs_keys))).abs()
    observations[torch.isnan(observations)] = float("-inf")
    observations[observations >= 1.] = 0.5

    # make masks
    # mask_i should mask out whole individuals; it applies at all timesteps
    mask_i = (observations > float("-inf")).any(dim=-1).any(dim=-1)  # time nonempty

    # mask_t handles padding for time series of different length
    mask_t = (observations > float("-inf")).all(dim=-1)   # include non-inf

    # temporary hack to avoid zero-inflation issues
    # observations[observations == 0.] = MISSING
    observations[(observations == 0.) | (observations == float("-inf"))] = MISSING
    assert not torch.isnan(observations).any()

    # observations = observations[..., 5:11, :]  # truncate for testing

    config = {
        "MISSING": MISSING,
        "sizes": sizes.copy(),
        "group": {"random": random_effects["group"], "fixed": None},
        "individual": {"random": random_effects["individual"], "fixed": None, "mask": mask_i},
        "timestep": {"random": None, "fixed": None, "mask": mask_t},
        "observations": {
            "step": observations[..., 0],
            "angle": observations[..., 1],
            "omega": observations[..., 2],
        },
    }

    return config 
Example #30
Source File: callback.py    From fastNLP with Apache License 2.0
def on_backward_begin(self, loss):
        if self.find:
            if torch.isnan(loss) or self.stop is True:
                self.stop = True
                return
            loss_val = loss.detach().mean().item()
            self.loss_history.append(loss_val)
            self.smooth_value.add_value(loss_val)
            if self.best_loss == 0. or self.smooth_value.smooth < self.best_loss:
                self.best_loss = self.smooth_value.smooth
                self.best_lr = self.opt.param_groups[0]["lr"]