Python torch.isfinite() Examples

The following are 30 code examples of torch.isfinite(), drawn from open source projects and listed with their source file, project, and license.
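torch.isfinite(input) returns a boolean tensor of the same shape as input that is True wherever the element is finite, i.e. neither inf, -inf, nor NaN. A minimal sketch of the behavior the examples below rely on:

import torch

x = torch.tensor([1.0, float('inf'), 2.0, float('-inf'), float('nan')])
print(torch.isfinite(x))        # tensor([ True, False,  True, False, False])
print(torch.isfinite(x).all())  # tensor(False) -- the usual divergence check
print(x[torch.isfinite(x)])     # tensor([1., 2.]) -- filter out non-finite values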
Example #1
Source File: smc2.py    From pyfilter with MIT License
def _update(self, y):
        # ===== Save data ===== #
        self._y.append(y)

        # ===== Perform a filtering move ===== #
        _, ll = self.filter.filter(y)
        self._w_rec += ll

        # ===== Calculate effective number of samples ===== #
        ess = get_ess(self._w_rec)
        self._logged_ess.append(ess)

        # ===== Rejuvenate if there are too few samples ===== #
        if ess < self._threshold or (~isfinite(self._w_rec)).any():
            self.rejuvenate()
            self._w_rec[:] = 0.

        return self 
Example #2
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_geodesic_segment_length_property(a, b, manifold, dtype):
    extra_dims = len(a.shape)
    segments = 12
    t = torch.linspace(0, 1, segments + 1, dtype=dtype).view(
        (segments + 1,) + (1,) * extra_dims
    )
    gamma_ab_t = manifold.geodesic(t, a, b)
    gamma_ab_t0 = gamma_ab_t[:-1]
    gamma_ab_t1 = gamma_ab_t[1:]
    dist_ab_t0mt1 = manifold.dist(gamma_ab_t0, gamma_ab_t1, keepdim=True)
    speed = manifold.dist(a, b, keepdim=True).unsqueeze(0).expand_as(dist_ab_t0mt1)
    # we have exactly 12 line segments
    tolerance = {
        torch.float32: dict(rtol=1e-5, atol=5e-3),
        torch.float64: dict(rtol=1e-5, atol=5e-3),
    }
    length = speed / segments
    np.testing.assert_allclose(
        dist_ab_t0mt1.detach(), length.detach(), **tolerance[dtype]
    )
    (length + dist_ab_t0mt1).sum().backward()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(b.grad).all()
    assert torch.isfinite(manifold.k.grad).all() 
Example #3
Source File: training_tricks.py    From pytorch-lightning with Apache License 2.0
def detect_nan_tensors(self, loss: Tensor) -> None:
        model = self.get_model()

        # check if loss is nan
        if not torch.isfinite(loss).all():
            raise ValueError(
                'The loss returned in `training_step` is nan or inf.'
            )
        # check if a network weight is nan
        for name, param in model.named_parameters():
            if not torch.isfinite(param).all():
                self.print_nan_gradients()
                raise ValueError(
                    f'Detected nan and/or inf values in `{name}`.'
                    ' Check your forward pass for numerically unstable operations.'
                ) 
Example #4
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_scalar_multiplication_distributive(a, r1, r2, manifold, dtype):
    res = manifold.mobius_scalar_mul(r1 + r2, a)
    res1 = manifold.mobius_add(
        manifold.mobius_scalar_mul(r1, a), manifold.mobius_scalar_mul(r2, a),
    )
    res2 = manifold.mobius_add(
        # same distributive identity with the addends swapped
        manifold.mobius_scalar_mul(r2, a), manifold.mobius_scalar_mul(r1, a),
    )
    tolerance = {
        torch.float32: dict(atol=5e-6, rtol=1e-4),
        torch.float64: dict(atol=1e-7, rtol=1e-4),
    }
    np.testing.assert_allclose(res1.detach(), res.detach(), **tolerance[dtype])
    np.testing.assert_allclose(res2.detach(), res.detach(), **tolerance[dtype])
    res.sum().backward()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(r1.grad).all()
    assert torch.isfinite(r2.grad).all()
    assert torch.isfinite(manifold.k.grad).all() 
Example #5
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_n_additions_via_scalar_multiplication(n, a, dtype, negative, manifold, strict):
    n = torch.as_tensor(n, dtype=a.dtype).requires_grad_()
    y = torch.zeros_like(a)
    for _ in range(int(n.item())):
        y = manifold.mobius_add(a, y)
    ny = manifold.mobius_scalar_mul(n, a)
    if negative:
        tolerance = {
            torch.float32: dict(atol=4e-5, rtol=1e-3),
            torch.float64: dict(atol=1e-5, rtol=1e-3),
        }
    else:
        tolerance = {
            torch.float32: dict(atol=2e-6, rtol=1e-3),
            torch.float64: dict(atol=1e-5, rtol=1e-3),
        }
    tolerant_allclose_check(y, ny, strict=strict, **tolerance[dtype])
    ny.sum().backward()
    assert torch.isfinite(n.grad).all()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(manifold.k.grad).all() 
Example #6
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_geodesic_segement_unit_property(a, b, manifold, dtype):
    extra_dims = len(a.shape)
    segments = 12
    t = torch.linspace(0, 1, segments + 1, dtype=dtype).view(
        (segments + 1,) + (1,) * extra_dims
    )
    gamma_ab_t = manifold.geodesic_unit(t, a, b)
    gamma_ab_t0 = gamma_ab_t[:1]
    gamma_ab_t1 = gamma_ab_t
    dist_ab_t0mt1 = manifold.dist(gamma_ab_t0, gamma_ab_t1, keepdim=True)
    true_distance_travelled = t.expand_as(dist_ab_t0mt1)
    # we have exactly 12 line segments
    tolerance = {
        torch.float32: dict(atol=2e-4, rtol=5e-5),
        torch.float64: dict(atol=1e-10),
    }
    np.testing.assert_allclose(
        dist_ab_t0mt1.detach(), true_distance_travelled.detach(), **tolerance[dtype]
    )
    (true_distance_travelled + dist_ab_t0mt1).sum().backward()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(b.grad).all()
    assert torch.isfinite(manifold.k.grad).all() 
Example #7
Source File: test_trainer.py    From pytorch-lightning with Apache License 2.0
def test_nan_params_detection(tmpdir):

    class CurrentModel(EvalModelTemplate):
        test_batch_nan = 8

        def on_after_backward(self):
            if self.global_step == self.test_batch_nan:
                # simulate parameter that became nan
                torch.nn.init.constant_(self.c_d1.bias, math.nan)

    model = CurrentModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=(model.test_batch_nan + 1),
        terminate_on_nan=True,
    )

    with pytest.raises(ValueError, match=r'.*Detected nan and/or inf values in `c_d1.bias`.*'):
        trainer.fit(model)
    # fit() raises at the nan step, so the step counter should equal test_batch_nan
    assert trainer.global_step == model.test_batch_nan

    # after aborting the training loop, model still has nan-valued params
    params = torch.cat([param.view(-1) for param in model.parameters()])
    assert not torch.isfinite(params).all() 
Example #8
Source File: test_model_e2e.py    From detectron2 with Apache License 2.0
def test_inf_nan_data(self):
        self.model.eval()
        self.model.score_threshold = -999999999
        for tensor in [self._inf_tensor, self._nan_tensor]:
            images = ImageList(tensor(1, 3, 512, 512), [(510, 510)])
            features = [
                tensor(1, 256, 128, 128),
                tensor(1, 256, 64, 64),
                tensor(1, 256, 32, 32),
                tensor(1, 256, 16, 16),
                tensor(1, 256, 8, 8),
            ]
            anchors = self.model.anchor_generator(features)
            _, pred_anchor_deltas = self.model.head(features)
            HWAs = [np.prod(x.shape[-3:]) // 4 for x in pred_anchor_deltas]

            pred_logits = [tensor(1, HWA, self.model.num_classes) for HWA in HWAs]
            pred_anchor_deltas = [tensor(1, HWA, 4) for HWA in HWAs]
            det = self.model.inference(anchors, pred_logits, pred_anchor_deltas, images.image_sizes)
            # all predictions (if any) are infinite or nan
            if len(det[0]):
                self.assertTrue(torch.isfinite(det[0].pred_boxes.tensor).sum() == 0) 
Example #9
Source File: base_test_case.py    From gpytorch with MIT License
def assertAllClose(self, tensor1, tensor2, rtol=1e-4, atol=1e-5, equal_nan=False):
        if not tensor1.shape == tensor2.shape:
            raise ValueError(f"tensor1 ({tensor1.shape}) and tensor2 ({tensor2.shape}) do not have the same shape.")

        if torch.allclose(tensor1, tensor2, rtol=rtol, atol=atol, equal_nan=equal_nan):
            return True

        if not equal_nan:
            # a tensor that contains NaNs is not equal to itself
            if not torch.equal(tensor1, tensor1):
                raise AssertionError(f"tensor1 ({tensor1.shape}) contains NaNs")
            if not torch.equal(tensor2, tensor2):
                raise AssertionError(f"tensor2 ({tensor2.shape}) contains NaNs")

        rtol_diff = (torch.abs(tensor1 - tensor2) / torch.abs(tensor2)).view(-1)
        rtol_diff = rtol_diff[torch.isfinite(rtol_diff)]
        rtol_max = rtol_diff.max().item()

        atol_diff = (torch.abs(tensor1 - tensor2) - torch.abs(tensor2).mul(rtol)).view(-1)
        atol_diff = atol_diff[torch.isfinite(atol_diff)]
        atol_max = atol_diff.max().item()

        raise AssertionError(
            f"tensor1 ({tensor1.shape}) and tensor2 ({tensor2.shape}) are not close enough. \n"
            f"max rtol: {rtol_max:0.8f}\t\tmax atol: {atol_max:0.8f}"
        ) 
Example #10
Source File: terminate_on_nan.py    From LaSO with BSD 3-Clause "New" or "Revised" License
def __call__(self, engine):
        output = self._output_transform(engine.state.output)

        def raise_error(x):

            if isinstance(x, numbers.Number):
                x = torch.tensor(x)

            if isinstance(x, torch.Tensor) and not bool(torch.isfinite(x).all()):
                raise RuntimeError("Infinite or NaN tensor found.")

        try:
            apply_to_type(output, (numbers.Number, torch.Tensor), raise_error)
        except RuntimeError:
            self._logger.warning("{}: Output '{}' contains NaN or Inf. Stop training"
                                 .format(self.__class__.__name__, output))
            engine.terminate() 
Example #11
Source File: pytorch_transformer_wrapper_test.py    From allennlp with Apache License 2.0
def test_positional_embeddings(positional_encoding: Optional[str]):
    # All sizes are prime, making them easy to find during debugging.
    batch_size = 7
    max_seq_len = 101
    n_head = 5
    dims = 11 * n_head
    transformer = PytorchTransformer(
        dims, 3, positional_encoding=positional_encoding, num_attention_heads=n_head
    )
    transformer.eval()

    with torch.no_grad():
        inputs = torch.randn(batch_size, max_seq_len, dims)
        mask = torch.ones(batch_size, max_seq_len, dtype=torch.bool)
        for b in range(batch_size):
            mask[b, max_seq_len - b :] = False

        assert not torch.isnan(inputs).any()
        assert torch.isfinite(inputs).all()
        outputs = transformer(inputs, mask)
        assert outputs.size() == inputs.size()
        assert not torch.isnan(outputs).any()
        assert torch.isfinite(outputs).all() 
Example #12
Source File: optimizers.py    From Tagger with BSD 3-Clause "New" or "Revised" License
def apply_gradients(self, grads_and_vars):
        self._iterations += 1
        grads, var_list = list(zip(*grads_and_vars))
        new_grads = []

        if self._summaries:
            summary.scalar("optimizer/scale", self._scale,
                           utils.get_global_step())

        for grad in grads:
            if grad is None:
                new_grads.append(None)
                continue

            norm = grad.data.norm()

            if not torch.isfinite(norm):
                self._update_if_not_finite_grads()
                return
            else:
                # Rescale gradients
                new_grads.append(grad.data.float().mul_(1.0 / self._scale))

        self._update_if_finite_grads()
        self._optimizer.apply_gradients(zip(new_grads, var_list)) 
Example #13
Source File: torchtest.py    From torchtest with GNU General Public License v3.0
def assert_never_inf(tensor):
  """Make sure there are no Inf values in the given tensor.

  Parameters
  ----------
  tensor : torch.tensor
    input tensor 

  Raises
  ------
  InfTensorException
    If one or more Inf values occur in the given tensor
  """

  try:
    assert torch.isfinite(tensor).all()  # every element must be finite
  except AssertionError:
    raise InfTensorException("There was an Inf value in tensor") 
Example #14
Source File: terminate_on_nan.py    From ignite with BSD 3-Clause "New" or "Revised" License
def __call__(self, engine: Engine) -> None:
        output = self._output_transform(engine.state.output)

        def raise_error(x: Union[numbers.Number, torch.Tensor]) -> None:

            if isinstance(x, numbers.Number):
                x = torch.tensor(x)

            if isinstance(x, torch.Tensor) and not bool(torch.isfinite(x).all()):
                raise RuntimeError("Infinite or NaN tensor found.")

        try:
            apply_to_type(output, (numbers.Number, torch.Tensor), raise_error)
        except RuntimeError:
            self.logger.warning(
                "{}: Output '{}' contains NaN or Inf. Stop training".format(self.__class__.__name__, output)
            )
            engine.terminate() 
Example #15
Source File: fit_harn.py    From netharn with Apache License 2.0
def _check_gradients(harn):
        """
        Checks that the accumulated gradients are all finite.

        Raises:
            TrainingDiverged: if checks fail

        Example:
            harn = ...
            all_grads = harn._check_gradients()
            ub.map_vals(torch.norm, all_grads)
        """
        all_grads = ub.odict()
        for name, parameter in harn.model.named_parameters():
            if parameter.grad is not None:
                all_grads[name] = parameter.grad.data
        for key, value in all_grads.items():
            if torch.any(~torch.isfinite(value)):
                raise TrainingDiverged(
                    'NON-FINITE GRAD {}.grad = {!r}'.format(key, value))
        return all_grads 
Example #16
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_weighted_midpoint_weighted(_k, lincomb):
    manifold = stereographic.Stereographic(_k, learnable=True)
    a = manifold.random(2, 3, 10).requires_grad_(True)
    mid = manifold.weighted_midpoint(
        a, reducedim=[0], lincomb=lincomb, weights=torch.rand_like(a[..., 0])
    )
    assert mid.shape == a.shape[-2:]
    assert torch.isfinite(mid).all()
    mid.sum().backward()
    assert torch.isfinite(a.grad).all()
    assert not torch.isclose(manifold.k.grad, manifold.k.new_zeros(())) 
Example #17
Source File: fit_harn.py    From netharn with Apache License 2.0
def _check_divergence(harn):
        """
        Checks that the model weights are all finite

        Raises:
            TrainingDiverged: if checks fail
        """
        # Eventually we may need to remove
        # num_batches_tracked once 0.5.0 lands
        state = harn.model.module.state_dict()
        sums = ub.map_vals(torch.sum, state)
        weight_sum = sum(s.float() for s in sums.values())
        if 'torch' in str(type(weight_sum)):  # torch 0.3 / 0.4 / 1.0 compat
            weight_sum = weight_sum.cpu().numpy()
        try:
            weight_sum = weight_sum.cpu().numpy()
        except AttributeError:
            pass
        if not np.isfinite(weight_sum):
            try:
                flags = [not np.isfinite(s.cpu().numpy()) for s in sums.values()]
            except AttributeError:
                flags = [not np.isfinite(s) for s in sums.values()]
            bad_layers = ub.odict(zip(
                ub.compress(sums.keys(), flags),
                ub.compress(sums.values(), flags)
            ))
            harn.error('NON-FINITE WEIGHTS: {}'.format(ub.repr2(bad_layers, nl=1)))
            raise TrainingDiverged(
                'NON-FINITE WEIGHTS weights.sum() = {!r}'.format(weight_sum)) 
Example #18
Source File: fit_harn.py    From netharn with Apache License 2.0
def _check_loss(harn, loss_value):
        """
        Checks that the loss is not too large

        Raises:
            TrainingDiverged: if checks fail
        """
        if not np.isfinite(loss_value):
            harn.warn('WARNING: got inf loss, setting loss to a large value')
            loss_value = harn.preferences['large_loss'] * 10

        if harn.current_tag == 'train':
            if loss_value > harn.preferences['large_loss']:
                # if the loss is getting large, check if the weights are ok
                harn._check_divergence() 
Example #19
Source File: dynamic_ll_loss.py    From attn2d with MIT License
def compute_control_loss(self, controls, read_labels, write_labels):
        # controller
        Ts = controls.size(2)
        rmask = torch.isfinite(controls[..., 0])
        wmask = torch.isfinite(controls[..., 1])
        read_loss = torch.sum(read_labels[rmask] * controls[..., 0].float()[rmask])
        write_loss = torch.sum(write_labels[wmask] * controls[..., 1].float()[wmask])
        controlling_loss = - (read_loss + write_loss) / controls.size(-1)
        return controlling_loss 
Example #20
Source File: broyden.py    From deq with MIT License
def _safe_norm(v):
    if not torch.isfinite(v).all():
        return np.inf
    return torch.norm(v) 
Example #21
Source File: simultrans_dynamic_loss.py    From attn2d with MIT License
def compute_control_loss(self, RWlogits, read_labels, write_labels):
        # controller
        Ts = RWlogits.size(2)
        rmask = torch.isfinite(RWlogits[..., 0])
        wmask = torch.isfinite(RWlogits[..., 1])
        read_loss = torch.sum(read_labels[rmask] * RWlogits[..., 0].float()[rmask])
        write_loss = torch.sum(write_labels[wmask] * RWlogits[..., 1].float()[wmask])
        controlling_loss = - (read_loss + write_loss) / RWlogits.size(-1)
        accuracy = (write_labels[wmask].eq(1) == RWlogits[..., 1].float()[wmask].exp().gt(0.5)).float()
        positions = accuracy.numel()
        accuracy = accuracy.sum().long()
        return controlling_loss, accuracy, positions 
Example #22
Source File: hmm_loss.py    From attn2d with MIT License
def compute_control_loss(self, controls, read_labels, write_labels):
        # controller
        print('Writing labels:', write_labels[:,0].data)
        print('Read labels:', read_labels[:,0].data)
        if self.discretize:
            write_labels = torch.gt(write_labels, read_labels).float()
            read_labels = 1 - write_labels
        rmask = torch.isfinite(controls[..., 0])
        wmask = torch.isfinite(controls[..., 1])
        read_loss = torch.sum(read_labels[rmask] * controls[..., 0].float()[rmask])
        write_loss = torch.sum(write_labels[wmask] * controls[..., 1].float()[wmask])
        controlling_loss = - (read_loss + write_loss) / controls.size(-1)
        return controlling_loss 
Example #23
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_forward_isfinite():
    torch.set_grad_enabled(False)

    class IsFinite1(Module):
        def forward(self, *args):
            return torch.isfinite(args[0])

    input_data = torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]).float()
    verify_model(IsFinite1().float().eval(), input_data=input_data) 
Example #24
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_weighted_midpoint_reduce_dim(_k, lincomb):
    manifold = stereographic.Stereographic(_k, learnable=True)
    a = manifold.random(2, 3, 10).requires_grad_(True)
    mid = manifold.weighted_midpoint(a, reducedim=[0], lincomb=lincomb)
    assert mid.shape == a.shape[-2:]
    assert torch.isfinite(mid).all()
    mid.sum().backward()
    assert torch.isfinite(a.grad).all()
    assert not torch.isclose(manifold.k.grad, manifold.k.new_zeros(())) 
Example #25
Source File: ness.py    From pyfilter with MIT License
def do_update(self):
        return (any(self._logged_ess) and self._logged_ess[-1] < self._threshold) or (~isfinite(self._w_rec)).any() 
Example #26
Source File: train.py    From yolact with MIT License
def no_inf_mean(x: torch.Tensor):
    """
    Computes the mean of a vector, throwing out all inf values.
    If there are no non-inf values, this will return inf (i.e., just the normal mean).
    """

    no_inf = [a for a in x if torch.isfinite(a)]

    if len(no_inf) > 0:
        return sum(no_inf) / len(no_inf)
    else:
        return x.mean() 
Example #27
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_weighted_midpoint_zero(_k, lincomb):
    manifold = stereographic.Stereographic(_k, learnable=True)
    a = manifold.random(2, 3, 10).requires_grad_(True)
    mid = manifold.weighted_midpoint(
        a, reducedim=[0], lincomb=lincomb, weights=torch.zeros_like(a[..., 0])
    )
    assert mid.shape == a.shape[-2:]
    assert torch.allclose(mid, torch.zeros_like(mid))
    mid.sum().backward()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(manifold.k.grad).all() 
Example #28
Source File: train_loop.py    From fast-reid with Apache License 2.0
def _detect_anomaly(self, losses, loss_dict):
        if not torch.isfinite(losses).all():
            raise FloatingPointError(
                "Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format(
                    self.iter, loss_dict
                )
            ) 
Example #29
Source File: heuristics_gpu.py    From baal with Apache License 2.0
def get_uncertainties(self, predictions):
        """Get the uncertainties"""
        scores = self.compute_score(predictions)
        scores = self.reduction(scores)
        scores[~torch.isfinite(scores)] = 0.0 if self.reversed else 10000
        return scores 
Example #30
Source File: utils.py    From torch-kalman with MIT License
def tobit_probs(mean: Tensor,
                cov: Tensor,
                lower: Optional[Tensor] = None,
                upper: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
    # CDF not well behaved at tails, truncate
    clamp = lambda z: torch.clamp(z, -5., 5.)

    if upper is None:
        upper = torch.empty_like(mean)
        upper[:] = float('inf')
    if lower is None:
        lower = torch.empty_like(mean)
        lower[:] = float('-inf')

    # the covariance diagonal is the variance; take the square root for the standard deviation
    std = torch.diagonal(cov, dim1=-2, dim2=-1).sqrt()
    probs_up = torch.zeros_like(mean)
    is_cens_up = torch.isfinite(upper)
    upper_z = (upper[is_cens_up] - mean[is_cens_up]) / std[is_cens_up]
    # std_normal: a standard normal distribution defined elsewhere in the source file
    probs_up[is_cens_up] = 1. - std_normal.cdf(clamp(upper_z))

    probs_lo = torch.zeros_like(mean)
    is_cens_lo = torch.isfinite(lower)
    lower_z = (lower[is_cens_lo] - mean[is_cens_lo]) / std[is_cens_lo]
    probs_lo[is_cens_lo] = std_normal.cdf(clamp(lower_z))

    return probs_lo, probs_up