Python torch.double() Examples

The following are 30 code examples of torch.double(), collected from open-source projects. The source file, project, and license for each example are noted above it. You may also want to check out the other available functions and classes of the torch module.
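
For orientation, a minimal sketch of what torch.double() and dtype=torch.double do: both produce float64 tensors.

import torch

x = torch.ones(3)                       # default dtype: torch.float32
y = x.double()                          # same values, cast to torch.float64
z = torch.zeros(3, dtype=torch.double)  # allocated directly as float64
print(y.dtype, z.dtype)                 # torch.float64 torch.float64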
Example #1
Source File: test_aev.py    From torchani with MIT License
def testPBCConnersSeeEachOther(self):
        species = torch.tensor([[0, 0]])
        cell = torch.eye(3, dtype=torch.double) * 10
        pbc = torch.ones(3, dtype=torch.bool)
        allshifts = torchani.aev.compute_shifts(cell, pbc, 1)

        xyz1 = torch.tensor([0.1, 0.1, 0.1])
        xyz2s = [
            torch.tensor([9.9, 0.0, 0.0]),
            torch.tensor([0.0, 9.9, 0.0]),
            torch.tensor([0.0, 0.0, 9.9]),
            torch.tensor([9.9, 9.9, 0.0]),
            torch.tensor([0.0, 9.9, 9.9]),
            torch.tensor([9.9, 0.0, 9.9]),
            torch.tensor([9.9, 9.9, 9.9]),
        ]

        for xyz2 in xyz2s:
            coordinates = torch.stack([xyz1, xyz2]).to(torch.double).unsqueeze(0)
            atom_index12, _ = torchani.aev.neighbor_pairs(species == -1, coordinates, cell, allshifts, 1)
            atom_index1, atom_index2 = atom_index12.unbind(0)
            self.assertEqual(atom_index1.tolist(), [0])
            self.assertEqual(atom_index2.tolist(), [1]) 
Example #2
Source File: train.py    From examples with BSD 3-Clause "New" or "Revised" License
def forward(self, input, future = 0):
        outputs = []
        h_t = torch.zeros(input.size(0), 51, dtype=torch.double)
        c_t = torch.zeros(input.size(0), 51, dtype=torch.double)
        h_t2 = torch.zeros(input.size(0), 51, dtype=torch.double)
        c_t2 = torch.zeros(input.size(0), 51, dtype=torch.double)

        for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
            h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        for i in range(future):  # if we should predict the future
            h_t, c_t = self.lstm1(output, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        outputs = torch.stack(outputs, 1).squeeze(2)
        return outputs 
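
The module this forward belongs to is not shown; the layer sizes can be inferred from the code above (input chunks of width 1, hidden size 51, scalar output). A minimal sketch of a compatible __init__, reconstructed under those assumptions (the class name is assumed):

import torch
import torch.nn as nn

class Sequence(nn.Module):  # name assumed; sizes inferred from the forward above
    def __init__(self):
        super().__init__()
        self.lstm1 = nn.LSTMCell(1, 51)   # each input_t has shape (batch, 1)
        self.lstm2 = nn.LSTMCell(51, 51)
        self.linear = nn.Linear(51, 1)    # so squeeze(2) yields (batch, seq_len)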
Example #3
Source File: test_multitask_multivariate_normal.py    From gpytorch with MIT License
def test_log_prob(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.randn(4, 3, device=device, dtype=dtype)
            var = torch.randn(12, device=device, dtype=dtype).abs_()
            values = mean + 0.5
            diffs = (values - mean).view(-1)

            res = MultitaskMultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
            actual = -0.5 * (math.log(math.pi * 2) * 12 + var.log().sum() + (diffs / var * diffs).sum())
            self.assertLess((res - actual).div(res).abs().item(), 1e-2)

            mean = torch.randn(3, 4, 3, device=device, dtype=dtype)
            var = torch.randn(3, 12, device=device, dtype=dtype).abs_()
            values = mean + 0.5
            diffs = (values - mean).view(3, -1)

            res = MultitaskMultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
            actual = -0.5 * (math.log(math.pi * 2) * 12 + var.log().sum(-1) + (diffs / var * diffs).sum(-1))
            self.assertLess((res - actual).div(res).abs().norm(), 1e-2) 
Example #4
Source File: lazy_tensor.py    From gpytorch with MIT License
def double(self, device_id=None):
        """
        This method operates identically to :func:`torch.Tensor.double`.
        """
        new_args = []
        new_kwargs = {}
        for arg in self._args:
            if hasattr(arg, "double"):
                new_args.append(arg.double())
            else:
                new_args.append(arg)
        for name, val in self._kwargs.items():
            if hasattr(val, "double"):
                new_kwargs[name] = val.double()
            else:
                new_kwargs[name] = val
        return self.__class__(*new_args, **new_kwargs) 
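
A hedged usage sketch of the method above: calling double() rebuilds the lazy tensor from double-precision arguments. It assumes the gpytorch version these examples come from, where lazy tensors live in gpytorch.lazy and expose evaluate():

import torch
from gpytorch.lazy import DiagLazyTensor

diag = torch.rand(4)               # float32 diagonal
lt = DiagLazyTensor(diag)
lt_double = lt.double()            # same class, reconstructed with diag.double()
print(lt_double.evaluate().dtype)  # torch.float64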
Example #5
Source File: test_multivariate_normal.py    From gpytorch with MIT License
def test_log_prob(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.randn(4, device=device, dtype=dtype)
            var = torch.randn(4, device=device, dtype=dtype).abs_()
            values = torch.randn(4, device=device, dtype=dtype)

            res = MultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
            actual = TMultivariateNormal(mean, torch.eye(4, device=device, dtype=dtype) * var).log_prob(values)
            self.assertLess((res - actual).div(res).abs().item(), 1e-2)

            mean = torch.randn(3, 4, device=device, dtype=dtype)
            var = torch.randn(3, 4, device=device, dtype=dtype).abs_()
            values = torch.randn(3, 4, device=device, dtype=dtype)

            res = MultivariateNormal(mean, DiagLazyTensor(var)).log_prob(values)
            actual = TMultivariateNormal(
                mean, var.unsqueeze(-1) * torch.eye(4, device=device, dtype=dtype).repeat(3, 1, 1)
            ).log_prob(values)
            self.assertLess((res - actual).div(res).abs().norm(), 1e-2) 
Example #6
Source File: trainer.py    From madminer with MIT License
def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):
        self._init_timer()
        self._timer(start="ALL")
        self._timer(start="initialize model")
        self.model = model
        self.run_on_gpu = run_on_gpu and torch.cuda.is_available()
        self.device = torch.device("cuda" if self.run_on_gpu else "cpu")
        self.dtype = torch.double if double_precision else torch.float
        self.n_workers = n_workers

        self.model = self.model.to(self.device, self.dtype)

        logger.info(
            "Training on %s with %s precision",
            "GPU" if self.run_on_gpu else "CPU",
            "double" if double_precision else "single",
        )

        self._timer(stop="initialize model")
        self._timer(stop="ALL") 
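
The dtype/device selection above is a reusable pattern. A minimal standalone sketch (the Linear model is a stand-in for any nn.Module):

import torch

double_precision = True
run_on_gpu = torch.cuda.is_available()
device = torch.device("cuda" if run_on_gpu else "cpu")
dtype = torch.double if double_precision else torch.float
model = torch.nn.Linear(8, 1).to(device, dtype)  # moves parameters and casts them in one call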
Example #7
Source File: experience_replay_tests.py    From cherry with Apache License 2.0
def test_replay_myattr(self):
        standard_replay = self.replay
        vector = np.random.rand(VECTOR_SIZE)

        # a random tensor to be stuffed in
        test_tensor = th.randn(3, 3, dtype=th.double)

        # initialization, stuff just tensors in
        # and the results type should still be tensor
        for i in range(NUM_SAMPLES):
            standard_replay.append(vector,
                                   vector,
                                   i,
                                   vector,
                                   False,
                                   test=test_tensor)
        self.assertTrue(isinstance(standard_replay.test(), th.Tensor)) 
Example #8
Source File: gan.py    From torchsupport with MIT License
def _mix_on_path(real, fake):
  result = None
  if isinstance(real, (list, tuple)):
    result = [
      _mix_on_path(real_part, fake_part)
      for real_part, fake_part in zip(real, fake)
    ]
  elif isinstance(real, dict):
    result = {
      key: _mix_on_path(real[key], fake[key])
      for key in real
    }
  elif isinstance(real, torch.Tensor):
    if real.dtype in (torch.half, torch.float, torch.double):
      result = _mix_on_path_aux(real, fake)
    else:
      result = random.choice([real, fake])
  else:
    result = random.choice([real, fake])
  return result 
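
_mix_on_path_aux is not shown here; for floating-point tensors it presumably interpolates at a random point on the real-fake path, as in WGAN-GP gradient penalties. A minimal sketch under that assumption:

import torch

def _mix_on_path_aux(real, fake):
    # one interpolation coefficient per batch element, broadcast over remaining dims
    alpha = torch.rand(real.size(0), *([1] * (real.dim() - 1)), device=real.device)
    return alpha * real + (1 - alpha) * fake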
Example #9
Source File: solve_for_calibration_params.py    From pyrobot with MIT License
def reprojection_error(
    t_pts_2d, pts_2d_observed, min_inlier_fraction, inlier_pixel_threshold, mask=None
):
    """Computes re-projection error for observed and projected points."""
    n_pts = t_pts_2d.shape[0]
    err_all = t_pts_2d - pts_2d_observed
    err_all = err_all ** 2
    err_all = err_all.sum(2)
    topk_k = int(4 * n_pts * min_inlier_fraction)
    topk, _ = torch.topk(err_all.view(-1), k=topk_k, largest=False)
    in_px_thresh_pyt = torch.from_numpy(np.array([inlier_pixel_threshold ** 2]))
    in_px_thresh_pyt = in_px_thresh_pyt.double()
    topk = torch.max(topk[-1], in_px_thresh_pyt)
    err_all_robust = torch.min(topk, err_all)

    if mask is not None:
        err_all_robust = err_all_robust * mask
        err = err_all_robust.sum() / mask.sum()
    else:
        err = err_all_robust.mean()
    err_all = torch.sqrt(err_all)
    return err, err_all, topk 
Example #10
Source File: test_utils.py    From few-shot with MIT License
def test_no_nans_on_zero_vectors(self):
        """Cosine distance calculation involves a divide-through by vector magnitude which
        can divide by zeros to occur.
        """
        # Create some dummy data with easily verifiable distances
        q = 1  # 1 query per class
        k = 3  # 3 way classification
        d = 2  # embedding dimension of two
        query = torch.zeros([q * k, d], dtype=torch.double)
        query[0] = torch.Tensor([0, 0])  # First query sample is all zeros
        query[1] = torch.Tensor([0, 1])
        query[2] = torch.Tensor([1, 1])
        support = torch.zeros([k, d], dtype=torch.double)
        support[0] = torch.Tensor([1, 1])
        support[1] = torch.Tensor([-1, -1])
        support[2] = torch.Tensor([0, 0])  # Third support sample is all zeros

        distances = pairwise_distances(query, support, 'cosine')

        self.assertTrue(torch.isnan(distances).sum() == 0, 'Cosine distances between 0-vectors should not be nan') 
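
The guard this test expects can be implemented by clamping the norms before dividing. A minimal sketch of zero-safe cosine distances (not necessarily the library's actual implementation):

import torch

def cosine_distances(query, support, eps=1e-8):
    # clamp magnitudes away from zero so 0-vectors yield finite distances, not NaN
    q = query / query.norm(dim=1, keepdim=True).clamp_min(eps)
    s = support / support.norm(dim=1, keepdim=True).clamp_min(eps)
    return 1 - q @ s.t()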
Example #11
Source File: test_aev.py    From torchani with MIT License
def testPBCSurfaceSeeEachOther(self):
        cell = torch.eye(3, dtype=torch.double) * 10
        pbc = torch.ones(3, dtype=torch.bool)
        allshifts = torchani.aev.compute_shifts(cell, pbc, 1)
        species = torch.tensor([[0, 0]])

        for i in range(3):
            xyz1 = torch.tensor([5.0, 5.0, 5.0], dtype=torch.double)
            xyz1[i] = 0.1
            xyz2 = xyz1.clone()
            xyz2[i] = 9.9

            coordinates = torch.stack([xyz1, xyz2]).unsqueeze(0)
            atom_index12, _ = torchani.aev.neighbor_pairs(species == -1, coordinates, cell, allshifts, 1)
            atom_index1, atom_index2 = atom_index12.unbind(0)
            self.assertEqual(atom_index1.tolist(), [0])
            self.assertEqual(atom_index2.tolist(), [1]) 
Example #12
Source File: test_aev.py    From torchani with MIT License
def testPBCEdgesSeeEachOther(self):
        cell = torch.eye(3, dtype=torch.double) * 10
        pbc = torch.ones(3, dtype=torch.bool)
        allshifts = torchani.aev.compute_shifts(cell, pbc, 1)
        species = torch.tensor([[0, 0]])

        for i, j in itertools.combinations(range(3), 2):
            xyz1 = torch.tensor([5.0, 5.0, 5.0], dtype=torch.double)
            xyz1[i] = 0.1
            xyz1[j] = 0.1
            for new_i, new_j in [[0.1, 9.9], [9.9, 0.1], [9.9, 9.9]]:
                xyz2 = xyz1.clone()
                xyz2[i] = new_i
                xyz2[j] = new_j

                coordinates = torch.stack([xyz1, xyz2]).unsqueeze(0)
                atom_index12, _ = torchani.aev.neighbor_pairs(species == -1, coordinates, cell, allshifts, 1)
                atom_index1, atom_index2 = atom_index12.unbind(0)
                self.assertEqual(atom_index1.tolist(), [0])
                self.assertEqual(atom_index2.tolist(), [1]) 
Example #13
Source File: solve_for_calibration_params.py    From pyrobot with MIT License
def get_transforms_to_optimize(chain, to_optimize_quat, to_optimize_trans, device):
    """Returns pytorch tensors to optimize along the chain as per
    to_optimize_trans and to_optimize_quat."""
    opt_pyt_trans, opt_pyt_quat = [], []
    for i in range(len(chain) - 1):
        t = None
        q = None
        if chain[i + 1] in to_optimize_trans:
            t = torch.zeros(1, 3, device=device, dtype=torch.double, requires_grad=True)
        if chain[i + 1] in to_optimize_quat:
            qxyz = torch.zeros(
                1, 3, device=device, dtype=torch.double, requires_grad=True
            )
            qw = torch.ones(1, 1, device=device, dtype=torch.double, requires_grad=True)
            q = [qw, qxyz]
        opt_pyt_trans.append(t)
        opt_pyt_quat.append(q)
    return opt_pyt_quat, opt_pyt_trans 
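
A hedged usage sketch for the helper above (the chain names are hypothetical); the returned leaf tensors plug straight into an optimizer:

import torch

chain = ["base", "shoulder", "wrist"]  # hypothetical kinematic chain
opt_quat, opt_trans = get_transforms_to_optimize(
    chain, to_optimize_quat={"wrist"}, to_optimize_trans={"shoulder", "wrist"},
    device=torch.device("cpu"))
params = [t for t in opt_trans if t is not None]
params += [p for q in opt_quat if q is not None for p in q]  # qw and qxyz components
optimizer = torch.optim.Adam(params, lr=1e-3)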
Example #14
Source File: test_aev.py    From torchani with MIT License
def setUp(self):
        self.eps = 1e-9
        cell = ase.geometry.cellpar_to_cell([100, 100, 100 * math.sqrt(2), 90, 45, 90])
        self.cell = torch.tensor(ase.geometry.complete_cell(cell), dtype=torch.double)
        self.inv_cell = torch.inverse(self.cell)
        self.coordinates = torch.tensor([[[0.0, 0.0, 0.0],
                                          [1.0, -0.1, -0.1],
                                          [-0.1, 1.0, -0.1],
                                          [-0.1, -0.1, 1.0],
                                          [-1.0, -1.0, -1.0]]], dtype=torch.double)
        self.species = torch.tensor([[1, 0, 0, 0, 0]])
        self.pbc = torch.ones(3, dtype=torch.bool)
        self.v1, self.v2, self.v3 = self.cell
        self.center_coordinates = self.coordinates + 0.5 * (self.v1 + self.v2 + self.v3)
        ani1x = torchani.models.ANI1x()
        self.aev_computer = ani1x.aev_computer.to(torch.double)
        _, self.aev = self.aev_computer((self.species, self.center_coordinates), cell=self.cell, pbc=self.pbc) 
Example #15
Source File: time_sequence_prediction.py    From chainer-compiler with MIT License
def forward(self, input, future):
        outputs = []
        h_t  = torch.zeros(input.size(0), 51, dtype=torch.double)
        c_t  = torch.zeros(input.size(0), 51, dtype=torch.double)
        h_t2 = torch.zeros(input.size(0), 51, dtype=torch.double)
        c_t2 = torch.zeros(input.size(0), 51, dtype=torch.double)

        for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
            h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        for i in range(future):  # if we should predict the future
            h_t, c_t = self.lstm1(output, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        # EDIT(momohatt): Add 'dim='
        outputs = torch.stack(outputs, dim=1).squeeze(dim=2)
        return outputs


# Example input 
Example #16
Source File: types.py    From chainer-compiler with MIT License
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
            torch.bool    : np.dtype(np.bool_),  # np.bool_ avoids the deprecated np.bool alias
            torch.uint8   : np.dtype(np.uint8),
            torch.int8    : np.dtype(np.int8),
            torch.int16   : np.dtype(np.int16),
            torch.short   : np.dtype(np.int16),
            torch.int32   : np.dtype(np.int32),
            torch.int     : np.dtype(np.int32),
            torch.int64   : np.dtype(np.int64),
            torch.long    : np.dtype(np.int64),
            torch.float16 : np.dtype(np.float16),
            torch.half    : np.dtype(np.float16),
            torch.float32 : np.dtype(np.float32),
            torch.float   : np.dtype(np.float32),
            torch.float64 : np.dtype(np.float64),
            torch.double  : np.dtype(np.float64),
            }
    return dtype_dict[dtype]
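
Usage is direct; torch.double maps to NumPy's float64:

torch_dtype_to_np_dtype(torch.double)  # -> dtype('float64')
torch_dtype_to_np_dtype(torch.half)    # -> dtype('float16')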


# ---------------------- InferenceEngine internal types ------------------------ 
Example #17
Source File: utils.py    From torchani with MIT License
def sae(self, species):
        """Compute self energies for molecules.

        Padding atoms will be automatically excluded.

        Arguments:
            species (:class:`torch.Tensor`): Long tensor in shape
                ``(conformations, atoms)``.

        Returns:
            :class:`torch.Tensor`: 1D vector in shape ``(conformations,)``
            for molecular self energies.
        """
        intercept = 0.0
        if self.fit_intercept:
            intercept = self.self_energies[-1]

        self_energies = self.self_energies[species]
        self_energies[species == torch.tensor(-1, device=species.device)] = torch.tensor(0, device=species.device, dtype=torch.double)
        return self_energies.sum(dim=1) + intercept 
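
The indexing trick above works because species -1 hits the last table entry (the intercept slot when fit_intercept is used) and is then zeroed out. A standalone sketch with a hypothetical two-species table:

import torch

self_energies = torch.tensor([-0.5, -37.8], dtype=torch.double)  # hypothetical per-species values
species = torch.tensor([[1, 0, -1],    # -1 marks a padding atom
                        [0, 0, 0]])
per_atom = self_energies[species]      # -1 indexes the last entry for now
per_atom[species == -1] = 0.0          # padding contributes nothing
print(per_atom.sum(dim=1))             # molecular self energies, shape (2,)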
Example #18
Source File: test_multitask_multivariate_normal.py    From gpytorch with MIT License
def test_multitask_multivariate_normal_exceptions(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([0, 1], device=device, dtype=dtype)
            covmat = torch.eye(2, device=device, dtype=dtype)
            with self.assertRaises(RuntimeError):
                MultitaskMultivariateNormal(mean=mean, covariance_matrix=covmat) 
Example #19
Source File: test_ase.py    From torchani with MIT License
def setUp(self):
        self.model_pti = torchani.models.ANI1x(periodic_table_index=True).double()
        self.model = torchani.models.ANI1x().double() 
Example #20
Source File: test_multivariate_normal.py    From gpytorch with MIT License
def test_multivariate_normal_lazy(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([0, 1, 2], device=device, dtype=dtype)
            covmat = torch.diag(torch.tensor([1, 0.75, 1.5], device=device, dtype=dtype))
            covmat_chol = torch.cholesky(covmat)
            mvn = MultivariateNormal(mean=mean, covariance_matrix=NonLazyTensor(covmat))
            self.assertTrue(torch.is_tensor(mvn.covariance_matrix))
            self.assertIsInstance(mvn.lazy_covariance_matrix, LazyTensor)
            self.assertAllClose(mvn.variance, torch.diag(covmat))
            self.assertAllClose(mvn.covariance_matrix, covmat)
            self.assertAllClose(mvn._unbroadcasted_scale_tril, covmat_chol)
            mvn_plus1 = mvn + 1
            self.assertAllClose(mvn_plus1.mean, mvn.mean + 1)
            self.assertAllClose(mvn_plus1.covariance_matrix, mvn.covariance_matrix)
            self.assertAllClose(mvn_plus1._unbroadcasted_scale_tril, covmat_chol)
            mvn_times2 = mvn * 2
            self.assertAllClose(mvn_times2.mean, mvn.mean * 2)
            self.assertAllClose(mvn_times2.covariance_matrix, mvn.covariance_matrix * 4)
            self.assertAllClose(mvn_times2._unbroadcasted_scale_tril, covmat_chol * 2)
            mvn_divby2 = mvn / 2
            self.assertAllClose(mvn_divby2.mean, mvn.mean / 2)
            self.assertAllClose(mvn_divby2.covariance_matrix, mvn.covariance_matrix / 4)
            self.assertAllClose(mvn_divby2._unbroadcasted_scale_tril, covmat_chol / 2)
            # TODO: Add tests for entropy, log_prob, etc. - this is an issue b/c it
            # uses root_decomposition, which is not very reliable
            # self.assertAlmostEqual(mvn.entropy().item(), 4.3157, places=4)
            # self.assertAlmostEqual(mvn.log_prob(torch.zeros(3)).item(), -4.8157, places=4)
            # self.assertTrue(
            #     torch.allclose(mvn.log_prob(torch.zeros(2, 3)), -4.8157 * torch.ones(2))
            # )
            conf_lower, conf_upper = mvn.confidence_region()
            self.assertAllClose(conf_lower, mvn.mean - 2 * mvn.stddev)
            self.assertAllClose(conf_upper, mvn.mean + 2 * mvn.stddev)
            self.assertTrue(mvn.sample().shape == torch.Size([3]))
            self.assertTrue(mvn.sample(torch.Size([2])).shape == torch.Size([2, 3]))
            self.assertTrue(mvn.sample(torch.Size([2, 4])).shape == torch.Size([2, 4, 3])) 
Example #21
Source File: test_grad.py    From torchani with MIT License
def setUp(self):
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

        self.model = torchani.models.ANI1x(model_index=0).to(device=self.device,
                                                             dtype=torch.double)
        datafile = os.path.join(path, 'test_data/NIST/all')

        # Some small molecules are selected to make the tests faster
        self.data = pickle.load(open(datafile, 'rb'))[1243:1250] 
Example #22
Source File: test_multivariate_normal.py    From gpytorch with MIT License
def test_multivariate_normal_non_lazy(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([0, 1, 2], device=device, dtype=dtype)
            covmat = torch.diag(torch.tensor([1, 0.75, 1.5], device=device, dtype=dtype))
            mvn = MultivariateNormal(mean=mean, covariance_matrix=covmat, validate_args=True)
            self.assertTrue(torch.is_tensor(mvn.covariance_matrix))
            self.assertIsInstance(mvn.lazy_covariance_matrix, LazyTensor)
            self.assertAllClose(mvn.variance, torch.diag(covmat))
            self.assertAllClose(mvn.scale_tril, covmat.sqrt())
            mvn_plus1 = mvn + 1
            self.assertAllClose(mvn_plus1.mean, mvn.mean + 1)
            self.assertAllClose(mvn_plus1.covariance_matrix, mvn.covariance_matrix)
            mvn_times2 = mvn * 2
            self.assertAllClose(mvn_times2.mean, mvn.mean * 2)
            self.assertAllClose(mvn_times2.covariance_matrix, mvn.covariance_matrix * 4)
            mvn_divby2 = mvn / 2
            self.assertAllClose(mvn_divby2.mean, mvn.mean / 2)
            self.assertAllClose(mvn_divby2.covariance_matrix, mvn.covariance_matrix / 4)
            self.assertAlmostEqual(mvn.entropy().item(), 4.3157, places=4)
            self.assertAlmostEqual(mvn.log_prob(torch.zeros(3, device=device, dtype=dtype)).item(), -4.8157, places=4)
            logprob = mvn.log_prob(torch.zeros(2, 3, device=device, dtype=dtype))
            logprob_expected = torch.tensor([-4.8157, -4.8157], device=device, dtype=dtype)
            self.assertAllClose(logprob, logprob_expected)
            conf_lower, conf_upper = mvn.confidence_region()
            self.assertAllClose(conf_lower, mvn.mean - 2 * mvn.stddev)
            self.assertAllClose(conf_upper, mvn.mean + 2 * mvn.stddev)
            self.assertTrue(mvn.sample().shape == torch.Size([3]))
            self.assertTrue(mvn.sample(torch.Size([2])).shape == torch.Size([2, 3]))
            self.assertTrue(mvn.sample(torch.Size([2, 4])).shape == torch.Size([2, 4, 3])) 
Example #23
Source File: test_multitask_multivariate_normal.py    From gpytorch with MIT License
def test_from_independent_mvns(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            # Test non-batch mode mvns
            n_tasks = 2
            n = 4
            mvns = [
                MultivariateNormal(
                    mean=torch.randn(4, device=device, dtype=dtype),
                    covariance_matrix=DiagLazyTensor(torch.randn(n, device=device, dtype=dtype).abs_()),
                )
                for i in range(n_tasks)
            ]
            mvn = MultitaskMultivariateNormal.from_independent_mvns(mvns=mvns)
            expected_mean_shape = [n, n_tasks]
            expected_covar_shape = [n * n_tasks] * 2
            self.assertEqual(list(mvn.mean.shape), expected_mean_shape)
            self.assertEqual(list(mvn.covariance_matrix.shape), expected_covar_shape)

            # Test batch mode mvns
            b = 3
            mvns = [
                MultivariateNormal(
                    mean=torch.randn(b, n, device=device, dtype=dtype),
                    covariance_matrix=DiagLazyTensor(torch.randn(b, n, device=device, dtype=dtype).abs_()),
                )
                for i in range(n_tasks)
            ]
            mvn = MultitaskMultivariateNormal.from_independent_mvns(mvns=mvns)
            self.assertEqual(list(mvn.mean.shape), [b] + expected_mean_shape)
            self.assertEqual(list(mvn.covariance_matrix.shape), [b] + expected_covar_shape) 
Example #24
Source File: test_multitask_multivariate_normal.py    From gpytorch with MIT License
def test_multivariate_normal_batch_correlated_samples(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([[0, 1], [2, 3], [4, 5]], dtype=dtype, device=device).repeat(2, 1, 1)
            variance = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dtype, device=device).repeat(2, 1, 1)
            covmat = variance.view(2, 1, -1) * torch.eye(6, device=device, dtype=dtype)
            mtmvn = MultitaskMultivariateNormal(mean=mean, covariance_matrix=covmat)
            base_samples = mtmvn.get_base_samples(torch.Size((3, 4)))
            self.assertTrue(mtmvn.sample(base_samples=base_samples).shape == torch.Size([3, 4, 2, 3, 2]))
            base_samples = mtmvn.get_base_samples()
            self.assertTrue(mtmvn.sample(base_samples=base_samples).shape == torch.Size([2, 3, 2])) 
Example #25
Source File: test_multitask_multivariate_normal.py    From gpytorch with MIT License
def test_multivariate_normal_correlated_samples(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([[0, 1], [2, 3], [4, 5]], dtype=dtype, device=device)
            variance = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dtype, device=device)
            covmat = variance.view(-1).diag()
            mtmvn = MultitaskMultivariateNormal(mean=mean, covariance_matrix=covmat)
            base_samples = mtmvn.get_base_samples(torch.Size([3, 4]))
            self.assertTrue(mtmvn.sample(base_samples=base_samples).shape == torch.Size([3, 4, 3, 2]))
            base_samples = mtmvn.get_base_samples()
            self.assertTrue(mtmvn.sample(base_samples=base_samples).shape == torch.Size([3, 2])) 
Example #26
Source File: lazy_tensor.py    From gpytorch with MIT License
def requires_grad(self, val):
        for arg in self._args:
            if hasattr(arg, "requires_grad"):
                if arg.dtype in (torch.float, torch.double, torch.half):
                    arg.requires_grad = val
        for arg in self._kwargs.values():
            if hasattr(arg, "requires_grad"):
                arg.requires_grad = val 
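
The dtype check in the _args loop matters because PyTorch only allows gradients on floating-point (and, in recent versions, complex) tensors. A quick illustration:

import torch

t = torch.zeros(3, dtype=torch.double)
t.requires_grad = True  # fine: floating-point dtype

n = torch.zeros(3, dtype=torch.long)
# n.requires_grad = True  would raise:
# RuntimeError: only Tensors of floating point ... dtype can require gradients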
Example #27
Source File: test_gaussian_likelihood.py    From gpytorch with MIT License
def test_fixed_noise_gaussian_likelihood(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            noise = 0.1 + torch.rand(4, device=device, dtype=dtype)
            lkhd = FixedNoiseGaussianLikelihood(noise=noise)
            # test basics
            self.assertIsInstance(lkhd.noise_covar, FixedGaussianNoise)
            self.assertTrue(torch.equal(noise, lkhd.noise))
            new_noise = 0.1 + torch.rand(4, device=device, dtype=dtype)
            lkhd.noise = new_noise
            self.assertTrue(torch.equal(lkhd.noise, new_noise))
            # test __call__
            mean = torch.zeros(4, device=device, dtype=dtype)
            covar = DiagLazyTensor(torch.ones(4, device=device, dtype=dtype))
            mvn = MultivariateNormal(mean, covar)
            out = lkhd(mvn)
            self.assertTrue(torch.allclose(out.variance, 1 + new_noise))
            # things should break if dimensions mismatch
            mean = torch.zeros(5, device=device, dtype=dtype)
            covar = DiagLazyTensor(torch.ones(5, device=device, dtype=dtype))
            mvn = MultivariateNormal(mean, covar)
            with self.assertWarns(UserWarning):
                lkhd(mvn)
            # test __call__ w/ observation noise
            obs_noise = 0.1 + torch.rand(5, device=device, dtype=dtype)
            out = lkhd(mvn, noise=obs_noise)
            self.assertTrue(torch.allclose(out.variance, 1 + obs_noise)) 
Example #28
Source File: test_cholesky.py    From gpytorch with MIT License
def test_psd_safe_cholesky_psd(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            for batch_mode in (False, True):
                if batch_mode:
                    A = self._gen_test_psd().to(device=device, dtype=dtype)
                else:
                    A = self._gen_test_psd()[0].to(device=device, dtype=dtype)
                idx = torch.arange(A.shape[-1], device=A.device)
                # default values
                Aprime = A.clone()
                Aprime[..., idx, idx] += 1e-6 if A.dtype == torch.float32 else 1e-8
                L_exp = torch.cholesky(Aprime)
                with warnings.catch_warnings(record=True) as w:
                    # Makes sure warnings we catch don't cause `-w error` to fail
                    warnings.simplefilter("always", NumericalWarning)

                    L_safe = psd_safe_cholesky(A)
                    self.assertTrue(any(issubclass(w_.category, NumericalWarning) for w_ in w))
                    self.assertTrue(any("A not p.d., added jitter" in str(w_.message) for w_ in w))
                self.assertTrue(torch.allclose(L_exp, L_safe))
                # user-defined value
                Aprime = A.clone()
                Aprime[..., idx, idx] += 1e-2
                L_exp = torch.cholesky(Aprime)
                with warnings.catch_warnings(record=True) as w:
                    # Makes sure warnings we catch don't cause `-w error` to fail
                    warnings.simplefilter("always", NumericalWarning)

                    L_safe = psd_safe_cholesky(A, jitter=1e-2)
                    self.assertTrue(any(issubclass(w_.category, NumericalWarning) for w_ in w))
                    self.assertTrue(any("A not p.d., added jitter" in str(w_.message) for w_ in w))
                self.assertTrue(torch.allclose(L_exp, L_safe)) 
Example #29
Source File: test_cholesky.py    From gpytorch with MIT License
def test_psd_safe_cholesky_pd(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            for batch_mode in (False, True):
                if batch_mode:
                    A = self._gen_test_psd().to(device=device, dtype=dtype)
                    D = torch.eye(2).type_as(A).unsqueeze(0).repeat(2, 1, 1)
                else:
                    A = self._gen_test_psd()[0].to(device=device, dtype=dtype)
                    D = torch.eye(2).type_as(A)
                A += D
                # basic
                L = torch.cholesky(A)
                L_safe = psd_safe_cholesky(A)
                self.assertTrue(torch.allclose(L, L_safe))
                # upper
                L = torch.cholesky(A, upper=True)
                L_safe = psd_safe_cholesky(A, upper=True)
                self.assertTrue(torch.allclose(L, L_safe))
                # output tensors
                L = torch.empty_like(A)
                L_safe = torch.empty_like(A)
                torch.cholesky(A, out=L)
                psd_safe_cholesky(A, out=L_safe)
                self.assertTrue(torch.allclose(L, L_safe))
                # output tensors, upper
                torch.cholesky(A, upper=True, out=L)
                psd_safe_cholesky(A, upper=True, out=L_safe)
                self.assertTrue(torch.allclose(L, L_safe))
                # make sure jitter doesn't do anything if p.d.
                L = torch.cholesky(A)
                L_safe = psd_safe_cholesky(A, jitter=1e-2)
                self.assertTrue(torch.allclose(L, L_safe)) 
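
For context, a minimal sketch of the try-then-add-jitter pattern these tests exercise (not gpytorch's actual psd_safe_cholesky):

import torch

def cholesky_with_jitter(A, jitter=1e-6, max_tries=3):
    for i in range(max_tries):
        try:
            return torch.linalg.cholesky(A)
        except RuntimeError:
            # not positive definite: add (growing) jitter to the diagonal and retry
            idx = torch.arange(A.shape[-1], device=A.device)
            A = A.clone()
            A[..., idx, idx] += jitter * (10 ** i)
    raise RuntimeError("matrix is not positive definite, even with jitter")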
Example #30
Source File: algorithmic.py    From spectre with Apache License 2.0
def __init__(self, keys: torch.Tensor):
        n = keys.shape[0]
        # sort by key (keeping keys on the GPU)
        relative_key = keys + torch.linspace(0, 0.9, n, dtype=torch.double, device=keys.device)
        sorted_keys, sorted_indices = torch.sort(relative_key)
        sorted_keys, sorted_indices = sorted_keys.int(), sorted_indices.cpu()
        # get group boundary
        diff = sorted_keys[1:] - sorted_keys[:-1]
        boundary = (diff.nonzero(as_tuple=True)[0] + 1).tolist()
        boundary = np.array([0] + boundary + [n])
        # get inverse indices
        width = np.diff(boundary).max()
        groups = len(boundary) - 1
        inverse_indices = sorted_indices.new_full((groups, width), n + 1)
        for start, end, i in zip(boundary[:-1], boundary[1:], range(groups)):
            inverse_indices[i, 0:(end - start)] = sorted_indices[start:end]
        # keep inverse_indices in GPU for sort
        inverse_indices = inverse_indices.flatten().to(keys.device, non_blocking=True)
        inverse_indices = torch.sort(inverse_indices)[1][:n]
        # for fast split
        take_indices = sorted_indices.new_full((groups, width), -1)
        for start, end, i in zip(boundary[:-1], boundary[1:], range(groups)):
            take_indices[i, 0:(end - start)] = sorted_indices[start:end]
        take_indices = take_indices.to(keys.device, non_blocking=True)
        # class members
        self._boundary = boundary
        self._sorted_indices = take_indices
        self._padding_mask = take_indices == -1
        self._inverse_indices = inverse_indices
        self._width = width
        self._groups = groups
        self._data_shape = (groups, width)
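
The relative-key line deserves a note: adding a strictly increasing offset in [0, 0.9) keeps distinct integer keys separated while making the sort stable within a key. A small worked example:

import torch

keys = torch.tensor([2, 0, 2, 1, 0])
relative_key = keys + torch.linspace(0, 0.9, keys.shape[0], dtype=torch.double)
sorted_keys, sorted_indices = torch.sort(relative_key)
print(sorted_keys.int().tolist())  # [0, 0, 1, 2, 2]
print(sorted_indices.tolist())     # [1, 4, 3, 0, 2] -- equal keys keep original order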