Python torch.logdet() Examples
The following are 20 code examples of torch.logdet(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
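Before diving into the examples, here is a minimal standalone sketch (the matrices are illustrative, not taken from any project below) of what torch.logdet() computes: the natural log of the determinant of a square matrix, broadcast over any leading batch dimensions.

import torch

a = torch.tensor([[2.0, 0.0],
                  [0.0, 3.0]])   # det = 6
print(torch.logdet(a))           # tensor(1.7918) == log(6)

batch = torch.stack([a, 2 * a])  # determinants: 6 and 24
print(torch.logdet(batch))       # tensor([1.7918, 3.1781]), one value per matrix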
Example #1
Source File: glow.py From tn2-wg with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, z, reverse=False):
    # shape
    batch_size, group_size, n_of_groups = z.size()

    W = self.conv.weight.squeeze()

    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.type() == 'torch.cuda.HalfTensor':
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W)
        z = self.conv(z)
        return z, log_det_W
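Examples #1 through #9 are all variants of WaveGlow's invertible 1x1 convolution. The log_det_W term is the change-of-variables correction: the same invertible matrix W is applied at each of the n_of_groups positions in each of the batch_size items, so the log-Jacobian of the layer summed over the batch is batch_size * n_of_groups * logdet(W). A tiny self-contained check of that bookkeeping (shapes and seed are illustrative, not from the repos), using the identity logdet(matrix_exp(A)) = trace(A) to get a known ground truth:

import torch

torch.manual_seed(0)
batch_size, group_size, n_of_groups = 2, 8, 100

# matrix_exp always has a positive determinant, and
# det(matrix_exp(A)) = exp(trace(A)), so logdet(W) = trace(A)
A = torch.randn(group_size, group_size)
W = torch.matrix_exp(A)

log_det_W = batch_size * n_of_groups * torch.logdet(W)
expected = batch_size * n_of_groups * torch.trace(A)
print(torch.allclose(log_det_W, expected, atol=1e-3))  # True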
Example #2
Source File: glow.py From Tacotron2-Mandarin with MIT License | 6 votes |
def forward(self, z, reverse=False):
    # shape
    batch_size, group_size, n_of_groups = z.size()

    W = self.conv.weight.squeeze()

    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.float().inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.type() == 'torch.cuda.HalfTensor':
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W)
        z = self.conv(z)
        return z, log_det_W
Example #3
Source File: glow.py From fac-via-ppg with Apache License 2.0 | 6 votes |
def forward(self, z, reverse=False):
    # shape
    batch_size, group_size, n_of_groups = z.size()

    W = self.conv.weight.squeeze()

    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.type() == 'torch.cuda.HalfTensor':
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W)
        z = self.conv(z)
        return z, log_det_W
Example #4
Source File: glow.py From FastSpeech with MIT License | 6 votes |
def forward(self, z, reverse=False):
    # shape
    batch_size, group_size, n_of_groups = z.size()

    W = self.conv.weight.squeeze()

    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.type() == 'torch.cuda.HalfTensor':
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W)
        z = self.conv(z)
        return z, log_det_W
Example #5
Source File: glow.py From FastSpeech with MIT License | 6 votes |
def forward(self, z, reverse=False):
    # shape
    batch_size, group_size, n_of_groups = z.size()

    W = self.conv.weight.squeeze()

    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.float().inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.type() == 'torch.cuda.HalfTensor':
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W)
        z = self.conv(z)
        return z, log_det_W
Example #6
Source File: waveglow.py From NeMo with Apache License 2.0 | 6 votes |
def forward(self, z, reverse: bool = False):
    # shape
    batch_size, group_size, n_of_groups = z.size()

    W = self.conv.weight.squeeze()

    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.float().inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.dtype == torch.half:
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W.float())
        z = self.conv(z)
        return (
            z,
            log_det_W,
        )
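Note the .float() upcasts in this NeMo variant (and in the other W.float().inverse() examples): the matrix-inverse and log-determinant kernels are generally not implemented for half precision, so when the model runs in fp16 the weight is promoted to float32 for W.inverse() and torch.logdet(W.float()), and only the cached inverse is cast back to half to match the activations.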
Example #7
Source File: glow.py From waveglow with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, z, reverse=False):
    # shape
    batch_size, group_size, n_of_groups = z.size()

    W = self.conv.weight.squeeze()

    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.float().inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.type() == 'torch.cuda.HalfTensor':
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W)
        z = self.conv(z)
        return z, log_det_W
Example #8
Source File: glow.py From LightSpeech with MIT License | 6 votes |
def forward(self, z, reverse=False):
    # shape
    batch_size, group_size, n_of_groups = z.size()

    W = self.conv.weight.squeeze()

    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.type() == 'torch.cuda.HalfTensor':
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W)
        z = self.conv(z)
        return z, log_det_W
Example #9
Source File: glow.py From LightSpeech with MIT License | 6 votes |
def forward(self, z, reverse=False):
    # shape
    batch_size, group_size, n_of_groups = z.size()

    W = self.conv.weight.squeeze()

    if reverse:
        if not hasattr(self, 'W_inverse'):
            # Reverse computation
            W_inverse = W.float().inverse()
            W_inverse = Variable(W_inverse[..., None])
            if z.type() == 'torch.cuda.HalfTensor':
                W_inverse = W_inverse.half()
            self.W_inverse = W_inverse
        z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
        return z
    else:
        # Forward computation
        log_det_W = batch_size * n_of_groups * torch.logdet(W)
        z = self.conv(z)
        return z, log_det_W
Example #10
Source File: test_cached_cg_lazy_tensor.py From gpytorch with MIT License | 6 votes |
def test_inv_quad_logdet_no_reduce(self):
    # Forward
    lazy_tensor = self.create_lazy_tensor(with_solves=True, with_logdet=True)
    evaluated = self.evaluate_lazy_tensor(lazy_tensor)
    flattened_evaluated = evaluated.view(-1, *lazy_tensor.matrix_shape)

    vecs = lazy_tensor.eager_rhss[0].clone().detach().requires_grad_(True)
    vecs_copy = lazy_tensor.eager_rhss[0].clone().detach().requires_grad_(True)

    with gpytorch.settings.num_trace_samples(128), warnings.catch_warnings(record=True) as ws:
        res_inv_quad, res_logdet = lazy_tensor.inv_quad_logdet(
            inv_quad_rhs=vecs, logdet=True, reduce_inv_quad=False
        )
        self.assertFalse(any(issubclass(w.category, ExtraComputationWarning) for w in ws))
    res = res_inv_quad.sum(-1) + res_logdet

    actual_inv_quad = evaluated.inverse().matmul(vecs_copy).mul(vecs_copy).sum(-2).sum(-1)
    actual_logdet = torch.cat(
        [torch.logdet(flattened_evaluated[i]).unsqueeze(0) for i in range(lazy_tensor.batch_shape.numel())]
    ).view(lazy_tensor.batch_shape)
    actual = actual_inv_quad + actual_logdet

    diff = (res - actual).abs() / actual.abs().clamp(1, math.inf)
    self.assertLess(diff.max().item(), 15e-2)
Example #11
Source File: test_cached_cg_lazy_tensor.py From gpytorch with MIT License | 6 votes |
def test_inv_quad_logdet(self):
    # Forward
    lazy_tensor = self.create_lazy_tensor(with_solves=True, with_logdet=True)
    evaluated = self.evaluate_lazy_tensor(lazy_tensor)
    flattened_evaluated = evaluated.view(-1, *lazy_tensor.matrix_shape)

    vecs = lazy_tensor.eager_rhss[0].clone().detach().requires_grad_(True)
    vecs_copy = lazy_tensor.eager_rhss[0].clone().detach().requires_grad_(True)

    with gpytorch.settings.num_trace_samples(128), warnings.catch_warnings(record=True) as ws:
        res_inv_quad, res_logdet = lazy_tensor.inv_quad_logdet(inv_quad_rhs=vecs, logdet=True)
        self.assertFalse(any(issubclass(w.category, ExtraComputationWarning) for w in ws))
    res = res_inv_quad + res_logdet

    actual_inv_quad = evaluated.inverse().matmul(vecs_copy).mul(vecs_copy).sum(-2).sum(-1)
    actual_logdet = torch.cat(
        [torch.logdet(flattened_evaluated[i]).unsqueeze(0) for i in range(lazy_tensor.batch_shape.numel())]
    ).view(lazy_tensor.batch_shape)
    actual = actual_inv_quad + actual_logdet

    diff = (res - actual).abs() / actual.abs().clamp(1, math.inf)
    self.assertLess(diff.max().item(), 15e-2)
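Both gpytorch tests above build the reference log-determinant by looping over the flattened batch and concatenating one torch.logdet() result per matrix. On reasonably recent PyTorch releases torch.logdet itself broadcasts over leading batch dimensions, so a hedged one-line alternative (not what the gpytorch source does) would be:

actual_logdet = torch.logdet(flattened_evaluated).view(lazy_tensor.batch_shape)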
Example #12
Source File: lazy_tensor_test_case.py From gpytorch with MIT License | 5 votes |
def _test_inv_quad_logdet(self, reduce_inv_quad=True, cholesky=False):
    if not self.__class__.skip_slq_tests:
        # Forward
        lazy_tensor = self.create_lazy_tensor()
        evaluated = self.evaluate_lazy_tensor(lazy_tensor)
        flattened_evaluated = evaluated.view(-1, *lazy_tensor.matrix_shape)

        vecs = torch.randn(*lazy_tensor.batch_shape, lazy_tensor.size(-1), 3, requires_grad=True)
        vecs_copy = vecs.clone().detach_().requires_grad_(True)

        _wrapped_cg = MagicMock(wraps=gpytorch.utils.linear_cg)
        with patch("gpytorch.utils.linear_cg", new=_wrapped_cg) as linear_cg_mock:
            with gpytorch.settings.num_trace_samples(256), gpytorch.settings.max_cholesky_size(
                math.inf if cholesky else 0
            ), gpytorch.settings.cg_tolerance(1e-5):
                res_inv_quad, res_logdet = lazy_tensor.inv_quad_logdet(
                    inv_quad_rhs=vecs, logdet=True, reduce_inv_quad=reduce_inv_quad
                )

        actual_inv_quad = evaluated.inverse().matmul(vecs_copy).mul(vecs_copy).sum(-2)
        if reduce_inv_quad:
            actual_inv_quad = actual_inv_quad.sum(-1)
        actual_logdet = torch.cat(
            [torch.logdet(flattened_evaluated[i]).unsqueeze(0) for i in range(lazy_tensor.batch_shape.numel())]
        ).view(lazy_tensor.batch_shape)

        self.assertAllClose(res_inv_quad, actual_inv_quad, rtol=0.01, atol=0.01)
        self.assertAllClose(res_logdet, actual_logdet, rtol=0.2, atol=0.03)

        if not cholesky and self.__class__.should_call_cg:
            self.assertTrue(linear_cg_mock.called)
        else:
            self.assertFalse(linear_cg_mock.called)
Example #13
Source File: wishart_prior.py From gpytorch with MIT License | 5 votes |
def __init__(self, nu, K, validate_args=False):
    TModule.__init__(self)
    if K.dim() < 2:
        raise ValueError("K must be at least 2-dimensional")
    n = K.shape[-1]
    if isinstance(nu, Number):
        nu = torch.tensor(float(nu))
    if torch.any(nu <= 0):
        raise ValueError("Must have nu > 0")
    self.n = torch.tensor(n, dtype=torch.long, device=nu.device)
    batch_shape = nu.shape
    event_shape = torch.Size([n, n])
    # normalization constant
    c = (nu + n - 1) / 2
    logdetK = torch.logdet(K)
    C = c * (logdetK - n * math.log(2)) - torch.mvlgamma(c, n)
    # need to assign values before registering as buffers to make argument validation work
    self.nu = nu
    self.K = K
    self.C = C
    super(InverseWishartPrior, self).__init__(batch_shape, event_shape, validate_args=validate_args)
    # now need to delete to be able to register buffer
    del self.nu, self.K, self.C
    self.register_buffer("nu", nu)
    self.register_buffer("K", K)
    self.register_buffer("C", C)
Example #14
Source File: wishart_prior.py From gpytorch with MIT License | 5 votes |
def log_prob(self, X):
    # I'm sure this could be done more elegantly
    logdetp = torch.logdet(X)
    Kinvp = torch.matmul(self.K_inv, X)
    trKinvp = torch.diagonal(Kinvp, dim1=-2, dim2=-1).sum(-1)
    return self.C + 0.5 * (self.nu - self.n - 1) * logdetp - trKinvp
Example #15
Source File: rmi_utils.py From RMI with MIT License | 5 votes |
def log_det_by_cholesky_test():
    """
    test for function log_det_by_cholesky()
    """
    a = torch.randn(1, 4, 4)
    a = torch.matmul(a, a.transpose(2, 1))
    print(a)
    res_1 = torch.logdet(torch.squeeze(a))
    res_2 = log_det_by_cholesky(a)
    print(res_1, res_2)
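The helper under test is not shown on this page. A minimal implementation consistent with the test (an assumption sketched from the function name, not the RMI source verbatim): for a symmetric positive-definite A = L @ L.T, logdet(A) = 2 * sum(log(diag(L))), and the Cholesky route is typically cheaper and numerically steadier than a general LU-based determinant.

import torch

def log_det_by_cholesky(matrix):
    # matrix: a batch of symmetric positive-definite matrices, shape (..., n, n)
    chol = torch.linalg.cholesky(matrix)
    diag = torch.diagonal(chol, dim1=-2, dim2=-1)
    # logdet(A) = 2 * sum(log(diag(L))) for A = L @ L.T
    return 2.0 * torch.sum(torch.log(diag), dim=-1)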
Example #16
Source File: wishart_prior.py From gpytorch with MIT License | 5 votes |
def __init__(self, nu, K, validate_args=False):
    TModule.__init__(self)
    if K.dim() < 2:
        raise ValueError("K must be at least 2-dimensional")
    n = K.shape[-1]
    if K.shape[-2] != K.shape[-1]:
        raise ValueError("K must be square")
    if isinstance(nu, Number):
        nu = torch.tensor(float(nu))
    if torch.any(nu <= n):
        raise ValueError("Must have nu > n - 1")
    self.n = torch.tensor(n, dtype=torch.long, device=nu.device)
    batch_shape = nu.shape
    event_shape = torch.Size([n, n])
    # normalization constant
    logdetK = torch.logdet(K)
    C = -(nu / 2) * (logdetK + n * math.log(2)) - torch.mvlgamma(nu / 2, n)
    K_inv = torch.inverse(K)
    # need to assign values before registering as buffers to make argument validation work
    self.nu = nu
    self.K_inv = K_inv
    self.C = C
    super(WishartPrior, self).__init__(batch_shape, event_shape, validate_args=validate_args)
    # now need to delete to be able to register buffer
    del self.nu, self.K_inv, self.C
    self.register_buffer("nu", nu)
    self.register_buffer("K_inv", K_inv)
    self.register_buffer("C", C)
Example #17
Source File: loss.py From SceneChangeDet with MIT License | 5 votes |
def forward(self):
    constrainted_matrix = self.select_param()
    matrix_ = torch.squeeze(torch.squeeze(constrainted_matrix, dim=2), dim=2)
    matrix_t = torch.t(matrix_)
    matrixs = torch.mm(matrix_t, matrix_)
    trace_ = torch.trace(torch.mm(matrixs, torch.inverse(matrixs)))
    log_det = torch.logdet(matrixs)
    maha_loss = trace_ - log_det
    return maha_loss
Example #18
Source File: pairwise_gp.py From botorch with MIT License | 5 votes |
def forward(self, post: Posterior, comp: Tensor) -> Tensor:
    r"""Calculate approximated log evidence, i.e., log(P(D|theta))

    Args:
        post: training posterior distribution from self.model
        comp: Comparisons pairs, see PairwiseGP.__init__ for more details

    Returns:
        The approximated evidence, i.e., the marginal log likelihood
    """
    model = self.model
    if comp is not model.comparisons:
        raise RuntimeError("Must train on training data")

    f_max = post.mean
    log_posterior = model._posterior_f(f_max)
    part1 = -log_posterior

    part2 = model.covar @ model.likelihood_hess
    eye = torch.eye(part2.size(-1)).expand(part2.shape)
    part2 = part2 + eye
    part2 = -0.5 * torch.logdet(part2)

    evidence = part1 + part2

    # Sum up mll first so that when adding prior probs it won't
    # propagate and double count
    evidence = evidence.sum()

    # Add log probs of priors on the (functions of) parameters
    for _, prior, closure, _ in self.named_priors():
        evidence = evidence.add(prior.log_prob(closure()).sum())

    return evidence
Example #19
Source File: torchutils.py From nsf with MIT License | 5 votes |
def logabsdet(x):
    """Returns the log absolute determinant of square matrix x."""
    # Note: torch.logdet() only works for positive determinant.
    _, res = torch.slogdet(x)
    return res
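A quick standalone illustration of the comment above (not from the nsf repo): for a matrix whose determinant is negative, torch.logdet() returns nan, while torch.slogdet() still recovers log|det| along with the sign.

import torch

x = torch.tensor([[0.0, 1.0],
                  [1.0, 0.0]])   # det = -1
print(torch.logdet(x))           # tensor(nan): determinant is negative
sign, logabs = torch.slogdet(x)
print(sign, logabs)              # tensor(-1.), tensor(0.): |det| = 1, log|det| = 0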
Example #20
Source File: matrix_utils.py From invertible-resnet with MIT License | 5 votes |
def log_det_other(x):
    return torch.logdet(x)