Python scipy.linalg.cholesky() Examples

The following are 30 code examples of scipy.linalg.cholesky(), drawn from open-source projects. Each example notes its original project and source file. You may also want to check out all available functions and classes of the module scipy.linalg.
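
Before the project samples, here is a minimal sketch of the basic API. scipy.linalg.cholesky() factors a symmetric (or Hermitian) positive-definite matrix; it returns the upper triangular factor by default and the lower one with lower=True:

import numpy as np
from scipy.linalg import cholesky

A = np.array([[4.0, 2.0],
              [2.0, 3.0]])      # symmetric positive definite

U = cholesky(A)                 # upper triangular: A == U.T @ U
L = cholesky(A, lower=True)     # lower triangular: A == L @ L.T

assert np.allclose(A, U.T @ U)
assert np.allclose(A, L @ L.T)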
Example #1
Source File: lobpcg.py    From GraphicDesignPatternByPython with MIT License
import numpy as np
from scipy.linalg import cholesky, inv


def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = np.dot(blockVectorV.T.conj(), blockVectorBV)
    gramVBV = cholesky(gramVBV)
    gramVBV = inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = np.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = np.dot(blockVectorBV, gramVBV)

    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV 
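
This routine B-orthonormalizes the columns of blockVectorV: if R is the upper Cholesky factor of the Gram matrix V.T @ B @ V, the columns of V @ inv(R) are orthonormal in the B inner product. A minimal self-contained sketch of the same idea for B = None (the ordinary inner product); the variable names are illustrative:

import numpy as np
from scipy.linalg import cholesky, inv

rng = np.random.default_rng(0)
V = rng.standard_normal((50, 4))

R = cholesky(V.T @ V)    # upper triangular: V.T @ V == R.T @ R
V_orth = V @ inv(R)      # columns become orthonormal

assert np.allclose(V_orth.T @ V_orth, np.eye(4))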
Example #2
Source File: lobpcg.py    From lambda-packs with MIT License
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = np.dot(blockVectorV.T, blockVectorBV)
    gramVBV = cholesky(gramVBV)
    gramVBV = inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = np.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = np.dot(blockVectorBV, gramVBV)

    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV 
Example #3
Source File: mmd_test.py    From opt-mmd with BSD 3-Clause "New" or "Revised" License
import numpy as np
from scipy import linalg, stats


def linear_hotelling_test(X, Y, reg=0):
    n, p = X.shape
    Z = X - Y
    Z_bar = Z.mean(axis=0)

    Z -= Z_bar
    S = Z.T.dot(Z)
    S /= (n - 1)
    if reg:
        S.flat[::p + 1] += reg  # add the ridge to the diagonal of the p x p matrix S
    # z' inv(S) z = z' inv(L L') z = z' inv(L)' inv(L) z = ||inv(L) z||^2
    L = linalg.cholesky(S, lower=True, overwrite_a=True)
    Linv_Z_bar = linalg.solve_triangular(L, Z_bar, lower=True, overwrite_b=True)
    stat = n * Linv_Z_bar.dot(Linv_Z_bar)

    p_val = stats.chi2.sf(stat, p)
    return p_val, stat 
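
A usage sketch for the test above, assuming linear_hotelling_test is in scope with the module imports shown. The two samples here are drawn from the same distribution, so a large p-value is expected:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 3))
Y = rng.standard_normal((200, 3))

p_val, stat = linear_hotelling_test(X, Y, reg=1e-6)
print(p_val, stat)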
Example #4
Source File: utils.py    From vnpy_crypto with MIT License
import numpy as np
from scipy import linalg


def log_multivariate_normal_density(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices. """
    if hasattr(linalg, 'solve_triangular'):
        # only in scipy since 0.9
        solve_triangular = linalg.solve_triangular
    else:
        # slower, but works
        solve_triangular = linalg.solve
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) + \
                                     n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
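
A quick check of the function above against scipy.stats.multivariate_normal on illustrative data (a single mixture component with a full covariance):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
mu = np.zeros(3)
A = rng.standard_normal((3, 3))
cov = A @ A.T + 3.0 * np.eye(3)     # symmetric positive definite
X = rng.standard_normal((5, 3))

log_prob = log_multivariate_normal_density(X, [mu], [cov])[:, 0]
assert np.allclose(log_prob, stats.multivariate_normal(mu, cov).logpdf(X))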
Example #5
Source File: lobpcg.py    From Computable with MIT License
def b_orthonormalize(B, blockVectorV,
                      blockVectorBV=None, retInvR=False):
    """Internal."""
    import scipy.linalg as sla
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = sp.dot(blockVectorV.T, blockVectorBV)
    gramVBV = sla.cholesky(gramVBV)
    gramVBV = sla.inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = sp.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = sp.dot(blockVectorBV, gramVBV)

    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV 
Example #6
Source File: ols.py    From fastats with MIT License
from scipy.linalg import cholesky, solve_triangular


def ols_cholesky(A, b):
    """
    Estimate ordinary least-squares regression coefficients.

    If (A.T @ A) @ x = A.T @ b and A is full rank
    then there exists an upper triangular matrix
    R such that:

    (R.T @ R) @ x = A.T @ b
    R.T @ w = A.T @ b
    R @ x = w

    Find R using Cholesky decomposition.
    """
    R = cholesky(A.T @ A)
    w = solve_triangular(R, A.T @ b, trans='T')
    return solve_triangular(R, w) 
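
Assuming ols_cholesky above is in scope, a quick check against NumPy's least-squares solver on synthetic data (the design matrix and coefficients are illustrative):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((100, 3))
b = A @ np.array([1.0, -2.0, 0.5]) + 0.01 * rng.standard_normal(100)

beta = ols_cholesky(A, b)
beta_ref, *_ = np.linalg.lstsq(A, b, rcond=None)
assert np.allclose(beta, beta_ref)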
Example #7
Source File: gmm.py    From bhmm with GNU Lesser General Public License v3.0
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices.
    """
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #8
Source File: linalg.py    From revrand with Apache License 2.0
import numpy as np
from scipy.linalg import cholesky


def cho_log_det(L):
    """
    Compute the log of the determinant of :math:`A`, given its (upper or lower)
    Cholesky factorization :math:`LL^T`.

    Parameters
    ----------
    L: ndarray
        an upper or lower Cholesky factor

    Examples
    --------
    >>> A = np.array([[ 2, -1,  0],
    ...               [-1,  2, -1],
    ...               [ 0, -1,  2]])

    >>> Lt = cholesky(A)
    >>> np.isclose(cho_log_det(Lt), np.log(np.linalg.det(A)))
    True

    >>> L = cholesky(A, lower=True)
    >>> np.isclose(cho_log_det(L), np.log(np.linalg.det(A)))
    True
    """
    return 2 * np.sum(np.log(L.diagonal())) 
Example #9
Source File: control_and_filter.py    From QuantEcon.lectures.code with BSD 3-Clause "New" or "Revised" License
def predict(self, a_hist, t):
        """
        This function implements the prediction formula discussed in section 6, equation (1.59).
        It takes a realization of a^N and the period in which the prediction is formed.

        Output:  E[abar | a_t, a_{t-1}, ..., a_1, a_0]
        """

        N = np.asarray(a_hist).shape[0] - 1
        a_hist = np.asarray(a_hist).reshape(N + 1, 1)
        V = self.construct_V(N + 1)

        aux_matrix = np.zeros((N + 1, N + 1))
        aux_matrix[:(t + 1), :(t + 1)] = np.eye(t + 1)
        L = la.cholesky(V).T
        Ea_hist = la.inv(L) @ aux_matrix @ L @ a_hist

        return Ea_hist 
Example #10
Source File: sigma_points.py    From filterpy with MIT License
def __init__(self, n, alpha, beta, kappa, sqrt_method=None, subtract=None):
        #pylint: disable=too-many-arguments

        self.n = n
        self.alpha = alpha
        self.beta = beta
        self.kappa = kappa
        if sqrt_method is None:
            self.sqrt = cholesky
        else:
            self.sqrt = sqrt_method

        if subtract is None:
            self.subtract = np.subtract
        else:
            self.subtract = subtract

        self._compute_weights() 
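
This constructor appears to be filterpy's MerweScaledSigmaPoints, which falls back to scipy.linalg.cholesky as the matrix square root when no sqrt_method is supplied. A usage sketch, assuming filterpy is installed:

import numpy as np
from filterpy.kalman import MerweScaledSigmaPoints

points = MerweScaledSigmaPoints(n=2, alpha=1e-3, beta=2.0, kappa=0.0)
sigmas = points.sigma_points(np.zeros(2), np.eye(2))   # shape (2n+1, n) = (5, 2)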
Example #11
Source File: mnd.py    From TextDetector with GNU General Public License v3.0
def __init__(self, sigma, mu, seed=42):
        self.sigma = sigma
        self.mu = mu
        if not (len(mu.shape) == 1):
            raise Exception('mu has shape ' + str(mu.shape) +
                            ' (it should be a vector)')

        self.sigma_inv = solve(self.sigma, N.identity(mu.shape[0]),
                               sym_pos=True)
        self.L = cholesky(self.sigma)

        self.s_rng = make_theano_rng(seed, which_method='normal')

        #Compute logZ
        #log Z = log 1/( (2pi)^(-k/2) |sigma|^-1/2 )
        # = log 1 - log (2pi)^(-k/2) |sigma|^-1/2
        # = 0 - log (2pi)^(-k/2) - log |sigma|^-1/2
        # = (k/2) * log(2pi) + (1/2) * log |sigma|
        k = float(self.mu.shape[0])
        self.logZ = 0.5 * (k * N.log(2. * N.pi) + N.log(det(sigma))) 
Example #12
Source File: mcmc.py    From deep_gp_random_features with Apache License 2.0
def do_sampleF2(Y, X, current_F1, log_theta):

    log_theta_sigma2, log_theta_lengthscale, log_theta_lambda = unpack_log_theta(log_theta)

    n = Y.shape[0]

    K_F2 = covariance_function(current_F1, current_F1, log_theta_sigma2[1], log_theta_lengthscale[1]) + np.eye(n) * 1e-9
    K_Y = K_F2 + np.eye(n) * np.exp(log_theta_lambda)
    L_K_Y, lower_K_Y = linalg.cho_factor(K_Y, lower=True)
    K_inv_Y = linalg.cho_solve((L_K_Y, lower_K_Y), Y)

    mu = np.dot(K_F2, K_inv_Y)

    K_inv_K = linalg.cho_solve((L_K_Y, lower_K_Y), K_F2)
    Sigma = K_F2 - np.dot(K_F2, K_inv_K)

    L_Sigma = linalg.cholesky(Sigma, lower=True)
    proposed_F2 = mu + np.dot(L_Sigma, np.random.normal(0.0, 1.0, [n, 1]))

    return proposed_F2
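
The cho_factor/cho_solve pair used above solves systems against K_Y without forming its inverse explicitly. A minimal self-contained sketch of that pattern on illustrative data:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
n = 5
A = rng.standard_normal((n, n))
K = A @ A.T + np.eye(n)             # symmetric positive definite
b = rng.standard_normal(n)

c, low = linalg.cho_factor(K, lower=True)
x = linalg.cho_solve((c, low), b)   # solves K @ x = b
assert np.allclose(K @ x, b)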

Example #13
Source File: bayesian.py    From nni with MIT License
def first_fit(self, train_x, train_y):
        """ Fit the regressor for the first time. """
        train_x, train_y = np.array(train_x), np.array(train_y)

        self._x = np.copy(train_x)
        self._y = np.copy(train_y)

        self._distance_matrix = edit_distance_matrix(self._x)
        k_matrix = bourgain_embedding_matrix(self._distance_matrix)
        k_matrix[np.diag_indices_from(k_matrix)] += self.alpha

        self._l_matrix = cholesky(k_matrix, lower=True)  # Line 2

        self._alpha_vector = cho_solve(
            (self._l_matrix, True), self._y)  # Line 3

        self._first_fitted = True
        return self 
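
The "Line 2" and "Line 3" comments above track the standard Gaussian-process regression algorithm (Algorithm 2.1 in Rasmussen and Williams): factor the kernel matrix, then solve for the weight vector. A self-contained sketch of those two lines with an illustrative RBF kernel:

import numpy as np
from scipy.linalg import cholesky, cho_solve

rng = np.random.default_rng(0)
X = rng.standard_normal((10, 2))
y = rng.standard_normal(10)

sqdist = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
K = np.exp(-0.5 * sqdist) + 1e-6 * np.eye(10)   # RBF kernel plus jitter

L = cholesky(K, lower=True)        # Line 2
alpha = cho_solve((L, True), y)    # Line 3: alpha = K^{-1} y
assert np.allclose(K @ alpha, y)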
Example #14
Source File: cpu.py    From ggmm with MIT License
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    '''Log probability for full covariance matrices.
    '''
    n_samples, n_dimensions = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dimensions),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dimensions * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #15
Source File: gmm.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            try:
                cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                          lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")

        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #16
Source File: gpei_constrained_chooser.py    From Milano with Apache License 2.0
def sample_constraint_hypers(self, comp, labels):
        # The latent GP projection
        if (self.ff is None or self.ff.shape[0] < comp.shape[0]):
            self.ff_samples = []
            comp_cov  = self.cov(self.constraint_amp2, self.constraint_ls, comp)
            obsv_cov  = comp_cov + 1e-6*np.eye(comp.shape[0])
            obsv_chol = spla.cholesky(obsv_cov, lower=True)
            self.ff = np.dot(obsv_chol,npr.randn(obsv_chol.shape[0]))

        self._sample_constraint_noisy(comp, labels)
        self._sample_constraint_ls(comp, labels)
        self.constraint_hyper_samples.append((self.constraint_mean,
                                              self.constraint_gain,
                                              self.constraint_amp2,
                                              self.constraint_ls))
        self.ff_samples.append(self.ff) 
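
The ff draw above uses the standard device for sampling from N(0, K): multiply the lower Cholesky factor of the covariance by a vector of standard normals. A self-contained sketch with an illustrative covariance:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
n = 4
A = rng.standard_normal((n, n))
cov = A @ A.T + 1e-6 * np.eye(n)      # symmetric positive definite

L = linalg.cholesky(cov, lower=True)
sample = L @ rng.standard_normal(n)   # Cov(L z) = L @ L.T = cov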
Example #17
Source File: gpei_constrained_chooser.py    From Milano with Apache License 2.0
def pred_constraint_voilation(self, cand, comp, vals):
        # The primary covariances for prediction.
        comp_cov   = self.cov(self.constraint_amp2, self.constraint_ls, comp)
        cand_cross = self.cov(self.constraint_amp2, self.constraint_ls, comp,
                              cand)

        # Compute the required Cholesky.
        obsv_cov  = comp_cov + self.constraint_noise*np.eye(comp.shape[0])
        obsv_chol = spla.cholesky(obsv_cov, lower=True)

        cov_grad_func = getattr(gp, 'grad_' + self.cov_func.__name__)
        cand_cross_grad = cov_grad_func(self.constraint_ls, comp, cand)

        # Predictive things.
        # Solve the linear systems.
        alpha  = spla.cho_solve((obsv_chol, True), self.ff)
        beta   = spla.solve_triangular(obsv_chol, cand_cross, lower=True)

        # Predict the marginal means and variances at candidates.
        func_m = np.dot(cand_cross.T, alpha)# + self.constraint_mean
        func_m = sps.norm.cdf(func_m*self.constraint_gain)

        return func_m

Example #18
Source File: heart.py    From beat with GNU General Public License v3.0
import numpy as num
from scipy import linalg


def log_determinant(A, inverse=False):
    """
    Calculate the natural logarithm of the determinant of the given matrix,
    using the properties of a triangular matrix.

    Parameters
    ----------
    A : n x n :class:`numpy.ndarray`
    inverse : boolean
        If True, calculates the log determinant of the inverse of the Cholesky
        decomposition, which is equivalent to taking the determinant of the
        inverse of the matrix.

        L @ L.T = A                      inverse=False
        (L^-1).T @ L^-1 = A^-1           inverse=True

    Returns
    -------
    float : logarithm of the determinant of the input matrix A
    """

    cholesky = linalg.cholesky(A, lower=True)
    if inverse:
        cholesky = num.linalg.inv(cholesky)
    return num.log(num.diag(cholesky)).sum() * 2. 
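
A quick numerical check of log_determinant on a small positive-definite matrix, assuming the function and its module imports above are in scope:

import numpy as num

A = num.array([[4.0, 2.0],
               [2.0, 3.0]])

assert num.isclose(log_determinant(A), num.log(num.linalg.det(A)))
assert num.isclose(log_determinant(A, inverse=True), -num.log(num.linalg.det(A)))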
Example #19
Source File: tututils.py    From MLSS with GNU General Public License v2.0
def load_2d_hard():
    """
    Returns non-isotropic data to motivate the use of non-Euclidean norms (as
    well as the ground truth).
    """

    centres = np.array([[3., -1.], [-2., 1.], [2., 5.]])
    covs = []
    covs.append(np.array([[4., 2.], [2., 1.5]]))
    covs.append(np.array([[1, -1.5], [-1.5, 3.]]))
    covs.append(np.array([[1., 0.], [0., 1.]]))

    N = [1000, 500, 300]

    # Map row samples x ~ N(0, I) through L.T so that Cov = L @ L.T = c.
    X = [np.random.randn(n, 2).dot(la.cholesky(c, lower=True).T) + m
         for n, m, c in zip(N, centres, covs)]
    X = np.vstack(X)

    labels = np.concatenate((np.zeros(N[0]), np.ones(N[1]), 2*np.ones(N[2])))

    return X, labels 
Example #20
Source File: nutsjump.py    From PTMCMCSampler with MIT License
def update_cf(self):
        """Update the Cholesky factor of the inverse mass matrix
        
        NOTE: this function is different from the one in GradientJump!
        """
        # Since we are adaptively tuning the step size epsilon, we should at
        # least keep the determinant of this guy equal to what it was before.
        new_cov_cf = sl.cholesky(self.mm_inv, lower=True)

        ldet_old = np.sum(np.log(np.diag(self.cov_cf)))
        ldet_new = np.sum(np.log(np.diag(new_cov_cf)))

        self.cov_cf = np.exp((ldet_old-ldet_new)/self.ndim) * new_cov_cf
        self.cov_cfi = sl.solve_triangular(self.cov_cf, np.eye(len(self.cov_cf)), trans=0, lower=True) 
Example #21
Source File: nutsjump.py    From PTMCMCSampler with MIT License
def set_cf(self):
        """Update the Cholesky factor of the inverse mass matrix"""
        self.cov_cf = sl.cholesky(self.mm_inv, lower=True)
        self.cov_cfi = sl.solve_triangular(self.cov_cf, np.eye(len(self.cov_cf)), trans=0, lower=True) 
Example #22
Source File: linear_algebra.py    From klustakwik2 with BSD 3-Clause "New" or "Revised" License
def cholesky(self):
    # The diagonal part must be strictly positive for the factorization to exist.
    if (self.diagonal <= 0).any():
        raise LinAlgError
    if self.block.size:
        block = cholesky(self.block, lower=True)
    else:
        block = zeros_like(self.block)
    diagonal = sqrt(self.diagonal)
    return self.new_with_same_masks(block, diagonal)
Example #23
Source File: test_linear_algebra.py    From klustakwik2 with BSD 3-Clause "New" or "Revised" License
def test_cholesky_trisolve():
    M = array([[3.5, 0,   0,   0],
               [0,   1.2, 0,   0.1],
               [0,   0,   6.2, 0],
               [0,   0.1,   0,   11]])
    masked = array([0, 2])
    unmasked = array([1, 3])
    cov = BlockPlusDiagonalMatrix(masked, unmasked)
    cov.block[:, :] = M[ix_(unmasked, unmasked)]
    cov.diagonal[:] = M[masked, masked]
    # test cholesky decomposition
    chol = cov.cholesky()
    M_chol_from_block = zeros_like(M)
    M_chol_from_block[ix_(unmasked, unmasked)] = chol.block
    M_chol_from_block[masked, masked] = chol.diagonal
    M_chol = cholesky(M, lower=True)
    assert_array_almost_equal(M_chol_from_block, M_chol)
    assert_array_almost_equal(M, M_chol.dot(M_chol.T))
    assert_array_almost_equal(cov.block, chol.block.dot(chol.block.T))
    # test trisolve
    x = randn(M.shape[0])
    y1 = solve_triangular(M_chol, x, lower=True)
    y2 = chol.trisolve(x)
    y3 = zeros(len(x))
    trisolve(chol.block, chol.diagonal, chol.masked, chol.unmasked, len(chol.masked), len(chol.unmasked), x, y3)
    assert_array_almost_equal(y1, y2)
    assert_array_almost_equal(y1, y3)
    assert_array_almost_equal(y2, y3)
    # test compute diagonal of inverse of cov matrix used in E-step
    inv_cov_diag = zeros(len(x))
    basis_vector = zeros(len(x))
    for i in range(len(x)):
        basis_vector[i] = 1.0
        root = chol.trisolve(basis_vector)
        inv_cov_diag[i] = sum(root**2)
        basis_vector[:] = 0
    M_inv_diag = diag(inv(M))
    assert_array_almost_equal(M_inv_diag, inv_cov_diag) 
Example #24
Source File: wishart_test.py    From deep_image_model with Apache License 2.0
def chol(x):
  """Compute the lower-triangular Cholesky factor of x."""
  # scipy.linalg.cholesky returns the upper factor by default;
  # transposing it gives the lower factor.
  return linalg.cholesky(x).T
Example #25
Source File: gpei_chooser.py    From Milano with Apache License 2.0
def _sample_noiseless(self, comp, vals):
    def logprob(hypers):
      mean = hypers[0]
      amp2 = hypers[1]
      noise = 1e-3

      if amp2 < 0:
        return -np.inf

      cov = amp2 * (self.cov_func(self.ls, comp, None) +
                    1e-6 * np.eye(comp.shape[0])) + noise * np.eye(
        comp.shape[0])
      chol = spla.cholesky(cov, lower=True)
      solve = spla.cho_solve((chol, True), vals - mean)
      lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(vals - mean, solve)

      # Roll in amplitude lognormal prior
      lp -= 0.5 * (np.log(amp2) / self.amp2_scale) ** 2

      return lp

    hypers = slice_sample(np.array([self.mean, self.amp2, self.noise]), logprob,
                          compwise=False)
    self.mean = hypers[0]
    self.amp2 = hypers[1]
    self.noise = 1e-3 
Example #26
Source File: gpei_chooser.py    From Milano with Apache License 2.0
def _sample_ls(self, comp, vals):
    def logprob(ls):
      if np.any(ls < 0) or np.any(ls > self.max_ls):
        return -np.inf

      cov = self.amp2 * (self.cov_func(ls, comp, None) + 1e-6 * np.eye(
        comp.shape[0])) + self.noise * np.eye(comp.shape[0])
      chol = spla.cholesky(cov, lower=True)
      solve = spla.cho_solve((chol, True), vals - self.mean)
      lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(vals - self.mean,
                                                         solve)
      return lp

    self.ls = slice_sample(self.ls, logprob, compwise=True) 
Example #27
Source File: gpeiopt_chooser.py    From Milano with Apache License 2.0
def _sample_noisy(self, comp, vals):
        def logprob(hypers):
            mean  = hypers[0]
            amp2  = hypers[1]
            noise = hypers[2]

            # This is pretty hacky, but keeps things sane.
            if mean > np.max(vals) or mean < np.min(vals):
                return -np.inf

            if amp2 < 0 or noise < 0:
                return -np.inf

            cov   = (amp2 * (self.cov_func(self.ls, comp, None) +
                1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0]))
            chol  = spla.cholesky(cov, lower=True)
            solve = spla.cho_solve((chol, True), vals - mean)
            lp    = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)

            # Roll in noise horseshoe prior.
            lp += np.log(np.log(1 + (self.noise_scale/noise)**2))

            # Roll in amplitude lognormal prior
            lp -= 0.5*(np.log(np.sqrt(amp2))/self.amp2_scale)**2

            return lp

        hypers = slice_sample(np.array(
                [self.mean, self.amp2, self.noise]), logprob, compwise=False)
        self.mean  = hypers[0]
        self.amp2  = hypers[1]
        self.noise = hypers[2] 
Example #28
Source File: gpeiopt_chooser.py    From Milano with Apache License 2.0
def _sample_noiseless(self, comp, vals):
        def logprob(hypers):
            mean  = hypers[0]
            amp2  = hypers[1]
            noise = 1e-3

            # This is pretty hacky, but keeps things sane.
            if mean > np.max(vals) or mean < np.min(vals):
                return -np.inf

            if amp2 < 0:
                return -np.inf

            cov   = (amp2 * (self.cov_func(self.ls, comp, None) +
                1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0]))
            chol  = spla.cholesky(cov, lower=True)
            solve = spla.cho_solve((chol, True), vals - mean)
            lp    = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)

            # Roll in amplitude lognormal prior
            lp -= 0.5*(np.log(np.sqrt(amp2))/self.amp2_scale)**2

            return lp

        hypers = slice_sample(np.array(
                [self.mean, self.amp2, self.noise]), logprob, compwise=False)
        self.mean  = hypers[0]
        self.amp2  = hypers[1]
        self.noise = 1e-3 
Example #29
Source File: gp.py    From Milano with Apache License 2.0
def logprob(self, comp, vals):
            mean  = self.mean
            amp2  = self.amp2
            noise = self.noise
            
            cov   = amp2 * (self.cov_func(self.ls, comp, None) + 1e-6*np.eye(comp.shape[0])) + noise*np.eye(comp.shape[0])
            chol  = spla.cholesky(cov, lower=True)
            solve = spla.cho_solve((chol, True), vals - mean)
            lp    = -np.sum(np.log(np.diag(chol)))-0.5*np.dot(vals-mean, solve)
            return lp 
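
The quantity lp above is the Gaussian-process marginal log-likelihood up to the constant term -(n/2) * log(2*pi): the -sum(log(diag(chol))) term equals -0.5 * log|cov|. A self-contained check on illustrative data:

import numpy as np
from scipy import linalg, stats

rng = np.random.default_rng(0)
n = 6
A = rng.standard_normal((n, n))
K = A @ A.T + np.eye(n)
y = rng.standard_normal(n)

chol = linalg.cholesky(K, lower=True)
solve = linalg.cho_solve((chol, True), y)
lp = -np.sum(np.log(np.diag(chol))) - 0.5 * y @ solve

full = stats.multivariate_normal(np.zeros(n), K).logpdf(y)
assert np.isclose(lp, full + 0.5 * n * np.log(2 * np.pi))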
Example #30
Source File: try_mlecov.py    From Splunking-Crime with GNU Affero General Public License v3.0
import numpy as np


def mvn_loglike_chol(x, sigma):
    '''loglike multivariate normal

    assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)

    brute force from formula
    no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
    '''
    #see numpy thread
    #Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
    sigmainv = np.linalg.inv(sigma)
    cholsigmainv = np.linalg.cholesky(sigmainv).T
    x_whitened = np.dot(cholsigmainv, x)

    logdetsigma = np.log(np.linalg.det(sigma))
    nobs = len(x)
    from scipy import stats
    print('scipy.stats')
    print(np.log(stats.norm.pdf(x_whitened)).sum())

    llf = - np.dot(x_whitened.T, x_whitened)
    llf -= nobs * np.log(2 * np.pi)
    llf -= logdetsigma
    llf *= 0.5
    return llf, logdetsigma, 2 * np.sum(np.log(np.diagonal(cholsigmainv)))