Python autograd.numpy.diag() Examples

The following code examples show how to use autograd.numpy.diag(). They are drawn from open source Python projects.
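
Before the project examples, here is a minimal sketch of the basic pattern (not taken from any of the projects below): autograd.numpy.diag is differentiable, so gradients flow through matrices assembled from a vector of diagonal entries.

import autograd.numpy as np
from autograd import grad

def log_det_diag(d):
    # log|diag(d)| = sum(log(d)) for positive entries d
    return np.linalg.slogdet(np.diag(d))[1]

d = np.array([1.0, 2.0, 4.0])
print(grad(log_det_diag)(d))  # expected: 1/d = [1.0, 0.5, 0.25]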

Example 1
Project: ReducedVarianceReparamGradients   Author: andymiller   File: misc.py    MIT License
def kl_mvn_diag(m0, S0, m1, S1):
    """
    Kullback-Leibler divergence from Gaussian (m0, S0) to Gaussian (m1, S1).
    Also computes one KL divergence per row when a stack of means is passed.
    Diagonal covariances are assumed.  Divergence is expressed in nats.

    - accepts stacks of means, but only one S0 and S1

    From wikipedia
    KL( (m0, S0) || (m1, S1))
         = .5 * ( tr(S1^{-1} S0) + log |S1|/|S0| + 
                  (m1 - m0)^T S1^{-1} (m1 - m0) - N )
    """
    # store inv diag covariance of S1 and diff between means
    N = m0.shape[1]
    iS1 = 1./S1
    diff = m1 - m0

    # kl is made of three terms
    tr_term   = np.sum(iS1 * S0)
    det_term  = np.sum(np.log(S1)) - np.sum(np.log(S0))
    quad_term = np.sum( (diff*diff) * iS1, axis=1)
    return .5 * (tr_term + det_term + quad_term - N) 
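
A hypothetical quick check (not part of misc.py; it assumes kl_mvn_diag is defined as above and autograd.numpy is imported as np): the divergence between identical Gaussians is zero, and a stack of means in m0 yields one divergence per row.

m0 = np.zeros((3, 4))                        # a stack of 3 means in 4 dimensions
m1 = np.ones(4)
S0 = np.ones(4)                              # diagonal covariances as vectors
S1 = 2.0 * np.ones(4)
print(kl_mvn_diag(m0, S0, m1, S1).shape)     # (3,)
print(kl_mvn_diag(m1[None, :], S1, m1, S1))  # [0.] -- identical Gaussians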
Example 2
Project: ReducedVarianceReparamGradients   Author: andymiller   File: models.py    MIT License
def set_lnpdf(model="baseball", dset="boston"):
    if model == "baseball":
        return lambda x: np.squeeze(baseball.lnpdf_flat(x, 0)), baseball.D, model
    if model == "frisk":
        lnpdf, unpack, num_params, frisk_df, param_names = \
            frisk.make_model_funs(crime=2., precinct_type=1)
        return lnpdf, num_params, model
    if model == "normal":
        D, r       = 10, 2
        mu0        = np.zeros(D)
        C_true     = np.random.randn(D, r) * 2.
        v_true     = np.random.randn(D)
        Sigma_true = np.dot(C_true, C_true.T) + np.diag(np.exp(v_true))
        print(Sigma_true)
        lnpdf = lambda x: misc.make_fixed_cov_mvn_logpdf(Sigma_true)(x, mean=mu0)
        return lnpdf, D, model
    if model == "bnn":
        (Xtrain, Ytrain), (Xtest, Ytest) = \
            uci.load_dataset(dset, split_seed=0)
        lnpdf, predict, loglike, parser, (std_X, ustd_X), (std_Y, ustd_Y) = \
            nn.make_nn_regression_funs(Xtrain[:100], Ytrain[:100],
                                       layer_sizes=None, obs_variance=None)
        lnpdf_vec = lambda ths: np.array([lnpdf(th)
                                          for th in np.atleast_2d(ths)])
        return lnpdf_vec, parser.N, "-".join([model, dset]) 
Example 3
Project: SyntheticStatistics   Author: BlissChapman   File: util.py    MIT License
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points 
    from the fit. 
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct 
        a new covariance matrix before drawing samples. Useful to shrink the spread 
        of the variance.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V 
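
A hedged usage sketch for fit_gaussian_draw (toy data, not from the project; it assumes the function and NumpySeedContext are importable): an eig_pow below 1 shrinks the spread of the fitted covariance before sampling.

import autograd.numpy as np

X = np.random.randn(200, 2) * np.array([3.0, 0.5])  # anisotropic toy data
V = fit_gaussian_draw(X, J=5, seed=0, eig_pow=0.5)   # 5 draws with shrunken spread
print(V.shape)  # (5, 2)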
Example 4
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License
def init_locs_randn(tst_data, n_test_locs, seed=1):
        """Fit a Gaussian to the merged data of the two samples and draw
        n_test_locs points from the Gaussian"""
        # set the seed
        rand_state = np.random.get_state()
        np.random.seed(seed)

        X, Y = tst_data.xy()
        d = X.shape[1]
        # fit a Gaussian in the middle of X, Y and draw sample to initialize T
        xy = np.vstack((X, Y))
        mean_xy = np.mean(xy, 0)
        cov_xy = np.cov(xy.T)
        [Dxy, Vxy] = np.linalg.eig(cov_xy + 1e-3*np.eye(d))
        Dxy = np.real(Dxy)
        Vxy = np.real(Vxy)
        Dxy[Dxy<=0] = 1e-3
        eig_pow = 0.9 # 1.0 = not shrink
        reduced_cov_xy = Vxy.dot(np.diag(Dxy**eig_pow)).dot(Vxy.T) + 1e-3*np.eye(d)

        T0 = np.random.multivariate_normal(mean_xy, reduced_cov_xy, n_test_locs)
        # reset the seed back to the original
        np.random.set_state(rand_state)
        return T0 
Example 5
Project: pymanopt   Author: pymanopt   File: packing_on_the_sphere.py    BSD 3-Clause "New" or "Revised" License
def packing_on_the_sphere(n, k, epsilon):
    manifold = Elliptope(n, k)
    solver = ConjugateGradient(mingradnorm=1e-8, maxiter=1e5)

    def cost(X):
        Y = np.dot(X, X.T)
        # Shift the exponentials by the maximum value to reduce numerical
        # trouble due to possible overflows.
        s = np.triu(Y, 1).max()
        expY = np.exp((Y - s) / epsilon)
        # Zero out the diagonal
        expY -= np.diag(np.diag(expY))
        u = np.triu(expY, 1).sum()
        return s + epsilon * np.log(u)

    problem = Problem(manifold, cost)
    return solver.solve(problem) 
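
The comment in cost() refers to the standard max-shift ("log-sum-exp") stabilization. A small sketch with made-up numbers, separate from the pymanopt example, shows why the shift matters:

import autograd.numpy as np

y = np.array([800.0, 790.0, 500.0])
epsilon = 1.0
naive = epsilon * np.log(np.sum(np.exp(y / epsilon)))             # overflows to inf
s = y.max()
stable = s + epsilon * np.log(np.sum(np.exp((y - s) / epsilon)))  # finite, ~800.00005
print(naive, stable)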
Example 6
Project: pilco-learner   Author: cryscan   File: util.py    MIT License
def gaussian_sin(m, v, i, e=None):
    d = len(m)
    L = len(i)
    e = np.ones((1, L)) if e is None else np.atleast_2d(e)

    mi = np.atleast_2d(m[i])
    vi = v[np.ix_(i, i)]
    vii = np.atleast_2d(np.diag(vi))
    M = e * exp(-vii / 2) * sin(mi)
    M = M.flatten()

    lq = -(vii.T + vii) / 2
    q = exp(lq)
    V = ((exp(lq + vi) - q) * cos(mi.T - mi) -
         (exp(lq - vi) - q) * cos(mi.T + mi))
    V = np.dot(e.T, e) * V / 2

    C = np.diag((e * exp(-vii / 2) * cos(mi)).flatten())
    C = fill_mat(C, np.zeros((d, L)), i, None)

    return M, V, C 
Example 7
Project: pilco-learner   Author: cryscan   File: gp.py    MIT License
def log_pdf(self, hyp):
        x = np.atleast_2d(self.inputs)
        y = np.atleast_2d(self.targets)

        n, D = x.shape
        n, E = y.shape

        hyp = hyp.reshape(E, -1)
        K = self.kernel(hyp, x)  # [E, n, n]
        L = cholesky(K)
        alpha = np.hstack([solve(K[i], y[:, i]) for i in range(E)])
        y = y.flatten(order='F')

        logp = 0.5 * n * E * log(2 * np.pi) + 0.5 * np.dot(y, alpha) + np.sum(
            [log(np.diag(L[i])) for i in range(E)])

        return logp 
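
The determinant term above relies on the Cholesky identity log|K| = 2 * sum(log(diag(L))). A quick numerical check with a made-up positive-definite matrix (not part of gp.py):

import autograd.numpy as np

A = np.random.randn(5, 5)
K = A @ A.T + 5 * np.eye(5)             # a positive-definite test matrix
L = np.linalg.cholesky(K)
print(np.linalg.slogdet(K)[1])          # log|K|
print(2 * np.sum(np.log(np.diag(L))))   # same value via the Cholesky factor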
Example 8
Project: mici   Author: matt-graham   File: test_matrices.py    MIT License
def __init__(self):
        super().__init__()
        if AUTOGRAD_AVAILABLE:
            grad_log_abs_det_sqrt_func = grad(
                lambda d: 0.5 * anp.linalg.slogdet(anp.diag(d))[1])
            grad_quadratic_form_inv_func = grad(
                lambda d, v: v @ anp.diag(1 / d) @ v)
        for sz in SIZES:
            diagonal = np.abs(self.rng.standard_normal(sz))
            self.matrices[sz] = matrices.PositiveDiagonalMatrix(diagonal)
            self.np_matrices[sz] = np.diag(diagonal)
            if AUTOGRAD_AVAILABLE:
                self.grad_log_abs_det_sqrts[sz] = (
                    grad_log_abs_det_sqrt_func(diagonal))
                self.grad_quadratic_form_invs[sz] = partial(
                    grad_quadratic_form_inv_func, diagonal) 
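
The two gradients built here have simple closed forms, which makes the anp.diag usage easy to verify by hand. A sketch with assumed values (not from mici's test suite):

import autograd.numpy as anp
from autograd import grad

quad = lambda d, v: v @ anp.diag(1 / d) @ v   # same form as the test above
d = anp.array([1.0, 2.0, 4.0])
v = anp.array([1.0, 1.0, 1.0])
print(grad(quad)(d, v))   # autograd gradient with respect to d
print(-(v / d) ** 2)      # closed form: [-1.0, -0.25, -0.0625]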
Example 9
Project: mici   Author: matt-graham   File: test_matrices.py    MIT License
def __init__(self):
        super().__init__()
        if AUTOGRAD_AVAILABLE:
            grad_log_abs_det_sqrt_func = grad(
                lambda d: 0.5 * anp.linalg.slogdet(anp.diag(d))[1])
            grad_quadratic_form_inv_func = grad(
                lambda d, v: v @ anp.diag(1 / d) @ v)
        for sz in SIZES:
            diagonal = self.rng.standard_normal(sz)
            self.matrices[sz] = matrices.DiagonalMatrix(diagonal)
            self.np_matrices[sz] = np.diag(diagonal)
            if AUTOGRAD_AVAILABLE:
                self.grad_log_abs_det_sqrts[sz] = (
                    grad_log_abs_det_sqrt_func(diagonal))
                self.grad_quadratic_form_invs[sz] = partial(
                    grad_quadratic_form_inv_func, diagonal) 
Example 10
Project: kernel-gof   Author: wittawatj   File: util.py    MIT License
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points 
    from the fit. 
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct 
        a new covariance matrix before drawing samples. Useful to shrink the spread 
        of the variance.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V 
Example 11
Project: kernel-gof   Author: wittawatj   File: density.py    MIT License
def multivariate_normal_density(mean, cov, X):
        """
        Exact density (not log density) of a multivariate Gaussian.
        mean: length-d array
        cov: a dxd covariance matrix
        X: n x d 2d-array
        """
        
        evals, evecs = np.linalg.eigh(cov)
        cov_half_inv = evecs.dot(np.diag(evals**(-0.5))).dot(evecs.T)
    #     print(evals)
        half_evals = np.dot(X-mean, cov_half_inv)
        full_evals = np.sum(half_evals**2, 1)
        unden = np.exp(-0.5*full_evals)
        
        Z = np.sqrt(np.linalg.det(2.0*np.pi*cov))
        den = unden/Z
        assert len(den) == X.shape[0]
        return den 
Example 12
Project: lightML   Author: jfzhang95   File: LinearClassification.py    MIT License
def transform(self, X, y):
        """transform function"""
        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        XMat -= XMat.mean(axis=0)
        Sw, Sb = calc_Sw_Sb(XMat, yMat)

        if self.method == 'svd':
            U, S, V = np.linalg.svd(Sw)
            S = np.diag(S)
            Sw_inversed = V * np.linalg.pinv(S) * U.T
            A = Sw_inversed * Sb
        elif self.method == 'auto':
            A = np.linalg.pinv(Sw) * Sb

        eigval, eigvec = np.linalg.eig(A)
        eigval = eigval[0:self.n_components]
        eigvec = eigvec[:, 0:self.n_components]
        X_transformed = np.dot(XMat, eigvec)
        self.W = eigvec

        return X_transformed 
Example 13
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License
def test_diag():     combo_check(np.diag, [0], [R(5, 5)], k=[-1, 0, 1]) 
Example 14
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License
def test_diag_flat():combo_check(np.diag, [0], [R(5)],    k=[-1, 0, 1]) 
Example 15
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def get_link_h(w, q, ln_q, ln_1_q, ln_s):

    w = w.reshape(-1, 3)

    n = numpy.shape(w)[0]

    h = numpy.zeros((n*3, n*3))

    for i in range(0, 3):
        for j in range(0, 3):
            tmp_grad = autograd.elementwise_grad(autograd.elementwise_grad(e_link_log_lik, i), j)
            h[numpy.arange(0, n)*3 + i, numpy.arange(0, n)*3 + j] = \
                tmp_grad(w[:, 0].reshape(-1, 1), w[:, 1].reshape(-1, 1), w[:, 2].reshape(-1, 1),
                         q, ln_q, ln_1_q, ln_s).ravel()

    h_u = numpy.zeros((n*3, n*3))

    h_v = numpy.zeros((n*3, n*3))

    for i in range(0, n):
        tmp_u, tmp_s, tmp_v = scipy.linalg.svd(a=-h[i*3:(i+1)*3, i*3:(i+1)*3])

        tmp_s = tmp_s ** 0.5

        h_u[i*3:(i+1)*3, i*3:(i+1)*3] = numpy.matmul(tmp_u, numpy.diag(tmp_s))

        h_v[i*3:(i+1)*3, i*3:(i+1)*3] = numpy.matmul(numpy.diag(tmp_s), tmp_v)

    return -h, h_u, h_v 
Example 16
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def coregion(omega, kappa):

    B = numpy.dot(omega, omega.transpose())

    B = B + numpy.diag(kappa.ravel())

    B_g = numpy.zeros([3, 3, 3])

    for i in range(0, 3):
        B_g[i, :, i] = B_g[i, :, i] + omega.ravel()

        B_g[i, i, :] = B_g[i, i, :] + omega.ravel()

    omega_0_g = B_g[0, :, :]

    omega_1_g = B_g[1, :, :]

    omega_2_g = B_g[2, :, :]

    B_g = numpy.zeros([3, 3, 3])

    for i in range(0, 3):
        B_g[i, i, i] = 1

    kappa_0_g = B_g[0, :, :]

    kappa_1_g = B_g[1, :, :]

    kappa_2_g = B_g[2, :, :]

    return B, omega_0_g, omega_1_g, omega_2_g, kappa_0_g, kappa_1_g, kappa_2_g 
Example 17
Project: quanser-openai-driver   Author: BlueRiverTech   File: lqr.py    MIT License
def LQR_control():
    # Cost matrices for LQR
    Q = np.diag(np.array([1, 1, 1, 1]))  # state_dimension = 4
    R = np.eye(1)  # control_dimension = 1

    A, B = computeAB(np.array([0.0, 0.0, 0.0, 0.0]), np.array([0.0]))

    # Use if discrete forward dynamics is used
    X = sp_linalg.solve_discrete_are(A, B, Q, R)
    K = np.dot(np.linalg.pinv(R + np.dot(B.T, np.dot(X, B))), np.dot(B.T, np.dot(X, A)))

    # Use for continuous version of LQR
    # X = sp_linalg.solve_continuous_are(A, B, Q, R)
    # K = np.dot(np.linalg.pinv(R), np.dot(B.T, X))
    return np.squeeze(K, 0) 
Example 18
Project: ReducedVarianceReparamGradients   Author: andymiller   File: misc.py    MIT License
def mvn_logpdf(x, mean, icholSigma):
    D     = len(mean)
    coef  = -.5*D*np.log(2.*np.pi)
    dterm = np.sum(np.log(np.diag(icholSigma)))
    white = np.dot(np.atleast_2d(x) - mean, icholSigma.T)
    qterm = -.5*np.sum(white**2, axis=1)
    ll = coef + dterm + qterm
    if len(ll) == 1:
        return ll[0]
    return ll 
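
A hedged check of mvn_logpdf against scipy (not part of misc.py; it assumes icholSigma is the inverse of the lower Cholesky factor of the covariance):

import autograd.numpy as np
from scipy.stats import multivariate_normal

D = 3
A = np.random.randn(D, D)
Sigma = A @ A.T + np.eye(D)                             # a covariance matrix
icholSigma = np.linalg.inv(np.linalg.cholesky(Sigma))   # inverse Cholesky factor
mean = np.zeros(D)
x = np.random.randn(D)
print(mvn_logpdf(x, mean, icholSigma))
print(multivariate_normal.logpdf(x, mean, Sigma))       # should agree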
Example 19
Project: momi2   Author: popgenmethods   File: moran_model.py    GNU General Public License v3.0
def moran_transition(t, n):
    assert t >= 0.0
    P, d, Pinv = moran_eigensystem(n)
    D = diag(exp(t * d))
    return check_probs_matrix(dot(P, dot(D, Pinv))) 
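
moran_transition uses the eigendecomposition identity expm(t*Q) = P diag(exp(t*d)) Pinv. A toy check with a made-up 2x2 rate matrix (not the Moran rate matrix):

import numpy as np
from scipy.linalg import expm

Q = np.array([[-1.0, 1.0], [2.0, -2.0]])
d, P = np.linalg.eig(Q)
Pinv = np.linalg.inv(P)
t = 0.7
print(expm(t * Q))
print(P @ np.diag(np.exp(t * d)) @ Pinv)   # same matrix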
Example 20
Project: momi2   Author: popgenmethods   File: moran_model.py    GNU General Public License v3.0
def rate_matrix(n, sparse_format="csr"):
    i = np.arange(n + 1)
    diag = i * (n - i) / 2.
    diags = [diag[:-1], -2 * diag, diag[1:]]
    M = scipy.sparse.diags(
        diags, [1, 0, -1], (n + 1, n + 1), format=sparse_format)
    return M 
Example 21
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License
def compute_mean_variance_stat(tst_data, gwidth2):
        """Compute the mean and variance of the MMD^2, and the test statistic
        :return: (mean, variance)
        """

        X, Y = tst_data.xy()
        if X.shape[0] != Y.shape[0]:
            raise ValueError('Require sample size of X = size of Y')

        ker = kernel.KGauss(gwidth2)
        K = ker.eval(X, X)
        L = ker.eval(Y, Y)
        KL = ker.eval(X, Y)

        n = X.shape[0]
        # computing meanMMD is only correct for Gaussian kernels.
        meanMMD = 2.0/n * (1.0 - 1.0/n*np.sum(np.diag(KL)))

        np.fill_diagonal(K, 0.0)
        np.fill_diagonal(L, 0.0)
        np.fill_diagonal(KL, 0.0)

        varMMD = 2.0/n/(n-1) * 1.0/n/(n-1) * np.sum((K + L - KL - KL.T)**2)
        # test statistic
        test_stat = 1.0/n * np.sum(K + L - KL - KL.T)
        return meanMMD, varMMD, test_stat 
Example 22
Project: SyntheticStatistics   Author: BlissChapman   File: data.py    MIT License
def sample(self, n, seed):
        rstate = np.random.get_state()
        np.random.seed(seed)

        d = self.d
        std_y = np.diag(np.hstack((np.sqrt(2.0), np.ones(d-1) )))
        X = np.random.randn(n, d)
        Y = np.random.randn(n, d).dot(std_y)
        np.random.set_state(rstate)
        return TSTData(X, Y, label='gvd') 
Example 23
Project: bayesian-coresets   Author: trevorcampbell   File: test_gaussian.py    MIT License
def ll_m2_exact(muw, Sigw, Siginv, x):
  L = np.linalg.cholesky(Siginv)
  Rho = np.dot(np.dot(L.T, Sigw), L)

  crho = 2*(Rho**2).sum() + (np.diag(Rho)*np.diag(Rho)[:, np.newaxis]).sum()

  mu = np.dot(L.T, (x - muw).T).T
  musq = (mu**2).sum(axis=1)

  return 0.25*(crho + musq*musq[:, np.newaxis] + np.diag(Rho).sum()*(musq + musq[:,np.newaxis]) + 4*np.dot(np.dot(mu, Rho), mu.T))

#Var[Log N(x; mu, Sig)] under mu ~ N(muw, Sigw) 
Example 24
Project: bayesian-coresets   Author: trevorcampbell   File: test_gaussian.py    MIT License
def ll_m2_exact_diag(muw, Sigw, Siginv, x):
  L = np.linalg.cholesky(Siginv)
  Rho = np.dot(np.dot(L.T, Sigw), L)

  crho = 2*(Rho**2).sum() + (np.diag(Rho)*np.diag(Rho)[:, np.newaxis]).sum()

  mu = np.dot(L.T, (x - muw).T).T
  musq = (mu**2).sum(axis=1)

  return 0.25*(crho + musq**2 + 2*np.diag(Rho).sum()*musq + 4*(np.dot(mu, Rho)*mu).sum(axis=1)) 
Example 25
Project: pymanopt   Author: pymanopt   File: low_rank_matrix_approximation.py    BSD 3-Clause "New" or "Revised" License
def cost(usv):
    delta = .5
    u = usv[0]
    s = usv[1]
    vt = usv[2]
    X = np.dot(np.dot(u, np.diag(s)), vt)
    return np.sum(np.sqrt((X - A)**2 + delta**2) - delta)


# define the Pymanopt problem 
Example 26
Project: ISM_supervised_DR   Author: chieh-neu   File: kernel_lib.py    MIT License
def nomalized_by_Degree_matrix(M, D):
	D2 = np.diag(D)
	DMD = M*(np.outer(D2, D2))
	return DMD 
Example 27
Project: ISM_supervised_DR   Author: chieh-neu   File: kernel_lib.py    MIT License
def compute_inverted_Degree_matrix(M):
	return np.diag(1.0/np.sqrt(M.sum(axis=1))) 
Example 28
Project: ISM_supervised_DR   Author: chieh-neu   File: kernel_lib.py    MIT License
def compute_Degree_matrix(M):
	return np.diag(np.sum(M, axis=0)) 
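
A hypothetical sketch combining the three kernel_lib helpers above on a toy similarity matrix (not from the project). Note that nomalized_by_Degree_matrix multiplies entrywise by the outer product of the diagonal it is given, so passing the inverted degree matrix yields M_ij / sqrt(d_i * d_j).

import autograd.numpy as np

M = np.array([[0., 1., 1.],
              [1., 0., 1.],
              [1., 1., 0.]])
D_full = compute_Degree_matrix(M)               # diag of column sums
D_inv_sqrt = compute_inverted_Degree_matrix(M)  # diag(1 / sqrt(row sums))
print(np.diag(D_full))                          # node degrees: [2. 2. 2.]
print(nomalized_by_Degree_matrix(M, D_inv_sqrt))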
Example 29
Project: variational-smc   Author: blei-lab   File: lgss_example.py    MIT License
def log_prop(self, t, Xc, Xp, y, prop_params, model_params):
        mu0, Sigma0, A, Q, C, R = model_params
        mut, lint, log_s2t = prop_params[t]
        s2t = np.exp(log_s2t)
        
        if t > 0:
            mu = mut + np.dot(A, Xp.T).T*lint
        else:
            mu = mut + lint*mu0
        
        return self.log_normal(Xc, mu, np.diag(s2t)) 
Example 30
Project: vittles   Author: rgiordan   File: test_sensitivity_lib.py    Apache License 2.0
def _test_max_order(self, eta_order, eps_order, test_order):
        # TODO: test this with reverse mode and swapping.

        # Partial derivative of the gradient higher than eta_order and
        # eps_order are zero.
        def objective(eta, eps):
            eta_sum = np.sum(eta)
            eps_sum = np.sum(eps)
            return (eta_sum ** (eta_order + 1)) * (eps_sum ** eps_order)

        # These need to be nonzero for the test to be valid.
        # Note that this test doesn't require an actual optimum,
        # nor does it require the real Hessian.
        eta0 = 0.01 * np.arange(2)
        eps0 = 0.02 * np.arange(3)
        eps1 = eps0 + 1

        # We don't actually need the real Hessian for this test.
        hess0 = np.diag(np.array([2.1, 4.5]))

        taylor_expansion_truth = \
            ParametricSensitivityTaylorExpansion.optimization_objective(
                objective_function=objective,
                input_val0=eta0,
                hyper_val0=eps0,
                hess0=hess0,
                order=test_order)

        taylor_expansion_test = \
            ParametricSensitivityTaylorExpansion.optimization_objective(
                objective_function=objective,
                input_val0=eta0,
                hyper_val0=eps0,
                hess0=hess0,
                max_input_order=eta_order,
                max_hyper_order=eps_order,
                order=test_order)

        assert_array_almost_equal(
            taylor_expansion_truth.evaluate_taylor_series(eps1),
            taylor_expansion_test.evaluate_taylor_series(eps1)) 
Example 31
Project: pilco-learner   Author: cryscan   File: util.py    MIT License
def gaussian_trig(m, v, i, e=None):
    d = len(m)
    L = len(i)
    e = np.ones((1, L)) if e is None else np.atleast_2d(e)
    ee = np.vstack([e, e]).reshape(1, -1, order='F')

    mi = np.atleast_2d(m[i])
    vi = v[np.ix_(i, i)]
    vii = np.atleast_2d(np.diag(vi))

    M = np.vstack([e * exp(-vii / 2) * sin(mi), e * exp(-vii / 2) * cos(mi)])
    M = M.flatten(order='F')

    lq = -(vii.T + vii) / 2
    q = exp(lq)

    U1 = (exp(lq + vi) - q) * sin(mi.T - mi)
    U2 = (exp(lq - vi) - q) * sin(mi.T + mi)
    U3 = (exp(lq + vi) - q) * cos(mi.T - mi)
    U4 = (exp(lq - vi) - q) * cos(mi.T + mi)

    V = np.vstack(
        [np.hstack([U3 - U4, U1 + U2]),
         np.hstack([(U1 + U2).T, U3 + U4])])
    V = np.vstack([
        np.hstack([V[::2, ::2], V[::2, 1::2]]),
        np.hstack([V[1::2, ::2], V[1::2, 1::2]])
    ])
    V = np.dot(ee.T, ee) * V / 2

    C = np.hstack([np.diag(M[1::2]), -np.diag(M[::2])])
    C = np.hstack([C[:, ::2], C[:, 1::2]])
    C = fill_mat(C, np.zeros((d, 2 * L)), i, None)

    return M, V, C 
Example 32
Project: pylqr   Author: navigator8972   File: pylqr.py    GNU General Public License v3.0
def regularized_persudo_inverse_(self, mat, reg=1e-5):
        """
        Use SVD to compute a regularized pseudo-inverse, perturbing the singular
        values to keep the result positive definite.
        """
        u, s, v = np.linalg.svd(mat)
        s[ s < 0 ] = 0.0        #truncate negative values...
        diag_s_inv = np.zeros((v.shape[0], u.shape[1]))
        diag_s_inv[0:len(s), 0:len(s)] = np.diag(1./(s+reg))
        return v.dot(diag_s_inv).dot(u.T) 
Example 33
Project: kernel-gof   Author: wittawatj   File: density.py    MIT License
def __init__(self, mean, cov):
        """
        mean: a numpy array of length d.
        cov: d x d numpy array for the covariance.
        """
        self.mean = mean 
        self.cov = cov
        assert mean.shape[0] == cov.shape[0]
        assert cov.shape[0] == cov.shape[1]
        E, V = np.linalg.eigh(cov)
        if np.any(np.abs(E) <= 1e-7):
            raise ValueError('covariance matrix is not full rank.')
        # The precision matrix
        self.prec = np.dot(np.dot(V, np.diag(old_div(1.0,E))), V.T)
        #print self.prec 
Example 34
Project: paragami   Author: rgiordan   File: numeric_array_patterns.py    Apache License 2.0
def freeing_jacobian(self, folded_val, sparse=True):
        jac_array = \
            _unconstrain_array_jacobian(folded_val, self._lb, self._ub)
        jac_array = np.atleast_1d(jac_array).flatten()
        if sparse:
            return osp.sparse.diags(jac_array)
        else:
            return np.diag(jac_array) 
Example 35
Project: paragami   Author: rgiordan   File: numeric_array_patterns.py    Apache License 2.0 5 votes vote down vote up
def unfreeing_jacobian(self, folded_val, sparse=True):
        jac_array = \
            _constrain_array_jacobian(
                _unconstrain_array(folded_val, self._lb, self._ub),
                self._lb, self._ub)
        jac_array = np.atleast_1d(jac_array).flatten()
        if sparse:
            return osp.sparse.diags(jac_array)
        else:
            return np.diag(jac_array) 
Example 36
Project: paragami   Author: rgiordan   File: simplex_patterns.py    Apache License 2.0
def _constrain_simplex_jacobian(simplex_vec):
    jac = \
        -1 * np.outer(simplex_vec, simplex_vec) + \
        np.diag(simplex_vec)
    return jac[:, 1:] 
Example 37
Project: paragami   Author: rgiordan   File: simplex_patterns.py    Apache License 2.0
def _unconstrain_simplex_jacobian(simplex_vec):
    """Get the unconstraining Jacobian for a single simplex vector.
    """
    return np.hstack(
        [ np.full(len(simplex_vec) - 1, -1 / simplex_vec[0])[:, None],
          np.diag(1 / simplex_vec[1: ]) ]) 
Example 38
Project: ParametricGP   Author: maziarraissi   File: parametric_GP.py    MIT License
def likelihood(self, hyp): 
        M = self.M 
        Z = self.Z
        m = self.m
        S = self.S
        X_batch = self.X_batch
        y_batch = self.y_batch
        jitter = self.jitter 
        jitter_cov = self.jitter_cov
        N = X_batch.shape[0]
        
        
        logsigma_n = hyp[-1]
        sigma_n = np.exp(logsigma_n)
        
        # Compute K_u_inv
        K_u = kernel(Z, Z, hyp[:-1])    
        # K_u_inv = np.linalg.solve(K_u + np.eye(M)*jitter_cov, np.eye(M))
        L = np.linalg.cholesky(K_u + np.eye(M)*jitter_cov)    
        K_u_inv = np.linalg.solve(L.T, np.linalg.solve(L,np.eye(M)))
        
        self.K_u_inv = K_u_inv
          
        # Compute mu
        psi = kernel(Z, X_batch, hyp[:-1])    
        K_u_inv_m = np.matmul(K_u_inv,m)   
        MU = np.matmul(psi.T,K_u_inv_m)
        
        # Compute cov
        Alpha = np.matmul(K_u_inv,psi)
        COV = kernel(X_batch, X_batch, hyp[:-1]) - np.matmul(psi.T, np.matmul(K_u_inv,psi)) + \
                np.matmul(Alpha.T, np.matmul(S,Alpha))
        
        COV_inv = np.linalg.solve(COV  + np.eye(N)*sigma_n + np.eye(N)*jitter, np.eye(N))
        # L = np.linalg.cholesky(COV  + np.eye(N)*sigma_n + np.eye(N)*jitter) 
        # COV_inv = np.linalg.solve(np.transpose(L), np.linalg.solve(L,np.eye(N)))
        
        # Compute cov(Z, X)
        cov_ZX = np.matmul(S,Alpha)
        
        # Update m and S
        alpha = np.matmul(COV_inv, cov_ZX.T)
        m = m + np.matmul(cov_ZX, np.matmul(COV_inv, y_batch-MU))    
        S = S - np.matmul(cov_ZX, alpha)
        
        self.m = m
        self.S = S
        
        # Compute NLML                
        K_u_inv_m = np.matmul(K_u_inv,m)
        NLML = 0.5*np.matmul(m.T,K_u_inv_m) + np.sum(np.log(np.diag(L))) + 0.5*np.log(2.*np.pi)*M
        
        return NLML[0,0] 
Example 39
Project: ParametricGP   Author: maziarraissi   File: parametric_GP.py    MIT License
def predict(self, X_star):
        Z = self.Z
        m = self.m.value
        S = self.S.value
        hyp = self.hyp
        K_u_inv = self.K_u_inv
        
        N_star = X_star.shape[0]
        partitions_size = 10000
        (number_of_partitions, remainder_partition) = divmod(N_star, partitions_size)
        
        mean_star = np.zeros((N_star,1));
        var_star = np.zeros((N_star,1));
        
        for partition in range(0,number_of_partitions):
            print("Predicting partition: %d" % (partition))
            idx_1 = partition*partitions_size
            idx_2 = (partition+1)*partitions_size
            
            # Compute mu
            psi = kernel(Z, X_star[idx_1:idx_2,:], hyp[:-1])    
            K_u_inv_m = np.matmul(K_u_inv,m)   
            mu = np.matmul(psi.T,K_u_inv_m)
            
            mean_star[idx_1:idx_2,0:1] = mu;        
        
            # Compute cov  
            Alpha = np.matmul(K_u_inv,psi)
            cov = kernel(X_star[idx_1:idx_2,:], X_star[idx_1:idx_2,:], hyp[:-1]) - \
                    np.matmul(psi.T, np.matmul(K_u_inv,psi)) + np.matmul(Alpha.T, np.matmul(S,Alpha))
            var = np.abs(np.diag(cov))# + np.exp(hyp[-1])
            
            var_star[idx_1:idx_2,0] = var
    
        print("Predicting the last partition")
        idx_1 = number_of_partitions*partitions_size
        idx_2 = number_of_partitions*partitions_size + remainder_partition
        
        # Compute mu
        psi = kernel(Z, X_star[idx_1:idx_2,:], hyp[:-1])    
        K_u_inv_m = np.matmul(K_u_inv,m)   
        mu = np.matmul(psi.T,K_u_inv_m)
        
        mean_star[idx_1:idx_2,0:1] = mu;        
    
        # Compute cov  
        Alpha = np.matmul(K_u_inv,psi)
        cov = kernel(X_star[idx_1:idx_2,:], X_star[idx_1:idx_2,:], hyp[:-1]) - \
                np.matmul(psi.T, np.matmul(K_u_inv,psi)) + np.matmul(Alpha.T, np.matmul(S,Alpha))
        var = np.abs(np.diag(cov))# + np.exp(hyp[-1])
        
        var_star[idx_1:idx_2,0] = var
        
        
        return mean_star, var_star 
Example 40
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License
def init_locs_2randn(tst_data, n_test_locs, subsample=10000, seed=1):
        """Fit a Gaussian to each dataset and draw half of n_test_locs from
        each. This way of initialization can be expensive if the input
        dimension is large.

        """
        if n_test_locs == 1:
            return MeanEmbeddingTest.init_locs_randn(tst_data, n_test_locs, seed)

        X, Y = tst_data.xy()
        n = X.shape[0]
        with util.NumpySeedContext(seed=seed):
            # Subsample X, Y if needed. Useful if the data are too large.
            if n > subsample:
                I = util.subsample_ind(n, subsample, seed=seed+2)
                X = X[I, :]
                Y = Y[I, :]


            d = X.shape[1]
            # fit a Gaussian to each of X, Y
            mean_x = np.mean(X, 0)
            mean_y = np.mean(Y, 0)
            cov_x = np.cov(X.T)
            [Dx, Vx] = np.linalg.eig(cov_x + 1e-3*np.eye(d))
            Dx = np.real(Dx)
            Vx = np.real(Vx)
            # a hack in case the data are high-dimensional and the covariance matrix
            # is low rank.
            Dx[Dx<=0] = 1e-3

            # shrink the covariance so that the drawn samples will not be so
            # far away from the data
            eig_pow = 0.9 # 1.0 = not shrink
            reduced_cov_x = Vx.dot(np.diag(Dx**eig_pow)).dot(Vx.T) + 1e-3*np.eye(d)
            cov_y = np.cov(Y.T)
            [Dy, Vy] = np.linalg.eig(cov_y + 1e-3*np.eye(d))
            Vy = np.real(Vy)
            Dy = np.real(Dy)
            Dy[Dy<=0] = 1e-3
            reduced_cov_y = Vy.dot(np.diag(Dy**eig_pow).dot(Vy.T)) + 1e-3*np.eye(d)
            # integer division
            Jx = old_div(n_test_locs,2)
            Jy = n_test_locs - Jx

            #from IPython.core.debugger import Tracer
            #t = Tracer()
            #t()
            assert Jx+Jy==n_test_locs, 'total test locations is not n_test_locs'
            Tx = np.random.multivariate_normal(mean_x, reduced_cov_x, Jx)
            Ty = np.random.multivariate_normal(mean_y, reduced_cov_y, Jy)
            T0 = np.vstack((Tx, Ty))

        return T0 
Example 41
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License
def generic_nc_parameter(Z, reg='auto'):
    """
    Compute the non-centrality parameter of the non-central chi-squared
    distribution, which approximately describes the test statistic under H_1
    (and H_0). The empirical nc parameter is also the test statistic.

    - reg can be 'auto'. This will automatically determine the lowest value of
    the regularization parameter so that the statistic can be computed.
    """
    #from IPython.core.debugger import Tracer
    #Tracer()()

    n = Z.shape[0]
    Sig = np.cov(Z.T)
    W = np.mean(Z, 0)
    n_features = len(W)
    if n_features == 1:
        reg = 0 if reg=='auto' else reg
        s = float(n)*(W[0]**2)/(reg+Sig)
    else:
        if reg=='auto':
            # First compute with reg=0. If no problem, do nothing.
            # If the covariance is singular, make 0 eigenvalues positive.
            try:
                s = n*np.dot(np.linalg.solve(Sig, W), W)
            except np.linalg.LinAlgError:
                try:
                    # singular matrix
                    # eigen decompose
                    evals, eV = np.linalg.eig(Sig)
                    evals = np.real(evals)
                    eV = np.real(eV)
                    evals = np.maximum(0, evals)
                    # find the non-zero second smallest eigenvalue
                    snd_small = np.sort(evals[evals > 0])[0]
                    evals[evals <= 0] = snd_small

                    # reconstruct Sig
                    Sig = eV.dot(np.diag(evals)).dot(eV.T)
                    # try again
                    s = n*np.linalg.solve(Sig, W).dot(W)
                except:
                    s = -1
        else:
            # assume reg is a number
            # test statistic
            try:
                s = n*np.linalg.solve(Sig + reg*np.eye(Sig.shape[0]), W).dot(W)
            except np.linalg.LinAlgError:
                print('LinAlgError. Return -1 as the nc_parameter.')
                s = -1
    return s 
Example 42
Project: pilco-learner   Author: cryscan   File: gp.py    MIT License 4 votes vote down vote up
def gp0(self, m, s):
        """
        Compute joint predictions for MGP with uncertain inputs.
        """
        assert hasattr(self, "hyp")
        if not hasattr(self, "K"):
            self.cache()

        x = np.atleast_2d(self.inputs)
        y = np.atleast_2d(self.targets)
        n, D = x.shape
        n, E = y.shape

        X = self.hyp
        iK = self.iK
        beta = self.alpha

        m = np.atleast_2d(m)
        inp = x - m

        # Compute the predicted mean and IO covariance.
        iL = np.stack([np.diag(exp(-X[i, :D])) for i in range(E)])
        iN = np.matmul(inp, iL)
        B = iL @ s @ iL + np.eye(D)
        t = np.stack([solve(B[i].T, iN[i].T).T for i in range(E)])
        q = exp(-np.sum(iN * t, 2) / 2)
        qb = q * beta.T
        tiL = np.matmul(t, iL)
        c = exp(2 * X[:, D]) / sqrt(det(B))

        M = np.sum(qb, 1) * c
        V = (np.transpose(tiL, [0, 2, 1]) @ np.expand_dims(qb, 2)).reshape(
            E, D).T * c
        k = 2 * X[:, D].reshape(E, 1) - np.sum(iN**2, 2) / 2

        # Compute the predicted covariance.
        inp = np.expand_dims(inp, 0) / np.expand_dims(exp(2 * X[:, :D]), 1)
        ii = np.repeat(inp[:, newaxis, :, :], E, 1)
        ij = np.repeat(inp[newaxis, :, :, :], E, 0)

        iL = np.stack([np.diag(exp(-2 * X[i, :D])) for i in range(E)])
        siL = np.expand_dims(iL, 0) + np.expand_dims(iL, 1)
        R = np.matmul(s, siL) + np.eye(D)
        t = 1 / sqrt(det(R))
        iRs = np.stack(
            [solve(R.reshape(-1, D, D)[i], s) for i in range(E * E)])
        iRs = iRs.reshape(E, E, D, D)
        Q = exp(k[:, newaxis, :, newaxis] + k[newaxis, :, newaxis, :] +
                maha(ii, -ij, iRs / 2))

        S = np.einsum('ji,iljk,kl->il', beta, Q, beta)
        tr = np.hstack([np.sum(Q[i, i] * iK[i]) for i in range(E)])
        S = (S - np.diag(tr)) * t + np.diag(exp(2 * X[:, D]))
        S = S - np.matmul(M[:, newaxis], M[newaxis, :])

        return M, S, V 
Example 43
Project: pilco-learner   Author: cryscan   File: gp.py    MIT License
def gp2(self, m, s):
        assert hasattr(self, "hyp")
        self.cache()

        x = np.atleast_2d(self.inputs)
        y = np.atleast_2d(self.targets)
        n, D = x.shape
        n, E = y.shape

        X = self.hyp
        beta = self.alpha

        m = np.atleast_2d(m)
        inp = x - m

        # Compute the predicted mean and IO covariance.
        iL = np.stack([np.diag(exp(-X[i, :D])) for i in range(E)])
        iN = np.matmul(inp, iL)
        B = iL @ s @ iL + np.eye(D)
        t = np.stack([solve(B[i].T, iN[i].T).T for i in range(E)])
        q = exp(-np.sum(iN * t, 2) / 2)
        qb = q * beta.T
        tiL = np.matmul(t, iL)
        c = exp(2 * X[:, D]) / sqrt(det(B))

        M = np.sum(qb, 1) * c
        V = (np.transpose(tiL, [0, 2, 1]) @ np.expand_dims(qb, 2)).reshape(
            E, D).T * c
        k = 2 * X[:, D].reshape(E, 1) - np.sum(iN**2, 2) / 2

        # Compute the predicted covariance.
        inp = np.expand_dims(inp, 0) / np.expand_dims(exp(2 * X[:, :D]), 1)
        ii = np.repeat(inp[:, newaxis, :, :], E, 1)
        ij = np.repeat(inp[newaxis, :, :, :], E, 0)

        iL = np.stack([np.diag(exp(-2 * X[i, :D])) for i in range(E)])
        siL = np.expand_dims(iL, 0) + np.expand_dims(iL, 1)
        R = np.matmul(s, siL) + np.eye(D)
        t = 1 / sqrt(det(R))
        iRs = np.stack(
            [solve(R.reshape(-1, D, D)[i], s) for i in range(E * E)])
        iRs = iRs.reshape(E, E, D, D)
        Q = exp(k[:, newaxis, :, newaxis] + k[newaxis, :, newaxis, :] +
                maha(ii, -ij, iRs / 2))

        S = t * np.einsum('ji,iljk,kl->il', beta, Q, beta) + 1e-6 * np.eye(E)
        S = S - np.matmul(M[:, newaxis], M[newaxis, :])

        return M, S, V 
Example 44
Project: paragami   Author: rgiordan   File: test_optimization_lib.py    Apache License 2.0
def test_transform_eigenspace(self):
        dim = 6
        a = np.random.random((dim, dim))
        a = 0.5 * (a.T + a) + np.eye(dim)
        eigvals, eigvecs = np.linalg.eigh(a)
        vec = np.random.random(dim)

        # Test basic transforms.
        a_mult = transform_eigenspace(eigvecs, eigvals, lambda x: x)
        assert_array_almost_equal(a @ vec, a_mult(vec))

        a_inv_mult = transform_eigenspace(eigvecs, eigvals, lambda x: 1 / x)
        assert_array_almost_equal(np.linalg.solve(a, vec), a_inv_mult(vec))

        a_sqrt_mult = transform_eigenspace(eigvecs, eigvals, np.sqrt)

        for i in range(dim):
            vec2 = np.zeros(dim)
            vec2[i] = 1
            assert_array_almost_equal(
                vec2.T @ a @ vec, a_sqrt_mult(vec2).T @ a_sqrt_mult(vec))

        # Test a transform with an incomplete eigenspace.
        trans_ind = 2
        a_trans = transform_eigenspace(
            eigvecs[:, 0:trans_ind], eigvals[0:trans_ind], lambda x: 10)
        for i in range(trans_ind):
            vec = eigvecs[:, i]
            assert_array_almost_equal(10 * vec, a_trans(vec))

        for i in range(trans_ind + 1, dim):
            vec = eigvecs[:, i]
            assert_array_almost_equal(vec, a_trans(vec))

        # Test eigenvalue truncation.
        eigvals = np.linspace(0, dim, dim) + 1
        a2 = eigvecs @ np.diag(eigvals) @ eigvecs.T
        ev_min = 2.5
        ev_max = 5.5
        a_trans = transform_eigenspace(
            eigvecs, eigvals,
            lambda x: truncate_eigenvalues(x, ev_min=ev_min, ev_max=ev_max))

        for i in range(dim):
            vec = eigvecs[:, i]
            if eigvals[i] <= ev_min:
                assert_array_almost_equal(ev_min * vec, a_trans(vec))
            elif eigvals[i] >= ev_max:
                assert_array_almost_equal(ev_max * vec, a_trans(vec))
            else:
                assert_array_almost_equal(a2 @ vec, a_trans(vec))