Python autograd.numpy.eye() Examples

The following code examples show how to use autograd.numpy.eye(). They are drawn from open source Python projects.
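
Before the project examples, here is a minimal self-contained sketch (my own illustration, not taken from any of the projects below) of the typical pattern: autograd.numpy.eye() builds a constant identity matrix inside a function that autograd.grad then differentiates.

import autograd.numpy as np
from autograd import grad

def shifted_gram_norm(x, lam=0.1):
    # np.eye is constant with respect to x, so it can appear freely in
    # the computation graph; only the x-dependent terms carry gradients.
    A = x[:, None] * x[None, :] + lam * np.eye(x.shape[0])
    return np.sum(A ** 2)

print(grad(shifted_gram_norm)(np.array([1.0, 2.0, 3.0])))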

Example 1
Project: STSC   Author: wOOL   File: stsc_autograd.py    The Unlicense
def generate_Givens_rotation(i, j, theta, size):
    # Build the rotation from constant masks rather than writing the
    # theta-dependent entries in place, so autograd can differentiate it.
    g = npy.eye(size)
    c = npy.cos(theta)
    s = npy.sin(theta)
    g[i, i] = 0
    g[j, j] = 0
    g[j, i] = 0
    g[i, j] = 0
    ii_mat = npy.zeros_like(g)
    ii_mat[i, i] = 1
    jj_mat = npy.zeros_like(g)
    jj_mat[j, j] = 1
    ji_mat = npy.zeros_like(g)
    ji_mat[j, i] = 1
    ij_mat = npy.zeros_like(g)
    ij_mat[i, j] = 1
    return g + c * ii_mat + c * jj_mat + s * ji_mat - s * ij_mat 
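
A usage sketch (my addition, not from the STSC project): because the theta-dependent terms enter through constant masks, autograd can differentiate straight through the returned rotation.

import autograd.numpy as npy
from autograd import grad

def first_row_response(theta):
    G = generate_Givens_rotation(0, 1, theta, 3)
    return npy.dot(G[0], npy.array([1.0, 2.0, 3.0]))

print(grad(first_row_response)(0.3))  # equals -sin(0.3) - 2*cos(0.3)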
Example 2
Project: STSC   Author: wOOL   File: stsc_autograd.py    The Unlicense
def get_rotation_matrix(X, C):
    ij_list = [(i, j) for i in range(C) for j in range(C) if i < j]

    def cost(theta_list):
        U_list = generate_U_list(ij_list, theta_list, C)
        R = reduce(npy.dot, U_list, npy.eye(C))
        Z = X.dot(R)
        M = npy.max(Z, axis=1, keepdims=True)
        return npy.sum((Z / M) ** 2)

    theta_list_init = npy.array([0.0] * int(C * (C - 1) / 2))
    opt = minimize(cost,
                   x0=theta_list_init,
                   method='CG',
                   jac=grad(cost),
                   options={'disp': False})
    return opt.fun, reduce(npy.dot, generate_U_list(ij_list, opt.x, C), npy.eye(C)) 
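
A hedged usage sketch (my addition; it assumes generate_U_list from the same file, plus the module's imports functools.reduce, scipy.optimize.minimize, and autograd.grad, are in scope):

import autograd.numpy as npy
X = npy.abs(npy.random.randn(50, 3))  # e.g. rows of an eigenvector matrix
cost, R = get_rotation_matrix(X, 3)
print(cost, R.shape)                  # R is a 3 x 3 rotation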
Example 3
Project: SyntheticStatistics   Author: BlissChapman   File: util.py    MIT License
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points 
    from the fit. 
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise the eigenvalues of the covariance matrix to this power to
        construct a new covariance matrix before drawing samples. Useful for
        shrinking the spread of the samples.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V 
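
A usage sketch (my addition; it assumes the project's NumpySeedContext helper is importable alongside this function):

import autograd.numpy as np
X = np.random.randn(100, 2)    # data, n x d
V = fit_gaussian_draw(X, J=5)  # 5 draws from the fitted Gaussian
print(V.shape)                 # (5, 2)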
Example 4
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License
def init_locs_randn(tst_data, n_test_locs, seed=1):
        """Fit a Gaussian to the merged data of the two samples and draw
        n_test_locs points from the Gaussian"""
        # set the seed
        rand_state = np.random.get_state()
        np.random.seed(seed)

        X, Y = tst_data.xy()
        d = X.shape[1]
        # fit a Gaussian in the middle of X, Y and draw sample to initialize T
        xy = np.vstack((X, Y))
        mean_xy = np.mean(xy, 0)
        cov_xy = np.cov(xy.T)
        [Dxy, Vxy] = np.linalg.eig(cov_xy + 1e-3*np.eye(d))
        Dxy = np.real(Dxy)
        Vxy = np.real(Vxy)
        Dxy[Dxy<=0] = 1e-3
        eig_pow = 0.9 # 1.0 = not shrink
        reduced_cov_xy = Vxy.dot(np.diag(Dxy**eig_pow)).dot(Vxy.T) + 1e-3*np.eye(d)

        T0 = np.random.multivariate_normal(mean_xy, reduced_cov_xy, n_test_locs)
        # reset the seed back to the original
        np.random.set_state(rand_state)
        return T0 
Example 5
Project: variational-smc   Author: blei-lab   File: lgss_example.py    MIT License
def init_model_params(Dx, Dy, alpha, r, obs, rs = npr.RandomState(0)):
    mu0 = np.zeros(Dx)
    Sigma0 = np.eye(Dx)
    
    A = np.zeros((Dx,Dx))
    for i in range(Dx):
        for j in range(Dx):
            A[i,j] = alpha**(abs(i-j)+1)
            
    Q = np.eye(Dx)
    C = np.zeros((Dy,Dx))
    if obs == 'sparse':
        C[:Dy,:Dy] = np.eye(Dy)
    else:
        C = rs.normal(size=(Dy,Dx))
    R = r * np.eye(Dy)
    
    return (mu0, Sigma0, A, Q, C, R) 
Example 6
Project: pilco-learner   Author: cryscan   File: loss.py    MIT License
def loss_sat(self, m, s):
        D = len(m)

        W = self.W if hasattr(self, 'W') else np.eye(D)
        z = self.z if hasattr(self, 'z') else np.zeros(D)
        m, z = np.atleast_2d(m), np.atleast_2d(z)

        sW = np.dot(s, W)
        ispW = solve((np.eye(D) + sW).T, W.T).T
        L = -exp(-(m - z) @ ispW @ (m - z).T / 2) / sqrt(det(np.eye(D) + sW))

        i2spW = solve((np.eye(D) + 2 * sW).T, W.T).T
        r2 = exp(-(m - z) @ i2spW @ (m - z).T) / sqrt(det(np.eye(D) + 2 * sW))
        S = r2 - L**2

        t = np.dot(W, z.T) - ispW @ (np.dot(sW, z.T) + m.T)
        C = L * t

        return L + 1, S, C 
Example 7
Project: pilco-learner   Author: cryscan   File: control.py    MIT License
def concat(con, sat, policy, m, s):
    max_u = policy.max_u
    E = len(max_u)
    D = len(m)

    F = D + E
    i, j = np.arange(D), np.arange(D, F)
    M = m
    S = fill_mat(s, np.zeros((F, F)))

    m, s, c = con(policy, m, s)
    M = np.hstack([M, m])
    S = fill_mat(s, S, j, j)
    q = np.matmul(S[np.ix_(i, i)], c)
    S = fill_mat(q, S, i, j)
    S = fill_mat(q.T, S, j, i)

    M, S, R = sat(M, S, j, max_u)
    C = np.hstack([np.eye(D), c]) @ R
    return M, S, C 
Example 8
Project: pylqr   Author: navigator8972   File: pylqr_trajctrl.py    GNU General Public License v3.0
def build_ilqr_general_solver(self, cost_func, n_dims=2, T=100):
        #figure out dimension
        self.T_ = T
        self.n_dims_ = n_dims

        #build dynamics, second-order linear dynamical system
        self.A_ = np.eye(self.n_dims_*2)
        self.A_[0:self.n_dims_, self.n_dims_:] = np.eye(self.n_dims_) * self.dt_
        self.B_ = np.zeros((self.n_dims_*2, self.n_dims_))
        self.B_[self.n_dims_:, :] = np.eye(self.n_dims_) * self.dt_

        self.plant_dyn_ = lambda x, u, t, aux: np.dot(self.A_, x) + np.dot(self.B_, u)
        self.plant_dyn_dx_ = lambda x, u, t, aux: self.A_
        self.plant_dyn_du_ = lambda x, u, t, aux: self.B_

        self.cost_ = cost_func

        #build an iLQR solver based on given functions...
        self.ilqr_ = pylqr.PyLQR_iLQRSolver(T=self.T_-1, plant_dyn=self.plant_dyn_, cost=self.cost_, use_autograd=self.use_autograd)

        return 
Example 9
Project: kernel-gof   Author: wittawatj   File: util.py    MIT License
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points 
    from the fit. 
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise the eigenvalues of the covariance matrix to this power to
        construct a new covariance matrix before drawing samples. Useful for
        shrinking the spread of the samples.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V 
Example 10
Project: kernel-gof   Author: wittawatj   File: data.py    MIT License
def gmm_sample(self, mean=None, w=None, N=10000,n=10,d=2,seed=10):
        np.random.seed(seed)
        self.d = d
        if mean is None:
            mean = np.random.randn(n,d)*10
        if w is None:
            w = np.random.rand(n)
        w = old_div(w,sum(w))
        multi = np.random.multinomial(N,w)
        X = np.zeros((N,d))
        base = 0
        for i in range(n):
            X[base:base+multi[i],:] = np.random.multivariate_normal(mean[i,:], np.eye(self.d), multi[i])
            base += multi[i]
        
        llh = np.zeros(N)
        for i in range(n):
            llh += w[i] * stats.multivariate_normal.pdf(X, mean[i,:], np.eye(self.d))
        #llh = llh/sum(llh)
        return X, llh 
Example 11
Project: bosonic   Author: steinbrecher   File: qonn.py    MIT License
def clements_build(phis, m):
    U = np.eye(m, dtype=complex)
    ptr = 0
    bss = [build_bs_layer(m, 0), build_bs_layer(m, 1)]
    for i in range(m):
        offset = i % 2
        # Phis per layer
        ppl = (m - offset) // 2
        bs = bss[offset]
        phi1 = build_phi_layer(phis[ptr:ptr+ppl], m, offset)
        phi2 = build_phi_layer(phis[ptr+ppl:ptr+2*ppl], m, offset)
        ptr += 2*ppl

        U = np.multiply(phi1, U)
        U = np.dot(bs, U)
        U = np.multiply(phi2, U)
        U = np.dot(bs, U)
    outPhases = np.reshape(np.exp(1j*phis[ptr:ptr+m]), (m, 1))
    U = np.multiply(outPhases, U)
    return U 
Example 12
Project: prediction-constrained-topic-models   Author: dtak   File: util_differentiable_transform__2D_rows_sum_to_one.py    MIT License
def to_diffable_arr(topics_KV, min_eps=MIN_EPS, do_force_safe=False):
    ''' Transform normalized topics to unconstrained space.

    Args
    ----
    topics_KV : 2D array, size K x V
        minimum value of any entry must be min_eps
        each row should sum to 1.0

    Returns
    -------
    log_topics_vec : 2D array, size K x (V-1)
        unconstrained real values

    Examples
    --------
    >>> topics_KV = np.eye(3) + np.ones((3,3))
    >>> topics_KV /= topics_KV.sum(axis=1)[:,np.newaxis]
    >>> log_topics_vec = to_diffable_arr(topics_KV)
    >>> out_KV = to_common_arr(log_topics_vec)
    >>> np.allclose(out_KV, topics_KV)
    True
    '''
    if do_force_safe:
        topics_KV = to_safe_common_arr(topics_KV, min_eps)
    K, V = topics_KV.shape
    log_topics_KV = np.log(topics_KV)
    log_topics_KVm1 = log_topics_KV[:, :-1]
    log_topics_KVm1 = log_topics_KVm1 - log_topics_KV[:, -1][:,np.newaxis]
    return log_topics_KVm1 + np.log1p(-V * min_eps) 
Example 13
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def indices_to_one_hot(data, nb_classes):
    """
    Convert an iterable of indices to one-hot encoded labels.
    From: https://stackoverflow.com/questions/37292872/how-can-i-one-hot-encode-in-python
    """
    targets = np.array(data).reshape(-1)
    return np.eye(nb_classes)[targets] 
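
For instance (my illustration), np.eye(nb_classes) acts as a lookup table whose rows are the one-hot vectors:

print(indices_to_one_hot([0, 2, 1], 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]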
Example 14
Project: STSC   Author: wOOL   File: stsc_manopt.py    The Unlicense
def get_rotation_matrix(X, C):
    def cost(R):
        Z = npy.dot(X, R)
        M = npy.max(Z, axis=1, keepdims=True)
        return npy.sum((Z / M) ** 2)

    manifold = Stiefel(C, C)
    problem = Problem(manifold=manifold, cost=cost, verbosity=0)
    solver = SteepestDescent(logverbosity=0)
    opt = solver.solve(problem=problem, x=npy.eye(C))
    return cost(opt), opt 
Example 15
Project: autograd-forward   Author: BB-UCL   File: test_scipy.py    MIT License
def make_psd(mat): return np.dot(mat.T, mat) + np.eye(mat.shape[0]) 
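
A quick sanity check (my addition): mat.T @ mat is positive semi-definite, so adding the identity shifts every eigenvalue up by one and makes the result strictly positive definite.

import autograd.numpy as np
mat = np.random.randn(4, 4)
evals = np.linalg.eigvalsh(make_psd(mat))
assert np.all(evals >= 1.0 - 1e-8)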
Example 16
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def kernel(theta, mu, sigma, jitter=1e-2):

    length_scale = theta[0]

    std = theta[1]

    omega = numpy.reshape(theta[2:5], (3, 1))

    kappa = theta[5:8]

    K_p, raw_K_p_g_0, raw_K_p_g_1, raw_mu_g, raw_sigma_g = RBF_p(mu, sigma, length_scale, std)

    B, omega_0_g, omega_1_g, omega_2_g, kappa_0_g, kappa_1_g, kappa_2_g = coregion(omega, kappa)

    C = numpy.kron(K_p, B) + numpy.eye(3*len(mu))*jitter

    length_scale_g = numpy.kron(raw_K_p_g_0, B)

    std_g = numpy.kron(raw_K_p_g_1, B)

    mu_g = numpy.kron(raw_mu_g, B)

    sigma_g = numpy.kron(raw_sigma_g, B)

    omega_0_g = numpy.kron(K_p, omega_0_g)

    omega_1_g = numpy.kron(K_p, omega_1_g)

    omega_2_g = numpy.kron(K_p, omega_2_g)

    kappa_0_g = numpy.kron(K_p, kappa_0_g)

    kappa_1_g = numpy.kron(K_p, kappa_1_g)

    kappa_2_g = numpy.kron(K_p, kappa_2_g)

    return C, [length_scale_g, std_g,
               omega_0_g, omega_1_g, omega_2_g, kappa_0_g, kappa_1_g, kappa_2_g, mu_g, sigma_g] 
Example 17
Project: quanser-openai-driver   Author: BlueRiverTech   File: lqr.py    MIT License
def LQR_control():
    # Cost matrices for LQR
    Q = np.diag(np.array([1, 1, 1, 1]))  # state_dimension = 4
    R = np.eye(1)  # control_dimension = 1

    A, B = computeAB(np.array([0.0, 0.0, 0.0, 0.0]), np.array([0.0]))

    # Use if discrete forward dynamics is used
    X = sp_linalg.solve_discrete_are(A, B, Q, R)
    K = np.dot(np.linalg.pinv(R + np.dot(B.T, np.dot(X, B))), np.dot(B.T, np.dot(X, A)))

    # Use for continuous version of LQR
    # X = sp_linalg.solve_continuous_are(A, B, Q, R)
    # K = np.dot(np.linalg.pinv(R), np.dot(B.T, X))
    return np.squeeze(K, 0) 
Example 18
Project: pymanopt   Author: pymanopt   File: test_autograd.py    BSD 3-Clause "New" or "Revised" License
def setUp(self):
        np.seterr(all='raise')
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(n)
        A = self.A = rnd.randn(n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (row vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        self.correct_hess = np.squeeze(np.array(Amat.dot(H)))

        self.backend = AutogradBackend() 
Example 19
Project: pymanopt   Author: pymanopt   File: test_autograd.py    BSD 3-Clause "New" or "Revised" License
def setUp(self):
        np.seterr(all='raise')

        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        m = self.m = 10
        n = self.n = 15

        Y = self.Y = rnd.randn(m, n)
        A = self.A = rnd.randn(m, n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian tensor H (4th order)
        Y1 = Y.reshape(m, n, 1, 1)
        Y2 = Y.reshape(1, 1, m, n)

        # Create an m x n x m x n array with diag[i,j,k,l] == 1 iff
        # (i == k and j == l), this is a 'diagonal' tensor.
        diag = np.eye(m * n).reshape(m, n, m, n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, m, n)

        self.correct_hess = np.sum(H * Atensor, axis=(2, 3))

        self.backend = AutogradBackend() 
Example 20
Project: pymanopt   Author: pymanopt   File: test_autograd.py    BSD 3-Clause "New" or "Revised" License
def setUp(self):
        np.seterr(all='raise')

        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n1 = self.n1 = 3
        n2 = self.n2 = 4
        n3 = self.n3 = 5

        Y = self.Y = rnd.randn(n1, n2, n3)
        A = self.A = rnd.randn(n1, n2, n3)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian tensor H (6th order)
        Y1 = Y.reshape(n1, n2, n3, 1, 1, 1)
        Y2 = Y.reshape(1, 1, 1, n1, n2, n3)

        # Create an n1 x n2 x n3 x n1 x n2 x n3 diagonal tensor
        diag = np.eye(n1 * n2 * n3).reshape(n1, n2, n3, n1, n2, n3)

        H = np.exp(np.sum(Y ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = A.reshape(1, 1, 1, n1, n2, n3)

        self.correct_hess = np.sum(H * Atensor, axis=(3, 4, 5))

        self.backend = AutogradBackend() 
Example 21
Project: vittles   Author: rgiordan   File: test_utils.py    Apache License 2.0
def __init__(self, dim):
        # Put lower bounds so we're testing the constraining functions
        # and so that derivatives of all orders are nonzero.
        self.dim = dim
        self.theta_pattern = \
            paragami.NumericArrayPattern(shape=(dim, ), lb=-20.)
        self.lambda_pattern = \
            paragami.NumericArrayPattern(shape=(dim, ), lb=-20.0)

        vec = np.linspace(0.1, 0.3, num=dim)
        self.matrix = np.outer(vec, vec) + np.eye(dim) 
Example 22
Project: pilco-learner   Author: cryscan   File: base.py    MIT License
def propagate(m, s, plant, dynmodel, policy):
    angi = plant.angi
    poli = plant.poli
    dyni = plant.dyni
    difi = plant.difi

    D0 = len(m)
    D1 = D0 + 2 * len(angi)
    D2 = D1 + len(policy.max_u)
    M = np.array(m)
    S = s

    i, j = np.arange(D0), np.arange(D0, D1)
    m, s, c = gaussian_trig(M[i], S[np.ix_(i, i)], angi)
    q = np.matmul(S[np.ix_(i, i)], c)
    M = np.hstack([M, m])
    S = np.vstack([np.hstack([S, q]), np.hstack([q.T, s])])

    i, j = poli, np.arange(D1)
    m, s, c = policy.fcn(M[i], S[np.ix_(i, i)])
    q = np.matmul(S[np.ix_(j, i)], c)
    M = np.hstack([M, m])
    S = np.vstack([np.hstack([S, q]), np.hstack([q.T, s])])

    i, j = np.hstack([dyni, np.arange(D1, D2)]), np.arange(D2)
    m, s, c = dynmodel.fcn(M[i], S[np.ix_(i, i)])
    q = np.matmul(S[np.ix_(j, i)], c)
    M = np.hstack([M, m])
    S = np.vstack([np.hstack([S, q]), np.hstack([q.T, s])])

    P = np.hstack([np.zeros((D0, D2)), np.eye(D0)])
    P = fill_mat(np.eye(len(difi)), P, difi, difi)
    M_next = np.matmul(P, M[:, newaxis]).flatten()
    S_next = P @ S @ P.T
    S_next = (S_next + S_next.T) / 2
    return M_next, S_next 
Example 23
Project: pilco-learner   Author: cryscan   File: gp.py    MIT License
def __call__(self, loghyp, x, z=None):
        loghyp = np.atleast_2d(loghyp)
        n, D = x.shape
        s2 = np.exp(2 * loghyp)  # [E, 1]
        s2 = s2.reshape(-1, 1, 1)

        if z is None:
            K = s2 * np.expand_dims(np.eye(n), 0)
        else:
            K = 0
        return K 
Example 24
Project: pilco-learner   Author: cryscan   File: gp.py    MIT License
def cache(self):
        assert hasattr(self, "inputs")
        assert hasattr(self, "targets")
        x = np.atleast_2d(self.inputs)
        y = np.atleast_2d(self.targets)
        assert len(x) == len(y)

        n, D = x.shape
        n, E = y.shape

        self.K = self.kernel(self.hyp, x)
        self.iK = np.stack([solve(self.K[i], np.eye(n)) for i in range(E)])
        self.alpha = np.vstack([solve(self.K[i], y[:, i]) for i in range(E)]).T 
Example 25
Project: ParetoMTL   Author: Xi-L   File: run_synthetic_example.py    MIT License
def get_d_moomtl(grads):
    """
    calculate the gradient direction for MOO-MTL 
    """
    
    nobj, dim = grads.shape
    
#    # use cvxopt to solve QP
#    P = np.dot(grads , grads.T)
#    
#    q = np.zeros(nobj)
#    
#    G =  - np.eye(nobj)
#    h = np.zeros(nobj)
#    
#    
#    A = np.ones(nobj).reshape(1,2)
#    b = np.ones(1)
#       
#    cvxopt.solvers.options['show_progress'] = False
#    sol = cvxopt_solve_qp(P, q, G, h, A, b)
    
    # use MinNormSolver to solve QP
    sol, nd = MinNormSolver.find_min_norm_element(grads)
    
    return sol 
Example 26
Project: mici   Author: matt-graham   File: test_matrices.py    MIT License
def __init__(self):
        super().__init__()
        for sz in SIZES:
            scalar = abs(self.rng.normal())
            self.matrices[sz] = matrices.PositiveScaledIdentityMatrix(
                scalar, sz)
            self.np_matrices[sz] = scalar * np.identity(sz)
            if AUTOGRAD_AVAILABLE:
                self.grad_log_abs_det_sqrts[sz] = grad(
                    lambda s: 0.5 * anp.linalg.slogdet(s * anp.eye(sz))[1])(
                        scalar)
                self.grad_quadratic_form_invs[sz] = partial(
                    grad(lambda s, v: (v / s) @ v), scalar) 
Example 27
Project: mici   Author: matt-graham   File: test_matrices.py    MIT License
def __init__(self):
        super().__init__()
        for sz in SIZES:
            scalar = self.rng.normal()
            self.matrices[sz] = matrices.ScaledIdentityMatrix(scalar, sz)
            self.np_matrices[sz] = scalar * np.identity(sz)
            if AUTOGRAD_AVAILABLE:
                self.grad_log_abs_det_sqrts[sz] = grad(
                    lambda s: 0.5 * anp.linalg.slogdet(s * anp.eye(sz))[1])(
                        scalar)
                self.grad_quadratic_form_invs[sz] = partial(
                    grad(lambda s, v: (v / s) @ v), scalar) 
Example 28
Project: pylqr   Author: navigator8972   File: pylqr_trajctrl.py    GNU General Public License v3.0
def PyLQR_TrajCtrl_TrackingTest():
    n_pnts = 200
    x_coord = np.linspace(0.0, 2*np.pi, n_pnts)
    y_coord = np.sin(x_coord)
    #concatenate to have trajectory
    ref_traj = np.array([x_coord, y_coord]).T
    weight_mats = [ np.eye(ref_traj.shape[1])*100 ]

    #draw reference trajectory
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # ax.hold(True)  # ax.hold was removed in Matplotlib 3.0; axes hold by default
    ax.plot(ref_traj[:, 0], ref_traj[:, 1], '.-k', linewidth=3.5)
    ax.plot([ref_traj[0, 0]], [ref_traj[0, 1]], '*k', markersize=16)

    lqr_traj_ctrl = PyLQR_TrajCtrl(use_autograd=True)
    lqr_traj_ctrl.build_ilqr_tracking_solver(ref_traj, weight_mats)

    n_queries = 5

    for i in range(n_queries):
        #start from a perturbed point
        x0 = ref_traj[0, :] + np.random.rand(2) * 2 - 1
        syn_traj = lqr_traj_ctrl.synthesize_trajectory(x0)
        #plot it
        ax.plot(syn_traj[:, 0], syn_traj[:, 1], linewidth=3.5)

    plt.show()
    return 
Example 29
Project: kernel-gof   Author: wittawatj   File: density.py    MIT License
def log_normalized_den(self, X):
        d = self.dim()
        return stats.multivariate_normal.logpdf(X, mean=self.mean, cov=self.variance*np.eye(d)) 
Example 30
Project: paragami   Author: rgiordan   File: test_autograd_supplement_lib.py    Apache License 2.0
def test_inv(self):
        def fun(x):
            return np.linalg.inv(x)

        D = 3
        mat = npr.randn(D, D) + np.eye(D) * 2

        check_grads(fun)(mat) 
Example 31
Project: paragami   Author: rgiordan   File: test_autograd_supplement_lib.py    Apache License 2.0
def test_inv_3d(self):
        fun = lambda x: np.linalg.inv(x)

        D = 4
        mat = npr.randn(D, D, D) + 5 * np.eye(D)
        check_grads(fun)(mat)

        mat = npr.randn(D, D, D, D) + 5 * np.eye(D)
        check_grads(fun)(mat) 
Example 32
Project: paragami   Author: rgiordan   File: test_autograd_supplement_lib.py    Apache License 2.0
def test_slogdet_3d(self):
        fun = lambda x: np.sum(np.linalg.slogdet(x)[1])
        mat = np.concatenate(
            [(rand_psd(5) + 5 * np.eye(5))[None,...] for _ in range(3)])
        # At this time, this is not supported.
        #check_grads(fun)(mat)

        # Check that it raises an error.
        fwd_grad = autograd.make_jvp(fun, argnum=0)
        def error_fun():
            return fwd_grad(mat)(mat)
        self.assertRaises(ValueError, error_fun) 
Example 33
Project: paragami   Author: rgiordan   File: test_autograd_supplement_lib.py    Apache License 2.0
def test_solve_arg1(self):
        D = 8
        A = npr.randn(D, D) + 10.0 * np.eye(D)
        B = npr.randn(D, D - 1)
        def fun(a): return np.linalg.solve(a, B)
        check_grads(fun)(A) 
Example 34
Project: paragami   Author: rgiordan   File: test_autograd_supplement_lib.py    Apache License 2.0
def test_solve_arg1_1d(self):
        D = 8
        A = npr.randn(D, D) + 10.0 * np.eye(D)
        B = npr.randn(D)
        def fun(a): return np.linalg.solve(a, B)
        check_grads(fun)(A) 
Example 35
Project: paragami   Author: rgiordan   File: test_autograd_supplement_lib.py    Apache License 2.0
def test_solve_arg1_3d(self):
        D = 4
        A = npr.randn(D + 1, D, D) + 5 * np.eye(D)
        B = npr.randn(D + 1, D)
        fun = lambda A: np.linalg.solve(A, B)
        check_grads(fun)(A) 
Example 36
Project: paragami   Author: rgiordan   File: test_autograd_supplement_lib.py    Apache License 2.0
def test_solve_arg1_3d_3d(self):
        D = 4
        A = npr.randn(D+1, D, D) + 5 * np.eye(D)
        B = npr.randn(D+1, D, D + 2)
        fun = lambda A: np.linalg.solve(A, B)
        check_grads(fun)(A) 
Example 37
Project: paragami   Author: rgiordan   File: test_utils.py    Apache License 2.0
def __init__(self, dim):
        # Put lower bounds so we're testing the constraining functions
        # and so that derivatives of all orders are nonzero.
        self.dim = dim
        self.theta_pattern = \
            paragami.NumericArrayPattern(shape=(dim, ), lb=-10.)
        self.lambda_pattern = \
            paragami.NumericArrayPattern(shape=(dim, ), lb=-10.0)

        vec = np.linspace(0.1, 0.3, num=dim)
        self.matrix = np.outer(vec, vec) + np.eye(dim)

        self.lam = self.get_default_lambda() 
Example 38
Project: paragami   Author: rgiordan   File: test_optimization_lib.py    Apache License 2.0
def test_matrix_sqrt(self):
        dim = 5
        mat = dim * np.eye(dim)
        vec = np.random.random(dim)
        mat = mat + np.outer(vec, vec)
        self._test_matrix_sqrt(mat) 
Example 39
Project: paragami   Author: rgiordan   File: psdmatrix_patterns.py    Apache License 2.0
def empty(self, valid):
        if valid:
            return np.eye(self.__size) * (self.__diag_lb + 1)
        else:
            return np.empty((self.__size, self.__size)) 
Example 40
Project: ParametricGP   Author: maziarraissi   File: parametric_GP.py    MIT License
def likelihood(self, hyp): 
        M = self.M 
        Z = self.Z
        m = self.m
        S = self.S
        X_batch = self.X_batch
        y_batch = self.y_batch
        jitter = self.jitter 
        jitter_cov = self.jitter_cov
        N = X_batch.shape[0]
        
        
        logsigma_n = hyp[-1]
        sigma_n = np.exp(logsigma_n)
        
        # Compute K_u_inv
        K_u = kernel(Z, Z, hyp[:-1])    
        # K_u_inv = np.linalg.solve(K_u + np.eye(M)*jitter_cov, np.eye(M))
        L = np.linalg.cholesky(K_u + np.eye(M)*jitter_cov)    
        K_u_inv = np.linalg.solve(L.T, np.linalg.solve(L,np.eye(M)))
        
        self.K_u_inv = K_u_inv
          
        # Compute mu
        psi = kernel(Z, X_batch, hyp[:-1])    
        K_u_inv_m = np.matmul(K_u_inv,m)   
        MU = np.matmul(psi.T,K_u_inv_m)
        
        # Compute cov
        Alpha = np.matmul(K_u_inv,psi)
        COV = kernel(X_batch, X_batch, hyp[:-1]) - np.matmul(psi.T, np.matmul(K_u_inv,psi)) + \
                np.matmul(Alpha.T, np.matmul(S,Alpha))
        
        COV_inv = np.linalg.solve(COV  + np.eye(N)*sigma_n + np.eye(N)*jitter, np.eye(N))
        # L = np.linalg.cholesky(COV  + np.eye(N)*sigma_n + np.eye(N)*jitter) 
        # COV_inv = np.linalg.solve(np.transpose(L), np.linalg.solve(L,np.eye(N)))
        
        # Compute cov(Z, X)
        cov_ZX = np.matmul(S,Alpha)
        
        # Update m and S
        alpha = np.matmul(COV_inv, cov_ZX.T)
        m = m + np.matmul(cov_ZX, np.matmul(COV_inv, y_batch-MU))    
        S = S - np.matmul(cov_ZX, alpha)
        
        self.m = m
        self.S = S
        
        # Compute NLML                
        K_u_inv_m = np.matmul(K_u_inv,m)
        NLML = 0.5*np.matmul(m.T,K_u_inv_m) + np.sum(np.log(np.diag(L))) + 0.5*np.log(2.*np.pi)*M
        
        return NLML[0,0] 
Example 41
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def all_log_lik(theta, *args):

    (mdl,) = args

    # print('=============================================================')
    #
    # print('current theta is:')

    print(theta.ravel())

    C, K_g = kernel(theta, mdl.mu, mdl.sigma)

    w_map, A = newton_update(mdl.q, mdl.ln_q, mdl.ln_1_q, mdl.ln_s, C, mdl.n)

    link_g = get_link_g(w_map, mdl.q, mdl.ln_q, mdl.ln_1_q, mdl.ln_s).reshape(-1, 1)

    mdl.check_map = numpy.sum(numpy.abs(numpy.matmul(C, + link_g) - w_map.reshape(-1, 1)))

    mdl.w_map = w_map

    mdl.A = A

    H, H_U, H_V = get_link_h(w_map, mdl.q, mdl.ln_q, mdl.ln_1_q, mdl.ln_s)

    G = numpy.eye(numpy.shape(mdl.y)[0] * 3) + numpy.matmul(numpy.matmul(H_V, C), H_U)

    gradient = get_gradient(theta, w_map, A, C, G, H_U, H_V, link_g,
                            mdl.y, mdl.mu, mdl.sigma,
                            mdl.q, mdl.ln_q, mdl.ln_1_q, mdl.ln_s, K_g)

    mdl.gradient = gradient.copy()

    link_lik = numpy.sum(link_log_lik(w_map, mdl.q, mdl.ln_q, mdl.ln_1_q, mdl.ln_s))

    L = -0.5 * numpy.matmul(A.transpose(), w_map.reshape(-1, 1))[0, 0] + link_lik - 0.5 * numpy.linalg.slogdet(G)[1]

    # print('current gradient is:')
    #
    # print(gradient)
    #
    # print('Check MAP:')
    #
    # print(mdl.check_map)
    #
    # print('w_map is:')
    #
    # print(w_map)
    #
    # print('Log-Lik is:')
    #
    # print(L)
    #
    # print('=============================================================')
    #
    # print([-L, mdl.check_map, numpy.abs(gradient).sum()])

    return - L, - gradient 
Example 42
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def get_gradient(theta, w_map, A, C, G, H_U, H_V, link_g, y, mu, sigma, q, ln_q, ln_1_q, ln_s, K_g):

    n = int(numpy.shape(C)[0] / 3)

    GQ, GR = scipy.linalg.qr(G)

    # (C + H^-1)^-1
    CH_inv = numpy.matmul(H_U, scipy.linalg.solve_triangular(GR, numpy.matmul(GQ.transpose(), H_V)))

    H_g = - get_link_hessian_g(w_map, q, ln_q, ln_1_q, ln_s)

    # (C^-1 + H)^-1
    L = C - numpy.matmul(numpy.matmul(C, H_U),
                         scipy.linalg.solve_triangular(GR, numpy.matmul(GQ.transpose(), numpy.matmul(H_V, C))))

    # (I + CH)^-1
    CH = numpy.eye(3*n) - numpy.matmul(C, CH_inv)

    g_direct = numpy.zeros_like(theta)

    g_indirect = numpy.zeros_like(theta)

    for i in range(0, len(g_direct)):

        g_direct[i] = 0.5 * numpy.matmul(numpy.matmul(A.transpose(), K_g[i]), A) \
                      - 0.5 * numpy.trace(numpy.matmul(CH_inv, K_g[i]))

    tmp_trace = numpy.zeros(3 * n)

    for i in range(0, 3):
        for j in range(0, 3):
            for k in range(0, 3):
                tmp_trace[i*n:(i+1)*n] = tmp_trace[i*n:(i+1)*n] + \
                                         (L[numpy.arange(0, n)*3+j, numpy.arange(0, n)*3+k] * H_g[:, i, j, k])

    for i in range(0, len(g_indirect)):

        tmp_g = numpy.matmul(numpy.matmul(CH, K_g[i]), + link_g)

        g_indirect[i] = numpy.sum((-0.5) * tmp_trace * tmp_g.ravel())

    return g_direct + g_indirect 
Example 43
Project: qoc   Author: SchusterLab   File: expm.py    MIT License
def expm_pade(a):
    """
    Compute the matrix exponential via Padé approximation.

    References:
    [0] http://eprints.ma.man.ac.uk/634/1/high05e.pdf
    [1] https://github.com/scipy/scipy/blob/v0.14.0/scipy/linalg/_expm_frechet.py#L10

    Arguments:
    a :: ndarray(N x N) - The matrix to exponentiate.
    
    Returns:
    expm_a :: ndarray(N x N) - The exponential of a.
    """
    # If the one norm is sufficiently small,
    # pade orders up to 13 are well behaved.
    scale = 0
    size = a.shape[0]
    pade_order = None
    one_norm_ = one_norm(a)
    for pade_order_ in PADE_ORDERS:
        if one_norm_ < THETA[pade_order_]:
            pade_order = pade_order_
        #ENDIF
    #ENDFOR

    # If the one norm is large, scaling and squaring
    # is required.
    if pade_order is None:
        pade_order = 13
        scale = max(0, int(anp.ceil(anp.log2(one_norm_ / THETA[13]))))
        a = a * (2 ** -scale)

    # Execute pade approximant.
    i = anp.eye(size)
    u, v = PADE[pade_order](a, i)
    r = anp.linalg.solve(-u + v, u + v)

    # Do squaring if necessary.
    for _ in range(scale):
        r = anp.matmul(r, r)

    return r


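One way to validate this routine (my sketch; it assumes the module's PADE and THETA tables and its one_norm helper are in scope, as in the qoc source) is a spot check against scipy.linalg.expm:

import numpy as onp
import scipy.linalg

a = onp.random.randn(5, 5)
assert onp.allclose(expm_pade(a), scipy.linalg.expm(a), atol=1e-8)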
Example 44
Project: differentiable-atomistic-potentials   Author: google   File: lennardjones.py    Apache License 2.0
def energy(params, positions, cell, strain=np.zeros((3, 3))):
  """Compute the energy of a Lennard-Jones system.

    Parameters
    ----------

    params : dictionary of parameters.
      Defaults to {'sigma': 1.0, 'epsilon': 1.0}

    positions : array of floats. Shape = (natoms, 3)

    cell: array of unit cell vectors. Shape = (3, 3)

    strain: array of strains to apply to cell. Shape = (3, 3)

    Returns
    -------
    energy : float
    """

  sigma = params.get('sigma', 1.0)
  epsilon = params.get('epsilon', 1.0)

  rc = 3 * sigma

  e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)

  strain_tensor = np.eye(3) + strain
  cell = np.dot(strain_tensor, cell.T).T
  positions = np.dot(strain_tensor, positions.T).T

  r2 = get_distances(positions, cell, rc, 0.01)[0]**2

  zeros = np.equal(r2, 0.0)
  adjusted = np.where(zeros, np.ones_like(r2), r2)

  c6 = np.where((r2 <= rc**2) & (r2 > 0.0), (sigma**2 / adjusted)**3,
                np.zeros_like(r2))
  c6 = np.where(zeros, np.zeros_like(r2), c6)
  energy = -e0 * (c6 != 0.0).sum()
  c12 = c6**2
  energy += np.sum(4 * epsilon * (c12 - c6))

  # get_distances double counts the interactions, so we divide by two.
  return energy / 2 
Example 45
Project: differentiable-atomistic-potentials   Author: google   File: lennardjones.py    Apache License 2.0
def energy_oneway(params, positions, cell, strain=np.zeros((3, 3))):
  """Compute the energy of a Lennard-Jones system.

    Parameters
    ----------

    params : dictionary of parameters.
      Defaults to {'sigma': 1.0, 'epsilon': 1.0}

    positions : array of floats. Shape = (natoms, 3)

    cell: array of unit cell vectors. Shape = (3, 3)

    strain: array of strains to apply to cell. Shape = (3, 3)

    Returns
    -------
    energy : float
    """

  sigma = params.get('sigma', 1.0)
  epsilon = params.get('epsilon', 1.0)

  rc = 3 * sigma

  e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)

  strain_tensor = np.eye(3) + strain
  cell = np.dot(strain_tensor, cell.T).T
  positions = np.dot(strain_tensor, positions.T).T

  inds, disps = get_neighbors_oneway(positions, cell, rc)

  natoms = len(positions)
  energy = 0.0

  for a in range(natoms):
    neighbors = inds[a]
    offsets = disps[a]
    cells = np.dot(offsets, cell)
    d = positions[neighbors] + cells - positions[a]
    r2 = (d**2).sum(1)
    c6 = np.where(r2 <= rc**2, (sigma**2 / r2)**3, np.zeros_like(r2))
    energy -= e0 * (c6 != 0.0).sum()
    c12 = c6**2
    energy += 4 * epsilon * (c12 - c6).sum()
  return energy 
Example 46
Project: differentiable-atomistic-potentials   Author: google   File: test_ag_emt.py    Apache License 2.0
def test_stress(self):
    for structure in ('fcc', 'bcc', 'hcp', 'diamond', 'sc'):
      for repeat in ((1, 1, 1), (1, 2, 3)):
        for a in [3.0, 4.0]:
          atoms = bulk('Cu', structure, a=a).repeat(repeat)
          atoms.rattle()
          atoms.set_calculator(EMT())

          # Numerically calculate the ase stress
          d = 1e-9  # a delta strain

          ase_stress = np.empty((3, 3)).flatten()
          cell0 = atoms.cell

          # Use a finite difference approach that is centered.
          for i in [0, 4, 8, 5, 2, 1]:
            strain_tensor = np.zeros((3, 3))
            strain_tensor = strain_tensor.flatten()
            strain_tensor[i] = d
            strain_tensor = strain_tensor.reshape((3, 3))
            strain_tensor += strain_tensor.T
            strain_tensor /= 2
            strain_tensor += np.eye(3, 3)

            cell = np.dot(strain_tensor, cell0.T).T
            positions = np.dot(strain_tensor, atoms.positions.T).T
            atoms.cell = cell
            atoms.positions = positions
            ep = atoms.get_potential_energy()

            strain_tensor = np.zeros((3, 3))
            strain_tensor = strain_tensor.flatten()
            strain_tensor[i] = -d
            strain_tensor = strain_tensor.reshape((3, 3))
            strain_tensor += strain_tensor.T
            strain_tensor /= 2
            strain_tensor += np.eye(3, 3)

            cell = np.dot(strain_tensor, cell0.T).T
            positions = np.dot(strain_tensor, atoms.positions.T).T
            atoms.cell = cell
            atoms.positions = positions
            em = atoms.get_potential_energy()

            ase_stress[i] = (ep - em) / (2 * d) / atoms.get_volume()

          ase_stress = np.take(ase_stress.reshape((3, 3)), [0, 4, 8, 5, 2, 1])

          ag_stress = stress(parameters, atoms.positions, atoms.numbers, atoms.cell)

          # I picked the 0.03 tolerance here. I thought it should be closer, but
          # it is a simple numerical difference I am using for the derivative,
          # and I am not sure it is totally correct.
          self.assertTrue(np.all(np.abs(ase_stress - ag_stress) <= 0.03),
                          f'''
ase: {ase_stress}
ag : {ag_stress}
diff {ase_stress - ag_stress}
''') 
Example 47
Project: momi2   Author: popgenmethods   File: optimizers.py    GNU General Public License v3.0
def _find_minimum(f, start_params, optimizer, bounds=None,
                  callback=None,
                  opt_kwargs={}, **kwargs):
    fixed_params = []
    if bounds is not None:
        bounds = [(None, None) if b is None else b for b in bounds]
        for i, b in enumerate(bounds):
            try:
                if b[0] == b[1] and b[0] is not None:
                    fixed_params += [(i, b[0])]
            except (TypeError, IndexError) as e:
                fixed_params += [(i, b)]

        if any(start_params[i] != b for i, b in fixed_params):
            raise ValueError(
                "start_params does not agree with fixed parameters in bounds")

        if fixed_params:
            fixed_idxs, fixed_offset = list(
                map(np.array, list(zip(*fixed_params))))

            fixed_idxs = np.array([(i in fixed_idxs)
                                   for i in range(len(start_params))])
            proj0 = np.eye(len(fixed_idxs))[:, fixed_idxs]
            proj1 = np.eye(len(fixed_idxs))[:, ~fixed_idxs]

            fixed_offset = np.dot(proj0, fixed_offset)
            get_x = lambda x0: np.dot(proj1, x0) + fixed_offset

            def restricted(fun):
                if fun is None:
                    return None
                new_fun = lambda x0, * \
                    fargs, **fkwargs: fun(get_x(x0), *fargs, **fkwargs)
                return wraps(fun)(new_fun)
            f = restricted(f)
            callback = restricted(callback)

            start_params = np.array(
                [s for (fxd, s) in zip(fixed_idxs, start_params) if not fxd])
            bounds = [b for (fxd, b) in zip(fixed_idxs, bounds) if not fxd]

    opt_kwargs = dict(opt_kwargs)
    assert all([k not in opt_kwargs for k in ['bounds', 'callback']])
    if callback:
        opt_kwargs['callback'] = callback
    if bounds is not None and not all([l is None and h is None for (l, h) in bounds]):
        opt_kwargs['bounds'] = bounds

    ret = _find_minimum_helper(
        f, start_params, optimizer, opt_kwargs, **kwargs)
    if fixed_params:
        ret.x = get_x(ret.x)
    return ret 
Example 48
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License
def init_locs_2randn(tst_data, n_test_locs, subsample=10000, seed=1):
        """Fit a Gaussian to each dataset and draw half of n_test_locs from
        each. This way of initialization can be expensive if the input
        dimension is large.

        """
        if n_test_locs == 1:
            return MeanEmbeddingTest.init_locs_randn(tst_data, n_test_locs, seed)

        X, Y = tst_data.xy()
        n = X.shape[0]
        with util.NumpySeedContext(seed=seed):
            # Subsample X, Y if needed. Useful if the data are too large.
            if n > subsample:
                I = util.subsample_ind(n, subsample, seed=seed+2)
                X = X[I, :]
                Y = Y[I, :]


            d = X.shape[1]
            # fit a Gaussian to each of X, Y
            mean_x = np.mean(X, 0)
            mean_y = np.mean(Y, 0)
            cov_x = np.cov(X.T)
            [Dx, Vx] = np.linalg.eig(cov_x + 1e-3*np.eye(d))
            Dx = np.real(Dx)
            Vx = np.real(Vx)
            # a hack in case the data are high-dimensional and the covariance matrix
            # is low rank.
            Dx[Dx<=0] = 1e-3

            # shrink the covariance so that the drawn samples will not be so
            # far away from the data
            eig_pow = 0.9 # 1.0 = not shrink
            reduced_cov_x = Vx.dot(np.diag(Dx**eig_pow)).dot(Vx.T) + 1e-3*np.eye(d)
            cov_y = np.cov(Y.T)
            [Dy, Vy] = np.linalg.eig(cov_y + 1e-3*np.eye(d))
            Vy = np.real(Vy)
            Dy = np.real(Dy)
            Dy[Dy<=0] = 1e-3
            reduced_cov_y = Vy.dot(np.diag(Dy**eig_pow).dot(Vy.T)) + 1e-3*np.eye(d)
            # integer division
            Jx = old_div(n_test_locs,2)
            Jy = n_test_locs - Jx

            #from IPython.core.debugger import Tracer
            #t = Tracer()
            #t()
            assert Jx+Jy==n_test_locs, 'total test locations is not n_test_locs'
            Tx = np.random.multivariate_normal(mean_x, reduced_cov_x, Jx)
            Ty = np.random.multivariate_normal(mean_y, reduced_cov_y, Jy)
            T0 = np.vstack((Tx, Ty))

        return T0 
Example 49
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py    MIT License
def generic_nc_parameter(Z, reg='auto'):
    """
    Compute the non-centrality parameter of the non-central Chi-squared
    which is approximately the distribution of the test statistic under the H_1
    (and H_0). The empirical nc parameter is also the test statistic.

    - reg can be 'auto'. This will automatically determine the lowest value of
    the regularization parameter so that the statistic can be computed.
    """
    #from IPython.core.debugger import Tracer
    #Tracer()()

    n = Z.shape[0]
    Sig = np.cov(Z.T)
    W = np.mean(Z, 0)
    n_features = len(W)
    if n_features == 1:
        reg = 0 if reg=='auto' else reg
        s = float(n)*(W[0]**2)/(reg+Sig)
    else:
        if reg=='auto':
            # First compute with reg=0. If no problem, do nothing.
            # If the covariance is singular, make 0 eigenvalues positive.
            try:
                s = n*np.dot(np.linalg.solve(Sig, W), W)
            except np.linalg.LinAlgError:
                try:
                    # singular matrix
                    # eigen decompose
                    evals, eV = np.linalg.eig(Sig)
                    evals = np.real(evals)
                    eV = np.real(eV)
                    evals = np.maximum(0, evals)
                    # find the non-zero second smallest eigenvalue
                    snd_small = np.sort(evals[evals > 0])[0]
                    evals[evals <= 0] = snd_small

                    # reconstruct Sig
                    Sig = eV.dot(np.diag(evals)).dot(eV.T)
                    # try again
                    s = n*np.linalg.solve(Sig, W).dot(W)
                except:
                    s = -1
        else:
            # assume reg is a number
            # test statistic
            try:
                s = n*np.linalg.solve(Sig + reg*np.eye(Sig.shape[0]), W).dot(W)
            except np.linalg.LinAlgError:
                print('LinAlgError. Return -1 as the nc_parameter.')
                s = -1
    return s 
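
A usage sketch (my addition): with Z drawn i.i.d. standard normal, the statistic is approximately chi-squared with d degrees of freedom under H_0.

import autograd.numpy as np
Z = np.random.randn(500, 3)
print(generic_nc_parameter(Z))  # roughly chi^2 with 3 dof under H_0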
Example 50
Project: vittles   Author: rgiordan   File: test_sensitivity_lib.py    Apache License 2.0
def test_reverse_mode_swapping(self):
        dim1 = 3
        dim2 = 6

        a = (np.random.random(dim1) - 0.5)  / dim1
        b = (np.random.random(dim2) - 0.5) / dim2

        def objective12(x1, x2):
            return np.array([np.exp(np.dot(a, x1) + np.dot(b, x2)), 0])

        def objective21(x2, x1):
            return objective12(x1, x2)

        x1 = np.random.random(dim1)
        dx1 = np.random.random(dim1)
        solver1 = solver_lib.get_cholesky_solver(np.eye(dim1))

        x2 = np.random.random(dim2)
        dx2 = np.random.random(dim2)
        solver2 = solver_lib.get_cholesky_solver(np.eye(dim2))

        order = 2
        taylor_12 = \
            ParametricSensitivityTaylorExpansion(
                estimating_equation=objective12,
                forward_mode=False,
                input_val0=x1,
                hyper_val0=x2,
                hess_solver=solver1,
                order=order)

        taylor_21 = \
            ParametricSensitivityTaylorExpansion(
                estimating_equation=objective21,
                forward_mode=False,
                input_val0=x2,
                hyper_val0=x1,
                hess_solver=solver2,
                order=order)

        for k1, k2 in itertools.product(range(order + 1), range(order + 1)):
            print(k1, k2)
            dx1s = [dx1 for _ in range(k1)]
            dx2s = [dx2 for _ in range(k2)]

            # Just check that these evaluate.  They are not comparable
            # with one another.
            d12 = taylor_12._deriv_array.eval_directional_derivative(
                x1, x2, dx1s, dx2s)
            d21 = taylor_21._deriv_array.eval_directional_derivative(
                x2, x1, dx2s, dx1s)

            # These derivatives should be comparable.
            deriv12 = taylor_12._deriv_array.deriv_arrays(k1, k2)
            deriv21 = taylor_21._deriv_array.deriv_arrays(k2, k1)

            assert_array_almost_equal(
                _contract_tensor(deriv12, dx1s, dx2s),
                _contract_tensor(deriv21, dx2s, dx1s))