Python numpy.linalg.cholesky() Examples

The following are 30 code examples of numpy.linalg.cholesky(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module numpy.linalg, or try the search function.
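Before the project examples, here is a minimal self-contained sketch of the call itself (the matrix below is illustrative): numpy.linalg.cholesky(a) returns the lower-triangular factor L of a Hermitian, positive-definite matrix a, so that a = L L^H. If a is not positive definite, numpy.linalg.LinAlgError is raised, a behaviour several of the examples below rely on.

import numpy as np

# Build a symmetric positive-definite matrix from a random square one.
rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4))
a = a @ a.T + 4 * np.eye(4)

L = np.linalg.cholesky(a)        # lower-triangular factor
assert np.allclose(L, np.tril(L))
assert np.allclose(L @ L.T, a)   # a is reconstructed as L @ L.T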
Example #1
Source File: dml_algorithm.py    From pyDML with GNU General Public License v3.0
def transformer(self):
        """Computes a transformation matrix from the Mahalanobis matrix.
        ..math:: L = M^{1/2}

        Returns
        -------
        L : (d' x d) matrix, with d' <= d. It defines a projection. The distance can be calculated by
        ..math:: d(x,y) = \\|L(x-y)\\|_2.
        """

        if hasattr(self, 'L_'):
            return self.L_
        else:
            if hasattr(self, 'M_'):
                try:
                    L = cholesky(self.metric()).T
                    return L
                except:
                    L = metric_to_linear(self.metric())
                    return L
                # self.L_ = L
                return L
            else:
                raise NameError("Transformer was not defined. Algorithm was not fitted.") 
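As an illustrative check of the docstring's claim (not part of pyDML; the metric below is made up), a strictly positive-definite Mahalanobis matrix M gives L = cholesky(M).T with (x - y)^T M (x - y) = ||L(x - y)||_2^2:

import numpy as np
from numpy.linalg import cholesky

rng = np.random.default_rng(1)
M = rng.standard_normal((3, 3))
M = M @ M.T + np.eye(3)              # illustrative positive-definite metric

L = cholesky(M).T                    # upper-triangular, so M = L.T @ L
x, y = rng.standard_normal(3), rng.standard_normal(3)
d2_metric = (x - y) @ M @ (x - y)
d2_linear = np.sum((L @ (x - y)) ** 2)
assert np.isclose(d2_metric, d2_linear)

Note that cholesky(M).T is an upper-triangular factor rather than the symmetric square root M^{1/2}, but any L with L.T @ L == M reproduces the same distances.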
Example #2
Source File: copulapdf.py    From copula-py with GNU General Public License v3.0
def _t(u, rho, nu):
    d = u.shape[1]
    nu = float(nu)
    
    try:
        R = cholesky(rho)
    except LinAlgError:
        raise ValueError('Provided Rho matrix is not Positive Definite!')
    
    ticdf = t.ppf(u, nu)
    
    z = solve(R,ticdf.T)
    z = z.T
    logSqrtDetRho = np.sum(np.log(np.diag(R)))
    const = gammaln((nu+d)/2.0) + (d-1)*gammaln(nu/2.0) - d*gammaln((nu+1)/2.0) - logSqrtDetRho
    zsq_sum = np.sum(np.power(z, 2), axis=1)
    numer = -((nu+d)/2.0) * np.log(1.0 + zsq_sum/nu)
    denom = np.sum(-((nu+1)/2) * np.log(1 + (np.power(ticdf,2))/nu), axis=1)
    y = np.exp(const + numer - denom)
    
    return y 
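One step above is easy to verify in isolation: because rho = R @ R.T, logSqrtDetRho = sum(log(diag(R))) equals half the log-determinant of rho. A small standalone check with an illustrative correlation matrix:

import numpy as np
from numpy.linalg import cholesky, slogdet

rho = np.array([[1.0, 0.3, 0.2],
                [0.3, 1.0, 0.4],
                [0.2, 0.4, 1.0]])    # illustrative correlation matrix

R = cholesky(rho)
assert np.isclose(np.sum(np.log(np.diag(R))), 0.5 * slogdet(rho)[1])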
Example #3
Source File: expectation_acquisition.py    From emukit with Apache License 2.0
def dgp_dL_via_Sigma(self, L: np.ndarray, L_inv: np.ndarray, dsigma: np.ndarray) -> np.ndarray:
        """
        Partial derivatives of the gp posterior samples with respect to the cholesky of the posterior covariance matrix given the partial derivative values with respect to the posterior covariance matrix.
        
        :param s: Samples of observations from the posterior distribution of the model
        :param L: Cholesky decomposition(s) of the posterior covariance matrix (samples)
        :param L_inv: Inverse(s) of Cholesky decomposition(s) of the posterior covariance matrix (samples)
        :param dsigma: Partial derivatives with respect to the posterior covariance matrix
        :return: the derivative of the gp samples with respect to the choleskies
        """
        E = np.tril(2*np.ones((L.shape[1],L.shape[2])),-1 ) +np.eye(L.shape[2])
        dl = np.empty(dsigma.shape) #np.empty((N,b,b,b,d))
        for i in range(dsigma.shape[3]):
            for j in range(dsigma.shape[4]):
                tmp1 = np.matmul(L_inv, dsigma[:,:,:,i,j]) # N x b x b
                tmp2 = np.matmul(tmp1, np.swapaxes(L_inv,1,2)) # N x b x b
                tmp3 = tmp2 * E[None,:,:] # N x b x b
                dl[:,:,:,i,j] = 0.5 * np.matmul(L, tmp3) # N x b x b
        return dl # N x b x b 
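As a hedged single-matrix sketch of the identity the loop implements (the emukit code applies it per sample over batches; the matrices here are illustrative): with S = L @ L.T, a symmetric perturbation D changes the factor by dL = L @ Phi(L^{-1} D L^{-T}), where Phi keeps the strict lower triangle and halves the diagonal, which is exactly the 0.5 * np.matmul(L, E * ...) step above. A finite-difference comparison:

import numpy as np

rng = np.random.default_rng(2)
A = rng.standard_normal((4, 4))
S = A @ A.T + np.eye(4)                       # illustrative SPD matrix
L = np.linalg.cholesky(S)
L_inv = np.linalg.inv(L)

D = rng.standard_normal((4, 4))
D = D + D.T                                   # symmetric perturbation direction

# E doubles the strict lower triangle and keeps the diagonal once,
# so 0.5 * (E * X) is the Phi map from the formula above.
E = np.tril(2 * np.ones((4, 4)), -1) + np.eye(4)
dL = 0.5 * L @ (E * (L_inv @ D @ L_inv.T))

h = 1e-6
dL_fd = (np.linalg.cholesky(S + h * D) - L) / h
assert np.allclose(dL, dL_fd, atol=1e-4)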
Example #4
Source File: test_linalg.py    From recruit with Apache License 2.0
def test_basic_property(self):
        # Check A = L L^H
        shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
        dtypes = (np.float32, np.float64, np.complex64, np.complex128)

        for shape, dtype in itertools.product(shapes, dtypes):
            np.random.seed(1)
            a = np.random.randn(*shape)
            if np.issubdtype(dtype, np.complexfloating):
                a = a + 1j*np.random.randn(*shape)

            t = list(range(len(shape)))
            t[-2:] = -1, -2

            a = np.matmul(a.transpose(t).conj(), a)
            a = np.asarray(a, dtype=dtype)

            c = np.linalg.cholesky(a)

            b = np.matmul(c, c.transpose(t).conj())
            assert_allclose(b, a,
                            err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                            atol=500 * a.shape[0] * np.finfo(dtype).eps) 
Example #5
Source File: bayesian_matrix_factorization.py    From yelp with GNU Lesser General Public License v2.1
def update_item_params(self):
        x_bar = np.mean(self.item_features, 0).T
        x_bar = np.reshape(x_bar, (self.num_features, 1))
        S_bar = np.cov(self.item_features.T)
        norm_X_bar = self.mu0_item - x_bar

        WI_post = inv(inv(self.WI_item) + self.num_items * S_bar + \
            np.dot(norm_X_bar, norm_X_bar.T) * \
            (self.num_items * self.beta_item) / (self.beta_item + self.num_items))

        # Enforce exact symmetry; guards against floating-point asymmetry from the updates above.
        WI_post = (WI_post + WI_post.T) / 2.0

        # update alpha_item
        self.alpha_item = sample_wishart(WI_post, self.df_post_item)

        # update mu_item
        mu_temp = (self.beta_item * self.mu0_item + self.num_items * x_bar) / \
            (self.beta_item + self.num_items)
        lam = cholesky(inv((self.beta_item + self.num_items) * self.alpha_item))
        # lam = lam.T
        self.mu_item = mu_temp + np.dot(lam, NormalRandom.generate_matrix(self.num_features, 1))

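The last two lines draw mu_item from a multivariate normal: with lam the Cholesky factor of the covariance and z a vector of standard normals, mu_temp + lam @ z has covariance lam @ lam.T. A standalone sketch of that step, with numpy's generator standing in for the project's NormalRandom helper and illustrative numbers:

import numpy as np
from numpy.linalg import cholesky

rng = np.random.default_rng(3)
mean = np.array([[1.0], [-2.0], [0.5]])
cov = np.array([[2.0, 0.6, 0.2],
                [0.6, 1.5, 0.3],
                [0.2, 0.3, 1.0]])          # illustrative covariance

lam = cholesky(cov)                         # cov == lam @ lam.T
samples = mean + lam @ rng.standard_normal((3, 100000))
print(np.cov(samples))                      # approaches cov as the sample count grows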
Example #6
Source File: bayesian_matrix_factorization.py    From yelp with GNU Lesser General Public License v2.1
def update_user_params(self):
        x_bar = np.mean(self.user_features, 0).T
        x_bar = np.reshape(x_bar, (self.num_features, 1))
        S_bar = np.cov(self.user_features.T)
        norm_X_bar = self.mu0_user - x_bar

        WI_post = inv(inv(self.WI_user) + self.num_users * S_bar + \
            np.dot(norm_X_bar, norm_X_bar.T) * \
            (self.num_users * self.beta_user) / (self.beta_user + self.num_users))

        # Enforce exact symmetry; guards against floating-point asymmetry from the updates above.
        WI_post = (WI_post + WI_post.T) / 2.0

        # update alpha_user
        self.alpha_user = sample_wishart(WI_post, self.df_post_user)

        # update mu_user
        mu_temp = (self.beta_user * self.mu0_user + self.num_users * x_bar) / \
            (self.beta_user + self.num_users)
        lam = cholesky(inv((self.beta_user + self.num_users) * self.alpha_user))
        self.mu_user = mu_temp + np.dot(lam, NormalRandom.generate_matrix(self.num_features, 1)) 
Example #7
Source File: bayesian_matrix_factorization.py    From yelp with GNU Lesser General Public License v2.1
def udpate_item_features(self):
        self.matrix = self.matrix.T
        # Gibbs sampling for item features
        for item_id in range(self.num_items):
            self.results_file.write('Item %d\n' % (item_id+1))
            users = self.matrix[:, item_id] > 0.0
            features = self.user_features[users, :]
            ratings = self.matrix[users, item_id] - self.mean_rating
            rating_len = len(ratings)
            ratings = np.reshape(ratings, (rating_len, 1))

            covar = inv(
                self.alpha_item + self.beta * np.dot(features.T, features))
            lam = cholesky(covar)
            temp = self.beta * \
                np.dot(features.T, ratings) + np.dot(
                    self.alpha_item, self.mu_item)
            mean = np.dot(covar, temp)
            temp_feature = mean + np.dot(lam, NormalRandom.generate_matrix(self.num_features, 1))
            temp_feature = np.reshape(temp_feature, (self.num_features,))
            self.item_features[item_id, :] = temp_feature 
Example #8
Source File: test_linalg.py    From pySINDy with MIT License
def test_basic_property(self):
        # Check A = L L^H
        shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
        dtypes = (np.float32, np.float64, np.complex64, np.complex128)

        for shape, dtype in itertools.product(shapes, dtypes):
            np.random.seed(1)
            a = np.random.randn(*shape)
            if np.issubdtype(dtype, np.complexfloating):
                a = a + 1j*np.random.randn(*shape)

            t = list(range(len(shape)))
            t[-2:] = -1, -2

            a = np.matmul(a.transpose(t).conj(), a)
            a = np.asarray(a, dtype=dtype)

            c = np.linalg.cholesky(a)

            b = np.matmul(c, c.transpose(t).conj())
            assert_allclose(b, a,
                            err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                            atol=500 * a.shape[0] * np.finfo(dtype).eps) 
Example #9
Source File: test_linalg.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def test_basic_property(self):
        # Check A = L L^H
        shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
        dtypes = (np.float32, np.float64, np.complex64, np.complex128)

        for shape, dtype in itertools.product(shapes, dtypes):
            np.random.seed(1)
            a = np.random.randn(*shape)
            if np.issubdtype(dtype, np.complexfloating):
                a = a + 1j*np.random.randn(*shape)

            t = list(range(len(shape)))
            t[-2:] = -1, -2

            a = np.matmul(a.transpose(t).conj(), a)
            a = np.asarray(a, dtype=dtype)

            c = np.linalg.cholesky(a)

            b = np.matmul(c, c.transpose(t).conj())
            assert_allclose(b, a,
                            err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                            atol=500 * a.shape[0] * np.finfo(dtype).eps) 
Example #10
Source File: test_linalg.py    From GraphicDesignPatternByPython with MIT License
def test_basic_property(self):
        # Check A = L L^H
        shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
        dtypes = (np.float32, np.float64, np.complex64, np.complex128)

        for shape, dtype in itertools.product(shapes, dtypes):
            np.random.seed(1)
            a = np.random.randn(*shape)
            if np.issubdtype(dtype, np.complexfloating):
                a = a + 1j*np.random.randn(*shape)

            t = list(range(len(shape)))
            t[-2:] = -1, -2

            a = np.matmul(a.transpose(t).conj(), a)
            a = np.asarray(a, dtype=dtype)

            c = np.linalg.cholesky(a)

            b = np.matmul(c, c.transpose(t).conj())
            assert_allclose(b, a,
                            err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                            atol=500 * a.shape[0] * np.finfo(dtype).eps) 
Example #11
Source File: test_linalg.py    From coffeegrindsize with MIT License
def test_basic_property(self):
        # Check A = L L^H
        shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
        dtypes = (np.float32, np.float64, np.complex64, np.complex128)

        for shape, dtype in itertools.product(shapes, dtypes):
            np.random.seed(1)
            a = np.random.randn(*shape)
            if np.issubdtype(dtype, np.complexfloating):
                a = a + 1j*np.random.randn(*shape)

            t = list(range(len(shape)))
            t[-2:] = -1, -2

            a = np.matmul(a.transpose(t).conj(), a)
            a = np.asarray(a, dtype=dtype)

            c = np.linalg.cholesky(a)

            b = np.matmul(c, c.transpose(t).conj())
            assert_allclose(b, a,
                            err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                            atol=500 * a.shape[0] * np.finfo(dtype).eps) 
Example #12
Source File: test_linalg.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_basic_property(self):
        # Check A = L L^H
        shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
        dtypes = (np.float32, np.float64, np.complex64, np.complex128)

        for shape, dtype in itertools.product(shapes, dtypes):
            np.random.seed(1)
            a = np.random.randn(*shape)
            if np.issubdtype(dtype, np.complexfloating):
                a = a + 1j*np.random.randn(*shape)

            t = list(range(len(shape)))
            t[-2:] = -1, -2

            a = np.matmul(a.transpose(t).conj(), a)
            a = np.asarray(a, dtype=dtype)

            c = np.linalg.cholesky(a)

            b = np.matmul(c, c.transpose(t).conj())
            assert_allclose(b, a,
                            err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                            atol=500 * a.shape[0] * np.finfo(dtype).eps) 
Example #13
Source File: test_linalg.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def test_basic_property(self):
        # Check A = L L^H
        shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
        dtypes = (np.float32, np.float64, np.complex64, np.complex128)

        for shape, dtype in itertools.product(shapes, dtypes):
            np.random.seed(1)
            a = np.random.randn(*shape)
            if np.issubdtype(dtype, np.complexfloating):
                a = a + 1j*np.random.randn(*shape)

            t = list(range(len(shape)))
            t[-2:] = -1, -2

            a = np.matmul(a.transpose(t).conj(), a)
            a = np.asarray(a, dtype=dtype)

            c = np.linalg.cholesky(a)

            b = np.matmul(c, c.transpose(t).conj())
            assert_allclose(b, a,
                            err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                            atol=500 * a.shape[0] * np.finfo(dtype).eps) 
Example #14
Source File: test_linalg.py    From twitter-stock-recommendation with MIT License
def test_basic_property(self):
        # Check A = L L^H
        shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
        dtypes = (np.float32, np.float64, np.complex64, np.complex128)

        for shape, dtype in itertools.product(shapes, dtypes):
            np.random.seed(1)
            a = np.random.randn(*shape)
            if np.issubdtype(dtype, np.complexfloating):
                a = a + 1j*np.random.randn(*shape)

            t = list(range(len(shape)))
            t[-2:] = -1, -2

            a = np.matmul(a.transpose(t).conj(), a)
            a = np.asarray(a, dtype=dtype)

            c = np.linalg.cholesky(a)

            b = np.matmul(c, c.transpose(t).conj())
            assert_allclose(b, a,
                            err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                            atol=500 * a.shape[0] * np.finfo(dtype).eps) 
Example #15
Source File: test_linalg.py    From vnpy_crypto with MIT License
def test_basic_property(self):
        # Check A = L L^H
        shapes = [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]
        dtypes = (np.float32, np.float64, np.complex64, np.complex128)

        for shape, dtype in itertools.product(shapes, dtypes):
            np.random.seed(1)
            a = np.random.randn(*shape)
            if np.issubdtype(dtype, np.complexfloating):
                a = a + 1j*np.random.randn(*shape)

            t = list(range(len(shape)))
            t[-2:] = -1, -2

            a = np.matmul(a.transpose(t).conj(), a)
            a = np.asarray(a, dtype=dtype)

            c = np.linalg.cholesky(a)

            b = np.matmul(c, c.transpose(t).conj())
            assert_allclose(b, a,
                            err_msg="{} {}\n{}\n{}".format(shape, dtype, a, c),
                            atol=500 * a.shape[0] * np.finfo(dtype).eps) 
Example #16
Source File: test_design.py    From torch-kalman with MIT License
def test_design_r(self):
        design = simple_mv_velocity_design(3)
        batch_design = design.for_batch(2, 1)

        cov = batch_design.R(0)[0]
        self.assertTupleEqual(cov.size(), (3, 3))

        self.assertTrue(cov.requires_grad)
        cholesky_log_diag = design.measure_covariance.param_dict()['cholesky_log_diag']
        cholesky_off_diag = design.measure_covariance.param_dict()['cholesky_off_diag']

        cov = cov.data.numpy()
        self.assertTrue(np.isclose(cov, cov.T).all(), msg="Covariance is not symmetric.")
        chol = cholesky(cov)

        for a, b in zip(torch.exp(cholesky_log_diag).tolist(), np.diag(chol).tolist()):
            self.assertAlmostEqual(a, b, places=4)

        for a, b in zip(cholesky_off_diag.tolist(), chol[np.tril_indices_from(chol, k=-1)].tolist()):
            self.assertAlmostEqual(a, b, places=4) 
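A plain-numpy sketch of the parameterisation the test relies on (the parameter values and the row-major filling order of the off-diagonals are assumptions for illustration, not torch-kalman's API): build a lower-triangular factor from exp(log-diag) and off-diag entries, form the covariance, and recover the same parameters with np.linalg.cholesky, just as the assertions above do.

import numpy as np

log_diag = np.array([0.1, -0.3, 0.2])
off_diag = np.array([0.5, -0.4, 0.7])       # strict lower triangle, row-major

L = np.diag(np.exp(log_diag))
L[np.tril_indices(3, k=-1)] = off_diag
cov = L @ L.T

chol = np.linalg.cholesky(cov)              # unique factor since diag(L) > 0
assert np.allclose(np.log(np.diag(chol)), log_diag)
assert np.allclose(chol[np.tril_indices_from(chol, k=-1)], off_diag)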
Example #17
Source File: test_regression.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def test_lapack_endian(self):
        # For bug #1482
        a = array([[5.7998084,  -2.1825367],
                   [-2.1825367,   9.85910595]], dtype='>f8')
        b = array(a, dtype='<f8')

        ap = linalg.cholesky(a)
        bp = linalg.cholesky(b)
        assert_array_equal(ap, bp) 
Example #18
Source File: posdef.py    From markowitz-portfolio-optimization with MIT License
def isPD(B):
    """Returns true when input is positive-definite, via Cholesky"""
    try:
        _ = la.cholesky(B)
        return True
    except la.LinAlgError:
        return False 
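A quick usage check, restating isPD for self-containment (the test matrices are made up): the second matrix has eigenvalues 3 and -1, so the factorisation raises LinAlgError and isPD returns False.

import numpy as np
import numpy.linalg as la

def isPD(B):                                 # as defined in the snippet above
    try:
        _ = la.cholesky(B)
        return True
    except la.LinAlgError:
        return False

print(isPD(np.array([[2.0, 0.5], [0.5, 1.0]])))   # True
print(isPD(np.array([[1.0, 2.0], [2.0, 1.0]])))   # False: not positive definite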
Example #19
Source File: test_regression.py    From keras-lambda with MIT License
def test_lapack_endian(self):
        # For bug #1482
        a = array([[5.7998084,  -2.1825367],
                   [-2.1825367,   9.85910595]], dtype='>f8')
        b = array(a, dtype='<f8')

        ap = linalg.cholesky(a)
        bp = linalg.cholesky(b)
        assert_array_equal(ap, bp) 
Example #20
Source File: test_regression.py    From ImageFusion with MIT License
def test_lapack_endian(self):
        # For bug #1482
        a = array([[5.7998084,  -2.1825367 ],
                   [-2.1825367,   9.85910595]], dtype='>f8')
        b = array(a, dtype='<f8')

        ap = linalg.cholesky(a)
        bp = linalg.cholesky(b)
        assert_array_equal(ap, bp) 
Example #21
Source File: test_regression.py    From twitter-stock-recommendation with MIT License
def test_lapack_endian(self):
        # For bug #1482
        a = array([[5.7998084,  -2.1825367],
                   [-2.1825367,   9.85910595]], dtype='>f8')
        b = array(a, dtype='<f8')

        ap = linalg.cholesky(a)
        bp = linalg.cholesky(b)
        assert_array_equal(ap, bp) 
Example #22
Source File: test_linalg.py    From twitter-stock-recommendation with MIT License
def test_0_size(self):
        class ArraySubclass(np.ndarray):
            pass
        a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
        res = linalg.cholesky(a)
        assert_equal(a.shape, res.shape)
        assert_(res.dtype.type is np.float64)
        # for documentation purpose:
        assert_(isinstance(res, np.ndarray))

        a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass)
        res = linalg.cholesky(a)
        assert_equal(a.shape, res.shape)
        assert_(res.dtype.type is np.complex64)
        assert_(isinstance(res, np.ndarray)) 
Example #23
Source File: bayesian_matrix_factorization.py    From yelp with GNU Lesser General Public License v2.1
def update_user_features(self):
        self.matrix = self.matrix.T
        # print('matrix', self.matrix.shape, self.matrix[0:10, 0:5])

        # Gibbs sampling for user features
        for user_id in range(self.num_users):
            self.results_file.write('User %d\n' % (user_id+1))
            items = self.matrix[:, user_id] > 0.0
            features = self.item_features[items, :]
            ratings = self.matrix[items, user_id] - self.mean_rating
            rating_len = len(ratings)
            ratings = np.reshape(ratings, (rating_len, 1))

            covar = inv(
                self.alpha_user + self.beta * np.dot(features.T, features))

            temp = self.beta * \
                np.dot(features.T, ratings) + np.dot(
                    self.alpha_user, self.mu_user)
            mean = np.dot(covar, temp)
            lam = cholesky(covar)
            temp_feature = mean + np.dot(lam, NormalRandom.generate_matrix(self.num_features, 1))
            temp_feature = np.reshape(temp_feature, (self.num_features,))
            self.user_features[user_id, :] = temp_feature

        self.results_file.write('user_features \t (%d,%d) \t %16.16f\n' % (self.user_features.shape[0], self.user_features.shape[1], self.user_features[0,0]))
        # transpose back
        # self.matrix = self.matrix.T 
Example #24
Source File: bayesian_matrix_factorization.py    From yelp with GNU Lesser General Public License v2.1
def sample_wishart(sigma, dof):
    '''
    Returns a sample from the Wishart distribution, the conjugate prior for precision matrices.
    '''

    n = sigma.shape[0]

    chol = np.linalg.cholesky(sigma).T

    rnd_matrix = NormalRandom.generate_matrix(dof, n)
    X = np.dot(rnd_matrix, chol)
    W = np.dot(X.T, X)

    return W 
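A hedged standalone variant of the sampler (numpy's generator replaces the project's NormalRandom helper; sigma, dof and the sample count are illustrative), with a Monte Carlo check of the Wishart mean E[W] = dof * sigma:

import numpy as np

def sample_wishart_np(sigma, dof, rng):
    n = sigma.shape[0]
    chol = np.linalg.cholesky(sigma).T               # upper factor, sigma = chol.T @ chol
    X = rng.standard_normal((dof, n)) @ chol         # rows ~ N(0, sigma)
    return np.dot(X.T, X)

rng = np.random.default_rng(4)
sigma = np.array([[1.0, 0.4], [0.4, 2.0]])
dof = 10
mean_W = np.mean([sample_wishart_np(sigma, dof, rng) for _ in range(20000)], axis=0)
print(mean_W / dof)                                  # should be close to sigma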
Example #25
Source File: var_model.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _chol_sigma_u(self):
        return chol(self.sigma_u) 
Example #26
Source File: matrices.py    From mici with MIT License
def factor(self):
        if self._factor is None:
            try:
                self._factor = TriangularMatrix(
                    nla.cholesky(self._sign * self._array), lower=True, 
                    make_triangular=False)
            except nla.LinAlgError as e:
                raise LinAlgError('Cholesky factorisation failed.') from e
        return self._factor 
Example #27
Source File: matrices.py    From mici with MIT License
def _construct_sqrt(self):
        # Uses O(dim_inner**3 + dim_inner**2 * dim_outer) cost implementation
        # proposed in
        #   Ambikasaran, O'Neill & Singh (2016). Fast symmetric factorization
        #   of hierarchical matrices with applications. arxiv:1405.0223.
        # Variable naming below follows notation in Algorithm 1 in paper
        W = self.pos_def_matrix.sqrt
        K = self.inner_pos_def_matrix
        U = W.inv @ self.factor_matrix
        L = TriangularMatrix(
            nla.cholesky(U.T @ U.array), lower=True, make_triangular=False)
        I_outer, I_inner = IdentityMatrix(U.shape[0]), np.identity(U.shape[1])
        M = sla.sqrtm(I_inner + L.T @ (K @ L.array))
        X = DenseSymmetricMatrix(L.inv.T @ ((M - I_inner) @ L.inv))
        return W @ SymmetricLowRankUpdateMatrix(U, I_outer, X) 
Example #28
Source File: test_linalg.py    From elasticintel with GNU General Public License v3.0
def test_0_size(self):
        class ArraySubclass(np.ndarray):
            pass
        a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
        res = linalg.cholesky(a)
        assert_equal(a.shape, res.shape)
        assert_(res.dtype.type is np.float64)
        # for documentation purpose:
        assert_(isinstance(res, np.ndarray))

        a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass)
        res = linalg.cholesky(a)
        assert_equal(a.shape, res.shape)
        assert_(res.dtype.type is np.complex64)
        assert_(isinstance(res, np.ndarray)) 
Example #29
Source File: test_linalg.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def test_0_size(self):
        class ArraySubclass(np.ndarray):
            pass
        a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
        res = linalg.cholesky(a)
        assert_equal(a.shape, res.shape)
        assert_(res.dtype.type is np.float64)
        # for documentation purpose:
        assert_(isinstance(res, np.ndarray))

        a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass)
        res = linalg.cholesky(a)
        assert_equal(a.shape, res.shape)
        assert_(res.dtype.type is np.complex64)
        assert_(isinstance(res, np.ndarray)) 
Example #30
Source File: test_regression.py    From elasticintel with GNU General Public License v3.0
def test_lapack_endian(self):
        # For bug #1482
        a = array([[5.7998084,  -2.1825367],
                   [-2.1825367,   9.85910595]], dtype='>f8')
        b = array(a, dtype='<f8')

        ap = linalg.cholesky(a)
        bp = linalg.cholesky(b)
        assert_array_equal(ap, bp)