Python numpy.tril_indices_from() Examples

The following are 14 code examples showing how to use numpy.tril_indices_from(). They are extracted from open source projects; the originating project, author, file, and license are listed above each example.


You may also want to check out all available functions and classes of the module numpy.
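
Before the examples, a minimal sketch of what numpy.tril_indices_from() returns may be useful: given an existing 2-D array, it yields the row and column index arrays of that array's lower triangle, with an optional diagonal offset k. The snippet below is illustrative only and not taken from any of the projects.

import numpy as np

a = np.arange(16).reshape(4, 4)

# Row and column indices of the lower triangle, diagonal included (k=0).
rows, cols = np.tril_indices_from(a)
print(a[rows, cols])        # [ 0  4  5  8  9 10 12 13 14 15]

# k=-1 drops the diagonal and keeps only the strict lower triangle.
strict = np.tril_indices_from(a, k=-1)
print(a[strict])            # [ 4  8  9 12 13 14]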

Example 1
Project: torch-kalman   Author: strongio   File: test_design.py    License: MIT License
def test_design_r(self):
        design = simple_mv_velocity_design(3)
        batch_design = design.for_batch(2, 1)

        cov = batch_design.R(0)[0]
        self.assertTupleEqual(cov.size(), (3, 3))

        self.assertTrue(cov.requires_grad)
        cholesky_log_diag = design.measure_covariance.param_dict()['cholesky_log_diag']
        cholesky_off_diag = design.measure_covariance.param_dict()['cholesky_off_diag']

        cov = cov.data.numpy()
        self.assertTrue(np.isclose(cov, cov.T).all(), msg="Covariance is not symmetric.")
        chol = cholesky(cov)

        for a, b in zip(torch.exp(cholesky_log_diag).tolist(), np.diag(chol).tolist()):
            self.assertAlmostEqual(a, b, places=4)

        for a, b in zip(cholesky_off_diag.tolist(), chol[np.tril_indices_from(chol, k=-1)].tolist()):
            self.assertAlmostEqual(a, b, places=4) 
Example 2
Project: GraphicDesignPatternByPython   Author: Relph1119   File: _hessian_update_strategy.py    License: MIT License
def get_matrix(self):
        """Return the current internal matrix.

        Returns
        -------
        M : ndarray, shape (n, n)
            Dense matrix containing either the Hessian or its inverse
            (depending on how `approx_type` was defined).
        """
        if self.approx_type == 'hess':
            M = np.copy(self.B)
        else:
            M = np.copy(self.H)
        li = np.tril_indices_from(M, k=-1)
        M[li] = M.T[li]
        return M 
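
The mirroring step above, M[li] = M.T[li] over the strict lower-triangle indices, is a common way to force a matrix to be exactly symmetric. A stand-alone sketch of the same pattern (the variable names are illustrative, not from the project):

import numpy as np

# The upper triangle holds the authoritative values; the lower one starts empty.
M = np.triu(np.arange(1.0, 10.0).reshape(3, 3))

# Mirror each upper-triangle entry into its lower-triangle slot.
li = np.tril_indices_from(M, k=-1)
M[li] = M.T[li]

assert np.allclose(M, M.T)   # M is now exactly symmetric
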
Example 3
Project: qml   Author: qmlcode   File: math.py    License: MIT License
def cho_invert(A):
    """ Returns the inverse of a positive definite matrix, using a Cholesky decomposition
        via calls to LAPACK dpotrf and dpotri in the F2PY module.

        :param A: Matrix (symmetric and positive definite, left-hand side).
        :type A: numpy array

        :return: The inverse matrix
        :rtype: numpy array
    """

    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    I = np.asfortranarray(A)

    fcho_invert(I)

    # Indices of the lower triangle (diagonal included)
    i_lower = np.tril_indices_from(A)

    # Copy lower triangle to upper
    I.T[i_lower] = I[i_lower]

    return I 
Example 4
Project: qml   Author: qmlcode   File: math.py    License: MIT License
def bkf_invert(A):
    """ Returns the inverse of a positive definite matrix, using a Cholesky decomposition
        via calls to LAPACK dpotrf and dpotri in the F2PY module.

        :param A: Matrix (symmetric and positive definite, left-hand side).
        :type A: numpy array

        :return: The inverse matrix
        :rtype: numpy array
    """

    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    I = np.asfortranarray(A)

    fbkf_invert(I)

    # Indices of the lower triangle (diagonal included)
    i_lower = np.tril_indices_from(A)

    # Copy lower triangle to upper
    I.T[i_lower] = I[i_lower]

    return I 
Example 5
Project: OpenFermion   Author: quantumlib   File: _namedtensor_test.py    License: Apache License 2.0
def test_tensor_iterator():
    a = np.arange(16).reshape((4, 4))
    test_tensor = Tensor(tensor=a)
    assert np.allclose(test_tensor.data, a)
    assert test_tensor.size == 16
    assert isinstance(test_tensor.basis, Bijection)

    a_triu = a[np.triu_indices_from(a)]
    a_tril = a[np.tril_indices_from(a)]

    counter = 0
    for val, idx in test_tensor.utri_iterator():
        assert val == a[tuple(idx)]
        assert val == a_triu[counter]
        counter += 1
    assert counter == 4 * (4 + 1) / 2

    counter = 0
    for val, idx in test_tensor.ltri_iterator():
        assert val == a[tuple(idx)]
        assert val == a_tril[counter]
        counter += 1
    assert counter == 4 * (4 + 1) / 2

    counter = 0
    for val, idx in test_tensor.all_iterator():
        assert val == a[tuple(idx)]
        counter += 1

    assert np.allclose(test_tensor.vectorize(), a.reshape((-1, 1), order='C'))

    with pytest.raises(TypeError):
        list(test_tensor._iterator('blah')) 
Example 6
Project: q2mm   Author: ericchansen   File: filetypes.py    License: MIT License
def read_self(self):
        logger.log(5, 'READING: {}'.format(self.filename))
        stuff = re.search(
            'Atomic numbers\s+I\s+N=\s+(?P<num_atoms>\d+)'
            '\n\s+(?P<anums>.*?)'
            'Nuclear charges.*?Current cartesian coordinates.*?\n(?P<coords>.*?)'
            'Force Field'
            '.*?Real atomic weights.*?\n(?P<masses>.*?)'
            'Atom fragment info.*?Cartesian Gradient.*?\n(?P<evals>.*?)'
            'Cartesian Force Constants.*?\n(?P<hess>.*?)'
            'Dipole Moment',
            open(self.path, 'r').read(), flags=re.DOTALL)
        anums = [int(x) for x in stuff.group('anums').split()]
        masses = [float(x) for x in stuff.group('masses').split()]
        coords = [float(x) for x in stuff.group('coords').split()]
        coords = [coords[i:i+3] for i in range(0, len(coords), 3)]
        for anum, mass, coord in zip(anums, masses, coords):
            self.atoms.append(
                Atom(
                    atomic_num = anum,
                    coords = coord,
                    exact_mass = mass)
                )
        logger.log(5, '  -- Read {} atoms.'.format(len(self.atoms)))
        self.evals = np.array(
            [float(x) for x in stuff.group('evals').split()], dtype=float)
        logger.log(5, '  -- Read {} eigenvectors.'.format(len(self.evals)))
        self.low_tri = np.array(
            [float(x) for x in stuff.group('hess').split()], dtype=float)
        one_dim = len(anums) * 3
        self._hess = np.empty([one_dim, one_dim], dtype=float)
        self._hess[np.tril_indices_from(self._hess)] = self.low_tri
        self._hess += np.tril(self._hess, -1).T
        # Convert to MacroModel units.
        self._hess *= co.HESSIAN_CONVERSION
        logger.log(5, '  -- Read {} Hessian.'.format(self._hess.shape)) 
Example 7
Project: pandas-plink   Author: limix   File: _read_rel.py    License: MIT License
def _1d_to_2d(values, n):
    from numpy import zeros, tril_indices_from, tril

    K = zeros((n, n))
    K[tril_indices_from(K)] = values
    K = K + tril(K, -1).T
    return K 
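
Examples 6 and 7 both unpack a flat vector of row-major lower-triangle values into a full symmetric matrix. A round-trip sketch of that idea, assuming the packed vector was produced in np.tril_indices_from order (the names below are illustrative):

import numpy as np

n = 4
full = np.arange(1.0, 17.0).reshape(n, n)
sym = (full + full.T) / 2.0                  # a symmetric matrix to pack

packed = sym[np.tril_indices_from(sym)]      # n * (n + 1) // 2 values, row-major

K = np.zeros((n, n))
K[np.tril_indices_from(K)] = packed          # fill the lower triangle
K = K + np.tril(K, -1).T                     # mirror the strict lower triangle upward

assert np.allclose(K, sym)
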
Example 8
Project: qml   Author: qmlcode   File: math.py    License: MIT License
def cho_solve(A, y):
    """ Solves the equation

            :math:`A x = y`

        for x using a Cholesky decomposition via calls to LAPACK dpotrf and dpotrs in the F2PY module. Preserves the input matrix A.

        :param A: Matrix (symmetric and positive definite, left-hand side).
        :type A: numpy array
        :param y: Vector (right-hand side of the equation).
        :type y: numpy array

        :return: The solution vector.
        :rtype: numpy array
        """

    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    if len(y.shape) != 1 or y.shape[0] != A.shape[1]:
        raise ValueError('expected matrix and vector of same stride size')

    n = A.shape[0]

    # Back up the diagonal before the Cholesky decomposition
    A_diag = A[np.diag_indices_from(A)]

    x = np.zeros(n)
    fcho_solve(A, y, x)

    # Restore the diagonal after the Cholesky decomposition
    A[np.diag_indices_from(A)] = A_diag

    # Copy lower triangle to upper
    i_lower = np.tril_indices_from(A)
    A.T[i_lower] = A[i_lower]

    return x 
Example 9
Project: qml   Author: qmlcode   File: math.py    License: MIT License
def bkf_solve(A, y):
    """ Solves the equation

            :math:`A x = y`

        for x using a Bunch-Kaufman factorization via a call to fbkf_solve in the F2PY module. Preserves the input matrix A.

        :param A: Matrix (symmetric and positive definite, left-hand side).
        :type A: numpy array
        :param y: Vector (right-hand side of the equation).
        :type y: numpy array

        :return: The solution vector.
        :rtype: numpy array
        """

    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    if len(y.shape) != 1 or y.shape[0] != A.shape[1]:
        raise ValueError('expected matrix and vector of same stride size')

    n = A.shape[0]

    # Back up the diagonal before the factorization
    A_diag = A[np.diag_indices_from(A)]

    x = np.zeros(n)
    fbkf_solve(A, y, x)

    # Restore the diagonal after the factorization
    A[np.diag_indices_from(A)] = A_diag

    # Copy lower triangle to upper
    i_lower = np.tril_indices_from(A)
    A.T[i_lower] = A[i_lower]

    return x 
Example 10
Project: graspy   Author: neurodata   File: test_models.py    License: Apache License 2.0
def _test_score(estimator, p_mat, graph):
    np.random.seed(8888)
    graph = graph.copy()
    p_mat = p_mat.copy()
    estimator.fit(graph)
    estimator.p_mat_ = p_mat  # hack just for testing likelihood

    if is_symmetric(graph):
        inds = np.triu_indices_from(graph, k=1)
    else:
        xu, yu = np.triu_indices_from(graph, k=1)
        xl, yl = np.tril_indices_from(graph, k=-1)
        x = np.concatenate((xl, xu))
        y = np.concatenate((yl, yu))
        inds = (x, y)

    p_rav = p_mat[inds]
    g_rav = graph[inds]

    lik = np.zeros(g_rav.shape)
    c = 1 / p_mat.size
    for i, (g, p) in enumerate(zip(g_rav, p_rav)):
        if p < c:
            p = c
        if p > 1 - c:
            p = 1 - c
        if g == 1:
            lik[i] = p
        else:
            lik[i] = 1 - p

    # lik = np.reshape(lik_rav, p_mat.shape)
    lik[lik < 1e-10] = 1
    lik = np.log(lik)
    assert_allclose(lik, estimator.score_samples(graph))
    assert np.sum(lik) == estimator.score(graph) 
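
The index bookkeeping in the asymmetric branch above, which also appears in Example 13, combines np.triu_indices_from(k=1) with np.tril_indices_from(k=-1) so that every off-diagonal entry is selected exactly once. A small sketch (the array g is illustrative):

import numpy as np

g = np.arange(9).reshape(3, 3)

xu, yu = np.triu_indices_from(g, k=1)    # strict upper triangle
xl, yl = np.tril_indices_from(g, k=-1)   # strict lower triangle
inds = (np.concatenate((xl, xu)), np.concatenate((yl, yu)))

# All entries except the diagonal, each exactly once.
assert len(inds[0]) == g.size - g.shape[0]
print(g[inds])
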
Example 11
Project: Carnets   Author: holzschu   File: test_quantity_non_ufuncs.py    License: BSD 3-Clause "New" or "Revised" License
def test_tril_indices_from(self):
        self.check(np.tril_indices_from) 
Example 12
Project: feets   Author: quatrope   File: ext_slotted_a_length.py    License: MIT License
def slotted_autocorrelation(
        self, data, time, T, K, second_round=False, K1=100
    ):

        slots, i = np.zeros((K, 1)), 1

        # make time start from 0
        time = time - np.min(time)

        # subtract mean from mag values
        m = np.mean(data)
        data = data - m

        prod = np.zeros((K, 1))
        pairs = np.subtract.outer(time, time)
        pairs[np.tril_indices_from(pairs)] = 10000000

        ks = np.int64(np.floor(np.abs(pairs) / T + 0.5))

        # We calculate the slotted autocorrelation for k=0 separately
        idx = np.where(ks == 0)
        prod[0] = (sum(data ** 2) + sum(data[idx[0]] * data[idx[1]])) / (
            len(idx[0]) + len(data)
        )
        slots[0] = 0

        # We calculate it for the rest of the ks
        if second_round is False:
            for k in np.arange(1, K):
                idx = np.where(ks == k)
                if len(idx[0]) != 0:
                    prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
                    slots[i] = k
                    i = i + 1
                else:
                    prod[k] = np.infty
        else:
            for k in np.arange(K1, K):
                idx = np.where(ks == k)
                if len(idx[0]) != 0:
                    prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
                    slots[i - 1] = k
                    i = i + 1
                else:
                    prod[k] = np.infty
            np.trim_zeros(prod, trim="b")

        slots = np.trim_zeros(slots, trim="b")
        return prod / prod[0], np.int64(slots).flatten() 
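
The assignment pairs[np.tril_indices_from(pairs)] = 10000000 above (and in Example 14, which is nearly identical) masks the lower triangle of the pairwise time-difference matrix with a large sentinel, so that each pair of observations contributes a single lag. A minimal sketch of just that masking step (the time values are made up):

import numpy as np

time = np.array([0.0, 1.0, 2.5, 4.0])

# pairs[i, j] = time[i] - time[j]; the lower triangle (j <= i) repeats the
# upper one up to sign, so it is overwritten with a large sentinel value.
pairs = np.subtract.outer(time, time)
pairs[np.tril_indices_from(pairs)] = 10000000

print(np.abs(pairs))   # only the strict upper triangle keeps real lags
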
Example 13
Project: graspy   Author: neurodata   File: base.py    License: Apache License 2.0
def score_samples(self, graph, clip=None):
        """
        Compute the weighted log probabilities for each potential edge.

        Note that this implicitly assumes the input graph is indexed like the 
        fit model.

        Parameters
        ----------
        graph : np.ndarray
            Input graph. Must be the same shape as the model's ``p_mat_`` attribute.

        clip : scalar or None, optional (default=None)
            Value at which to clip the probability matrix: entries less than
            ``clip`` or greater than ``1 - clip`` are set to ``clip`` or
            ``1 - clip``, respectively. If None, values are not clipped in the
            likelihood calculation, which may result in poorly behaved
            likelihoods depending on the model.
        Returns
        -------
        sample_scores : np.ndarray (size of ``graph``)
            log-likelihood per potential edge in the graph
        """
        check_is_fitted(self, "p_mat_")
        # P.ravel() <dot> graph * (1 - P.ravel()) <dot> (1 - graph)
        graph = import_graph(graph)
        if not is_unweighted(graph):
            raise ValueError("Model only implemented for unweighted graphs")
        p_mat = self.p_mat_.copy()

        if np.shape(p_mat) != np.shape(graph):
            raise ValueError("Input graph size must be the same size as P matrix")

        inds = None
        if not self.directed and self.loops:
            inds = np.triu_indices_from(graph)  # ignore lower half of graph, symmetric
        elif not self.directed and not self.loops:
            inds = np.triu_indices_from(graph, k=1)  # ignore the diagonal
        elif self.directed and not self.loops:
            xu, yu = np.triu_indices_from(graph, k=1)
            xl, yl = np.tril_indices_from(graph, k=-1)
            x = np.concatenate((xl, xu))
            y = np.concatenate((yl, yu))
            inds = (x, y)
        if inds is not None:
            p_mat = p_mat[inds]
            graph = graph[inds]

        # clip the probabilities that are degenerate
        if clip is not None:
            p_mat[p_mat < clip] = clip
            p_mat[p_mat > 1 - clip] = 1 - clip

        # TODO: use nonzero inds here will be faster
        successes = np.multiply(p_mat, graph)
        failures = np.multiply((1 - p_mat), (1 - graph))
        likelihood = successes + failures
        return np.log(likelihood) 
Example 14
Project: FATS   Author: isadoranun   File: FeatureFunctionLib.py    License: MIT License
def slotted_autocorrelation(self, data, time, T, K,
                                second_round=False, K1=100):

        slots = np.zeros((K, 1))
        i = 1

        # make time start from 0
        time = time - np.min(time)

        # subtract mean from mag values
        m = np.mean(data)
        data = data - m

        prod = np.zeros((K, 1))
        pairs = np.subtract.outer(time, time)
        pairs[np.tril_indices_from(pairs)] = 10000000

        ks = np.int64(np.floor(np.abs(pairs) / T + 0.5))

        # We calculate the slotted autocorrelation for k=0 separately
        idx = np.where(ks == 0)
        prod[0] = ((sum(data ** 2) + sum(data[idx[0]] *
                   data[idx[1]])) / (len(idx[0]) + len(data)))
        slots[0] = 0

        # We calculate it for the rest of the ks
        if second_round is False:
            for k in np.arange(1, K):
                idx = np.where(ks == k)
                if len(idx[0]) != 0:
                    prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
                    slots[i] = k
                    i = i + 1
                else:
                    prod[k] = np.infty
        else:
            for k in np.arange(K1, K):
                idx = np.where(ks == k)
                if len(idx[0]) != 0:
                    prod[k] = sum(data[idx[0]] * data[idx[1]]) / (len(idx[0]))
                    slots[i - 1] = k
                    i = i + 1
                else:
                    prod[k] = np.infty
            np.trim_zeros(prod, trim='b')

        slots = np.trim_zeros(slots, trim='b')
        return prod / prod[0], np.int64(slots).flatten()