Python scipy.sparse.lil_matrix() Examples

The following are 28 code examples of scipy.sparse.lil_matrix(), drawn from open-source projects. The original project and source file are noted above each example. You may also want to check out all available functions/classes of the module scipy.sparse.
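Before the project examples, here is a minimal generic sketch of the typical lil_matrix workflow (not taken from any project below): build the matrix incrementally in LIL format, which supports cheap element and row assignment, then convert to CSR/CSC for arithmetic.

import numpy as np
from scipy.sparse import lil_matrix

# LIL (list-of-lists) is cheap to build incrementally, row by row.
mat = lil_matrix((1000, 1000))
mat[0, :100] = np.random.rand(100)
mat.setdiag(np.random.rand(1000))

# Convert to CSR/CSC before doing arithmetic or solving systems.
csr = mat.tocsr()
y = csr @ np.ones(1000)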
Example #1
Source File: test_basic.py    From D-VAE with MIT License
def random_lil(shape, dtype, nnz):
    rval = sp.lil_matrix(shape, dtype=dtype)
    huge = 2 ** 30
    for k in range(nnz):
        # set non-zeros in random locations (row x, col y);
        # numpy.random.random_integers is deprecated, so use randint
        idx = numpy.random.randint(huge, size=2) % shape
        value = numpy.random.rand()
        # if dtype is an *int* type, a random float in [0, 1) would
        # truncate to zero, so scale it up first
        if "int" in dtype:
            value = int(value * 100)
        # The call to tuple is needed because scipy 0.13.1 does not
        # support an ndarray of length 2 as the index tuple.
        rval[tuple(idx)] = value
    return rval
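A quick way to exercise this helper, assuming the snippet's implied imports (import numpy and import scipy.sparse as sp):

import numpy
import scipy.sparse as sp

m = random_lil((20, 30), "float64", nnz=10)
print(m.nnz)          # at most 10; random index collisions overwrite cells
print(m.tocsr()[:5])  # convert to CSR for efficient row slicing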
Example #2
Source File: Classifier.py    From ConvLab with MIT License
def toSparse(baseX, X, dictionary):
    # convert baseX & X (lists of dictionaries) to a sparse matrix,
    # using dictionary to map feature keys to column indices
    out = lil_matrix((len(X), len(dictionary)))
    for i, (basex, x) in enumerate(zip(baseX, X)):
        for key in basex:
            if key not in dictionary:
                continue
            out[i, dictionary[key]] = basex[key]
        for key in x:
            if key not in dictionary:
                continue
            out[i, dictionary[key]] = x[key]

    out = out.tocsr()
    return out


# classifiers define:
#  train(X, y)
#  predict(X)
#  params()
#  load(params)
#  X is a sparse matrix, y is a vector of class labels (ints)
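For context, a hypothetical call (the feature names and dictionary below are invented for illustration; the dictionary maps feature names to column indices):

vocab = {"hello": 0, "bye": 1, "thanks": 2}
baseX = [{"hello": 1.0}, {"bye": 2.0}]
X = [{"thanks": 0.5}, {"hello": 1.5}]
mat = toSparse(baseX, X, vocab)
print(mat.toarray())  # 2x3 dense view of the returned csr_matrix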
Example #3
Source File: test_label.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_label_binarize_multilabel():
    y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
    classes = [0, 1, 2]
    pos_label = 2
    neg_label = 0
    expected = pos_label * y_ind
    y_sparse = [sparse_matrix(y_ind)
                for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
                                      dok_matrix, lil_matrix]]

    for y in [y_ind] + y_sparse:
        check_binarized_results(y, classes, pos_label, neg_label,
                                expected)

    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True) 
Example #4
Source File: basis.py    From pyGSTi with Apache License 2.0
def get_to_std(self):
        '''
        Retrieve the matrix that transforms a vector from this basis to the
        standard basis of this basis's dimension.

        Returns
        -------
        numpy array or scipy.sparse.lil_matrix
            An array of shape `(dim, size)` where `dim` is the dimension
            of this basis (the length of its vectors) and `size` is the
            size of this basis (its number of vectors).
        '''
        if self.sparse:
            toStd = _sps.lil_matrix((self.dim, self.size), dtype='complex')
        else:
            toStd = _np.zeros((self.dim, self.size), 'complex')

        for i, vel in enumerate(self.vector_elements):
            toStd[:, i] = vel
        return toStd 
Example #5
Source File: basis.py    From pyGSTi with Apache License 2.0
def _lazy_build_vector_elements(self):
        if self.sparse:
            compMxs = []
        else:
            compMxs = _np.zeros((self.size, self.dim), 'complex')

        i, start = 0, 0
        for compbasis in self.component_bases:
            for lbl, vel in zip(compbasis.labels, compbasis.vector_elements):
                assert(_sps.issparse(vel) == self.sparse), "Inconsistent sparsity!"
                if self.sparse:
                    mx = _sps.lil_matrix((self.dim, 1))
                    mx[start:start + compbasis.dim, 0] = vel
                    compMxs.append(mx)
                else:
                    compMxs[i, start:start + compbasis.dim] = vel
                i += 1
            start += compbasis.dim

        assert(i == self.size)
        self._vector_elements = compMxs 
Example #6
Source File: basis.py    From pyGSTi with Apache License 2.0
def get_to_std(self):
        '''
        Retrieve the matrix that transforms a vector from this basis to the
        standard basis of this basis's dimension.

        Returns
        -------
        numpy array or scipy.sparse.lil_matrix
            An array of shape `(dim, size)` where `dim` is the dimension
            of this basis (the length of its vectors) and `size` is the
            size of this basis (its number of vectors).
        '''
        if self.sparse:
            toStd = _sps.lil_matrix((self.dim, self.size), dtype='complex')
        else:
            toStd = _np.zeros((self.dim, self.size), 'complex')

        #use vector elements, which are not just flattened elements
        # (and are computed separately)
        for i, vel in enumerate(self.vector_elements):
            toStd[:, i] = vel
        return toStd 
Example #7
Source File: test_basic.py    From attention-lvcsr with MIT License
def random_lil(shape, dtype, nnz):
    rval = sp.lil_matrix(shape, dtype=dtype)
    huge = 2 ** 30
    for k in range(nnz):
        # set non-zeros in random locations (row x, col y);
        # numpy.random.random_integers is deprecated, so use randint
        idx = numpy.random.randint(huge, size=2) % shape
        value = numpy.random.rand()
        # if dtype is an *int* type, a random float in [0, 1) would
        # truncate to zero, so scale it up first
        if "int" in dtype:
            value = int(value * 100)
        # The call to tuple is needed because scipy 0.13.1 does not
        # support an ndarray of length 2 as the index tuple.
        rval[tuple(idx)] = value
    return rval
Example #8
Source File: other.py    From StageDP with MIT License
def vectorize(features, vocab):
    """ Transform a feature list into a numeric vector
        with a given vocab

    :type features: list
    :param features: feature names to count

    :type vocab: dict
    :param vocab: mapping from feature name to vector index
    """
    vec = lil_matrix((1, len(vocab)))

    for feat in features:
        try:
            fidx = vocab[feat]
            vec[0, fidx] += 1.0
        except KeyError:
            pass
    # Normalization
    vec = normalize(vec)
    return vec
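A hedged usage sketch: normalize is assumed here to be sklearn.preprocessing.normalize (the StageDP module's imports are not shown), and the vocab is invented:

from sklearn.preprocessing import normalize

vocab = {"word:the": 0, "word:cat": 1, "pos:NN": 2}
v = vectorize(["word:the", "pos:NN", "word:the"], vocab)
print(v.toarray())  # L2-normalized counts; unknown features are ignored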
Example #9
Source File: CombineDocsIntoDomains.py    From scattertext with Apache License 2.0
def get_new_term_doc_mat(self, doc_domains, non_text=False):
		'''
		Combines documents together that are in the same domain

		Parameters
		----------
		doc_domains : array-like
			Domain label for each document

		Returns
		-------
		scipy.sparse.csr_matrix

		'''
		assert len(doc_domains) == self.term_doc_matrix.get_num_docs()
		doc_domains = np.asarray(doc_domains)  # the elementwise == below needs an ndarray
		doc_domain_set = set(doc_domains)
		num_terms = self.term_doc_matrix.get_num_metadata() if non_text else self.term_doc_matrix.get_num_terms()
		num_domains = len(doc_domain_set)
		domain_mat = lil_matrix((num_domains, num_terms), dtype=int)
		X = self.term_doc_matrix.get_metadata_doc_mat() if non_text else self.term_doc_matrix.get_term_doc_mat()
		for i, domain in enumerate(doc_domain_set):
			domain_mat[i, :] = X[doc_domains == domain].sum(axis=0)
		return domain_mat.tocsr()
Example #10
Source File: mnist.py    From spektral with MIT License
def _flip_random_edges(A, percent):
    """
    Flips values of A randomly.
    :param A: binary scipy sparse matrix.
    :param percent: percent of the edges to flip.
    :return: binary scipy sparse matrix.
    """
    if not A.shape[0] == A.shape[1]:
        raise ValueError('A must be a square matrix.')
    dtype = A.dtype
    A = sp.lil_matrix(A).astype(bool)  # np.bool was removed in NumPy 1.24; use the builtin
    n_elem = A.shape[0] ** 2
    n_elem_to_flip = round(percent * n_elem)
    unique_idx = np.random.choice(n_elem, replace=False, size=n_elem_to_flip)
    row_idx = unique_idx // A.shape[0]
    col_idx = unique_idx % A.shape[0]
    idxs = np.stack((row_idx, col_idx)).T
    for i in idxs:
        i = tuple(i)
        A[i] = np.logical_not(A[i])
    A = A.tocsr().astype(dtype)
    A.eliminate_zeros()
    return A 
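A standalone check of the flip count (sp and np are the usual scipy.sparse/numpy aliases):

import numpy as np
import scipy.sparse as sp

A = sp.random(50, 50, density=0.1, format='csr')
A.data[:] = 1  # make the matrix binary
A2 = _flip_random_edges(A, percent=0.01)
print((A != A2).nnz)  # round(0.01 * 50 * 50) = 25 entries were flipped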
Example #11
Source File: pde.py    From findiff with MIT License
def solve(self):

        shape = self.bcs.shape
        if self._L is None:
            self._L = self.lhs.matrix(shape) # expensive operation, so cache it

        L = sparse.lil_matrix(self._L)
        f = self.rhs.reshape(-1, 1)

        nz = list(self.bcs.row_inds())

        L[nz, :] = self.bcs.lhs[nz, :]
        f[nz] = np.array(self.bcs.rhs[nz].toarray()).reshape(-1, 1)

        L = sparse.csr_matrix(L)
        return spsolve(L, f).reshape(shape) 
Example #12
Source File: pde.py    From findiff with MIT License
def __setitem__(self, key, value):

        lng_inds = self.long_indices[key]

        if isinstance(value, tuple): # Neumann BC
            op, value = value
            # Avoid calling matrix for the whole grid! Optimize later!
            mat = sparse.lil_matrix(op.matrix(self.shape))
            self.lhs[lng_inds, :] = mat[lng_inds, :]
        else: # Dirichlet BC
            self.lhs[lng_inds, lng_inds] = 1

        if isinstance(value, np.ndarray):
            value = value.reshape(-1)[lng_inds]
            for i, v in zip(lng_inds, value):
                self.rhs[i] = v
        else:
            self.rhs[lng_inds] = value 
Example #13
Source File: test_rbm.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_score_samples():
    # Test score_samples (pseudo-likelihood) method.
    # Assert that pseudo-likelihood is computed without clipping.
    # See Fabian's blog, http://bit.ly/1iYefRk
    rng = np.random.RandomState(42)
    X = np.vstack([np.zeros(1000), np.ones(1000)])
    rbm1 = BernoulliRBM(n_components=10, batch_size=2,
                        n_iter=10, random_state=rng)
    rbm1.fit(X)
    assert (rbm1.score_samples(X) < -300).all()

    # Sparse vs. dense should not affect the output. Also test sparse input
    # validation.
    rbm1.random_state = 42
    d_score = rbm1.score_samples(X)
    rbm1.random_state = 42
    s_score = rbm1.score_samples(lil_matrix(X))
    assert_almost_equal(d_score, s_score)

    # Test numerical stability (#2785): would previously generate infinities
    # and crash with an exception.
    with np.errstate(under='ignore'):
        rbm1.score_samples([np.arange(1000) * 100]) 
Example #14
Source File: xc_metrics.py    From pyxclib with MIT License
def _setup_metric(X, true_labels, inv_psp=None, k=5):
    assert compatible_shapes(X, true_labels), \
        "ground truth and prediction matrices must have same shape."
    num_instances, num_labels = true_labels.shape
    indices = _get_topk(X, num_labels, k)
    ps_indices = None
    if inv_psp is not None:
        ps_indices = _get_topk(
            true_labels.dot(
                sp.spdiags(inv_psp, diags=0,
                           m=num_labels, n=num_labels)),
            num_labels, k)
        inv_psp = np.hstack([inv_psp, np.zeros((1))])

    true_labels = sp.hstack([true_labels,
                             sp.lil_matrix((num_instances, 1),
                                           dtype=np.int32)]).tocsr()
    return indices, true_labels, ps_indices, inv_psp 
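The appended zero column is presumably padding: if _get_topk (not shown) emits the sentinel index num_labels for instances with fewer than k predictions, that index then selects an all-zero entry instead of raising. A standalone illustration of the padding trick:

import numpy as np
import scipy.sparse as sp

true_labels = sp.csr_matrix(np.eye(3, dtype=np.int32))
padded = sp.hstack([true_labels,
                    sp.lil_matrix((3, 1), dtype=np.int32)]).tocsr()
print(padded[:, 3].toarray().ravel())  # [0 0 0] -- the sentinel column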
Example #15
Source File: test_mean_shift.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_estimate_bandwidth_with_sparse_matrix():
    # Test estimate_bandwidth with sparse matrix
    X = sparse.lil_matrix((1000, 1000))
    msg = "A sparse matrix was passed, but dense data is required."
    assert_raise_message(TypeError, msg, estimate_bandwidth, X, 200) 
Example #16
Source File: test_sparse_coordinate_descent.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_enet_toy_explicit_sparse_input():
    # Test ElasticNet for various values of alpha and l1_ratio with sparse X
    f = ignore_warnings
    # training samples
    X = sp.lil_matrix((3, 1))
    X[0, 0] = -1
    # X[1, 0] = 0
    X[2, 0] = 1
    Y = [-1, 0, 1]       # just a straight line (the identity function)

    # test samples
    T = sp.lil_matrix((3, 1))
    T[0, 0] = 2
    T[1, 0] = 3
    T[2, 0] = 4

    # this should be the same as lasso
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    f(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163,  1.5245,  2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090,  1.3636,  1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0) 
Example #17
Source File: test_hierarchical.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_scikit_vs_scipy():
    # Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
    n, p, k = 10, 5, 3
    rng = np.random.RandomState(0)

    # Not using a lil_matrix here, just to check that non sparse
    # matrices are well handled
    connectivity = np.ones((n, n))
    for linkage in _TREE_BUILDERS.keys():
        for i in range(5):
            X = .1 * rng.normal(size=(n, p))
            X -= 4. * np.arange(n)[:, np.newaxis]
            X -= X.mean(axis=1)[:, np.newaxis]

            out = hierarchy.linkage(X, method=linkage)

            children_ = out[:, :2].astype(int, copy=False)  # np.int was removed in modern NumPy
            children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)

            # Sort the order of child nodes per row for consistency
            children.sort(axis=1)
            assert_array_equal(children, children_, 'linkage tree differs'
                                                    ' from scipy impl for'
                                                    ' linkage: ' + linkage)

            cut = _hc_cut(k, children, n_leaves)
            cut_ = _hc_cut(k, children_, n_leaves)
            assess_same_labelling(cut, cut_)

    # Test error management in _hc_cut
    assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves) 
Example #18
Source File: test_extmath.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_randomized_svd_sparse_warnings():
    # randomized_svd throws a warning for lil and dok matrix
    rng = np.random.RandomState(42)
    X = make_low_rank_matrix(50, 20, effective_rank=10, random_state=rng)
    n_components = 5
    for cls in (sparse.lil_matrix, sparse.dok_matrix):
        X = cls(X)
        assert_warns_message(
            sparse.SparseEfficiencyWarning,
            "Calculating SVD of a {} is expensive. "
            "csr_matrix is more efficient.".format(cls.__name__),
            randomized_svd, X, n_components, n_iter=1,
            power_iteration_normalizer='none') 
Example #19
Source File: test_ridge.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_sparse_design_with_sample_weights():
    # Sample weights must work with sparse matrices

    n_sampless = [2, 3]
    n_featuress = [3, 2]

    rng = np.random.RandomState(42)

    sparse_matrix_converters = [sp.coo_matrix,
                                sp.csr_matrix,
                                sp.csc_matrix,
                                sp.lil_matrix,
                                sp.dok_matrix
                                ]

    sparse_ridge = Ridge(alpha=1., fit_intercept=False)
    dense_ridge = Ridge(alpha=1., fit_intercept=False)

    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights = rng.randn(n_samples) ** 2 + 1
        for sparse_converter in sparse_matrix_converters:
            X_sparse = sparse_converter(X)
            sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
            dense_ridge.fit(X, y, sample_weight=sample_weights)

            assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
                                      decimal=6) 
Example #20
Source File: nettack.py    From nettack with MIT License
def compute_cooccurrence_constraint(self, nodes):
        """
        Co-occurrence constraint as described in the paper.

        Parameters
        ----------
        nodes: np.array
            Nodes whose features are considered for change

        Returns
        -------
        np.array [len(nodes), D], dtype bool
            Binary matrix of dimension len(nodes) x D. A 1 in entry n,d indicates that
            we are allowed to add feature d to the features of node n.

        """

        words_graph = self.cooc_matrix.copy()
        D = self.X_obs.shape[1]
        words_graph.setdiag(0)
        words_graph = (words_graph > 0)
        word_degrees = np.sum(words_graph, axis=0).A1

        inv_word_degrees = np.reciprocal(word_degrees.astype(float) + 1e-8)

        sd = np.zeros([self.N])
        for n in range(self.N):
            n_idx = self.X_obs[n, :].nonzero()[1]
            sd[n] = np.sum(inv_word_degrees[n_idx.tolist()])

        scores_matrix = sp.lil_matrix((self.N, D))

        for n in nodes:
            common_words = words_graph.multiply(self.X_obs[n])
            idegs = inv_word_degrees[common_words.nonzero()[1]]
            nnz = common_words.nonzero()[0]
            scores = np.array([idegs[nnz == ix].sum() for ix in range(D)])
            scores_matrix[n] = scores
        self.cooc_constraint = sp.csr_matrix(scores_matrix - 0.5 * sd[:, None] > 0) 
Example #21
Source File: test_unsupervised.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_silhouette():
    # Tests the Silhouette Coefficient.
    dataset = datasets.load_iris()
    X_dense = dataset.data
    X_csr = csr_matrix(X_dense)
    X_dok = sp.dok_matrix(X_dense)
    X_lil = sp.lil_matrix(X_dense)
    y = dataset.target

    for X in [X_dense, X_csr, X_dok, X_lil]:
        D = pairwise_distances(X, metric='euclidean')
        # Given that the actual labels are used, we can assume that S would be
        # positive.
        score_precomputed = silhouette_score(D, y, metric='precomputed')
        assert_greater(score_precomputed, 0)
        # Test without calculating D
        score_euclidean = silhouette_score(X, y, metric='euclidean')
        pytest.approx(score_precomputed, score_euclidean)

        if X is X_dense:
            score_dense_without_sampling = score_precomputed
        else:
            pytest.approx(score_euclidean,
                          score_dense_without_sampling)

        # Test with sampling
        score_precomputed = silhouette_score(D, y, metric='precomputed',
                                             sample_size=int(X.shape[0] / 2),
                                             random_state=0)
        score_euclidean = silhouette_score(X, y, metric='euclidean',
                                           sample_size=int(X.shape[0] / 2),
                                           random_state=0)
        assert_greater(score_precomputed, 0)
        assert_greater(score_euclidean, 0)
        pytest.approx(score_euclidean, score_precomputed)

        if X is X_dense:
            score_dense_with_sampling = score_precomputed
        else:
            pytest.approx(score_euclidean, score_dense_with_sampling) 
Example #22
Source File: diff.py    From findiff with MIT License
def matrix(self, shape):
        siz = np.prod(shape)
        mat = sparse.lil_matrix((siz, siz))
        diag = list(range(siz))
        mat[diag, diag] = 1  # identity operator on the flattened grid
        return sparse.csr_matrix(mat)
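Design note: this builds an identity matrix by hand via LIL; scipy.sparse can produce the same result directly, e.g.:

from scipy import sparse
eye = sparse.identity(siz, format='csr')  # equivalent to the explicit construction above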
Example #23
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def __init__(self, n=100, mode='sparse'):
        np.random.seed(0)

        self.n = n

        self.x0 = -np.ones(n)
        self.lb = np.linspace(-2, -1.5, n)
        self.ub = np.linspace(-0.8, 0.0, n)

        self.lb += 0.1 * np.random.randn(n)
        self.ub += 0.1 * np.random.randn(n)

        self.x0 += 0.1 * np.random.randn(n)
        self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)

        if mode == 'sparse':
            self.sparsity = lil_matrix((n, n), dtype=int)
            i = np.arange(n)
            self.sparsity[i, i] = 1
            i = np.arange(1, n)
            self.sparsity[i, i - 1] = 1
            i = np.arange(n - 1)
            self.sparsity[i, i + 1] = 1

            self.jac = self._jac
        elif mode == 'operator':
            self.jac = lambda x: aslinearoperator(self._jac(x))
        elif mode == 'dense':
            self.sparsity = None
            self.jac = lambda x: self._jac(x).toarray()
        else:
            assert_(False) 
Example #24
Source File: nettack.py    From nettack with MIT License
def feature_scores(self):
        """
        Compute feature scores for all possible feature changes.
        """

        if self.cooc_constraint is None:
            self.compute_cooccurrence_constraint(self.influencer_nodes)
        logits = self.compute_logits()
        best_wrong_class = self.strongest_wrong_class(logits)
        gradient = self.gradient_wrt_x(self.label_u) - self.gradient_wrt_x(best_wrong_class)
        surrogate_loss = logits[self.label_u] - logits[best_wrong_class]

        gradients_flipped = (gradient * -1).tolil()
        gradients_flipped[self.X_obs.nonzero()] *= -1

        X_influencers = sp.lil_matrix(self.X_obs.shape)
        X_influencers[self.influencer_nodes] = self.X_obs[self.influencer_nodes]
        gradients_flipped = gradients_flipped.multiply((self.cooc_constraint + X_influencers) > 0)
        nnz_ixs = np.array(gradients_flipped.nonzero()).T

        sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1
        sorted_ixs = nnz_ixs[sorting]
        grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]

        scores = surrogate_loss - grads
        return sorted_ixs[::-1], scores.A1[::-1] 
Example #25
Source File: pde.py    From findiff with MIT License
def __init__(self, shape):
        self.shape = shape
        siz = np.prod(shape)
        self.long_indices = np.array(list(range(siz))).reshape(shape)
        self.lhs = sparse.lil_matrix((siz, siz))
        self.rhs = sparse.lil_matrix((siz, 1)) 
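For intuition, long_indices linearizes grid coordinates into row numbers of the (siz x siz) system; a standalone illustration:

import numpy as np
shape = (3, 4)
long_indices = np.arange(np.prod(shape)).reshape(shape)
print(long_indices[1, 2])  # 6: the linear row for grid point (1, 2)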
Example #26
Source File: basis.py    From pyGSTi with Apache License 2.0
def get_to_element_std(self):
        '''
        Get the matrix that transforms vectors in this basis (with length equal
        to the `dim` of this basis) to vectors in the "element space" - that
        is, vectors in the same standard basis that the *elements* of this basis
        are expressed in.

        Returns
        -------
        numpy array
            An array of shape `(element_dim, size)` where `element_dim` is the
            dimension, i.e. size, of the elements of this basis (e.g. 16 if the
            elements are 4x4 matrices) and `size` is the size of this basis (its
            number of vectors).
        '''
        assert(not self.sparse), "get_to_element_std not implemented for sparse mode"
        expanddim = self.elsize  # == _np.product(self.elshape)
        if self.sparse:
            toSimpleStd = _sps.lil_matrix((expanddim, self.size), dtype='complex')
        else:
            toSimpleStd = _np.zeros((expanddim, self.size), 'complex')

        for i, el in enumerate(self.elements):
            if self.sparse:
                vel = _sps.lil_matrix(el.reshape(-1, 1))  # sparse vector == sparse n x 1 matrix
            else:
                vel = el.flatten()
            toSimpleStd[:, i] = vel
        return toSimpleStd 
Example #27
Source File: m_vxc_lil.py    From pyscf with Apache License 2.0
def vxc_lil(self, **kw):
  """
    Computes the exchange-correlation matrix elements
    Args:
      sv : (System Variables), this must have arrays of coordinates and species, etc
    Returns:
      fxc,vxc,exc
  """
  from pyscf.nao.m_xc_scalar_ni import xc_scalar_ni
  from pyscf.nao.m_ao_matelem import ao_matelem_c
  from scipy.sparse import lil_matrix

  #dm, xc_code, deriv, ao_log=None, dtype=float64, **kvargs

  dm = kw['dm'] if 'dm' in kw else self.make_rdm1()
  kernel = kw['kernel'] if 'kernel' in kw else None
  ao_log = kw['ao_log'] if 'ao_log' in kw else self.ao_log
  xc_code = kw['xc_code'] if 'xc_code' in kw else self.xc_code
  kw.pop('xc_code',None)
  dtype = kw['dtype'] if 'dtype' in kw else self.dtype
  
  aome = ao_matelem_c(self.ao_log.rr, self.ao_log.pp, self, dm)
  me = aome.init_one_set(self.ao_log) if ao_log is None else aome.init_one_set(ao_log)
  atom2s = zeros((self.natm+1), dtype=int64)
  for atom,sp in enumerate(self.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp]
  
  lil = [lil_matrix((atom2s[-1],atom2s[-1]), dtype=dtype) for i in range((self.nspin-1)*2+1)]

  for atom1,[sp1,rv1,s1,f1] in enumerate(zip(self.atom2sp,self.atom2coord,atom2s,atom2s[1:])):
    for atom2,[sp2,rv2,s2,f2] in enumerate(zip(self.atom2sp,self.atom2coord,atom2s,atom2s[1:])):
      blk = xc_scalar_ni(me,sp1,rv1,sp2,rv2,xc_code=xc_code,**kw)
      for i,b in enumerate(blk): lil[i][s1:f1,s2:f2] = b[:,:]

  return lil 
Example #28
Source File: test_least_squares.py    From GraphicDesignPatternByPython with MIT License
def _jac(self, x):
        J = lil_matrix((self.n, self.n))
        i = np.arange(self.n)
        J[i, i] = 3 - 2 * x
        i = np.arange(1, self.n)
        J[i, i - 1] = -1
        i = np.arange(self.n - 1)
        J[i, i + 1] = -2
        return J