Python numpy.diag() Examples

The following are 30 code examples of numpy.diag(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
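Before the project examples, a quick reminder of the two behaviors of np.diag: given a 1-D array it builds a square matrix with that array on the main diagonal, and given a 2-D array it extracts a diagonal as a 1-D array (the optional k selects an off-diagonal):

import numpy as np

v = np.array([1, 2, 3])
M = np.diag(v)           # 3x3 matrix with v on the main diagonal
print(M)
# [[1 0 0]
#  [0 2 0]
#  [0 0 3]]
print(np.diag(M))        # extracts the diagonal: [1 2 3]
print(np.diag(M, k=1))   # first superdiagonal: [0 0]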
Example #1
Source File: data_helper.py    From LanczosNetwork with MIT License 7 votes
def normalize_adj(A, is_sym=True, exponent=0.5):
  """
    Normalize adjacency matrix

    is_sym=True: D^{-1/2} A D^{-1/2}
    is_sym=False: D^{-1} A
  """
  rowsum = np.array(A.sum(1))

  if is_sym:
    r_inv = np.power(rowsum, -exponent).flatten()
  else:
    r_inv = np.power(rowsum, -1.0).flatten()

  r_inv[np.isinf(r_inv)] = 0.

  if sp.isspmatrix(A):
    r_mat_inv = sp.diags(r_inv.squeeze())
  else:
    r_mat_inv = np.diag(r_inv)

  if is_sym:
    return r_mat_inv.dot(A).dot(r_mat_inv)
  else:
    return r_mat_inv.dot(A) 
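A short usage sketch of normalize_adj above (assuming numpy is imported as np and scipy.sparse as sp, as in the original module): with is_sym=True, each entry A[i, j] is scaled by 1/sqrt(deg(i) * deg(j)).

import numpy as np
import scipy.sparse as sp

A = np.array([[0., 1., 1.],
              [1., 0., 0.],
              [1., 0., 0.]])
A_norm = normalize_adj(A, is_sym=True)
# Degrees are [2, 1, 1], so entry (0, 1) becomes 1 / (sqrt(2) * sqrt(1))
print(np.allclose(A_norm[0, 1], 1 / np.sqrt(2)))  # True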
Example #2
Source File: test_masks.py    From mmdetection with Apache License 2.0 7 votes
def test_bitmap_mask_resize():
    # resize with empty bitmap masks
    raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
    bitmap_masks = BitmapMasks(raw_masks, 28, 28)
    resized_masks = bitmap_masks.resize((56, 72))
    assert len(resized_masks) == 0
    assert resized_masks.height == 56
    assert resized_masks.width == 72

    # resize with bitmap masks containing 1 instance
    raw_masks = np.diag(np.ones(4, dtype=np.uint8))[np.newaxis, ...]
    bitmap_masks = BitmapMasks(raw_masks, 4, 4)
    resized_masks = bitmap_masks.resize((8, 8))
    assert len(resized_masks) == 1
    assert resized_masks.height == 8
    assert resized_masks.width == 8
    truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
                       [0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
                       [0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0],
                       [0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1]]])
    assert (resized_masks.masks == truth).all() 
Example #3
Source File: zca.py    From zca with GNU General Public License v3.0 7 votes
def fit(self, X, y=None):
        """Compute the mean, whitening and dewhitening matrices.

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to compute the mean, whitening and dewhitening
            matrices.
        """
        X = check_array(X, accept_sparse=None, copy=self.copy,
                        ensure_2d=True)
        X = as_float_array(X, copy=self.copy)
        self.mean_ = X.mean(axis=0)
        X_ = X - self.mean_
        cov = np.dot(X_.T, X_) / (X_.shape[0]-1)
        U, S, _ = linalg.svd(cov)
        s = np.sqrt(S.clip(self.regularization))
        s_inv = np.diag(1./s)
        s = np.diag(s)
        self.whiten_ = np.dot(np.dot(U, s_inv), U.T)
        self.dewhiten_ = np.dot(np.dot(U, s), U.T)
        return self 
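The two np.diag calls build the diagonal scaling matrices S^{-1/2} and S^{1/2} from the singular values, giving whiten_ = U S^{-1/2} U.T (ZCA) and its inverse dewhiten_. A self-contained sketch of the same linear algebra (not the zca package API, just the construction) verifying that whitened data has identity covariance:

import numpy as np
from numpy import linalg

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 4))
X_ = X - X.mean(axis=0)
cov = X_.T @ X_ / (X_.shape[0] - 1)
U, S, _ = linalg.svd(cov)
s = np.sqrt(S.clip(1e-8))            # regularization floor, as in fit()
whiten = U @ np.diag(1. / s) @ U.T   # ZCA whitening matrix
Xw = X_ @ whiten.T
print(np.allclose(np.cov(Xw.T), np.eye(4)))  # True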
Example #4
Source File: SpectralClustering.py    From sparse-subspace-clustering-python with MIT License 6 votes
def SpectralClustering(CKSym, n):
    # This is a direct port of the JHU vision lab code. Could probably use sklearn SpectralClustering.
    CKSym = CKSym.astype(float)
    N, _ = CKSym.shape
    MAXiter = 1000  # Maximum number of iterations for KMeans
    REPlic = 20  # Number of replications for KMeans

    DN = np.diag(np.divide(1, np.sqrt(np.sum(CKSym, axis=0) + np.finfo(float).eps)))
    LapN = identity(N).toarray().astype(float) - np.matmul(np.matmul(DN, CKSym), DN)
    _, _, vN = np.linalg.svd(LapN)
    vN = vN.T
    kerN = vN[:, N - n:N]
    normN = np.sqrt(np.sum(np.square(kerN), axis=1))
    kerNS = np.divide(kerN, normN.reshape(len(normN), 1) + np.finfo(float).eps)
    # NOTE: n_jobs was removed from KMeans in scikit-learn 1.0; drop it on newer versions
    km = KMeans(n_clusters=n, n_init=REPlic, max_iter=MAXiter, n_jobs=-1).fit(kerNS)
    return km.labels_ 
Example #5
Source File: dynamic.py    From StructEngPy with MIT License 6 votes
def spectrum_analysis(model,n,spec):
    """
    spectrum analysis

    params:
        n: number of modes to use\n
        spec: a tuple of arrays (periods, acceleration responses)
    """
    freq,mode=eigen_mode(model,n)
    M_=np.dot(mode.T,model.M)
    M_=np.dot(M_,mode)
    K_=np.dot(mode.T,model.K)
    K_=np.dot(K_,mode)
    C_=np.dot(mode.T,model.C)
    C_=np.dot(C_,mode)
    d_=[]
    # ndarrays have no .diag() method; use np.diag to read the modal diagonals
    for (m_, k_, c_) in zip(np.diag(M_), np.diag(K_), np.diag(C_)):
        sdof=SDOFSystem(m_,k_)
        T=sdof.omega_d()
        d_.append(np.interp(T,spec[0],spec[1]*m_))
    d=np.dot(d_,mode)
    #CQC
    return d 
Example #6
Source File: eom_kccsd_ghf.py    From pyscf with Apache License 2.0 6 votes
def get_init_guess(self, kshift, nroots=1, koopmans=False, diag=None, **kwargs):
        """Initial guess vectors of R coefficients"""
        size = self.vector_size(kshift)
        dtype = getattr(diag, 'dtype', complex)  # np.complex was removed from NumPy; the builtin is equivalent
        nroots = min(nroots, size)
        guess = []
        # TODO do Koopmans later
        if koopmans:
            raise NotImplementedError
        else:
            idx = diag.argsort()[:nroots]
            for i in idx:
                g = np.zeros(int(size), dtype=dtype)
                g[i] = 1.0
                # TODO do mask_frozen later
                guess.append(g)
        return guess 
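Stripped of the pyscf context, the pattern is simple: argsort the diagonal preconditioner and seed each root with a unit vector at one of the lowest-diagonal positions. A toy illustration:

import numpy as np

diag = np.array([0.9, -0.3, 1.7, 0.1, 0.4])
nroots = 2
guess = []
for i in diag.argsort()[:nroots]:   # indices of the two smallest entries
    g = np.zeros(diag.size, dtype=complex)
    g[i] = 1.0
    guess.append(g)
print([int(np.argmax(g.real)) for g in guess])  # [1, 3]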
Example #7
Source File: test_uks.py    From pyscf with Apache License 2.0 6 votes
def test_pp_UKS(self):
        cell = pbcgto.Cell()

        cell.unit = 'A'
        cell.atom = '''
            Si    2.715348700    2.715348700    0.000000000;
            Si    2.715348700    0.000000000    2.715348700;
        '''
        cell.basis = 'gth-szv'
        cell.pseudo = 'gth-pade'

        Lx = Ly = Lz = 5.430697500
        cell.a = np.diag([Lx,Ly,Lz])
        cell.mesh = np.array([17]*3)

        cell.verbose = 5
        cell.output = '/dev/null'
        cell.build()

        mf = pbcdft.UKS(cell)
        mf.xc = 'blyp'
        self.assertAlmostEqual(mf.scf(), -7.6058004283213396, 8)

        mf.xc = 'lda,vwn'
        self.assertAlmostEqual(mf.scf(), -7.6162130840535092, 8) 
Example #8
Source File: kalman_filter.py    From kalman_filter_multi_object_tracking with MIT License 6 votes
def __init__(self):
        """Initialize variable used by Kalman Filter class
        Args:
            None
        Return:
            None
        """
        self.dt = 0.005  # delta time

        self.A = np.array([[1, 0], [0, 1]])  # matrix in observation equations
        self.u = np.zeros((2, 1))  # previous state vector

        # (x,y) tracking object center
        self.b = np.array([[0], [255]])  # vector of observations

        self.P = np.diag((3.0, 3.0))  # covariance matrix
        self.F = np.array([[1.0, self.dt], [0.0, 1.0]])  # state transition mat

        self.Q = np.eye(self.u.shape[0])  # process noise matrix
        self.R = np.eye(self.b.shape[0])  # observation noise matrix
        self.lastResult = np.array([[0], [255]]) 
Example #9
Source File: KernelRidgeRegression.py    From fuku-ml with MIT License 6 votes
def train(self):

        if (self.status != 'init'):
            print("Please load train data and init W first.")
            return self.W

        self.status = 'train'

        original_X = self.train_X[:, 1:]
        K = utility.Kernel.kernel_matrix(self, original_X)
        I = np.diag(np.ones(self.data_num))

        inverse_part = np.linalg.inv(self.lambda_p * I + K)
        self.beta = np.dot(inverse_part, self.train_Y)

        return self.W 
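The closed form here is standard kernel ridge regression, beta = (lambda I + K)^{-1} y, with np.diag(np.ones(n)) serving as the identity. A self-contained sketch with an RBF kernel (plain numpy, independent of the fuku-ml helpers; the gamma and lambda values are arbitrary):

import numpy as np

def rbf_kernel(X, Z, gamma=1.0):
    # Pairwise squared distances, then exponentiate
    d2 = ((X[:, None, :] - Z[None, :, :]) ** 2).sum(-1)
    return np.exp(-gamma * d2)

rng = np.random.default_rng(0)
X = rng.uniform(-3, 3, size=(40, 1))
y = np.sin(X[:, 0]) + 0.1 * rng.normal(size=40)

lam = 0.1
K = rbf_kernel(X, X)
I = np.diag(np.ones(len(X)))
beta = np.linalg.solve(lam * I + K, y)   # (lambda*I + K)^{-1} y
y_hat = K @ beta                         # fitted values at the training points
print(float(np.abs(y_hat - y).mean()))   # small training residual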
Example #10
Source File: eom_kccsd_ghf.py    From pyscf with Apache License 2.0 6 votes
def get_init_guess(self, kshift, nroots=1, koopmans=False, diag=None):
        size = self.vector_size()
        dtype = getattr(diag, 'dtype', complex)  # np.complex was removed from NumPy; the builtin is equivalent
        nroots = min(nroots, size)
        guess = []
        if koopmans:
            for n in self.nonzero_vpadding[kshift][:nroots]:
                g = np.zeros(int(size), dtype=dtype)
                g[n] = 1.0
                g = self.mask_frozen(g, kshift, const=0.0)
                guess.append(g)
        else:
            idx = diag.argsort()[:nroots]
            for i in idx:
                g = np.zeros(int(size), dtype=dtype)
                g[i] = 1.0
                g = self.mask_frozen(g, kshift, const=0.0)
                guess.append(g)
        return guess 
Example #11
Source File: point_cloud.py    From FRIDA with MIT License 6 votes
def classical_mds(self, D):
        ''' 
        Classical multidimensional scaling

        Parameters
        ----------
        D : square 2D ndarray
            Euclidean Distance Matrix (matrix containing squared distances between points)
        '''

        # Apply MDS algorithm for denoising
        n = D.shape[0]
        J = np.eye(n) - np.ones((n,n))/float(n)
        G = -0.5*np.dot(J, np.dot(D, J))

        s, U = np.linalg.eig(G)

        # we need to sort the eigenvalues in decreasing order
        s = np.real(s)
        o = np.argsort(s)
        s = s[o[::-1]]
        U = U[:,o[::-1]]

        S = np.diag(s)[0:self.dim,:]
        self.X = np.dot(np.sqrt(S),U.T) 
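A usage sketch under the method's assumptions (D holds squared pairwise distances and self.dim is the embedding dimension): three points are recovered from their distance matrix up to a rigid transform, so the reconstructed distances match exactly.

import numpy as np

pts = np.array([[0., 0.], [1., 0.], [0., 2.]])
D = ((pts[:, None, :] - pts[None, :, :]) ** 2).sum(-1)  # squared EDM

n = D.shape[0]
J = np.eye(n) - np.ones((n, n)) / n
G = -0.5 * J @ D @ J                 # Gram matrix of the centered points
s, U = np.linalg.eigh(G)             # G is symmetric, so eigh suffices
s, U = s[::-1], U[:, ::-1]           # eigenvalues in decreasing order
S = np.diag(s.clip(0))[:2, :]        # dim = 2
X = np.sqrt(S) @ U.T                 # 2 x n coordinate matrix

D_rec = ((X.T[:, None, :] - X.T[None, :, :]) ** 2).sum(-1)
print(np.allclose(D_rec, D))         # True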
Example #12
Source File: ZIFA.py    From ZIFA with MIT License 6 votes
def invertFast(A, d):
	"""
	given an array A of shape d x k and a d x 1 vector d, computes (A A.T + diag(d))^{-1}
	Checked.
	"""
	assert(A.shape[0] == d.shape[0])
	assert(d.shape[1] == 1)

	k = A.shape[1]
	A = np.array(A)
	d_vec = np.array(d)
	d_inv = np.array(1 / d_vec[:, 0])

	inv_d_squared = np.dot(np.atleast_2d(d_inv).T, np.atleast_2d(d_inv))
	M = np.diag(d_inv) - inv_d_squared * np.dot(np.dot(A, np.linalg.inv(np.eye(k, k) + np.dot(A.T, mult_diag(d_inv, A)))), A.T)

	return M 
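This is the Woodbury identity specialized to a low-rank-plus-diagonal matrix; mult_diag is a ZIFA helper equivalent to row scaling, d_inv[:, None] * A. A quick numerical check against the direct inverse:

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(6, 2))
d = rng.uniform(0.5, 2.0, size=(6, 1))

k = A.shape[1]
d_inv = 1 / d[:, 0]
inner = np.linalg.inv(np.eye(k) + A.T @ (d_inv[:, None] * A))
M = np.diag(d_inv) - np.outer(d_inv, d_inv) * (A @ inner @ A.T)

direct = np.linalg.inv(A @ A.T + np.diag(d[:, 0]))
print(np.allclose(M, direct))   # True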
Example #13
Source File: utils.py    From tsn-pytorch with BSD 2-Clause "Simplified" License 6 votes
def class_accuracy(prediction, label):
    cf = confusion_matrix(prediction, label)
    cls_cnt = cf.sum(axis=1)
    cls_hit = np.diag(cf)

    cls_acc = cls_hit / cls_cnt.astype(float)

    mean_cls_acc = cls_acc.mean()

    return cls_acc, mean_cls_acc 
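np.diag here pulls the correctly classified counts off the confusion matrix diagonal. A small worked example (note the snippet passes prediction as the first argument, so with sklearn's confusion_matrix the rows index predicted labels rather than the usual ground-truth convention):

import numpy as np
from sklearn.metrics import confusion_matrix

prediction = [0, 0, 1, 1, 2, 2, 2]
label      = [0, 1, 1, 1, 2, 2, 0]
cf = confusion_matrix(prediction, label)
cls_hit = np.diag(cf)                            # hits per class: [1 2 2]
cls_acc = cls_hit / cf.sum(axis=1).astype(float)
print(cls_acc)          # [0.5  1.   0.667]
print(cls_acc.mean())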
Example #14
Source File: fermionic_simulation.py    From OpenFermion-Cirq with Apache License 2.0 6 votes
def _eigen_components(self):
        components = [(0, np.diag([1, 1, 1, 0, 1, 0, 0, 1]))]
        nontrivial_part = np.zeros((3, 3), dtype=np.complex128)
        for ij, w in zip([(1, 2), (0, 2), (0, 1)], self.weights):
            nontrivial_part[ij] = w
            nontrivial_part[ij[::-1]] = w.conjugate()
        assert np.allclose(nontrivial_part, nontrivial_part.conj().T)
        eig_vals, eig_vecs = np.linalg.eigh(nontrivial_part)
        for eig_val, eig_vec in zip(eig_vals, eig_vecs.T):
            exp_factor = -eig_val / np.pi
            proj = np.zeros((8, 8), dtype=np.complex128)
            nontrivial_indices = np.array([3, 5, 6], dtype=np.intp)
            proj[nontrivial_indices[:, np.newaxis], nontrivial_indices] = (
                np.outer(eig_vec.conjugate(), eig_vec))
            components.append((exp_factor, proj))
        return components 
Example #15
Source File: fermionic_simulation.py    From OpenFermion-Cirq with Apache License 2.0 6 votes
def _eigen_components(self):
        # projector onto subspace spanned by basis states with
        # Hamming weight != 2
        zero_component = np.diag(
            [int(bin(i).count('1') != 2) for i in range(16)])

        state_pairs = (('0110', '1001'), ('0101', '1010'), ('0011', '1100'))

        plus_minus_components = tuple(
            (-abs(weight) * sign / np.pi,
             state_swap_eigen_component(state_pair[0], state_pair[1], sign,
                                        np.angle(weight)))
            for weight, state_pair in zip(self.weights, state_pairs)
            for sign in (-1, 1))

        return ((0, zero_component),) + plus_minus_components 
Example #16
Source File: 22-density.py    From pyscf with Apache License 2.0 6 votes
def tda_denisty_matrix(td, state_id):
    '''
    Taking the TDA amplitudes as the CIS coefficients, calculate the density
    matrix (in AO basis) of the excited states
    '''
    cis_t1 = td.xy[state_id][0]
    dm_oo = -np.einsum('ia,ka->ik', cis_t1.conj(), cis_t1)
    dm_vv = np.einsum('ia,ic->ac', cis_t1, cis_t1.conj())

    # The ground state density matrix in mo_basis
    mf = td._scf
    dm = np.diag(mf.mo_occ)

    # Add CIS contribution
    nocc = cis_t1.shape[0]
    dm[:nocc,:nocc] += dm_oo * 2
    dm[nocc:,nocc:] += dm_vv * 2

    # Transform density matrix to AO basis
    mo = mf.mo_coeff
    dm = np.einsum('pi,ij,qj->pq', mo, dm, mo.conj())
    return dm

# Density matrix for the 3rd excited state 
Example #17
Source File: EasyTL.py    From transferlearning with MIT License 6 votes
def get_ma_dist(A, B):
    Y = A.copy()
    X = B.copy()
    
    S = np.cov(X.T)
    try:
        SI = np.linalg.inv(S)
    except np.linalg.LinAlgError:  # narrow the bare except to the error inv can raise
        print("Singular Matrix: using np.linalg.pinv")
        SI = np.linalg.pinv(S)
    mu = np.mean(X, axis=0)
    
    diff = Y - mu
    Dct_c = np.diag(diff @ SI @ diff.T)
    
    return Dct_c 
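np.diag(diff @ SI @ diff.T) reads the per-row squared Mahalanobis distances off the diagonal, but it materializes the full n x n product first; for many query rows, an einsum that computes only the diagonal is cheaper. A sketch verifying the two agree:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))   # reference sample
Y = rng.normal(size=(5, 3))     # query points

S = np.cov(X.T)
SI = np.linalg.inv(S)
diff = Y - X.mean(axis=0)

full = np.diag(diff @ SI @ diff.T)                  # as in get_ma_dist
rowwise = np.einsum('ij,jk,ik->i', diff, SI, diff)  # diagonal only
print(np.allclose(full, rowwise))                   # True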
Example #18
Source File: gradient_hf_test.py    From OpenFermion-Cirq with Apache License 2.0 6 votes
def test_rhf_func_gen():
    rhf_objective, molecule, parameters, _, _ = make_h6_1_3()
    ansatz, energy, _ = rhf_func_generator(rhf_objective)
    assert np.isclose(molecule.hf_energy, energy(parameters))

    ansatz, energy, _, opdm_func = rhf_func_generator(
        rhf_objective, initial_occ_vec=[1] * 3 + [0] * 3, get_opdm_func=True)
    assert np.isclose(molecule.hf_energy, energy(parameters))
    test_opdm = opdm_func(parameters)
    u = ansatz(parameters)
    initial_opdm = np.diag([1] * 3 + [0] * 3)
    final_opdm = u @ initial_opdm @ u.T
    assert np.allclose(test_opdm, final_opdm)

    result = rhf_minimization(rhf_objective, initial_guess=parameters)
    assert np.allclose(result.x, parameters) 
Example #19
Source File: graphTools.py    From graph-neural-networks with GNU General Public License v3.0 6 votes
def adjacencyToLaplacian(W):
    """
    adjacencyToLaplacian: Computes the Laplacian from an Adjacency matrix

    Input:

        W (np.array): adjacency matrix

    Output:

        L (np.array): Laplacian matrix
    """
    # Check that the matrix is square
    assert W.shape[0] == W.shape[1]
    # Compute the degree vector
    d = np.sum(W, axis = 1)
    # And build the degree matrix
    D = np.diag(d)
    # Return the Laplacian
    return D - W 
Example #20
Source File: graphTools.py    From graph-neural-networks with GNU General Public License v3.0 6 votes
def normalizeAdjacency(W):
    """
    NormalizeAdjacency: Computes the degree-normalized adjacency matrix

    Input:

        W (np.array): adjacency matrix

    Output:

        A (np.array): degree-normalized adjacency matrix
    """
    # Check that the matrix is square
    assert W.shape[0] == W.shape[1]
    # Compute the degree vector
    d = np.sum(W, axis = 1)
    # Invert the square root of the degree
    d = 1/np.sqrt(d)
    # And build the square root inverse degree matrix
    D = np.diag(d)
    # Return the Normalized Adjacency
    return D @ W @ D 
Example #21
Source File: graphTools.py    From graph-neural-networks with GNU General Public License v3.0 6 votes
def normalizeLaplacian(L):
    """
    NormalizeLaplacian: Computes the degree-normalized Laplacian matrix

    Input:

        L (np.array): Laplacian matrix

    Output:

        normL (np.array): degree-normalized Laplacian matrix
    """
    # Check that the matrix is square
    assert L.shape[0] == L.shape[1]
    # Compute the degree vector (diagonal elements of L)
    d = np.diag(L)
    # Invert the square root of the degree
    d = 1/np.sqrt(d)
    # And build the square root inverse degree matrix
    D = np.diag(d)
    # Return the Normalized Laplacian
    return D @ L @ D 
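The three graphTools helpers above compose: for a graph with no isolated nodes, the normalized Laplacian is the identity minus the normalized adjacency, normL = I - D^{-1/2} W D^{-1/2}. A check on a small undirected graph:

import numpy as np

W = np.array([[0., 1., 1., 0.],
              [1., 0., 1., 0.],
              [1., 1., 0., 1.],
              [0., 0., 1., 0.]])
L = adjacencyToLaplacian(W)
A = normalizeAdjacency(W)
normL = normalizeLaplacian(L)
print(np.allclose(L.sum(axis=1), 0))        # Laplacian rows sum to zero
print(np.allclose(normL, np.eye(4) - A))    # True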
Example #22
Source File: spectral_graph_partition.py    From LanczosNetwork with MIT License 6 votes
def get_L_cluster_cut(L, node_label):
  adj = L - np.diag(np.diag(L))
  adj[adj != 0] = 1.0
  num_nodes = adj.shape[0]
  idx_row, idx_col = np.meshgrid(range(num_nodes), range(num_nodes))
  idx_row, idx_col = idx_row.flatten().astype(
      np.int64), idx_col.flatten().astype(np.int64)
  mask = (node_label[idx_row] == node_label[idx_col]).reshape(
      num_nodes, num_nodes).astype(float)  # np.float was removed from NumPy; use the builtin

  adj_cluster = adj * mask
  adj_cut = adj - adj_cluster
  L_cut = get_laplacian(adj_cut, graph_laplacian_type='L4')
  L_cluster = get_laplacian(adj_cluster, graph_laplacian_type='L4')

  return L_cluster, L_cut 
Example #23
Source File: metrics.py    From overhaul-distillation with MIT License 5 votes
def Pixel_Accuracy_Class(self):
        Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
        Acc = np.nanmean(Acc)
        return Acc 
Example #24
Source File: kalman_filter.py    From deep_sort with GNU General Public License v3.0 5 votes
def initiate(self, measurement):
        """Create track from unassociated measurement.

        Parameters
        ----------
        measurement : ndarray
            Bounding box coordinates (x, y, a, h) with center position (x, y),
            aspect ratio a, and height h.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector (8 dimensional) and covariance matrix (8x8
            dimensional) of the new track. Unobserved velocities are initialized
            to 0 mean.

        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]

        std = [
            2 * self._std_weight_position * measurement[3],
            2 * self._std_weight_position * measurement[3],
            1e-2,
            2 * self._std_weight_position * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            1e-5,
            10 * self._std_weight_velocity * measurement[3]]
        covariance = np.diag(np.square(std))
        return mean, covariance 
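np.diag(np.square(std)) is the idiom for building a diagonal covariance matrix from a vector of standard deviations. A reduced illustration with hypothetical numbers (deep_sort sets _std_weight_position to 1/20; only the four position terms are shown here):

import numpy as np

height = 80.0                         # stands in for measurement[3]
std_weight_position = 1. / 20
std = [2 * std_weight_position * height,
       2 * std_weight_position * height,
       1e-2,
       2 * std_weight_position * height]
covariance = np.diag(np.square(std))
print(covariance.shape)     # (4, 4)
print(np.diag(covariance))  # variances: [6.4e+01 6.4e+01 1.0e-04 6.4e+01]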
Example #25
Source File: kalman_filter.py    From deep_sort with GNU General Public License v3.0 5 votes
def predict(self, mean, covariance):
        """Run Kalman filter prediction step.

        Parameters
        ----------
        mean : ndarray
            The 8 dimensional mean vector of the object state at the previous
            time step.
        covariance : ndarray
            The 8x8 dimensional covariance matrix of the object state at the
            previous time step.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.

        """
        std_pos = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-2,
            self._std_weight_position * mean[3]]
        std_vel = [
            self._std_weight_velocity * mean[3],
            self._std_weight_velocity * mean[3],
            1e-5,
            self._std_weight_velocity * mean[3]]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))

        mean = np.dot(self._motion_mat, mean)
        covariance = np.linalg.multi_dot((
            self._motion_mat, covariance, self._motion_mat.T)) + motion_cov

        return mean, covariance 
Example #26
Source File: metrics.py    From overhaul-distillation with MIT License 5 votes
def Frequency_Weighted_Intersection_over_Union(self):
        freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
        iu = np.diag(self.confusion_matrix) / (
                    np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
                    np.diag(self.confusion_matrix))

        FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
        return FWIoU 
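A worked example on a hypothetical 3-class confusion matrix, showing how the diagonal (per-class true positives) enters the IoU denominator:

import numpy as np

cf = np.array([[50,  2,  3],
               [ 4, 40,  6],
               [ 1,  2, 30]])
tp = np.diag(cf)
iu = tp / (cf.sum(axis=1) + cf.sum(axis=0) - tp)   # per-class IoU
freq = cf.sum(axis=1) / cf.sum()                   # class frequencies
fwiou = (freq[freq > 0] * iu[freq > 0]).sum()
print(iu)       # [0.833 0.741 0.714]
print(fwiou)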
Example #27
Source File: metrics.py    From overhaul-distillation with MIT License 5 votes
def Pixel_Accuracy(self):
        Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()
        return Acc 
Example #28
Source File: 020-dmrg_casscf_nevpt2_for_FeS.py    From pyscf with Apache License 2.0 5 votes
def psort(ova, fav, coeff):
    # pT is the density matrix, fav the Fock matrix; pT and s12 come from the enclosing scope
    # OCC-SORT
    pTnew = 2.0*reduce(numpy.dot,(coeff.T,s12,pT,s12,coeff))
    nocc  = numpy.diag(pTnew)
    index = numpy.argsort(-nocc)
    ncoeff = coeff[:,index]
    nocc   = nocc[index]
    enorb = numpy.diag(reduce(numpy.dot,(ncoeff.T,ova,fav,ova,ncoeff)))
    return ncoeff, nocc, enorb

# E-SORT 
Example #29
Source File: performance.py    From pymoo with Apache License 2.0 5 votes
def gram_schmidt(X, row_vecs=True, norm=True):
    if not row_vecs:
        X = X.T
    Y = X[0:1, :].copy()
    for i in range(1, X.shape[0]):
        proj = np.diag((X[i, :].dot(Y.T) / np.linalg.norm(Y, axis=1) ** 2).flat).dot(Y)
        Y = np.vstack((Y, X[i, :] - proj.sum(0)))
    if norm:
        Y = np.diag(1 / np.linalg.norm(Y, axis=1)).dot(Y)
    if row_vecs:
        return Y
    else:
        return Y.T 
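Here np.diag turns the vector of projection coefficients into a scaling matrix so every accumulated row of Y is weighted in one shot. A quick usage check: after orthonormalization, the Gram matrix of the rows is the identity.

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(3, 5))   # three (almost surely independent) row vectors
Y = gram_schmidt(X)
print(np.allclose(Y @ Y.T, np.eye(3)))   # True: rows are orthonormal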
Example #30
Source File: kalman_filter.py    From deep_sort with GNU General Public License v3.0 5 votes
def project(self, mean, covariance):
        """Project state distribution to measurement space.

        Parameters
        ----------
        mean : ndarray
            The state's mean vector (8 dimensional array).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).

        Returns
        -------
        (ndarray, ndarray)
            Returns the projected mean and covariance matrix of the given state
            estimate.

        """
        std = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-1,
            self._std_weight_position * mean[3]]
        innovation_cov = np.diag(np.square(std))

        mean = np.dot(self._update_mat, mean)
        covariance = np.linalg.multi_dot((
            self._update_mat, covariance, self._update_mat.T))
        return mean, covariance + innovation_cov