Python scipy.linalg.inv() Examples

The following are 30 code examples of scipy.linalg.inv(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.linalg, or try the search function.
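Before the project examples, here is a minimal, self-contained sketch of the basic call (the 2x2 matrix is made up for illustration):

import numpy as np
from scipy.linalg import inv

A = np.array([[3., 1.],
              [2., 4.]])
A_inv = inv(A)                            # matrix inverse of A
print(np.allclose(A @ A_inv, np.eye(2)))  # True: A @ inv(A) == I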
Example #1
Source File: lobpcg.py    From GraphicDesignPatternByPython with MIT License
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = np.dot(blockVectorV.T.conj(), blockVectorBV)
    gramVBV = cholesky(gramVBV)
    gramVBV = inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = np.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = np.dot(blockVectorBV, gramVBV)

    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV 
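The example above inverts the upper-triangular Cholesky factor R of the Gram matrix and right-multiplies by R^{-1}. A minimal sketch of the same trick on a made-up random block, plus a solve_triangular variant that avoids forming the inverse explicitly:

import numpy as np
from scipy.linalg import cholesky, inv, solve_triangular

rng = np.random.default_rng(0)
V = rng.standard_normal((20, 3))       # made-up block of column vectors
gram = V.T @ V                         # Gram matrix (B = identity case)
R = cholesky(gram)                     # upper triangular, gram = R.T @ R
V_orth = V @ inv(R)                    # what _b_orthonormalize does
V_orth2 = solve_triangular(R, V.T, trans='T').T   # same result, no inverse
print(np.allclose(V_orth, V_orth2))               # True
print(np.allclose(V_orth.T @ V_orth, np.eye(3)))  # orthonormal columns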
Example #2
Source File: control_and_filter.py    From QuantEcon.lectures.code with BSD 3-Clause "New" or "Revised" License
def predict(self, a_hist, t):
        """
        This function implements the prediction formula discussed in section 6 (1.59).
        It takes a realization of a^N and the period in which the prediction is formed.

        Output:  E[abar | a_t, a_{t-1}, ..., a_1, a_0]
        """

        N = np.asarray(a_hist).shape[0] - 1
        a_hist = np.asarray(a_hist).reshape(N + 1, 1)
        V = self.construct_V(N + 1)

        aux_matrix = np.zeros((N + 1, N + 1))
        aux_matrix[:(t + 1), :(t + 1)] = np.eye(t + 1)
        L = la.cholesky(V).T
        Ea_hist = la.inv(L) @ aux_matrix @ L @ a_hist

        return Ea_hist 
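The projection trick above (zero out the coordinates after t in the Cholesky-whitened basis, then map back) can be checked on a toy covariance; construct_V is specific to the lecture code, so a made-up AR(1)-style Toeplitz matrix stands in for it here:

import numpy as np
import scipy.linalg as la

N, t = 4, 2
V = la.toeplitz(0.5 ** np.arange(N + 1))      # stand-in for construct_V
a_hist = np.arange(N + 1, dtype=float).reshape(-1, 1)

aux_matrix = np.zeros((N + 1, N + 1))
aux_matrix[:(t + 1), :(t + 1)] = np.eye(t + 1)
L = la.cholesky(V).T                          # lower-triangular factor
Ea_hist = la.inv(L) @ aux_matrix @ L @ a_hist
print(np.allclose(Ea_hist[:t + 1], a_hist[:t + 1]))  # observed part is kept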
Example #3
Source File: lobpcg.py    From lambda-packs with MIT License
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = np.dot(blockVectorV.T, blockVectorBV)
    gramVBV = cholesky(gramVBV)
    gramVBV = inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = np.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = np.dot(blockVectorBV, gramVBV)

    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV 
Example #4
Source File: lobpcg.py    From Computable with MIT License
def b_orthonormalize(B, blockVectorV,
                      blockVectorBV=None, retInvR=False):
    """Internal."""
    import scipy.linalg as sla
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = sp.dot(blockVectorV.T, blockVectorBV)
    gramVBV = sla.cholesky(gramVBV)
    gramVBV = sla.inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = sp.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = sp.dot(blockVectorBV, gramVBV)

    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV 
Example #5
Source File: BezierShapeFunctions.py    From PyFEM with GNU General Public License v3.0
def getElemBezierData( elemCoords , C , order=4 , method="Gauss" , elemType = 'default' ):

  elemData = elemShapeData()
    
  if elemType == 'default':
    elemType = getElemType( elemCoords )
    
  (intCrds,intWghts) = getIntegrationPoints( "Line3" , order , method )
    
  for xi,intWeight in zip( real(intCrds) , intWghts ):    
    try:
      sData = eval( 'getBezier'+elemType+'(xi,C)' )
    except:
      raise NotImplementedError('Unknown type :'+elemType)

    jac = dot ( sData.dhdxi.transpose() , elemCoords )

    if jac.shape[0] == jac.shape[1]:
      sData.dhdx   = (dot ( inv( jac ) , sData.dhdxi.transpose() )).transpose()
   
    sData.weight = calcWeight( jac ) * intWeight

    elemData.sData.append(sData)

  return elemData 
Example #6
Source File: gaussian_contours.py    From QuantEcon.lectures.code with BSD 3-Clause "New" or "Revised" License
def plot4():
    # Density 1
    Z = gen_gaussian_plot_vals(x_hat, Σ)
    cs1 = ax.contour(X, Y, Z, 6, colors="black")
    ax.clabel(cs1, inline=1, fontsize=10)
    # Density 2
    M = Σ * G.T * linalg.inv(G * Σ * G.T + R)
    x_hat_F = x_hat + M * (y - G * x_hat)
    Σ_F = Σ - M * G * Σ
    Z_F = gen_gaussian_plot_vals(x_hat_F, Σ_F)
    cs2 = ax.contour(X, Y, Z_F, 6, colors="black")
    ax.clabel(cs2, inline=1, fontsize=10)
    # Density 3
    new_x_hat = A * x_hat_F
    new_Σ = A * Σ_F * A.T + Q
    new_Z = gen_gaussian_plot_vals(new_x_hat, new_Σ)
    cs3 = ax.contour(X, Y, new_Z, 6, colors="black")
    ax.clabel(cs3, inline=1, fontsize=10)
    ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
    ax.text(float(y[0]), float(y[1]), r"$y$", fontsize=20, color="black")

Example #7
Source File: linalg_covmat.py    From vnpy_crypto with MIT License
def mvn_loglike(x, sigma):
    '''loglike multivariate normal

    assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)

    brute force from formula
    no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
    '''
    #see numpy thread
    #Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
    sigmainv = linalg.inv(sigma)
    logdetsigma = np.log(np.linalg.det(sigma))
    nobs = len(x)

    llf = - np.dot(x, np.dot(sigmainv, x))
    llf -= nobs * np.log(2 * np.pi)
    llf -= logdetsigma
    llf *= 0.5
    return llf 
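As the docstring itself notes, the explicit inverse and determinant can be replaced by a Cholesky factorization (the approach sketched in the 'Sturla' comment). A self-contained equivalent:

import numpy as np
from scipy.linalg import cho_factor, cho_solve

def mvn_loglike_chol(x, sigma):
    '''same value as mvn_loglike above, via cho_factor/cho_solve'''
    c, lower = cho_factor(sigma)
    sqmahal = np.dot(x, cho_solve((c, lower), x))   # x' Sigma^{-1} x
    logdetsigma = 2 * np.sum(np.log(np.diag(c)))    # log |Sigma|
    return -0.5 * (len(x) * np.log(2 * np.pi) + logdetsigma + sqmahal)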
Example #8
Source File: write.py    From mne-bids with BSD 3-Clause "New" or "Revised" License
def _mri_landmarks_to_mri_voxels(mri_landmarks, t1_mgh):
    """Convert landmarks from MRI RAS space to MRI voxel space.

    Parameters
    ----------
    mri_landmarks : array, shape (3, 3)
        The MRI RAS landmark data: rows LPA, NAS, RPA, columns x, y, z.
    t1_mgh : nib.MGHImage
        The image data in MGH format.

    Returns
    -------
    mri_landmarks : array, shape (3, 3)
        The MRI voxel-space landmark data.
    """
    # Get landmarks in voxel space, using the T1 data
    vox2ras_tkr = t1_mgh.header.get_vox2ras_tkr()
    ras2vox_tkr = linalg.inv(vox2ras_tkr)
    mri_landmarks = apply_trans(ras2vox_tkr, mri_landmarks)  # in vox
    return mri_landmarks 
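apply_trans is an MNE helper; under the hood it is the usual homogeneous-coordinate product with the 4x4 affine. A rough sketch with a made-up vox2ras matrix (the real values come from the MGH header):

import numpy as np
from scipy import linalg

vox2ras_tkr = np.array([[-1., 0., 0., 128.],   # made-up affine, for
                        [0., 0., 1., -128.],   # illustration only
                        [0., -1., 0., 128.],
                        [0., 0., 0., 1.]])
ras2vox_tkr = linalg.inv(vox2ras_tkr)

landmarks_ras = np.array([[-71., 0., -30.]])         # e.g. an LPA estimate
hom = np.hstack([landmarks_ras, np.ones((1, 1))])    # homogeneous coords
landmarks_vox = (hom @ ras2vox_tkr.T)[:, :3]         # what apply_trans does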
Example #9
Source File: irf.py    From vnpy_crypto with MIT License
def H(self):
        k = self.neqs
        Lk = tsa.elimination_matrix(k)
        Kkk = tsa.commutation_matrix(k, k)
        Ik = np.eye(k)

        # B = chain_dot(Lk, np.eye(k**2) + commutation_matrix(k, k),
        #               np.kron(self.P, np.eye(k)), Lk.T)

        # return np.dot(Lk.T, L.inv(B))

        B = chain_dot(Lk,
                      np.dot(np.kron(Ik, self.P), Kkk) + np.kron(self.P, Ik),
                      Lk.T)

        return np.dot(Lk.T, L.inv(B)) 
Example #10
Source File: distributions.py    From particles with MIT License
def posterior(self, x, Sigma=None):
        """Posterior for model: X1, ..., Xn ~ N(theta, Sigma).

        Parameters
        ----------
        x: (n, d) ndarray
            data
        Sigma: (d, d) ndarray
            (fixed) covariance matrix in the model
        """
        n = x.shape[0]
        Sigma = np.eye(self.dim) if Sigma is None else Sigma
        Siginv = inv(Sigma)
        Qpost = inv(self.cov) + n * Siginv
        Sigpost = inv(Qpost)
        mupost = (np.matmul(Siginv, self.mean) +
                  np.matmul(Siginv, np.sum(x, axis=0)))
        return MvNormal(loc=mupost, cov=Sigpost)

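The method implements the standard conjugate update for a normal prior with a known observation covariance; the same update written out directly, with made-up numbers:

import numpy as np
from scipy.linalg import inv

rng = np.random.default_rng(0)
d, n = 2, 50
mu0, Sig0 = np.zeros(d), np.eye(d)       # prior N(mu0, Sig0)
Sigma = 0.5 * np.eye(d)                  # known observation covariance
x = rng.normal(loc=1.0, scale=np.sqrt(0.5), size=(n, d))

Qpost = inv(Sig0) + n * inv(Sigma)       # posterior precision
Sigpost = inv(Qpost)
mupost = Sigpost @ (inv(Sig0) @ mu0 + inv(Sigma) @ x.sum(axis=0))
print(mupost)                            # close to the true mean (1, 1)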
Example #11
Source File: test_linsolve.py    From GraphicDesignPatternByPython with MIT License
def test_twodiags(self):
        A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
        b = array([1, 2, 3, 4, 5])

        # condition number of A
        cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)

        for t in ['f','d','F','D']:
            eps = finfo(t).eps  # floating point epsilon
            b = b.astype(t)

            for format in ['csc','csr']:
                Asp = A.astype(t).asformat(format)

                x = spsolve(Asp,b)

                assert_(norm(b - Asp*x) < 10 * cond_A * eps) 
Example #12
Source File: test_gaussian_process.py    From RoBO with BSD 3-Clause "New" or "Revised" License
def test_predict(self):
        X_test = np.random.rand(10, 2)

        m, v = self.model.predict(X_test)

        assert len(m.shape) == 1
        assert m.shape[0] == X_test.shape[0]
        assert len(v.shape) == 1
        assert v.shape[0] == X_test.shape[0]

        m, v = self.model.predict(X_test, full_cov=True)

        assert len(m.shape) == 1
        assert m.shape[0] == X_test.shape[0]
        assert len(v.shape) == 2
        assert v.shape[0] == X_test.shape[0]
        assert v.shape[1] == X_test.shape[0]

        K_zz = self.kernel.get_value(X_test)
        K_zx = self.kernel.get_value(X_test, self.X)
        K_nz = self.kernel.get_value(self.X) + self.model.noise * np.eye(self.X.shape[0])
        inv = spla.inv(K_nz)
        K_zz_x = K_zz - np.dot(K_zx, np.inner(inv, K_zx))
        assert np.mean((K_zz_x - v) ** 2) < 10e-5 
Example #13
Source File: test_linsolve.py    From Computable with MIT License
def test_twodiags(self):
        A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
        b = array([1, 2, 3, 4, 5])

        # condition number of A
        cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)

        for t in ['f','d','F','D']:
            eps = finfo(t).eps  # floating point epsilon
            b = b.astype(t)

            for format in ['csc','csr']:
                Asp = A.astype(t).asformat(format)

                x = spsolve(Asp,b)

                assert_(norm(b - Asp*x) < 10 * cond_A * eps) 
Example #14
Source File: test_lapack.py    From GraphicDesignPatternByPython with MIT License
def test_gh_2691(self):
        # 'lower' argument of dpotrf/dpotri
        for lower in [True, False]:
            for clean in [True, False]:
                np.random.seed(42)
                x = np.random.normal(size=(3, 3))
                a = x.dot(x.T)

                dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))

                c, info = dpotrf(a, lower, clean=clean)
                dpt = dpotri(c, lower)[0]

                if lower:
                    assert_allclose(np.tril(dpt), np.tril(inv(a)))
                else:
                    assert_allclose(np.triu(dpt), np.triu(inv(a))) 
Example #15
Source File: test_gaussian_mixture.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_property():
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=7)
    n_components = rand_data.n_components

    for covar_type in COVARIANCE_TYPE:
        X = rand_data.X[covar_type]
        gmm = GaussianMixture(n_components=n_components,
                              covariance_type=covar_type, random_state=rng,
                              n_init=5)
        gmm.fit(X)
        if covar_type == 'full':
            for prec, covar in zip(gmm.precisions_, gmm.covariances_):

                assert_array_almost_equal(linalg.inv(prec), covar)
        elif covar_type == 'tied':
            assert_array_almost_equal(linalg.inv(gmm.precisions_),
                                      gmm.covariances_)
        else:
            assert_array_almost_equal(gmm.precisions_, 1. / gmm.covariances_) 
Example #16
Source File: tracker.py    From Person-Detection-and-Tracking with MIT License
def kalman_filter(self, z): 
        '''
        Implement the Kalman Filter, including the predict and the update stages,
        with the measurement z
        '''
        x = self.x_state
        # Predict
        x = dot(self.F, x)
        self.P = dot(self.F, self.P).dot(self.F.T) + self.Q

        #Update
        S = dot(self.H, self.P).dot(self.H.T) + self.R
        K = dot(self.P, self.H.T).dot(inv(S)) # Kalman gain
        y = z - dot(self.H, x) # residual
        x += dot(K, y)
        self.P = self.P - dot(K, self.H).dot(self.P)
        self.x_state = x.astype(int) # convert to integer coordinates 
                                     #(pixel values) 
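A rough, stand-alone run of the same predict/update step, with a made-up 1-D constant-velocity model (state = [position, velocity]):

import numpy as np
from numpy import dot, eye
from scipy.linalg import inv

dt = 1.0
F = np.array([[1., dt], [0., 1.]])   # state transition
H = np.array([[1., 0.]])             # only position is measured
Q = 0.01 * eye(2)                    # process noise covariance
R = np.array([[1.]])                 # measurement noise covariance

x = np.array([[0.], [1.]])           # initial state
P = eye(2)                           # initial state covariance
z = np.array([[1.2]])                # one position measurement

x = dot(F, x)                        # predict
P = dot(F, P).dot(F.T) + Q
S = dot(H, P).dot(H.T) + R           # update
K = dot(P, H.T).dot(inv(S))          # Kalman gain
x = x + dot(K, z - dot(H, x))
P = P - dot(K, H).dot(P)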
Example #17
Source File: methods.py    From GSTools with GNU Lesser General Public License v3.0
def _get_krige_mat(self):
        """Calculate the inverse matrix of the kriging equation."""
        size = self.cond_no + int(self.unbiased) + self.drift_no
        res = np.empty((size, size), dtype=np.double)
        res[: self.cond_no, : self.cond_no] = self.model.vario_nugget(
            self._get_dists(self._krige_pos)
        )
        if self.unbiased:
            res[self.cond_no, : self.cond_no] = 1
            res[: self.cond_no, self.cond_no] = 1
        for i, f in enumerate(self.drift_functions):
            drift_tmp = f(*self.cond_pos)
            res[-self.drift_no + i, : self.cond_no] = drift_tmp
            res[: self.cond_no, -self.drift_no + i] = drift_tmp
        res[self.cond_no :, self.cond_no :] = 0
        return inv(res) 
Example #18
Source File: test_graph_lasso.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_graph_lasso_cv(random_state=1):
    # Sample data from a sparse multivariate normal
    dim = 5
    n_samples = 6
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.96,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    # Capture stdout, to smoke test the verbose mode
    orig_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        # We need verbose very high so that Parallel prints on stdout
        GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
    finally:
        sys.stdout = orig_stdout

    # Smoke test with specified alphas
    GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X) 
Example #19
Source File: try_mlecov.py    From vnpy_crypto with MIT License
def mvn_loglike(x, sigma):
    '''loglike multivariate normal

    assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)

    brute force from formula
    no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
    '''
    #see numpy thread
    #Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
    sigmainv = linalg.inv(sigma)
    logdetsigma = np.log(np.linalg.det(sigma))
    nobs = len(x)

    llf = - np.dot(x, np.dot(sigmainv, x))
    llf -= nobs * np.log(2 * np.pi)
    llf -= logdetsigma
    llf *= 0.5
    return llf 
Example #20
Source File: tStudentProcess.py    From pyGPGO with MIT License
def fit(self, X, y):
        """
        Fits a t-Student Process regressor

        Parameters
        ----------
        X: np.ndarray, shape=(nsamples, nfeatures)
            Training instances to fit the GP.
        y: np.ndarray, shape=(nsamples,)
            Corresponding continuous target values to `X`.

        """
        self.X = X
        self.y = y
        self.n1 = X.shape[0]

        if self.optimize:
            self.optHyp(param_key=self.covfunc.parameters, param_bounds=self.covfunc.bounds)

        self.K11 = self.covfunc.K(self.X, self.X)
        self.beta1 = np.dot(np.dot(self.y.T, inv(self.K11)), self.y)
        self.logp = logpdf(self.y, self.nu, mu=np.zeros(self.n1), Sigma=self.K11) 
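The quadratic form y^T K11^{-1} y above can also be computed with a linear solve, which avoids forming the inverse. A small check on a made-up covariance matrix:

import numpy as np
from scipy.linalg import inv, solve

rng = np.random.default_rng(0)
K11 = np.eye(4) + 0.1 * np.ones((4, 4))   # stand-in for covfunc.K(X, X)
y = rng.standard_normal(4)

beta_inv = np.dot(np.dot(y.T, inv(K11)), y)            # as in fit() above
beta_solve = np.dot(y, solve(K11, y, assume_a='pos'))  # no explicit inverse
print(np.allclose(beta_inv, beta_solve))               # True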
Example #21
Source File: test_0020_scipy_gmres.py    From pyscf with Apache License 2.0
def test_scipy_gmres_linop_parameter(self):
    """ This is a test on gmres method with a parameter-dependent linear operator """
    for omega in np.linspace(-10.0, 10.0, 10):
      for eps in np.linspace(-10.0, 10.0, 10):
        
        linop_param = linalg.aslinearoperator(vext2veff_c(omega, eps, n))
        
        Aparam = np.zeros((n,n), np.complex64)
        for i in range(n):
          uv = np.zeros(n, np.complex64); uv[i] = 1.0
          Aparam[:,i] = linop_param.matvec(uv)
        x_ref = np.dot(inv(Aparam), b)
    
        x_itr,info = linalg.lgmres(linop_param, b)
        derr = abs(x_ref-x_itr).sum()/x_ref.size
        self.assertLess(derr, 1e-6) 
Example #22
Source File: test_graphical_lasso.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_graphical_lasso_cv(random_state=1):
    # Sample data from a sparse multivariate normal
    dim = 5
    n_samples = 6
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.96,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    # Capture stdout, to smoke test the verbose mode
    orig_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        # We need verbose very high so that Parallel prints on stdout
        GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
    finally:
        sys.stdout = orig_stdout

    # Smoke test with specified alphas
    GraphicalLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X) 
Example #23
Source File: test_graph_lasso.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_graph_lasso(random_state=0):
    # Sample data from a sparse multivariate normal
    dim = 20
    n_samples = 100
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.95,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    emp_cov = empirical_covariance(X)

    for alpha in (0., .1, .25):
        covs = dict()
        icovs = dict()
        for method in ('cd', 'lars'):
            cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
                                             return_costs=True)
            covs[method] = cov_
            icovs[method] = icov_
            costs, dual_gap = np.array(costs).T
            # Check that the costs always decrease (doesn't hold if alpha == 0)
            if not alpha == 0:
                assert_array_less(np.diff(costs), 0)
        # Check that the 2 approaches give similar results
        assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
        assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)

    # Smoke test the estimator
    model = GraphLasso(alpha=.25).fit(X)
    model.score(X)
    assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
    assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)

    # For a centered matrix, assume_centered could be chosen True or False
    # Check that this returns indeed the same result for centered data
    Z = X - X.mean(0)
    precs = list()
    for assume_centered in (False, True):
        prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
        precs.append(prec_)
    assert_array_almost_equal(precs[0], precs[1]) 
Example #24
Source File: test_gaussian_mixture.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_suffstat_sk_tied():
    # use equation Nk * Sk / N = S_tied
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 500, 2, 2

    resp = rng.rand(n_samples, n_components)
    resp = resp / resp.sum(axis=1)[:, np.newaxis]
    X = rng.rand(n_samples, n_features)
    nk = resp.sum(axis=0)
    xk = np.dot(resp.T, X) / nk[:, np.newaxis]

    covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
    covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full,
                              0) / n_samples

    covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0)

    ecov = EmpiricalCovariance()
    ecov.covariance_ = covars_pred_full
    assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0)
    assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0)

    # check the precision computation
    precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, 'tied')
    precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T)
    precs_est = linalg.inv(covars_pred_tied)
    assert_array_almost_equal(precs_est, precs_pred) 
Example #25
Source File: base.py    From Mastering-Elasticsearch-7.0 with MIT License
def get_precision(self):
        """Compute data precision matrix with the generative model.

        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.

        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision 
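The lemma the docstring refers to is the Woodbury identity: for a low-rank-plus-noise covariance W W^T + s I, the precision needs only a k x k inverse rather than an n x n one. A quick numerical check with made-up shapes:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
n_features, n_components, s = 6, 2, 0.5
W = rng.standard_normal((n_features, n_components))
cov = W @ W.T + s * np.eye(n_features)

small = s * np.eye(n_components) + W.T @ W        # only k x k is inverted
prec = np.eye(n_features) / s - W @ linalg.inv(small) @ W.T / s
print(np.allclose(prec, linalg.inv(cov)))         # True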
Example #26
Source File: factor_analysis.py    From Mastering-Elasticsearch-7.0 with MIT License
def get_precision(self):
        """Compute data precision matrix with the FactorAnalysis model.

        Returns
        -------
        precision : array, shape (n_features, n_features)
            Estimated precision of data.
        """
        check_is_fitted(self, 'components_')

        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components == 0:
            return np.diag(1. / self.noise_variance_)
        if self.n_components == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        precision = np.dot(components_ / self.noise_variance_, components_.T)
        precision.flat[::len(precision) + 1] += 1.
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= self.noise_variance_[:, np.newaxis]
        precision /= -self.noise_variance_[np.newaxis, :]
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision 
Example #27
Source File: test_xrft.py    From xrft with MIT License
def numpy_detrend(da):
    """
    Detrend a 2D field by subtracting out the least-square plane fit.

    Parameters
    ----------
    da : `numpy.array`
        The data to be detrended

    Returns
    -------
    da : `numpy.array`
        The detrended input data
    """
    N = da.shape

    G = np.ones((N[0]*N[1],3))
    for i in range(N[0]):
        G[N[1]*i:N[1]*i+N[1], 1] = i+1
        G[N[1]*i:N[1]*i+N[1], 2] = np.arange(1, N[1]+1)

    d_obs = np.reshape(da.copy(), (N[0]*N[1],1))
    m_est = np.dot(np.dot(spl.inv(np.dot(G.T, G)), G.T), d_obs)
    d_est = np.dot(G, m_est)

    lin_trend = np.reshape(d_est, N)

    return da - lin_trend 
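Forming inv(G.T @ G) is the textbook normal-equations route; np.linalg.lstsq gives the same plane fit without the explicit inverse. A sketch under the same conventions as numpy_detrend above:

import numpy as np

def numpy_detrend_lstsq(da):
    '''same least-square plane fit, via np.linalg.lstsq'''
    ny, nx = da.shape
    jj, ii = np.meshgrid(np.arange(1, nx + 1), np.arange(1, ny + 1))
    G = np.column_stack([np.ones(da.size), ii.ravel(), jj.ravel()])
    m_est, *_ = np.linalg.lstsq(G, da.ravel(), rcond=None)
    return da - (G @ m_est).reshape(da.shape)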
Example #28
Source File: test_gaussian_mixture.py    From Mastering-Elasticsearch-7.0 with MIT License
def __init__(self, rng, n_samples=500, n_components=2, n_features=2,
                 scale=50):
        self.n_samples = n_samples
        self.n_components = n_components
        self.n_features = n_features

        self.weights = rng.rand(n_components)
        self.weights = self.weights / self.weights.sum()
        self.means = rng.rand(n_components, n_features) * scale
        self.covariances = {
            'spherical': .5 + rng.rand(n_components),
            'diag': (.5 + rng.rand(n_components, n_features)) ** 2,
            'tied': make_spd_matrix(n_features, random_state=rng),
            'full': np.array([
                make_spd_matrix(n_features, random_state=rng) * .5
                for _ in range(n_components)])}
        self.precisions = {
            'spherical': 1. / self.covariances['spherical'],
            'diag': 1. / self.covariances['diag'],
            'tied': linalg.inv(self.covariances['tied']),
            'full': np.array([linalg.inv(covariance)
                             for covariance in self.covariances['full']])}

        self.X = dict(zip(COVARIANCE_TYPE, [generate_data(
            n_samples, n_features, self.weights, self.means, self.covariances,
            covar_type) for covar_type in COVARIANCE_TYPE]))
        self.Y = np.hstack([np.full(int(np.round(w * n_samples)), k,
                                    dtype=np.int)
                            for k, w in enumerate(self.weights)]) 
Example #29
Source File: factor_analysis.py    From Mastering-Elasticsearch-7.0 with MIT License
def transform(self, X):
        """Apply dimensionality reduction to X using the model.

        Compute the expected mean of the latent variables.
        See Barber, 21.2.33 (or Bishop, 12.66).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
            The latent variables of X.
        """
        check_is_fitted(self, 'components_')

        X = check_array(X)
        Ih = np.eye(len(self.components_))

        X_transformed = X - self.mean_

        Wpsi = self.components_ / self.noise_variance_
        cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
        tmp = np.dot(X_transformed, Wpsi.T)
        X_transformed = np.dot(tmp, cov_z)

        return X_transformed 
Example #30
Source File: lpc.py    From Speech_Signal_Processing_and_Classification with MIT License
def LPC_autocorrelation(order=13):
	#Takes in a signal and determines lpc coefficients (through the autocorrelation method) and gain for an inverse filter.
	folder = raw_input('Give the name of the folder that you want to read data: ')
	amount = raw_input('Give the number of samples in the specific folder: ')
	for x in range(1,int(amount)+1):
		wav = '/'+folder+'/'+str(x)+'.wav'
		print wav
		#pre-emphasis filter
		emphasizedSignal,signal,rate = preEmphasis(wav)
		length = emphasizedSignal.size
		#prepare the signal for autocorrelation, fast Fourier transform method
		autocorrelation = sig.fftconvolve(emphasizedSignal, emphasizedSignal[::-1])
		#autocorrelation method
		autocorr_coefficients = autocorrelation[autocorrelation.size/2:][:(order + 1)]
		
		
		#using levinson_durbin method instead of solving toeplitz
		lpc_coefficients_levinson = levinson_durbin(autocorr_coefficients,13)
		print 'With levinson_durbin instead of toeplitz ' , lpc_coefficients_levinson.numerator
		
		
		#The Toeplitz matrix has constant diagonals, with c as its first column and r as its first row.
		#If r is not given, r == conjugate(c) is assumed.
		R = linalg.toeplitz(autocorr_coefficients[:order])
		#solve the Yule-Walker normal equations R a = r for the LPC coefficients
		lpc_coefficients = np.dot(linalg.inv(R), autocorr_coefficients[1:order+1])
		lpc_features=[]
		for x in lpc_coefficients:
			lpc_features.append(x)
		print lpc_features
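Since R is Toeplitz, scipy.linalg.solve_toeplitz (Levinson recursion) solves the same system without a dense inverse; a sketch assuming the autocorr_coefficients array computed in the loop above:

from scipy.linalg import solve_toeplitz

lpc_coefficients = solve_toeplitz(autocorr_coefficients[:order],
                                  autocorr_coefficients[1:order+1])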