Python scipy.linalg.LinAlgError() Examples

The following are 30 code examples of scipy.linalg.LinAlgError(), collected from open-source projects; the source file and project are noted with each example. You may also want to check out all available functions/classes of the module scipy.linalg, or try the search function.
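Before the project examples, here is a minimal sketch of how the exception arises in the first place (assuming only numpy and scipy): scipy.linalg raises LinAlgError whenever a factorization or solve cannot complete, for instance a Cholesky decomposition of a matrix that is not positive-definite, or a direct solve against a singular matrix.

import numpy as np
from scipy import linalg

# Cholesky requires a symmetric positive-definite matrix; this one has a
# negative eigenvalue, so the factorization fails.
try:
    linalg.cholesky(np.array([[1.0, 2.0], [2.0, 1.0]]), lower=True)
except linalg.LinAlgError as err:
    print("cholesky failed:", err)

# solve() raises the same exception for an exactly singular system.
try:
    linalg.solve(np.zeros((2, 2)), np.ones(2))
except linalg.LinAlgError as err:
    print("solve failed:", err)

Most of the examples below follow the same pattern: attempt the fast, numerically strict routine, catch LinAlgError, and either regularize the input or fall back to a slower but more forgiving method.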
Example #1
Source File: utils.py    From vnpy_crypto with MIT License
def log_multivariate_normal_density(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices. """
    if hasattr(linalg, 'solve_triangular'):
        # only in scipy since 0.9
        solve_triangular = linalg.solve_triangular
    else:
        # slower, but works
        solve_triangular = linalg.solve
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
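A possible invocation of the helper above, assuming import numpy as np and from scipy import linalg as in the source module; the singular zero covariance of the second component deliberately exercises the LinAlgError branch and its min_covar regularization:

X = np.random.randn(100, 3)
means = [np.zeros(3), np.ones(3)]
covars = [np.eye(3), np.zeros((3, 3))]  # second covariance is singular
log_prob = log_multivariate_normal_density(X, means, covars)
print(log_prob.shape)  # (100, 2)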
Example #2
Source File: gmm.py    From bhmm with GNU Lesser General Public License v3.0
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices.
    """
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #3
Source File: mvnormal.py    From cgpm with Apache License 2.0
def _covariance_factor(Sigma):
  # Assume it is positive-definite and try Cholesky decomposition.
  try:
    return Covariance_Cholesky(Sigma)
  except la.LinAlgError:
    pass

  # XXX In the past, we tried LU decomposition if, owing to
  # floating-point rounding error, the matrix is merely nonsingular,
  # not positive-definite.  However, empirically, that seemed to lead
  # to bad numerical results.  Until we have better numerical analysis
  # of the situation, let's try just falling back to least-squares
  # pseudoinverse approximation.

  # Otherwise, fall back to whatever heuristics scipy can manage.
  return Covariance_Loser(Sigma) 
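The Covariance_Cholesky and Covariance_Loser classes live elsewhere in mvnormal.py; as a hypothetical sketch (the function name and return convention below are illustrative, not from cgpm), the same try-Cholesky-then-pseudoinverse pattern can be written as:

import numpy as np
from scipy import linalg as la

def factor_or_pinv(Sigma):
    # Try the fast positive-definite path first.
    try:
        return 'cholesky', la.cho_factor(Sigma, lower=True)
    except la.LinAlgError:
        # Least-squares pseudoinverse as the forgiving fallback.
        return 'pinv', np.linalg.pinv(Sigma)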
Example #4
Source File: nonlin.py    From Splunking-Crime with GNU Affero General Public License v3.0
def solve(self, f, tol=0):
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in xrange(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        for m in xrange(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx 
Example #5
Source File: stats.py    From hmmlearn with BSD 3-Clause "New" or "Revised" License
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            try:
                cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                          lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")

        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #6
Source File: gmm.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            try:
                cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                          lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")

        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #7
Source File: model.py    From PyDeepGP with BSD 3-Clause "New" or "Revised" License
def optimize(self, optimizer=None, start=None, **kwargs):
        self._IN_OPTIMIZATION_ = True
        if self.mpi_comm is None:
            super(DeepGP, self).optimize(optimizer,start,**kwargs)
        elif self.mpi_comm.rank==self.mpi_root:
            super(DeepGP, self).optimize(optimizer,start,**kwargs)
            self.mpi_comm.Bcast(np.int32(-1),root=self.mpi_root)
        elif self.mpi_comm.rank!=self.mpi_root:
            x = self.optimizer_array.copy()
            flag = np.empty(1,dtype=np.int32)
            while True:
                self.mpi_comm.Bcast(flag,root=self.mpi_root)
                if flag==1:
                    try:
                        self.optimizer_array = x
                    except (LinAlgError, ZeroDivisionError, ValueError):
                        pass
                elif flag==-1:
                    break
                else:
                    self._IN_OPTIMIZATION_ = False
                    raise Exception("Unrecognizable flag for synchronization!")
        self._IN_OPTIMIZATION_ = False 
Example #8
Source File: clustered_kde.py    From kombine with MIT License
def __init__(self, data):
        self._data = np.atleast_2d(data)

        self._mean = np.mean(data, axis=0)
        self._cov = None

        if self.data.shape[0] > 1:
            try:
                self._cov = np.cov(data.T)

                # Try factoring now to see if regularization is needed
                la.cho_factor(self._cov)

            except la.LinAlgError:
                self._cov = oas_cov(data)

        self._set_bandwidth()

        # store transformation variables for drawing random values
        alphas = np.std(data, axis=0)
        ms = 1./alphas
        m_i, m_j = np.meshgrid(ms, ms)
        ms = m_i * m_j
        self._draw_cov = ms * self._kernel_cov
        self._scale_fac = alphas 
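The up-front cho_factor call is why the fallback exists: with fewer samples than dimensions, or a degenerate coordinate, the sample covariance is rank-deficient, cho_factor raises LinAlgError, and the shrinkage estimate oas_cov is used instead. A small deterministic sketch, assuming numpy and scipy.linalg as la:

import numpy as np
from scipy import linalg as la

data = np.array([[0., 1.], [0., 2.], [0., 3.]])  # first coordinate is constant
cov = np.cov(data.T)                             # singular: var of column 0 is 0
try:
    la.cho_factor(cov)
except la.LinAlgError:
    print("sample covariance is not positive-definite; use a shrinkage estimate")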
Example #9
Source File: cpu.py    From ggmm with MIT License
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    '''Log probability for full covariance matrices.
    '''
    n_samples, n_dimensions = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dimensions),
                                      lower=True)
        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dimensions * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #10
Source File: nonlin.py    From GraphicDesignPatternByPython with MIT License
def solve(self, f, tol=0):
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in xrange(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        for m in xrange(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx 
Example #11
Source File: nonlin.py    From Computable with MIT License
def solve(self, f, tol=0):
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in xrange(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        for m in xrange(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx 
Example #12
Source File: nonlin.py    From lambda-packs with MIT License
def solve(self, f, tol=0):
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in xrange(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        for m in xrange(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx 
Example #13
Source File: gmm.py    From twitter-stock-recommendation with MIT License
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability for full covariance matrices."""
    n_samples, n_dim = X.shape
    nmix = len(means)
    log_prob = np.empty((n_samples, nmix))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            cv_chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
            try:
                cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                          lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")

        cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
        cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + cv_log_det)

    return log_prob 
Example #14
Source File: smc_samplers.py    From particles with MIT License
def __init__(self, x, scale=None, adaptive=True):
            if adaptive:
                if scale is None:
                    scale = 2.38 / np.sqrt(x.shape[1])
                cov = np.cov(x.T)
                try:
                    self.L = scale * cholesky(cov, lower=True)
                except LinAlgError:
                    self.L = scale * np.diag(np.sqrt(np.diag(cov)))
                    print('Warning: could not compute Cholesky decomposition, using diag matrix instead')
            else:
                if scale is None:
                    scale = 1.
                self.L = scale * np.eye(x.shape[1]) 
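The Cholesky factor L is kept rather than the covariance itself because a proposal increment can then be drawn as L @ z with z standard normal; 2.38/sqrt(d) is the classic random-walk Metropolis scaling rule. A self-contained sketch of the same construction and its use, assuming numpy and scipy:

import numpy as np
from scipy.linalg import cholesky, LinAlgError

x = np.random.randn(500, 4)             # past chain positions
scale = 2.38 / np.sqrt(x.shape[1])      # optimal-scaling rule used above
try:
    L = scale * cholesky(np.cov(x.T), lower=True)
except LinAlgError:
    L = scale * np.diag(np.sqrt(np.diag(np.cov(x.T))))
step = L @ np.random.randn(x.shape[1])  # increment ~ N(0, scale**2 * cov)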
Example #15
Source File: mcmc.py    From particles with MIT License
def update(self, v):
        """Adds point v"""
        self.t += 1
        g = self.gamma()
        self.mu = (1. - g) * self.mu + g * v
        mv = v - self.mu
        self.Sigma = ((1. - g) * self.Sigma
                      + g * np.dot(mv[:, np.newaxis], mv[np.newaxis, :]))
        try:
            self.L = cholesky(self.Sigma, lower=True)
        except LinAlgError:
            self.L = self.L0 
Example #16
Source File: test_decomp_update.py    From GraphicDesignPatternByPython with MIT License
def test_economic_1_col_bad_update(self):
        # When the column to be added lies in the span of Q, the update is
        # not meaningful.  This is detected, and a LinAlgError is issued.
        q = np.eye(5, 3, dtype=self.dtype)
        r = np.eye(3, dtype=self.dtype)
        u = np.array([1, 0, 0, 0, 0], self.dtype)
        assert_raises(linalg.LinAlgError, qr_insert, q, r, u, 0, 'col')

    # for column adds to economic matrices there are three cases to test
    # eco + pcol --> eco
    # eco + pcol --> sqr
    # eco + pcol --> fat 
Example #17
Source File: utils.py    From choix with MIT License
def statdist(generator):
    """Compute the stationary distribution of a Markov chain.

    Parameters
    ----------
    generator : array_like
        Infinitesimal generator matrix of the Markov chain.

    Returns
    -------
    dist : numpy.ndarray
        The unnormalized stationary distribution of the Markov chain.

    Raises
    ------
    ValueError
        If the Markov chain does not have a unique stationary distribution.
    """
    generator = np.asarray(generator)
    n = generator.shape[0]
    with warnings.catch_warnings():
        # The LU decomposition raises a warning when the generator matrix is
        # singular (which it, by construction, is!).
        warnings.filterwarnings("ignore")
        lu, piv = spl.lu_factor(generator.T, check_finite=False)
    # The last row contains 0's only.
    left = lu[:-1,:-1]
    right = -lu[:-1,-1]
    # Solves system `left * x = right`. Assumes that `left` is
    # upper-triangular (ignores lower triangle).
    try:
        res = spl.solve_triangular(left, right, check_finite=False)
    except:
        # Ideally we would like to catch `spl.LinAlgError` only, but there seems
        # to be a bug in scipy, in the code that raises the LinAlgError (!!).
        raise ValueError(
                "stationary distribution could not be computed. "
                "Perhaps the Markov chain has more than one absorbing class?")
    res = np.append(res, 1.0)
    return (n / res.sum()) * res 
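A possible usage sketch, assuming the function above is in scope together with its imports (numpy as np, warnings, and scipy.linalg as spl). For a two-state chain that leaves state 0 at rate 1 and state 1 at rate 2, the stationary distribution is (2/3, 1/3):

import numpy as np

generator = np.array([[-1.0, 1.0],
                      [2.0, -2.0]])
dist = statdist(generator)
print(dist / dist.sum())  # [0.6667 0.3333]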
Example #18
Source File: sgcrf.py    From sgcrfpy with MIT License
def check_pd(A, lower=True):
    """
    Checks if A is PD.
    If so returns True and Cholesky decomposition,
    otherwise returns False and None
    """
    try:
        return True, np.tril(cho_factor(A, lower=lower)[0])
    except LinAlgError as err:
        if 'not positive definite' in str(err):
            return False, None
        raise  # re-raise unexpected errors rather than returning None implicitly
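Usage is straightforward; note that cho_factor leaves the entries of the unfactored triangle untouched, which is why the result is passed through np.tril. A quick sketch, assuming import numpy as np and from scipy.linalg import cho_factor, LinAlgError as in the source module:

ok, L = check_pd(np.array([[2.0, 1.0], [1.0, 2.0]]))
print(ok)                                            # True; L is lower-triangular
print(check_pd(np.array([[1.0, 2.0], [2.0, 1.0]])))  # (False, None)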
Example #19
Source File: decompose_kavan.py    From skinning_decomposition_kavan with MIT License
def optimize_transformations(verts0, W, C):
    X = form_X_matrix(verts0, W)
    try:
        Tr = linalg.solve(np.dot(X, X.T), np.dot(C, X.T).T).T
    except linalg.LinAlgError:
        print("singular matrix in optimize_transformations, using slower pseudo-inverse")
        Tr = np.dot(
            np.linalg.pinv(np.dot(X, X.T)),
            np.dot(C, X.T).T).T
    return Tr
    #T = np.dot(B, Tr)
    #T2 = linalg.solve(np.dot(X, X.T), np.dot(A, X.T).T).T 
Example #20
Source File: infinitephasescreen.py    From aotools with GNU Lesser General Public License v3.0
def makeAMatrix(self):
        """
        Calculates the "A" matrix, that uses the existing data to find a new 
        component of the new phase vector.
        """
        # The Cholesky solve can fail if the covariance matrix is not positive-definite
        try:
            cf = linalg.cho_factor(self.cov_mat_zz)
            inv_cov_zz = linalg.cho_solve(cf, numpy.identity(self.cov_mat_zz.shape[0]))
        except linalg.LinAlgError:
            raise linalg.LinAlgError("Could not invert covariance matrix to form A and B matrices. Try with a larger pixel scale")

        self.A_mat = self.cov_mat_xz.dot(inv_cov_zz) 
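cho_solve(cf, I) is a standard way to materialize the inverse of a positive-definite matrix from its Cholesky factorization; a small check of the identity it relies on, assuming numpy and scipy.linalg:

import numpy
from scipy import linalg

cov = numpy.array([[4.0, 1.0], [1.0, 3.0]])
cf = linalg.cho_factor(cov)
inv_cov = linalg.cho_solve(cf, numpy.identity(cov.shape[0]))
print(numpy.allclose(inv_cov, numpy.linalg.inv(cov)))  # True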
Example #21
Source File: _eigenpro.py    From scikit-learn-extra with BSD 3-Clause "New" or "Revised" License
def _nystrom_svd(self, X, n_components):
        """Compute the top eigensystem of a kernel
        operator using Nystrom method

        Parameters
        ----------
        X : {float, array}, shape = [n_subsamples, n_features]
            Subsample feature matrix.

        n_components : int
            Number of top eigencomponents to be restored.

        Returns
        -------
        E : {float, array}, shape = [n_components]
            Top eigenvalues.

        Lambda : {float, array}, shape = [n_subsamples, n_components]
            Top eigenvectors of a subsample kernel matrix (which can be
            directly used to approximate the eigenfunctions of the kernel
            operator).
        """
        m, _ = X.shape
        K = self._kernel(X, X)

        W = K / m
        try:
            E, Lambda = eigh(W, eigvals=(m - n_components, m - 1))
        except LinAlgError:
            # Use float64 when eigh fails due to precision
            W = np.float64(W)
            E, Lambda = eigh(W, eigvals=(m - n_components, m - 1))
            E, Lambda = np.float32(E), np.float32(Lambda)
        # Flip so eigenvalues are in descending order.
        E = np.maximum(np.float32(1e-7), np.flipud(E))
        Lambda = np.fliplr(Lambda)[:, :n_components] / np.sqrt(
            m, dtype="float32"
        )

        return E, Lambda 
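The float32-to-float64 retry is a precision fallback: LAPACK's symmetric eigensolvers can fail to converge in single precision, and eigh then raises LinAlgError. Note also that the eigvals=(lo, hi) keyword used here is the older index-subset interface; in SciPy 1.5+ the same request is spelled subset_by_index, as in this sketch (assuming numpy and a recent scipy):

import numpy as np
from scipy.linalg import eigh

W = np.random.randn(6, 6)
W = (W + W.T) / 2               # eigh expects a symmetric matrix
k = 3
E, Lambda = eigh(W, subset_by_index=[W.shape[0] - k, W.shape[0] - 1])
print(E.shape, Lambda.shape)    # (3,) (6, 3)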
Example #22
Source File: bayesian.py    From nni with MIT License
def incremental_fit(self, train_x, train_y):
        """ Incrementally fit the regressor. """
        if not self._first_fitted:
            raise ValueError(
                "The first_fit function needs to be called first.")

        train_x, train_y = np.array(train_x), np.array(train_y)

        # Incrementally compute K
        up_right_k = edit_distance_matrix(self._x, train_x)
        down_left_k = np.transpose(up_right_k)
        down_right_k = edit_distance_matrix(train_x)
        up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1)
        down_k = np.concatenate((down_left_k, down_right_k), axis=1)
        temp_distance_matrix = np.concatenate((up_k, down_k), axis=0)
        k_matrix = bourgain_embedding_matrix(temp_distance_matrix)
        diagonal = np.diag_indices_from(k_matrix)
        diagonal = (diagonal[0][-len(train_x):], diagonal[1][-len(train_x):])
        k_matrix[diagonal] += self.alpha

        try:
            self._l_matrix = cholesky(k_matrix, lower=True)  # Line 2
        except LinAlgError:
            return self

        self._x = np.concatenate((self._x, train_x), axis=0)
        self._y = np.concatenate((self._y, train_y), axis=0)
        self._distance_matrix = temp_distance_matrix

        self._alpha_vector = cho_solve(
            (self._l_matrix, True), self._y)  # Line 3

        return self 
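The "Line 2"/"Line 3" comments refer to the standard Gaussian-process update: factor K = L L^T, then solve K a = y with cho_solve; if K is not positive-definite despite the added alpha jitter, the update is simply skipped. A self-contained sketch of that pattern, assuming numpy and scipy.linalg:

import numpy as np
from scipy.linalg import cholesky, cho_solve

K = np.array([[1.0, 0.5], [0.5, 1.0]])
y = np.array([0.2, -0.1])
K[np.diag_indices_from(K)] += 1e-10            # jitter, like self.alpha above
l_matrix = cholesky(K, lower=True)             # "Line 2"
alpha_vector = cho_solve((l_matrix, True), y)  # "Line 3": solves K a = y
print(np.allclose(K @ alpha_vector, y))        # True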
Example #23
Source File: controller.py    From pybobyqa with GNU General Public License v3.0
def geometry_step(self, knew, adelt, number_of_samples, params):
        if self.do_logging:
            logging.debug("Running geometry-fixing step")
        try:
            c, g, H = self.model.lagrange_polynomial(knew)  # based at xopt
            # Solve problem: bounds are sl <= xnew <= su, and ||xnew-xopt|| <= adelt
            xnew = trsbox_geometry(self.model.xopt(), c, g, H, self.model.sl, self.model.su, adelt)
        except LA.LinAlgError:
            exit_info = ExitInformation(EXIT_LINALG_ERROR, "Singular matrix encountered in geometry step")
            return exit_info  # didn't fix geometry - return & quit

        gopt, H = self.model.build_full_model()  # save here, to calculate predicted value from geometry step
        fopt = self.model.fopt()  # again, evaluate now, before model.change_point()
        d = xnew - self.model.xopt()
        x = self.model.as_absolute_coordinates(xnew)
        f_list, num_samples_run, exit_info = self.evaluate_objective(x, number_of_samples, params)

        # Handle exit conditions (f < min obj value or maxfun reached)
        if exit_info is not None:
            if num_samples_run > 0:
                self.model.save_point(x, np.mean(f_list[:num_samples_run]), num_samples_run, x_in_abs_coords=True)
            return exit_info  # didn't fix geometry - return & quit

        # Otherwise, add new results
        self.model.change_point(knew, xnew, f_list[0])  # expect step, not absolute x
        for i in range(1, num_samples_run):
            self.model.add_new_sample(knew, f_extra=f_list[i])

        # Estimate actual reduction to add to diffs vector
        f = np.mean(f_list[:num_samples_run])  # estimate actual objective value

        # pred_reduction = - calculate_model_value(gopt, H, d)
        pred_reduction = - model_value(gopt, H, d)
        actual_reduction = fopt - f
        self.diffs = [abs(pred_reduction - actual_reduction), self.diffs[0], self.diffs[1]]
        return None  # exit_info = None 
Example #24
Source File: controller.py    From pybobyqa with GNU General Public License v3.0
def choose_point_to_replace(self, d, skip_kopt=True):
        delsq = self.delta ** 2
        scaden = None
        knew = None  # knew may never be set here?
        exit_info = None

        for k in range(self.model.npt()):
            if skip_kopt and k == self.model.kopt:
                continue  # skip this k

            # Build Lagrange polynomial
            try:
                c, g, hess = self.model.lagrange_polynomial(k)  # based at xopt
            except LA.LinAlgError:
                exit_info = ExitInformation(EXIT_LINALG_ERROR, "Singular matrix when choosing point to replace")
                break  # end & quit

            den = c + model_value(g, hess, d)

            distsq = sumsq(self.model.xpt(k) - self.model.xopt())
            temp = max(1.0, (distsq / delsq) ** 2)
            if scaden is None or temp * abs(den) > scaden:
                scaden = temp * abs(den)
                knew = k

        return knew, exit_info 
Example #25
Source File: beamformers.py    From beamformers with MIT License
def mb_mvdr_weights(mixture_stft, mask_noise, mask_target, phase_correct=False):
    """
    Calculates the MB MVDR weights in frequency domain
    :param mixture_stft: nd_array (channels, freq_bins, time)
    :param mask_noise: 2d array (freq_bins, time)
    :param mask_target: 2d array (freq_bins, time)
    :param phase_correct: whether or not to phase correct (see https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471664)
    :return: the mvdr weights: 2d array (freq_bins, channels)
    """
    C, F, T = mixture_stft.shape  # (channels, freq_bins, time)

    # covariance matrices
    cov_noise = get_power_spectral_density_matrix(mixture_stft.transpose(1, 0, 2), mask_noise, normalize=False)
    cov_speech = get_power_spectral_density_matrix(mixture_stft.transpose(1, 0, 2), mask_target, normalize=True)
    cov_noise = condition_covariance(cov_noise, 1e-6)
    cov_noise /= np.trace(cov_noise, axis1=-2, axis2=-1)[..., None, None] + 1e-15

    h = []
    for f in range(F):
        try:
            _cov_noise = cov_noise[f]
            _cov_speech = cov_speech[f]

            # mask-based MVDR
            _G = solve(_cov_noise, _cov_speech)
            _lambda = np.trace(_G) + 1e-15
            h_r = (1. / _lambda) * _G[:, 0]
            h.append(h_r)

        except LinAlgError:  # just a precaution if the solve does not work
            h.append(np.ones((C,)) + 1j * np.ones((C,)))

    w = np.array(h)
    if phase_correct:
        w = phase_correction(w)
    return w 
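Per frequency bin this computes w = (R_n^-1 R_s) e_1 / tr(R_n^-1 R_s), one common closed form of the mask-based MVDR beamformer. A toy sketch for a single bin with random well-conditioned covariances, assuming numpy and scipy:

import numpy as np
from scipy.linalg import solve

C = 4
rng = np.random.default_rng(0)
A = rng.standard_normal((C, C)) + 1j * rng.standard_normal((C, C))
cov_noise = A @ A.conj().T + C * np.eye(C)  # Hermitian positive-definite
B = rng.standard_normal((C, C)) + 1j * rng.standard_normal((C, C))
cov_speech = B @ B.conj().T

G = solve(cov_noise, cov_speech)
w = G[:, 0] / (np.trace(G) + 1e-15)         # MVDR weights for this bin
print(w.shape)  # (4,)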
Example #26
Source File: beamformers.py    From beamformers with MIT License
def mb_gev_weights(mixture_stft, mask_noise, mask_target, phase_correct=False):
    """
    Calculates the MB GEV weights in frequency domain
    :param mixture_stft: nd_array (channels, freq_bins, time)
    :param mask_noise: 2d array (freq_bins, time)
    :param mask_target: 2d array (freq_bins, time)
    :param phase_correct: whether or not to phase correct (see https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7471664)
    :return: the gev weights: 2d array (freq_bins, channels)
    """
    C, F, T = mixture_stft.shape  # (channels, freq_bins, time)

    # covariance matrices
    cov_noise = get_power_spectral_density_matrix(mixture_stft.transpose(1, 0, 2), mask_noise, normalize=True)
    cov_speech = get_power_spectral_density_matrix(mixture_stft.transpose(1, 0, 2), mask_target, normalize=False)
    cov_noise = condition_covariance(cov_noise, 1e-6)
    cov_noise /= np.trace(cov_noise, axis1=-2, axis2=-1)[..., None, None] + 1e-15

    h = []
    for f in range(F):
        try:
            _cov_noise = cov_noise[f]
            _cov_speech = cov_speech[f]

            # mask-based GEV
            [_d, _v] = eigh(_cov_speech, _cov_noise)
            h.append(_v[:, -1])

        except LinAlgError:  # just a precaution if the decomposition does not work
            h.append(np.ones((C,)) + 1j * np.ones((C,)))
            print("error")

    w = np.array(h)
    if phase_correct:
        w = phase_correction(w)
    return w 
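eigh(A, B) solves the generalized symmetric eigenproblem A v = lambda B v and requires B to be positive-definite, which is what the except branch above guards against. A minimal sketch of the failure mode, assuming numpy and scipy.linalg:

import numpy as np
from scipy.linalg import eigh, LinAlgError

A = np.eye(3)
B = np.diag([1.0, 1.0, 0.0])  # singular, hence not positive-definite
try:
    eigh(A, B)
except LinAlgError:
    print("generalized eigenproblem needs a positive-definite B")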
Example #27
Source File: gaussian_mixture.py    From twitter-stock-recommendation with MIT License
def _compute_precision_cholesky(covariances, covariance_type):
    """Compute the Cholesky decomposition of the precisions.

    Parameters
    ----------
    covariances : array-like
        The covariance matrix of the current components.
        The shape depends of the covariance_type.

    covariance_type : {'full', 'tied', 'diag', 'spherical'}
        The type of precision matrices.

    Returns
    -------
    precisions_cholesky : array-like
        The cholesky decomposition of sample precisions of the current
        components. The shape depends of the covariance_type.
    """
    estimate_precision_error_message = (
        "Fitting the mixture model failed because some components have "
        "ill-defined empirical covariance (for instance caused by singleton "
        "or collapsed samples). Try to decrease the number of components, "
        "or increase reg_covar.")

    if covariance_type == 'full':
        n_components, n_features, _ = covariances.shape
        precisions_chol = np.empty((n_components, n_features, n_features))
        for k, covariance in enumerate(covariances):
            try:
                cov_chol = linalg.cholesky(covariance, lower=True)
            except linalg.LinAlgError:
                raise ValueError(estimate_precision_error_message)
            precisions_chol[k] = linalg.solve_triangular(cov_chol,
                                                         np.eye(n_features),
                                                         lower=True).T
    elif covariance_type == 'tied':
        _, n_features = covariances.shape
        try:
            cov_chol = linalg.cholesky(covariances, lower=True)
        except linalg.LinAlgError:
            raise ValueError(estimate_precision_error_message)
        precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features),
                                                  lower=True).T
    else:
        if np.any(np.less_equal(covariances, 0.0)):
            raise ValueError(estimate_precision_error_message)
        precisions_chol = 1. / np.sqrt(covariances)
    return precisions_chol


###############################################################################
# Gaussian mixture probability estimators 
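For 'full' covariances the value stored is the upper-triangular factor P = L^-T of the precision matrix, so that P P^T equals the inverse covariance; a quick check of that identity, assuming numpy and scipy.linalg:

import numpy as np
from scipy import linalg

cov = np.array([[2.0, 0.3], [0.3, 1.0]])
cov_chol = linalg.cholesky(cov, lower=True)
prec_chol = linalg.solve_triangular(cov_chol, np.eye(2), lower=True).T
print(np.allclose(prec_chol @ prec_chol.T, np.linalg.inv(cov)))  # True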
Example #28
Source File: ridge.py    From twitter-stock-recommendation with MIT License
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
    # dual_coef = inv(X X^t + alpha*Id) y
    n_samples = K.shape[0]
    n_targets = y.shape[1]

    if copy:
        K = K.copy()

    alpha = np.atleast_1d(alpha)
    one_alpha = (alpha == alpha[0]).all()
    has_sw = isinstance(sample_weight, np.ndarray) \
        or sample_weight not in [1.0, None]

    if has_sw:
        # Unlike other solvers, we need to support sample_weight directly
        # because K might be a pre-computed kernel.
        sw = np.sqrt(np.atleast_1d(sample_weight))
        y = y * sw[:, np.newaxis]
        K *= np.outer(sw, sw)

    if one_alpha:
        # Only one penalty, we can solve multi-target problems in one time.
        K.flat[::n_samples + 1] += alpha[0]

        try:
            # Note: we must use overwrite_a=False in order to be able to
            #       use the fall-back solution below in case a LinAlgError
            #       is raised
            dual_coef = linalg.solve(K, y, sym_pos=True,
                                     overwrite_a=False)
        except np.linalg.LinAlgError:
            warnings.warn("Singular matrix in solving dual problem. Using "
                          "least-squares solution instead.")
            dual_coef = linalg.lstsq(K, y)[0]

        # K is expensive to compute and store in memory so change it back in
        # case it was user-given.
        K.flat[::n_samples + 1] -= alpha[0]

        if has_sw:
            dual_coef *= sw[:, np.newaxis]

        return dual_coef
    else:
        # One penalty per target. We need to solve each target separately.
        dual_coefs = np.empty([n_targets, n_samples], K.dtype)

        for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
            K.flat[::n_samples + 1] += current_alpha

            dual_coef[:] = linalg.solve(K, target, sym_pos=True,
                                        overwrite_a=False).ravel()

            K.flat[::n_samples + 1] -= current_alpha

        if has_sw:
            dual_coefs *= sw[np.newaxis, :]

        return dual_coefs.T 
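A possible end-to-end use of the helper above, assuming numpy and scipy.linalg as in the source file. With a precomputed linear kernel K = X X^T this is ordinary ridge regression in its dual form; note that sym_pos=True is the older spelling of assume_a='pos' in scipy.linalg.solve:

X = np.random.randn(20, 3)
y = X @ np.array([[1.0], [-2.0], [0.5]])  # one target, shape (20, 1)
K = X @ X.T                               # precomputed linear kernel
dual_coef = _solve_cholesky_kernel(K, y, 0.1)
y_hat = K @ dual_coef                     # in-sample predictions
print(y_hat.shape)  # (20, 1)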
Example #29
Source File: pwlf.py    From piecewise_linear_fit_py with MIT License
def r_squared(self):
        r"""
        Calculate the coefficient of determination ("R squared", R^2) value
        after a fit has been performed.
        For more information see:
        https://en.wikipedia.org/wiki/Coefficient_of_determination

        Returns
        -------
        rsq : float
            Coefficient of determination, or 'R squared' value.

        Raises
        ------
        AttributeError
            You have probably not performed a fit yet.
        LinAlgError
            This typically means your regression problem is ill-conditioned.

        Examples
        --------
        Calculate the R squared value after performing a simple fit.

        >>> import pwlf
        >>> x = np.linspace(0.0, 1.0, 10)
        >>> y = np.random.random(10)
        >>> my_pwlf = pwlf.PiecewiseLinFit(x, y)
        >>> breaks = my_pwlf.fitfast(3)
        >>> rsq = my_pwlf.r_squared()

        """
        if self.fit_breaks is None:
            errmsg = 'You do not have any beta parameters. You must perform' \
                     ' a fit before using r_squared().'
            raise AttributeError(errmsg)
        ssr = self.fit_with_breaks(self.fit_breaks)
        ybar = np.ones(self.n_data) * np.mean(self.y_data)
        ydiff = self.y_data - ybar
        try:
            sst = np.dot(ydiff, ydiff)
            rsq = 1.0 - (ssr/sst)
            return rsq
        except linalg.LinAlgError:
            raise linalg.LinAlgError('Singular matrix') 
Example #30
Source File: base.py    From dislib with Apache License 2.0
def _compute_precision_cholesky(covariances, covariance_type):
    """Compute the Cholesky decomposition of the precisions.

    Parameters
    ----------
    covariances : array-like
        The covariance matrix of the current components.
        The shape depends of the covariance_type.

    covariance_type : {'full', 'tied', 'diag', 'spherical'}
        The type of precision matrices.

    Returns
    -------
    precisions_cholesky : array-like
        The cholesky decomposition of sample precisions of the current
        components. The shape depends of the covariance_type.
    """
    estimate_precision_error_message = (
        "Fitting the mixture model failed because some components have "
        "ill-defined empirical covariance (for instance caused by singleton "
        "or collapsed samples). Try to decrease the number of components, "
        "or increase reg_covar.")

    if covariance_type == 'full':
        n_components, n_features, _ = covariances.shape
        precisions_chol = np.empty((n_components, n_features, n_features))
        for k, covariance in enumerate(covariances):
            try:
                cov_chol = linalg.cholesky(covariance, lower=True)
            except linalg.LinAlgError:
                raise ValueError(estimate_precision_error_message)
            precisions_chol[k] = linalg.solve_triangular(cov_chol,
                                                         np.eye(n_features),
                                                         lower=True).T
    elif covariance_type == 'tied':
        _, n_features = covariances.shape
        try:
            cov_chol = linalg.cholesky(covariances, lower=True)
        except linalg.LinAlgError:
            raise ValueError(estimate_precision_error_message)
        precisions_chol = linalg.solve_triangular(cov_chol,
                                                  np.eye(n_features),
                                                  lower=True).T
    else:
        if np.any(np.less_equal(covariances, 0.0)):
            raise ValueError(estimate_precision_error_message)
        precisions_chol = 1. / np.sqrt(covariances)
    return precisions_chol