Python numpy.linalg Examples

The following code examples show how to use the numpy.linalg module and related linear-algebra routines. They are extracted from open source Python projects.
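Before the project examples, here is a short, self-contained sketch (not taken from any project below) of a few commonly used numpy.linalg routines: solve, norm, det, and eigh.

import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])          # symmetric positive definite
b = np.array([1.0, 2.0])

x = np.linalg.solve(A, b)           # solve A x = b
print(np.linalg.norm(A @ x - b))    # residual norm, ~0
print(np.linalg.det(A))             # determinant: 4*3 - 1*1 = 11
w, V = np.linalg.eigh(A)            # eigendecomposition of a symmetric matrix
print(w)                            # eigenvalues in ascending order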

Example 1
Project: camera_calibration_frontend   Author: groundmelon   File: calibrator.py
def _get_skew(corners, board):
    """
    Get skew for given checkerboard detection.
    Scaled to [0,1], where 0 = no skew and 1 = high skew.
    Skew is proportional to the divergence of three outside corners from 90 degrees.
    """
    # TODO Using three nearby interior corners might be more robust, outside corners occasionally
    # get mis-detected
    up_left, up_right, down_right, _ = _get_outside_corners(corners, board)

    def angle(a, b, c):
        """
        Return angle between lines ab, bc
        """
        ab = a - b
        cb = c - b
        return math.acos(numpy.dot(ab,cb) / (numpy.linalg.norm(ab) * numpy.linalg.norm(cb)))

    skew = min(1.0, 2. * abs((math.pi / 2.) - angle(up_left, up_right, down_right)))
    return skew 
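The angle helper above is simply the arccos of the normalized dot product of the two edge vectors. A minimal standalone check with hypothetical corner points (not taken from the calibrator):

import math
import numpy

def angle(a, b, c):
    """Return the angle at b between rays b->a and b->c, in radians."""
    ab = a - b
    cb = c - b
    return math.acos(numpy.dot(ab, cb) / (numpy.linalg.norm(ab) * numpy.linalg.norm(cb)))

up_left = numpy.array([0.0, 1.0])
up_right = numpy.array([0.0, 0.0])
down_right = numpy.array([1.0, 0.0])
# A perfect right angle gives pi/2, so the skew measure above would be ~0.
print(angle(up_left, up_right, down_right))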
Example 2
Project: neurotools   Author: michaelerule   File: matrix.py
def logdet(C,eps=1e-6,safe=0):
    '''
    Logarithm of the determinant of a matrix
    Works only with real-valued positive definite matrices
    '''
    try:
        return 2.0*np.sum(np.log(np.diag(chol(C))))
    except numpy.linalg.linalg.LinAlgError:
        if safe: C = check_covmat(C,eps=eps)
        w = np.linalg.eigh(C)[0]
        w = np.real(w)
        w[w<eps]=eps
        det = np.sum(np.log(w))
        return det 
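A quick sanity check of the identity used above, log det C = 2 * sum(log(diag(chol(C)))) for a symmetric positive definite C, using plain numpy (np.linalg.slogdet provides the reference value); this is an illustrative sketch, not part of neurotools:

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(5, 5)
C = A @ A.T + 5 * np.eye(5)                  # symmetric positive definite

via_chol = 2.0 * np.sum(np.log(np.diag(np.linalg.cholesky(C))))
sign, ref = np.linalg.slogdet(C)
print(sign, abs(via_chol - ref))             # sign == 1.0, difference ~1e-14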
Example 3
Project: FermiLib   Author: ProjectQ-Framework   File: _sparse_tools.py
def get_ground_state(sparse_operator):
    """Compute lowest eigenvalue and eigenstate.

    Returns:
        eigenvalue: The lowest eigenvalue, a float.
        eigenstate: The lowest eigenstate in scipy.sparse csc format.
    """
    if not is_hermitian(sparse_operator):
        raise ValueError('sparse_operator must be Hermitian.')

    values, vectors = scipy.sparse.linalg.eigsh(
        sparse_operator, 2, which='SA', maxiter=1e7)

    eigenstate = scipy.sparse.csc_matrix(vectors[:, 0])
    eigenvalue = values[0]
    return eigenvalue, eigenstate.getH() 
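For context, a minimal sketch of the same scipy.sparse.linalg.eigsh call on a small, hypothetical Hermitian matrix (not part of FermiLib):

import numpy as np
import scipy.sparse
import scipy.sparse.linalg

rng = np.random.RandomState(0)
M = rng.randn(6, 6)
H = scipy.sparse.csc_matrix(M + M.T)         # real symmetric, hence Hermitian

values, vectors = scipy.sparse.linalg.eigsh(H, 2, which='SA')
print(values[0])                             # lowest eigenvalue
print(vectors[:, 0])                         # corresponding eigenvector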
Example 4
Project: maze-builder   Author: kcsaff   File: mesh.py
def __init__(self, mesh, transform_out=None, transform_in=None):
        transform_out = numpy.matrix(transform_out) if transform_out is not None else None
        transform_in = numpy.matrix(transform_in) if transform_in is not None else None

        if transform_in is None and transform_out is None:
            transform_in = numpy.identity(3)
            transform_out = numpy.identity(3)
        elif transform_in is None:
            try:
                transform_in = numpy.linalg.inv(transform_out)
            except numpy.linalg.LinAlgError:
                transform_in = None
        elif transform_out is None:
            try:
                transform_out = numpy.linalg.inv(transform_in)
            except numpy.linalg.LinAlgError:
                transform_out = None

        self.transform_out, self.transform_in = transform_out, transform_in

        super().__init__(
            mesh,
            warp_in=lambda vertex: self.transform_in.dot(vertex).tolist()[0][:3] if self.transform_in is not None else None,
            warp_out=lambda vertex: self.transform_out.dot(vertex).tolist()[0][:3] if self.transform_out is not None else None,
        ) 
Example 5
Project: gail-driver   Author: sisl   File: cma_es_lib.py
def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
        """return likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)"""
        # testing: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformly sampled)
        # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0],
        # None, 3) for r in rand(10000,dim)]) * 20**dim
        if m is None:
            dx = x
        else:
            dx = x - m  # array(x) - array(m)
        n = len(x)
        s2pi = (2 * np.pi)**(n / 2.)
        if Cinv is None:
            return exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n
        if detC is None:
            detC = 1. / np.linalg.linalg.det(Cinv)
        return exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2) / s2pi / abs(detC)**0.5 / sigma**n 
Example 6
Project: Theano-Deep-learning   Author: GeekLiB   File: test_nlinalg.py
def test_pseudoinverse_correctness():
    rng = numpy.random.RandomState(utt.fetch_seed())
    d1 = rng.randint(4) + 2
    d2 = rng.randint(4) + 2
    r = rng.randn(d1, d2).astype(theano.config.floatX)

    x = tensor.matrix()
    xi = pinv(x)

    ri = function([x], xi)(r)
    assert ri.shape[0] == r.shape[1]
    assert ri.shape[1] == r.shape[0]
    assert ri.dtype == r.dtype
    # Note that the pseudoinverse can be quite imprecise, so I prefer to compare
    # the result with what numpy.linalg returns
    assert _allclose(ri, numpy.linalg.pinv(r)) 
Example 7
Project: Theano-Deep-learning   Author: GeekLiB   File: test_nlinalg.py
def test_numpy_compare(self):
        rng = numpy.random.RandomState(utt.fetch_seed())

        M = tensor.matrix("A", dtype=theano.config.floatX)
        V = tensor.vector("V", dtype=theano.config.floatX)

        a = rng.rand(4, 4).astype(theano.config.floatX)
        b = rng.rand(4).astype(theano.config.floatX)

        A = (   [None, 'fro', 'inf', '-inf', 1, -1, None, 'inf', '-inf', 0, 1, -1, 2, -2],
                [M, M, M, M, M, M, V, V, V, V, V, V, V, V],
                [a, a, a, a, a, a, b, b, b, b, b, b, b, b],
                [None, 'fro', inf, -inf, 1, -1, None, inf, -inf, 0, 1, -1, 2, -2])

        for i in range(0, 14):
            f = function([A[1][i]], norm(A[1][i], A[0][i]))
            t_n = f(A[2][i])
            n_n = numpy.linalg.norm(A[2][i], A[3][i])
            assert _allclose(n_n, t_n) 
Example 8
Project: Theano-Deep-learning   Author: GeekLiB   File: test_nlinalg.py
def test_eval(self):
        A = self.A
        Ai = tensorinv(A)
        n_ainv = numpy.linalg.tensorinv(self.a)
        tf_a = function([A], [Ai])
        t_ainv = tf_a(self.a)
        assert _allclose(n_ainv, t_ainv)

        B = self.B
        Bi = tensorinv(B)
        Bi1 = tensorinv(B, ind=1)
        n_binv = numpy.linalg.tensorinv(self.b)
        n_binv1 = numpy.linalg.tensorinv(self.b1, ind=1)
        tf_b = function([B], [Bi])
        tf_b1 = function([B], [Bi1])
        t_binv = tf_b(self.b)
        t_binv1 = tf_b1(self.b1)
        assert _allclose(t_binv, n_binv)
        assert _allclose(t_binv1, n_binv1) 
Example 9
Project: Theano-Deep-learning   Author: GeekLiB   File: test_slinalg.py
def test_perform(self):
        if not imported_scipy:
            raise SkipTest('kron tests need the scipy package to be installed')

        for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
            x = tensor.tensor(dtype='floatX',
                              broadcastable=(False,) * len(shp0))
            a = numpy.asarray(self.rng.rand(*shp0)).astype(config.floatX)
            for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
                if len(shp0) + len(shp1) == 2:
                    continue
                y = tensor.tensor(dtype='floatX',
                                  broadcastable=(False,) * len(shp1))
                f = function([x, y], kron(x, y))
                b = self.rng.rand(*shp1).astype(config.floatX)
                out = f(a, b)
                # Newer versions of scipy want 4 dimensions at least,
                # so we have to add a dimension to a and flatten the result.
                if len(shp0) + len(shp1) == 3:
                    scipy_val = scipy.linalg.kron(
                        a[numpy.newaxis, :], b).flatten()
                else:
                    scipy_val = scipy.linalg.kron(a, b)
                utt.assert_allclose(out, scipy_val) 
Example 10
Project: neurotools   Author: michaelerule   File: matrix.py
def real_eig(M,eps=1e-9):
    '''
    This code expects a real Hermitian matrix
    and should throw a ValueError if not.
    This is probably redundant with the scipy eigh function.
    Do not use.
    '''
    if not (type(M)==np.ndarray):
        raise ValueError("Expected array; type is %s"%type(M))
    if np.any(np.abs(np.imag(M))>eps):
        raise ValueError("Matrix has imaginary values >%0.2e; will not extract real eigenvalues"%eps)
    M = np.real(M)
    w,v = np.linalg.eig(M)
    if np.any(abs(np.imag(w))>eps):
        raise ValueError('Eigenvalues with imaginary part >%0.2e; matrix has complex eigenvalues'%eps)
    w = np.real(w)
    order = np.argsort(w)
    w = w[order]
    v = v[:,order]
    return w,v 
Example 11
Project: neurotools   Author: michaelerule   File: matrix.py
def _getAplus(A):
    '''
    Please see the documentation for nearPDHigham
    '''
    eigval, eigvec = np.linalg.eig(A)
    Q = np.matrix(eigvec)
    xdiag = np.matrix(np.diag(np.maximum(eigval, 0)))
    return Q*xdiag*Q.T 
Example 12
Project: composability_bench   Author: IntelPython   File: bench_stats.py    (MIT License)
def bench_on(runner, sym, Ns, trials, dtype=None):
    global args, kernel, out, mkl_layer
    prepare = globals().get("prepare_"+sym, prepare_default)
    kernel  = globals().get("kernel_"+sym, None)
    if not kernel:
       kernel = getattr(np.linalg, sym)
    out_lvl = runner.__doc__.split('.')[0].strip()
    func_s  = kernel.__doc__.split('.')[0].strip()
    log.debug('Preparing input data for %s (%s).. ' % (sym, func_s))
    args = [prepare(int(i)) for i in Ns]
    it = range(len(Ns))
    # pprint(Ns)
    out = np.empty(shape=(len(Ns), trials))
    b = body(trials)
    tic, toc = (0, 0)
    log.debug('Warming up %s (%s).. ' % (sym, func_s))
    runner(range(1000), empty_work)
    kernel(*args[0])
    runner(range(1000), empty_work)
    log.debug('Benchmarking %s on %s: ' % (func_s, out_lvl))
    gc_old = gc.isenabled()
#    gc.disable()
    tic = time.time()
    runner(it, b)
    toc = time.time() - tic
    if gc_old:
        gc.enable()
    if 'reused_pool' in globals():
        del globals()['reused_pool']

    #calculate average time and min time and also keep track of outliers (max time in the loop)
    min_time = np.amin(out)
    max_time = np.amax(out)
    mean_time = np.mean(out)
    stdev_time = np.std(out)

    #print("Min = %.5f, Max = %.5f, Mean = %.5f, stdev = %.5f " % (min_time, max_time, mean_time, stdev_time))
    #final_times = [min_time, max_time, mean_time, stdev_time]

    print('## %s: Outer:%s, Inner:%s, Wall seconds:%f\n' % (sym, out_lvl, mkl_layer, float(toc)))
    return out 
Example 13
Project: spyking-circus   Author: spyking-circus   File: utils.py
def get_whitening_matrix(X, fudge=1E-18):
   from numpy.linalg import eigh
   Xcov = numpy.dot(X.T, X)/X.shape[0]
   d,V  = eigh(Xcov)
   D    = numpy.diag(1./numpy.sqrt(d+fudge))
   W    = numpy.dot(numpy.dot(V,D), V.T)
   return W 
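A short check of the construction above (ZCA whitening, W = V diag(1/sqrt(d)) V^T): applying W to hypothetical data should make the covariance close to the identity. This usage sketch is not part of spyking-circus.

import numpy

rng = numpy.random.RandomState(0)
X = rng.randn(1000, 4)

W = get_whitening_matrix(X)
Xw = numpy.dot(X, W)
cov_w = numpy.dot(Xw.T, Xw) / Xw.shape[0]
print(numpy.allclose(cov_w, numpy.eye(4), atol=1e-6))   # True, up to the fudge term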
Example 14
Project: spyking-circus   Author: spyking-circus   File: utils.py
def get_precision(self):
        """Compute data precision matrix with the generative model.
        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.
        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision 
Example 15
Project: pycma   Author: CMA-ES   File: sampler.py
def __init__(self, dimension,
                 lazy_update_gap=0,
                 constant_trace='',
                 randn=np.random.randn,
                 eigenmethod=np.linalg.eigh):
        try:
            self.dimension = len(dimension)
            standard_deviations = np.asarray(dimension)
        except TypeError:
            self.dimension = dimension
            standard_deviations = np.ones(dimension)
        assert len(standard_deviations) == self.dimension

        # prevent equal eigenvals, a hack for np.linalg:
        self.C = np.diag(standard_deviations**2
                    * np.exp((1e-4 / self.dimension) *
                             np.arange(self.dimension)))
        "covariance matrix"
        self.lazy_update_gap = lazy_update_gap
        self.constant_trace = constant_trace
        self.randn = randn
        self.eigenmethod = eigenmethod
        self.B = np.eye(self.dimension)
        "columns, B.T[i] == B[:, i], are eigenvectors of C"
        self.D = np.diag(self.C)**0.5  # we assume that C is still diagonal at this point
        idx = self.D.argsort()
        self.D = self.D[idx]
        self.B = self.B[:, idx]
        "axis lengths, roots of eigenvalues, sorted"
        self._inverse_root_C = None  # see transform_inv...
        self.last_update = 0
        self.count_tell = 0
        self.count_eigen = 0 
Example 16
Project: SLP-Annotator   Author: PhonologicalCorpusTools   File: applyHandCode.py    (GNU General Public License v3.0)
def rotation_from_matrix(matrix):
    """Return rotation angle and axis from rotation matrix.

    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> angle, direc, point = rotation_from_matrix(R0)
    >>> R1 = rotation_matrix(angle, direc, point)
    >>> is_same_transform(R0, R1)
    True

    """
    R = numpy.array(matrix, dtype=numpy.float64, copy=False)
    R33 = R[:3, :3]
    # direction: unit eigenvector of R33 corresponding to eigenvalue of 1
    w, W = numpy.linalg.eig(R33.T)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    direction = numpy.real(W[:, i[-1]]).squeeze()
    # point: unit eigenvector of R corresponding to eigenvalue of 1
    w, Q = numpy.linalg.eig(R)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(Q[:, i[-1]]).squeeze()
    point /= point[3]
    # rotation angle depending on direction
    cosa = (numpy.trace(R33) - 1.0) / 2.0
    if abs(direction[2]) > 1e-8:
        sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
    elif abs(direction[1]) > 1e-8:
        sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
    else:
        sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
    angle = math.atan2(sina, cosa)
    return angle, direction, point

# Function to translate handshape coding to degrees of rotation, adduction, flexion 
Example 17
Project: SLP-Annotator   Author: PhonologicalCorpusTools   File: position_hand.py    (GNU General Public License v3.0)
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. Euclidean norm, of ndarray along axis.

    >>> v = numpy.random.random(3)
    >>> n = vector_norm(v)
    >>> numpy.allclose(n, numpy.linalg.norm(v))
    True
    >>> v = numpy.random.rand(6, 5, 3)
    >>> n = vector_norm(v, axis=-1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
    True
    >>> n = vector_norm(v, axis=1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> v = numpy.random.rand(5, 4, 3)
    >>> n = numpy.empty((5, 3))
    >>> vector_norm(v, axis=1, out=n)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1])
    1.0

    """
    data = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is None:
        if data.ndim == 1:
            return math.sqrt(numpy.dot(data, data))
        data *= data
        out = numpy.atleast_1d(numpy.sum(data, axis=axis))
        numpy.sqrt(out, out)
        return out
    else:
        data *= data
        numpy.sum(data, axis=axis, out=out)
        numpy.sqrt(out, out) 
Example 18
Project: SLP-Annotator   Author: PhonologicalCorpusTools   File: position_hand.py    (GNU General Public License v3.0)
def rotation_from_matrix(matrix):
    """Return rotation angle and axis from rotation matrix.

    >>> angle = (random.random() - 0.5) * (2*math.pi)
    >>> direc = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> R0 = rotation_matrix(angle, direc, point)
    >>> angle, direc, point = rotation_from_matrix(R0)
    >>> R1 = rotation_matrix(angle, direc, point)
    >>> is_same_transform(R0, R1)
    True

    """
    R = numpy.array(matrix, dtype=numpy.float64, copy=False)
    R33 = R[:3, :3]
    # direction: unit eigenvector of R33 corresponding to eigenvalue of 1
    w, W = numpy.linalg.eig(R33.T)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    direction = numpy.real(W[:, i[-1]]).squeeze()
    # point: unit eigenvector of R corresponding to eigenvalue of 1
    w, Q = numpy.linalg.eig(R)
    i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
    point = numpy.real(Q[:, i[-1]]).squeeze()
    point /= point[3]
    # rotation angle depending on direction
    cosa = (numpy.trace(R33) - 1.0) / 2.0
    if abs(direction[2]) > 1e-8:
        sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
    elif abs(direction[1]) > 1e-8:
        sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
    else:
        sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
    angle = math.atan2(sina, cosa)
    return angle, direction, point

# Function to translate handshape coding to degrees of rotation, adduction, flexion 
Example 19
Project: third_person_im   Author: bstadie   File: cma_es_lib.py
def expms(A, eig=np.linalg.eigh):
            """matrix exponential for a symmetric matrix"""
            # TODO: check that this works reliably for low rank matrices
            # first: symmetrize A
            D, B = eig(A)
            return np.dot(B, (np.exp(D) * B).T) 
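A standalone check of the eigendecomposition-based matrix exponential above against scipy.linalg.expm, on a hypothetical symmetric matrix (the function is reproduced here so the snippet runs on its own):

import numpy as np
import scipy.linalg

def expms(A, eig=np.linalg.eigh):
    """Matrix exponential for a symmetric matrix via its eigendecomposition."""
    D, B = eig(A)
    return np.dot(B, (np.exp(D) * B).T)

rng = np.random.RandomState(0)
M = rng.randn(4, 4)
A = 0.5 * (M + M.T)                          # symmetrize

print(np.allclose(expms(A), scipy.linalg.expm(A)))   # True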
Example 20
Project: third_person_im   Author: bstadie   File: cma_es_lib.py
def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
        """return likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)"""
        # testing: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformly sampled)
        # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
        if m is None:
            dx = x
        else:
            dx = x - m  # array(x) - array(m)
        n = len(x)
        s2pi = (2 * np.pi)**(n / 2.)
        if Cinv is None:
            return exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n
        if detC is None:
            detC = 1. / np.linalg.linalg.det(Cinv)
        return  exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2) / s2pi / abs(detC)**0.5 / sigma**n 
Example 21
Project: CSB   Author: csb-toolbox   File: __init__.py
def rmsd(X, Y):
    """
    Calculate the root mean squared deviation (RMSD) using Kabsch' formula.

    @param X: (n, d) input vector
    @type X: numpy array

    @param Y: (n, d) input vector
    @type Y: numpy array

    @return: rmsd value between the input vectors
    @rtype: float
    """

    from numpy import sum, dot, sqrt, clip, average
    from numpy.linalg import svd, det

    X = X - X.mean(0)
    Y = Y - Y.mean(0)

    R_x = sum(X ** 2)
    R_y = sum(Y ** 2)

    V, L, U = svd(dot(Y.T, X))

    if det(dot(V, U)) < 0.:
        L[-1] *= -1

    return sqrt(clip(R_x + R_y - 2 * sum(L), 0., 1e300) / len(X)) 
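A minimal usage sketch for rmsd above with hypothetical coordinates (not from the CSB test suite): a rigidly rotated and translated copy of a point set should give an RMSD near zero.

import numpy

rng = numpy.random.RandomState(0)
X = rng.randn(10, 3)

theta = 0.3
R = numpy.array([[numpy.cos(theta), -numpy.sin(theta), 0.0],
                 [numpy.sin(theta),  numpy.cos(theta), 0.0],
                 [0.0,               0.0,              1.0]])
Y = numpy.dot(X, R.T) + numpy.array([1.0, 2.0, 3.0])    # rigid transform of X

print(rmsd(X, Y))                        # ~0
print(rmsd(X, rng.randn(10, 3)) > 0.0)   # True for unrelated coordinates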
Example 22
Project: CSB   Author: csb-toolbox   File: __init__.py
def wrmsd(X, Y, w):
    """
    Calculate the weighted root mean squared deviation (wRMSD) using Kabsch'
    formula.

    @param X: (n, d) input vector
    @type X: numpy array

    @param Y: (n, d) input vector
    @type Y: numpy array

    @param w: input weights
    @type w: numpy array

    @return: rmsd value between the input vectors
    @rtype: float
    """

    from numpy import sum, dot, sqrt, clip, average
    from numpy.linalg import svd

    ## normalize weights

    w = w / w.sum()

    X = X - dot(w, X)
    Y = Y - dot(w, Y)

    R_x = sum(X.T ** 2 * w)
    R_y = sum(Y.T ** 2 * w)

    L = svd(dot(Y.T * w, X))[1]

    return sqrt(clip(R_x + R_y - 2 * sum(L), 0., 1e300)) 
Example 23
Project: CSB   Author: csb-toolbox   File: __init__.py
def is_mirror_image(X, Y):
    """
    Check if two configurations X and Y are mirror images
    (i.e. their optimal superposition involves a reflection).

    @param X: n x 3 input vector
    @type X: numpy array
    @param Y: n x 3 input vector
    @type Y: numpy array
 
    @rtype: bool
    """
    from numpy.linalg import det, svd
    
    ## center configurations

    X = X - numpy.mean(X, 0)
    Y = Y - numpy.mean(Y, 0)

    ## SVD of correlation matrix

    V, L, U = svd(numpy.dot(numpy.transpose(X), Y))             #@UnusedVariable

    R = numpy.dot(V, U)

    return det(R) < 0 
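A brief usage sketch with hypothetical data: reflecting one coordinate of a point cloud should flip the determinant test above.

import numpy

rng = numpy.random.RandomState(0)
X = rng.randn(10, 3)
Y = X.copy()
Y[:, 0] *= -1.0                  # reflect through the yz-plane

print(is_mirror_image(X, Y))     # True
print(is_mirror_image(X, X))     # False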
Example 24
Project: rllabplusplus   Author: shaneshixiang   File: cma_es_lib.py
def expms(A, eig=np.linalg.eigh):
            """matrix exponential for a symmetric matrix"""
            # TODO: check that this works reliably for low rank matrices
            # first: symmetrize A
            D, B = eig(A)
            return np.dot(B, (np.exp(D) * B).T) 
Example 25
Project: rllabplusplus   Author: shaneshixiang   File: cma_es_lib.py
def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
        """return likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)"""
        # testing: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformly sampled)
        # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
        if m is None:
            dx = x
        else:
            dx = x - m  # array(x) - array(m)
        n = len(x)
        s2pi = (2 * np.pi)**(n / 2.)
        if Cinv is None:
            return exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n
        if detC is None:
            detC = 1. / np.linalg.linalg.det(Cinv)
        return  exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2) / s2pi / abs(detC)**0.5 / sigma**n 
Example 26
Project: cma   Author: hardmaru   File: cma.py
def expms(A, eig=np.linalg.eigh):
            """matrix exponential for a symmetric matrix"""
            # TODO: check that this works reliably for low rank matrices
            # first: symmetrize A
            D, B = eig(A)
            return np.dot(B, (np.exp(D) * B).T) 
Example 27
Project: cma   Author: hardmaru   File: cma.py
def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
        """return likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)"""
        # testing: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformly sampled)
        # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
        if m is None:
            dx = x
        else:
            dx = x - m  # array(x) - array(m)
        n = len(x)
        s2pi = (2 * np.pi)**(n / 2.)
        if Cinv is None:
            return exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n
        if detC is None:
            detC = 1. / np.linalg.linalg.det(Cinv)
        return  exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2) / s2pi / abs(detC)**0.5 / sigma**n 
Example 28
Project: Deep-Subspace-Clustering   Author: tonyabracadabra   File: core.py
def eig(a):
    u,v = np.linalg.eig(a)
    return u.T 
Example 29
Project: FermiLib   Author: ProjectQ-Framework   File: _sparse_tools.py
def sparse_eigenspectrum(sparse_operator):
    """Perform a dense diagonalization.

    Returns:
        eigenspectrum: The lowest eigenvalues in a numpy array.
    """
    dense_operator = sparse_operator.todense()
    if is_hermitian(sparse_operator):
        eigenspectrum = numpy.linalg.eigvalsh(dense_operator)
    else:
        eigenspectrum = numpy.linalg.eigvals(dense_operator)
    return numpy.sort(eigenspectrum) 
Example 30
Project: FermiLib   Author: ProjectQ-Framework   File: _sparse_tools.py
def get_gap(sparse_operator):
    """Compute gap between lowest eigenvalue and first excited state.

    Returns: A real float giving eigenvalue gap.
    """
    if not is_hermitian(sparse_operator):
        raise ValueError('sparse_operator must be Hermitian.')

    values, _ = scipy.sparse.linalg.eigsh(
        sparse_operator, 2, which='SA', maxiter=1e7)

    gap = abs(values[1] - values[0])
    return gap 
Example 31
Project: SamuROI   Author: samuroi   File: branch.py
def perpedndicular1(v):
    """calculate the perpendicular unit vector"""
    return numpy.array((-v[1], v[0])) / numpy.linalg.norm((-v[1], v[0])) 
Example 32
Project: SamuROI   Author: samuroi   File: branch.py
def normalize(v):
    return v / numpy.linalg.norm(v) 
Example 33
Project: gail-driver   Author: sisl   File: cma_es_lib.py
def expms(A, eig=np.linalg.eigh):
            """matrix exponential for a symmetric matrix"""
            # TODO: check that this works reliably for low rank matrices
            # first: symmetrize A
            D, B = eig(A)
            return np.dot(B, (np.exp(D) * B).T) 
Example 34
Project: Theano-Deep-learning   Author: GeekLiB   File: test_nlinalg.py
def test_qr_modes():
    rng = numpy.random.RandomState(utt.fetch_seed())

    A = tensor.matrix("A", dtype=theano.config.floatX)
    a = rng.rand(4, 4).astype(theano.config.floatX)

    f = function([A], qr(A))
    t_qr = f(a)
    n_qr = numpy.linalg.qr(a)
    assert _allclose(n_qr, t_qr)

    for mode in ["reduced", "r", "raw"]:
        f = function([A], qr(A, mode))
        t_qr = f(a)
        n_qr = numpy.linalg.qr(a, mode)
        if isinstance(n_qr, (list, tuple)):
            assert _allclose(n_qr[0], t_qr[0])
            assert _allclose(n_qr[1], t_qr[1])
        else:
            assert _allclose(n_qr, t_qr)

    try:
        n_qr = numpy.linalg.qr(a, "complete")
        f = function([A], qr(A, "complete"))
        t_qr = f(a)
        assert _allclose(n_qr, t_qr)
    except TypeError as e:
        assert "name 'complete' is not defined" in str(e) 
Example 35
Project: Theano-Deep-learning   Author: GeekLiB   File: test_nlinalg.py
def test_svd():
    rng = numpy.random.RandomState(utt.fetch_seed())
    A = tensor.matrix("A", dtype=theano.config.floatX)
    U, V, T = svd(A)
    fn = function([A], [U, V, T])
    a = rng.rand(4, 4).astype(theano.config.floatX)
    n_u, n_v, n_t = numpy.linalg.svd(a)
    t_u, t_v, t_t = fn(a)

    assert _allclose(n_u, t_u)
    assert _allclose(n_v, t_v)
    assert _allclose(n_t, t_t) 
Example 36
Project: Theano-Deep-learning   Author: GeekLiB   File: test_nlinalg.py
def test_inverse_singular():
    singular = numpy.array([[1, 0, 0]] + [[0, 1, 0]] * 2,
                           dtype=theano.config.floatX)
    a = tensor.matrix()
    f = function([a], matrix_inverse(a))
    try:
        f(singular)
    except numpy.linalg.LinAlgError:
        return
    assert False 
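The test above relies on numpy raising LinAlgError for singular input; the same behaviour in plain numpy:

import numpy

singular = numpy.array([[1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0]])        # rank 2, not invertible
try:
    numpy.linalg.inv(singular)
except numpy.linalg.LinAlgError as err:
    print("caught:", err)                        # "Singular matrix"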
Example 37
Project: Theano-Deep-learning   Author: GeekLiB   File: test_nlinalg.py
def test_wrong_coefficient_matrix(self):
        x = tensor.vector()
        y = tensor.vector()
        z = tensor.scalar()
        b = theano.tensor.nlinalg.lstsq()(x, y, z)
        f = function([x, y, z], b)
        self.assertRaises(numpy.linalg.linalg.LinAlgError, f, [2, 1], [2, 1], 1) 
Example 38
Project: Theano-Deep-learning   Author: GeekLiB   File: test_nlinalg.py
def test_wrong_rcond_dimension(self):
        x = tensor.vector()
        y = tensor.vector()
        z = tensor.vector()
        b = theano.tensor.nlinalg.lstsq()(x, y, z)
        f = function([x, y, z], b)
        self.assertRaises(numpy.linalg.LinAlgError, f, [2, 1], [2, 1], [2, 1]) 
Example 39
Project: Theano-Deep-learning   Author: GeekLiB   File: test_nlinalg.py
def test_numpy_compare(self):
        rng = numpy.random.RandomState(utt.fetch_seed())
        A = tensor.matrix("A", dtype=theano.config.floatX)
        Q = matrix_power(A, 3)
        fn = function([A], [Q])
        a = rng.rand(4, 4).astype(theano.config.floatX)

        n_p = numpy.linalg.matrix_power(a, 3)
        t_p = fn(a)
        assert numpy.allclose(n_p, t_p) 
Example 40
Project: Theano-Deep-learning   Author: GeekLiB   File: test_slinalg.py
def test_eigvalsh_grad():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the geigvalsh op.")
    import scipy.linalg

    rng = numpy.random.RandomState(utt.fetch_seed())
    a = rng.randn(5, 5)
    a = a + a.T
    b = 10 * numpy.eye(5, 5) + rng.randn(5, 5)
    tensor.verify_grad(lambda a, b: eigvalsh(a, b).dot([1, 2, 3, 4, 5]),
                       [a, b], rng=numpy.random) 
Example 41
Project: Theano-Deep-learning   Author: GeekLiB   File: test_slinalg.py
def test_solve_correctness(self):
        if not imported_scipy:
            raise SkipTest("Scipy needed for the Cholesky and Solve ops.")
        rng = numpy.random.RandomState(utt.fetch_seed())
        A = theano.tensor.matrix()
        b = theano.tensor.matrix()
        y = self.op(A, b)
        gen_solve_func = theano.function([A, b], y)

        cholesky_lower = Cholesky(lower=True)
        L = cholesky_lower(A)
        y_lower = self.op(L, b)
        lower_solve_func = theano.function([L, b], y_lower)

        cholesky_upper = Cholesky(lower=False)
        U = cholesky_upper(A)
        y_upper = self.op(U, b)
        upper_solve_func = theano.function([U, b], y_upper)

        b_val = numpy.asarray(rng.rand(5, 1), dtype=config.floatX)

        # 1-test general case
        A_val = numpy.asarray(rng.rand(5, 5), dtype=config.floatX)
        # positive definite matrix:
        A_val = numpy.dot(A_val.transpose(), A_val)
        assert numpy.allclose(scipy.linalg.solve(A_val, b_val),
                              gen_solve_func(A_val, b_val))

        # 2-test lower triangular case
        L_val = scipy.linalg.cholesky(A_val, lower=True)
        assert numpy.allclose(scipy.linalg.solve_triangular(L_val, b_val, lower=True),
                              lower_solve_func(L_val, b_val))

        # 3-test upper triangular case
        U_val = scipy.linalg.cholesky(A_val, lower=False)
        assert numpy.allclose(scipy.linalg.solve_triangular(U_val, b_val, lower=False),
                              upper_solve_func(U_val, b_val)) 
Example 42
Project: Theano-Deep-learning   Author: GeekLiB   File: test_slinalg.py
def test_expm():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    A = rng.randn(5, 5).astype(config.floatX)

    ref = scipy.linalg.expm(A)

    x = tensor.matrix()
    m = expm(x)
    expm_f = function([x], m)

    val = expm_f(A)
    numpy.testing.assert_array_almost_equal(val, ref) 
Example 43
Project: neurotools   Author: michaelerule   File: matrix.py
def rcond(x):
    '''
    Reciprocal condition number
    '''
    return 1./np.linalg.cond(x) 
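For reference, np.linalg.cond defaults to the 2-norm condition number (ratio of largest to smallest singular value); a tiny illustrative check of rcond above, on a hypothetical matrix:

import numpy as np

A = np.diag([1.0, 1e-6])
print(np.linalg.cond(A))    # 1e6 (sigma_max / sigma_min)
print(rcond(A))             # 1e-6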
Example 44
Project: neurotools   Author: michaelerule   File: matrix.py
def check_covmat_fast(C,N=None,eps=1e-6):
    '''
    Verify that matrix C is an NxN precision or covariance matrix
    '''
    if not type(C)==np.ndarray:
        raise ValueError("Covariance matrix should be a 2D numpy array")
    if not len(C.shape)==2:
        raise ValueError("Covariance matrix should be a 2D numpy array")
    if N is None: 
        N = C.shape[0]
    if not C.shape==(N,N):
        raise ValueError("Expected size %d x %d matrix"%(N,N))
    if np.any(~np.isreal(C)):
        raise ValueError("Covariance matrices should not contain complex numbers")
    C = np.real(C)
    if np.any(~np.isfinite(C)):
        raise ValueError("Covariance matrix contains NaN or ┬▒inf!")
    if not np.all(np.abs(C-C.T)<eps):
        raise ValueError("Covariance matrix is not symmetric up to precision %0.1e"%eps)
    try:
        ch = chol(C)
    except numpy.linalg.linalg.LinAlgError:
        # Check smallest eigenvalue if cholesky fails
        mine = np.real(scipy.linalg.decomp.eigh(C,eigvals=(0,0))[0][0])
        if np.any(mine<-eps):
            raise ValueError('Covariance matrix contains eigenvalue %0.3e<%0.3e'%(mine,-eps)) 
        if (mine<eps):
            C = C + np.eye(N)*(eps-mine)
    C = 0.5*(C+C.T)
    return C 
Example 45
Project: neurotools   Author: michaelerule   File: matrix.py
def rsolve(a,b):
    '''
    Wraps scipy.linalg.solve for the right-hand-side problem:
    solves the system x A = B for x.
    '''
    return scipy.linalg.solve(b.T,a.T).T 
Example 46
Project: rllab   Author: rll   File: cma_es_lib.py
def expms(A, eig=np.linalg.eigh):
            """matrix exponential for a symmetric matrix"""
            # TODO: check that this works reliably for low rank matrices
            # first: symmetrize A
            D, B = eig(A)
            return np.dot(B, (np.exp(D) * B).T) 
Example 47
Project: rllab   Author: rll   File: cma_es_lib.py
def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
        """return likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)"""
        # testing: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformly sampled)
        # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
        if m is None:
            dx = x
        else:
            dx = x - m  # array(x) - array(m)
        n = len(x)
        s2pi = (2 * np.pi)**(n / 2.)
        if Cinv is None:
            return exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n
        if detC is None:
            detC = 1. / np.linalg.linalg.det(Cinv)
        return  exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2) / s2pi / abs(detC)**0.5 / sigma**n 
Example 48
Project: MachineLearningAlgorithm   Author: lining0806   File: LinearRegressionTest.py
def lwlr(testPoint, xMat, yMat, k=1.0):
    m = np.shape(xMat)[0]
    weights = np.matrix(np.eye(m))  # diagonal matrix of per-sample weights
    for j in range(m):
        diffMat = testPoint-xMat[j, :]
        weights[j, j] = np.exp(diffMat*diffMat.T/(-2.0*k**2))  # Gaussian kernel weight
    print(weights)
    xTx = xMat.T*(weights*xMat)
    if np.linalg.det(xTx) == 0.0:
        print('This matrix is singular, cannot do inverse')
        return
    ws = xTx.I*(xMat.T*(weights*yMat))
    return testPoint*ws 
Example 49
Project: MachineLearningAlgorithm   Author: lining0806   File: LinearRegressionTest.py
def ridgeRegres(xMat, yMat, lam=0.2):
    xTx = xMat.T*xMat
    denom = xTx+np.eye(np.shape(xMat)[1])*lam
    if np.linalg.det(denom) == 0.0:
        print('This matrix is singular, cannot do inverse')
        return
    ws = denom.I*(xMat.T*yMat)
    return ws 
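A small usage sketch for ridgeRegres above with hypothetical synthetic data (numpy matrices, as the function expects); lwlr from the previous example is called analogously, one test point at a time:

import numpy as np

rng = np.random.RandomState(0)
X = np.matrix(rng.rand(50, 2))
y = X * np.matrix([[2.0], [-1.0]]) + 0.01 * np.matrix(rng.randn(50, 1))

ws = ridgeRegres(X, y, lam=0.1)
print(ws.T)    # close to [2, -1]; the penalty lam shrinks the estimate slightly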
Example 50
Project: maml_rl   Author: cbfinn   File: cma_es_lib.py
def expms(A, eig=np.linalg.eigh):
            """matrix exponential for a symmetric matrix"""
            # TODO: check that this works reliably for low rank matrices
            # first: symmetrize A
            D, B = eig(A)
            return np.dot(B, (np.exp(D) * B).T)