Python scipy.dot() Examples

The following are 30 code examples of scipy.dot(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy, or try the search function.
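Before the project examples, here is a minimal standalone sketch of what scipy.dot() computes. Note that scipy.dot is simply an alias for numpy.dot; the alias has been deprecated and removed in recent SciPy releases, so numpy.dot is the drop-in replacement in new code.

import numpy as np

a = np.array([[1.0, 2.0],
              [3.0, 4.0]])
b = np.array([[5.0, 6.0],
              [7.0, 8.0]])

print(np.dot(a, b))        # 2-D inputs: matrix product
print(np.dot(a[0], b[0]))  # 1-D inputs: inner product (17.0)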
Example #1
Source File: lobpcg.py    From Computable with MIT License
def b_orthonormalize(B, blockVectorV,
                      blockVectorBV=None, retInvR=False):
    """Internal."""
    import scipy.linalg as sla
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = sp.dot(blockVectorV.T, blockVectorBV)
    gramVBV = sla.cholesky(gramVBV)
    gramVBV = sla.inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = sp.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = sp.dot(blockVectorBV, gramVBV)

    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV 
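For reference, a standalone sketch of the same B-orthonormalization step, using numpy and a hypothetical symmetric positive-definite matrix in place of the B operator: multiplying V by the inverse Cholesky factor of V^T B V makes the columns of V B-orthonormal.

import numpy as np
from scipy.linalg import cholesky, inv

rng = np.random.default_rng(0)
n, m = 20, 3
V = rng.standard_normal((n, m))
A = rng.standard_normal((n, n))
B = A @ A.T + n * np.eye(n)                   # hypothetical SPD stand-in for the B operator

gram = V.T @ (B @ V)                          # V^T B V
R = cholesky(gram)                            # upper-triangular factor, gram = R^T R
V = V @ inv(R)                                # V <- V R^{-1}
print(np.allclose(V.T @ (B @ V), np.eye(m)))  # True: columns are now B-orthonormal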
Example #2
Source File: recipe-576547.py    From code with MIT License
def coupling_optim(y, t):
	creation = s.zeros(n_bin)
	destruction = s.zeros(n_bin)
	# now I try to rewrite this in a more optimized way:
	# a much more concise way to express the destruction of k-mers
	destruction = -s.dot(s.transpose(kernel), y) * y
	kyn = kernel * y[:, s.newaxis] * y[s.newaxis, :]
	for k in range(n_bin):
		creation[k] = s.sum(kyn[s.arange(k), k - s.arange(k) - 1])
	creation = 0.5 * creation
	out = creation + destruction
	return out


# Now I compute the chi_{i,j,k} coefficients used by Garrick for dealing
# with a non-uniform grid.
Example #3
Source File: gp.py    From GPPVAE with Apache License 2.0
def generate_data(N, S, L):

        # generate genetics
        G = 1.0 * (sp.rand(N, S) < 0.2)
        G -= G.mean(0)
        G /= G.std(0) * sp.sqrt(G.shape[1])

        # generate latent phenotypes
        Zg = sp.dot(G, sp.randn(G.shape[1], L))
        Zn = sp.randn(N, L)

        # generate variance explained
        vg = sp.linspace(0.8, 0, L)

        # rescale and sum
        Zg *= sp.sqrt(vg / Zg.var(0))
        Zn *= sp.sqrt((1 - vg) / Zn.var(0))
        Z = Zg + Zn

        return Z, G 
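A hypothetical call, just to show the shapes involved (this assumes the source module's `import scipy as sp`): N samples, S variants, and L latent dimensions.

Z, G = generate_data(N=500, S=1000, L=5)
print(G.shape)  # (500, 1000): standardized genotype matrix
print(Z.shape)  # (500, 5): latent phenotypes mixing genetic and noise components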
Example #4
Source File: topology.py    From PyBioMed with BSD 3-Clause "New" or "Revised" License
def CalculateSchiultz(mol):
    """
    #################################################################
    Calculation of Schultz number

    ---->Tsch(log value)

    Usage:

        result=CalculateSchiultz(mol)

        Input: mol is a molecule object

        Output: result is a numeric value
    #################################################################
    """
    Distance = numpy.array(Chem.GetDistanceMatrix(mol), "d")
    Adjacent = numpy.array(Chem.GetAdjacencyMatrix(mol), "d")
    VertexDegree = sum(Adjacent)

    return sum(scipy.dot((Distance + Adjacent), VertexDegree)) 
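A minimal usage sketch, assuming RDKit plus the numpy/scipy module-level imports of the source file (ethanol is just a hypothetical test molecule):

from rdkit import Chem

mol = Chem.MolFromSmiles("CCO")  # ethanol
print(CalculateSchiultz(mol))    # Schultz index computed from the distance and adjacency matrices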
Example #5
Source File: rotator.py    From factor_analyzer with GNU General Public License v2.0
def _quartimax_obj(self, loadings):
        """
        Quartimax function objective.

        Parameters
        ----------
        loadings : array-like
            The loading matrix

        Returns
        -------
        gradient_dict : dict
            A dictionary with
            - grad : np.array
                The gradient.
            - criterion : float
                The value of the criterion for the objective.
        """
        gradient = -loadings**3
        criterion = -np.sum(np.diag(np.dot((loadings**2).T, loadings**2))) / 4
        return {'grad': gradient, 'criterion': criterion} 
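As a quick standalone check (not part of the project code), the quartimax criterion above reduces to minus one quarter of the sum of fourth powers of the loadings, since the diagonal of (L**2).T @ L**2 collects the column sums of L**4:

import numpy as np

rng = np.random.default_rng(1)
L = rng.standard_normal((6, 3))                    # a stand-in loading matrix
criterion = -np.sum(np.diag(np.dot((L**2).T, L**2))) / 4
print(np.isclose(criterion, -np.sum(L**4) / 4))    # True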
Example #6
Source File: rotator.py    From factor_analyzer with GNU General Public License v2.0
def _oblimin_obj(self, loadings):
        """
        The Oblimin function objective.

        Parameters
        ----------
        loadings : array-like
            The loading matrix

        Returns
        -------
        gradient_dict : dict
            A dictionary with
            - grad : np.array
                The gradient.
            - criterion : float
                The value of the criterion for the objective.
        """
        X = np.dot(loadings**2, np.eye(loadings.shape[1]) != 1)
        if self.gamma != 0:
            p = loadings.shape[0]
            # gamma correction of the oblimin criterion: X <- (I - (gamma / p) * J) X,
            # where J is the p x p matrix of ones
            X = np.dot(np.eye(p) - np.full((p, p), self.gamma / p), X)
        gradient = loadings * X
        criterion = np.sum(loadings**2 * X) / 4
        return {'grad': gradient, 'criterion': criterion} 
Example #7
Source File: rotator.py    From factor_analyzer with GNU General Public License v2.0
def _quartimin_obj(self, loadings):
        """
        Quartimin function objective.

        Parameters
        ----------
        loadings : array-like
            The loading matrix

        Returns
        -------
        gradient_dict : dict
            A dictionary with
            - grad : np.array
                The gradient.
            - criterion : float
                The value of the criterion for the objective.
        """
        X = np.dot(loadings**2, np.eye(loadings.shape[1]) != 1)
        gradient = loadings * X
        criterion = np.sum(loadings**2 * X) / 4
        return {'grad': gradient, 'criterion': criterion} 
Example #8
Source File: recipe-576547.py    From code with MIT License
def coupling_optim_garrick(y, t):
	creation = s.zeros(n_bin)
	destruction = s.zeros(n_bin)
	# now I try to rewrite this in a more optimized way:
	# a much more concise way to express the destruction of k-mers
	destruction = -s.dot(s.transpose(kernel), y) * y

	for k in range(n_bin):
		kyn = (kernel * f_garrick[:, :, k]) * y[:, s.newaxis] * y[s.newaxis, :]
		creation[k] = s.sum(kyn)
	creation = 0.5 * creation
	out = creation + destruction
	return out



# Now I work with the function for expressing the Smoluchowski equation when a uniform grid is used
Example #9
Source File: gmres.py    From pygbe with BSD 3-Clause "New" or "Revised" License
def apply_givens(Q, v, k):
    """
    Apply the first k Givens rotations in Q to the vector v.

    Arguments
    ---------
        Q: list, list of consecutive 2x2 Givens rotations
        v: array, vector to apply the rotations to
        k: int, number of rotations to apply

    Returns
    -------
        None. The vector v is modified in place.

    """

    for j in range(k):
        Qloc = Q[j]
        v[j:j+2] = scipy.dot(Qloc, v[j:j+2]) 
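A short standalone sketch (hypothetical values, numpy.dot in place of the deprecated scipy.dot) of the 2x2 rotations this helper applies: a Givens rotation built from a pair (a, b) zeroes the second entry of that pair.

import numpy as np

a, b = 3.0, 4.0
r = np.hypot(a, b)
c, s = a / r, b / r
Qloc = np.array([[c, s],
                 [-s, c]])  # one 2x2 Givens rotation, as stored in Q
v = np.array([a, b])
print(np.dot(Qloc, v))      # [5. 0.]: the second entry is annihilated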
Example #10
Source File: coregistration_utilities.py    From eo-learn with MIT License
def score(self, idx, warp_matrix):
        """ Estimate the registration error of estimated transformation matrix

        :param idx: List of points used to estimate the transformation
        :param warp_matrix: Matrix estimating the Euler transformation
        :return: Square root of Target Registration Error
        """
        # Transform source points with estimated transformation
        trg_fit = scipy.dot(warp_matrix, np.concatenate((self.src_pts[idx, :], np.ones((len(idx), 1))), axis=1).T).T
        # Compute error in transformation
        err_per_point = np.sqrt(np.sum((self.trg_pts[idx, :] - trg_fit[:, :2])**2, axis=1))  # Euclidean error per point
        return err_per_point 
Example #11
Source File: rotator.py    From factor_analyzer with GNU General Public License v2.0
def _equamax_obj(self, loadings):
        """
        Equamax function objective.

        Parameters
        ----------
        loadings : array-like
            The loading matrix

        Returns
        -------
        gradient_dict : dict
            A dictionary with
            - grad : np.array
                The gradient.
            - criterion : float
                The value of the criterion for the objective.
        """
        p, k = loadings.shape

        N = np.ones(k) - np.eye(k)
        M = np.ones(p) - np.eye(p)

        loadings_squared = loadings**2
        f1 = (1 - self.kappa) * np.sum(np.diag(np.dot(loadings_squared.T,
                                                      np.dot(loadings_squared, N)))) / 4
        f2 = self.kappa * np.sum(np.diag(np.dot(loadings_squared.T,
                                                np.dot(M, loadings_squared)))) / 4

        gradient = ((1 - self.kappa) * loadings * np.dot(loadings_squared, N) +
                    self.kappa * loadings * np.dot(M, loadings_squared))

        criterion = f1 + f2
        return {'grad': gradient, 'criterion': criterion} 
Example #12
Source File: ld.py    From ldpred with MIT License
def _calculate_D(snps, snp_i, snp, start_i, stop_i, ld_dict, ld_scores):
    m, n = snps.shape
    X = snps[start_i: stop_i]
    D_i = sp.dot(snp, X.T) / n
    D_i_shrunk = shrink_r2_mat(D_i,n)
    r2s = D_i ** 2
    ld_dict[snp_i] = D_i_shrunk
    lds_i = sp.sum(r2s - (1 - r2s) / (n - 2), dtype='float32')
    ld_scores[snp_i] = lds_i 
Example #13
Source File: compute.py    From Faces with GNU General Public License v3.0
def compute(image_paths, image_path, n_eigenfaces=NUM_IMAGE,
            width=IMAGE_WIDTH, height=IMAGE_HEIGHT):
    input_path = image_path
    image_matrix = np.vstack([_reshape(image, height, width)
                              for image in _imread(image_paths)])
                              
    print(image_matrix.size)

    img1 = np.vstack([_reshape(image, height, width)
                      for image in _imread(input_path)])

    pca = PCA(n_components=n_eigenfaces).fit(image_matrix)
    eigenvectors = pca.components_.reshape((n_eigenfaces, height, width))
    eigenvalues = pca.explained_variance_ratio_
    data_mean = pca.mean_.reshape((height, width))

    po = pca.transform(img1)
       
    eigenvectors = image_matrix.reshape(2, 100, 100)
        
    ccount = 1    
    recons1 = po[0][0] * eigenvectors[0]    
    while ccount < n_eigenfaces:
        recons1 += po[0][ccount] * eigenvectors[ccount]
        ccount += 1
      
    rimage = np.reshape(img1[0], (100, 100))
    diff1 = rimage - recons1
    det1 = np.linalg.det(diff1)
    a = rimage
    b = recons1
    c = dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b)
    n_m, n_0 = compare_images(a, b)
    n_m = np.mean(n_m)
    n_0 = np.mean(n_0)
    return n_0 * 1.0 / a.size 
Example #14
Source File: MR.py    From mr_saliency with GNU General Public License v2.0
def __MR_saliency(self,aff,indictor):
        return sp.dot(aff,indictor) 
Example #15
Source File: lobpcg.py    From Computable with MIT License
def applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
    """Internal. Changes blockVectorV in place."""
    gramYBV = sp.dot(blockVectorBY.T, blockVectorV)
    import scipy.linalg as sla
    tmp = sla.cho_solve(factYBY, gramYBV)
    blockVectorV -= sp.dot(blockVectorY, tmp) 
Example #16
Source File: train_gppvae.py    From GPPVAE with Apache License 2.0
def eval_step(vae, gp, vm, val_queue, Zm, Vt, Vv):

    rv = {}

    with torch.no_grad():

        _X = vm.x().data.cpu().numpy()
        _W = vm.v().data.cpu().numpy()
        covs = {"XX": sp.dot(_X, _X.T), "WW": sp.dot(_W, _W.T)}
        rv["vars"] = gp.get_vs().data.cpu().numpy()
        # out of sample
        vs = gp.get_vs()
        U, UBi, _ = gp.U_UBi_Shb([Vt], vs)
        Kiz = gp.solve(Zm, U, UBi, vs)
        Zo = vs[0] * Vv.mm(Vt.transpose(0, 1).mm(Kiz))
        mse_out = Variable(torch.zeros(Vv.shape[0], 1), requires_grad=False).cuda()
        mse_val = Variable(torch.zeros(Vv.shape[0], 1), requires_grad=False).cuda()
        for batch_i, data in enumerate(val_queue):
            idxs = data[-1].cuda()
            Yv = data[0].cuda()
            Zv = vae.encode(Yv)[0].detach()
            Yr = vae.decode(Zv)
            Yo = vae.decode(Zo[idxs])
            mse_out[idxs] = (
                ((Yv - Yo) ** 2).view(Yv.shape[0], -1).mean(1)[:, None].detach()
            )
            mse_val[idxs] = (
                ((Yv - Yr) ** 2).view(Yv.shape[0], -1).mean(1)[:, None].detach()
            )
            # store a few examples
            if batch_i == 0:
                imgs = {}
                imgs["Yv"] = Yv[:24].data.cpu().numpy().transpose(0, 2, 3, 1)
                imgs["Yr"] = Yr[:24].data.cpu().numpy().transpose(0, 2, 3, 1)
                imgs["Yo"] = Yo[:24].data.cpu().numpy().transpose(0, 2, 3, 1)
        rv["mse_out"] = float(mse_out.data.mean().cpu())
        rv["mse_val"] = float(mse_val.data.mean().cpu())

    return rv, imgs, covs 
Example #17
Source File: polynomial.py    From mirage with BSD 3-Clause "New" or "Revised" License
def polyfit(u, x, y, order):
    '''Fit polynomial to a set of u values on an x, y grid
    u is a function u(x, y) being a polynomial of the form
    u = a[i, j] x**(i-j) y**j. x and y can be on a grid or be arbitrary values'''

    # First set up x and y powers for each coefficient
    px = []
    py = []
    for i in range(order+1):
        for j in range(i+1):
            px.append(i-j)
            py.append(j)
    terms = len(px)
    #print terms, ' terms for order ', order
    #print px
    #print py

    # Make up matrix and vector
    vector = scipy.zeros((terms))
    mat = scipy.zeros((terms, terms))
    for i in range(terms):
        vector[i] = (u*x**px[i]*y**py[i]).sum()
        for j in range(terms):
            mat[i, j] =  (x**px[i]*y**py[i]*x**px[j]*y**py[j]).sum()

    #print 'Vector', vector
    #print 'Matrix'
    #print mat
    imat = linalg.inv(mat)
    #print 'Inverse'
    #print imat
    # Check that inversion worked
    #print scipy.dot(mat, imat)
    coeffs = scipy.dot(imat, vector)
    return coeffs 
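A quick check of the least-squares setup, as a hypothetical standalone usage (assuming the source module's scipy/linalg imports and a SciPy version that still provides the scipy.zeros and scipy.dot aliases): fitting u = 1 + 2x + 3y on a small grid should recover the coefficients in the order constant, x, y.

import numpy as np

x, y = np.meshgrid(np.arange(5.0), np.arange(4.0))
u = 1.0 + 2.0 * x + 3.0 * y
coeffs = polyfit(u, x, y, order=1)
print(coeffs)   # approximately [1. 2. 3.]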
Example #18
Source File: polynomial.py    From mirage with BSD 3-Clause "New" or "Revised" License
def invert(a, b, u, v, n, verbose=False):
    '''Given that order n polynomials of (x, y) have the result (u, v), find (x, y)
    Newton Raphson method in two dimensions'''
    tol = 1.0e-10
    err = 1.0
    #Initial guesses - Linear approximation
    det = a[1]*b[2] - a[2]*b[1]
    x0 = (b[2]*u-a[2]*v)/det
    y0 = (-b[1]*u + a[1]*v)/det
    if verbose: print('Initial guesses', x0, y0)
    x = x0
    y = y0
    X = scipy.array([x, y])
    iter = 0
    while err > tol:
        f1 = scipy.array([poly(a, x, y, n)-u, poly(b, x, y, n)-v])
        j = scipy.array([[dpdx(a, x, y, n), dpdy(a, x, y, n)], [dpdx(b, x, y, n), dpdy(b, x, y, n)]])
        invj = scipy.linalg.inv(j)
        X = X - scipy.dot(invj, f1)
        if verbose: print('[X1, Y1]', X)
        x1 = X[0]
        y1 = X[1]
        err = hypot(x-x1, y-y1)
        if verbose: print('Error %10.2e' % err)
        [x, y] = [x1, y1]
        iter += 1

    return  (x, y, err, iter) 
Example #19
Source File: c9_19_treynor_ratio.py    From Python-for-Finance-Second-Edition with MIT License
def treynor(R,w):
    betaP=portfolioBeta(betaGiven,w)
    mean_return=sp.mean(R,axis=0)
    ret = sp.array(mean_return)
    return (sp.dot(w,ret) - rf)/betaP
#
# function 4: for given n-1 weights, return a negative sharpe ratio 
Example #20
Source File: c9_23_efficient_based_on_sortino_ratio.py    From Python-for-Finance-Second-Edition with MIT License
def sortino(R,w):
    mean_return=sp.mean(R,axis=0)
    ret = sp.array(mean_return)
    LPSD=LPSD_f(R,rf)
    return (sp.dot(w,ret) - rf)/LPSD

# function 4: for given n-1 weights, return a negative sharpe ratio 
Example #21
Source File: c9_21_optimal_portfolio_based_on_Sortino_ratio.py    From Python-for-Finance-Second-Edition with MIT License
def treynor(R,w):
    betaP=portfolioBeta(betaGiven,w)
    mean_return=sp.mean(R,axis=0)
    ret = sp.array(mean_return)
    return (sp.dot(w,ret) - rf)/betaP
# function 4: for given n-1 weights, return a negative sharpe ratio 
Example #22
Source File: c9_21_optimal_portfolio_based_on_Sortino_ratio.py    From Python-for-Finance-Second-Edition with MIT License
def portfolioBeta(betaGiven,w):
    #print("betaGiven=",betaGiven,"w=",w)
    return sp.dot(betaGiven,w)
# function 3: estimate Treynor 
Example #23
Source File: c9_18_sharpe_ratio.py    From Python-for-Finance-Second-Edition with MIT License
def sharpe(R,w):
    var = portfolio_var(R,w)
    mean_return=sp.mean(R,axis=0)
    ret = sp.array(mean_return)
    return (sp.dot(w,ret) - rf)/sp.sqrt(var)
# function 4: for given n-1 weights, return a negative sharpe ratio 
Example #24
Source File: c9_44_impact_of_correlation_2stock_portfolio.py    From Python-for-Finance-Second-Edition with MIT License
def portfolioRet(R,w):
    mean_return=sp.mean(R,axis=0)
    ret = sp.array(mean_return)
    return sp.dot(w,ret) 
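These portfolio helpers all reduce to the same weighted-average pattern; a minimal standalone sketch with hypothetical numbers (numpy.dot in place of the deprecated sp.dot):

import numpy as np

w = np.array([0.6, 0.4])              # hypothetical portfolio weights
mean_return = np.array([0.10, 0.05])  # hypothetical mean returns of two assets
print(np.dot(w, mean_return))         # ~0.08: the expected portfolio return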
Example #25
Source File: recipe-576547.py    From code with MIT License
def total_mass_conservation(number_mat, vol_grid,box_volume):
	ini_mass=s.dot(number_mat[0,:],vol_grid)*box_volume
	fin_mass=s.dot(number_mat[-1,:],vol_grid)*box_volume
	mass_conservation=(ini_mass-fin_mass)/ini_mass

	results=s.array([ini_mass,fin_mass,mass_conservation])
	return results 
Example #26
Source File: numpy_utils.py    From ARNs with GNU General Public License v3.0
def cosine_similarity(a,b):
    a = mat(a)
    b = mat(b)

    c = dot(a,b.T)/linalg.norm(a)/linalg.norm(b)
    return c[0,0] 
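A quick standalone check of the same cosine computation with plain numpy and hypothetical inputs:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 4.0, 6.0])
cos = np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b)
print(round(cos, 6))   # 1.0: parallel vectors have cosine similarity 1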
Example #27
Source File: auc_regressor.py    From xam with MIT License
def score(self, X, y):
        fpr, tpr, _ = metrics.roc_curve(y, sp.dot(X, self.coef_))
        return metrics.auc(fpr, tpr) 
Example #28
Source File: auc_regressor.py    From xam with MIT License
def predict(self, X):
        return sp.dot(X, self.coef_) 
Example #29
Source File: auc_regressor.py    From xam with MIT License
def _auc_loss(self, coef, X, y):
        fpr, tpr, _ = metrics.roc_curve(y, sp.dot(X, coef))
        return -metrics.auc(fpr, tpr) 
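A standalone sketch of the linear-score pattern these three methods share, with hypothetical data and sklearn.metrics (which the snippets above rely on):

import numpy as np
from sklearn import metrics

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
coef = np.array([1.0])

fpr, tpr, _ = metrics.roc_curve(y, np.dot(X, coef))
print(metrics.auc(fpr, tpr))   # 1.0: the linear score separates the two classes perfectly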
Example #30
Source File: sound_waves.py    From qmpy with MIT License
def dir_cosines(dir,coords=sp.identity(3)):
    """Returns a vector containing the direction cosines between vector dir, and
    the coordinate system coords. Default coordinate system is an orthonormal
    cartesian coordinate system."""
    cosines = sp.dot(coords,dir)/linalg.norm(dir)
    return cosines
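A standalone sketch of the same computation with numpy and a hypothetical input vector: the direction cosines of a vector lying along the x-y diagonal.

import numpy as np

direction = np.array([1.0, 1.0, 0.0])
coords = np.identity(3)                                        # orthonormal Cartesian axes
cosines = np.dot(coords, direction) / np.linalg.norm(direction)
print(cosines)   # [0.70710678 0.70710678 0.        ]: 45 degrees to x and y, 90 degrees to z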