Python autograd.numpy.eye() Examples

The following are 27 code examples of autograd.numpy.eye(), collected from open-source projects. The header above each example names the original project, its license, and the source file the snippet comes from.
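All of the snippets assume the conventional autograd imports, typically import autograd.numpy as np and import autograd.numpy.random as npr. As a minimal sketch of the common theme below: np.eye() builds a constant identity matrix, and autograd differentiates straight through expressions built on top of it.

import autograd.numpy as np
from autograd import grad

def f(x):
    A = x * np.eye(3)   # scale the 3x3 identity by a scalar
    return np.trace(A)  # trace(x * I_3) = 3 * x

print(grad(f)(2.0))     # prints 3.0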
Example #1
Source File: lgss_example.py    From variational-smc with MIT License
def init_model_params(Dx, Dy, alpha, r, obs, rs = npr.RandomState(0)):
    mu0 = np.zeros(Dx)
    Sigma0 = np.eye(Dx)
    
    A = np.zeros((Dx,Dx))
    for i in range(Dx):
        for j in range(Dx):
            A[i,j] = alpha**(abs(i-j)+1)
            
    Q = np.eye(Dx)
    C = np.zeros((Dy,Dx))
    if obs == 'sparse':
        C[:Dy,:Dy] = np.eye(Dy)
    else:
        C = rs.normal(size=(Dy,Dx))
    R = r * np.eye(Dy)
    
    return (mu0, Sigma0, A, Q, C, R) 
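A hypothetical call, with all argument values chosen purely for illustration:

mu0, Sigma0, A, Q, C, R = init_model_params(Dx=5, Dy=3, alpha=0.42, r=0.1, obs='sparse')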
Example #2
Source File: data.py    From kernel-gof with MIT License
def gmm_sample(self, mean=None, w=None, N=10000, n=10, d=2, seed=10):
        np.random.seed(seed)
        self.d = d
        if mean is None:
            mean = np.random.randn(n,d)*10
        if w is None:
            w = np.random.rand(n)
        w = old_div(w, sum(w))  # old_div comes from the past.utils Python 2/3 compatibility shim
        multi = np.random.multinomial(N,w)
        X = np.zeros((N,d))
        base = 0
        for i in range(n):
            X[base:base+multi[i],:] = np.random.multivariate_normal(mean[i,:], np.eye(self.d), multi[i])
            base += multi[i]
        
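        # evaluate the mixture density at every sample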
        llh = np.zeros(N)
        for i in range(n):
            llh += w[i] * stats.multivariate_normal.pdf(X, mean[i,:], np.eye(self.d))
        #llh = llh/sum(llh)
        return X, llh 
Example #3
Source File: util.py    From kernel-gof with MIT License
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points 
    from the fit. 
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct 
        a new covariance matrix before drawing samples. Useful to shrink the spread 
        of the variance.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V 
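A usage sketch with illustrative shapes (NumpySeedContext comes from the same util module):

X = np.random.randn(100, 2)                 # 100 points in 2-D
V = fit_gaussian_draw(X, J=5, eig_pow=0.5)  # shape (5, 2): 5 draws with shrunk spread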
Example #4
Source File: methods.py    From tf-quant-finance with Apache License 2.0
def central(f0, ds, w):
  """Apply central difference method to estimate derivatives."""
  f = lambda o: shift(f0, o)
  eye = np.eye(f0.ndim, dtype=int)
  offsets = [-eye[d] for d in ds]

  if not ds:
    return f0
  elif len(ds) == 1:  # First order derivatives.
    i = offsets[0]
    return (f(i) - f(-i)) / (2 * w)
  elif len(ds) == 2:  # Second order derivatives.
    i, j = offsets
    w2 = np.square(w)
    if ds[0] == ds[1]:  # d^2/dxdx
      return (f(i) - 2 * f0 + f(-i)) / w2
    else:  # d^2/dxdy
      return (f(i + j) - f(i - j) - f(j - i) + f(-i - j)) / (4 * w2)
  else:
    raise NotImplementedError(ds) 
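For intuition, the first-order branch reduces to the familiar stencil (f(x + w) - f(x - w)) / (2 * w). A standalone numerical check of that stencil, using plain slicing instead of the module's shift helper:

import numpy as np

x = np.linspace(0.0, 1.0, 101)
w = x[1] - x[0]
f0 = x ** 3
df = (f0[2:] - f0[:-2]) / (2 * w)             # central difference on interior points
print(np.max(np.abs(df - 3 * x[1:-1] ** 2)))  # truncation error is O(w^2)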
Example #5
Source File: test_matrices.py    From mici with MIT License
def __init__(self, generate_scalar, matrix_class):
        rng = np.random.RandomState(SEED)
        matrix_pairs = {}
        for sz in SIZES:
            scalar = generate_scalar(rng)
            matrix_pairs[sz] = (
                matrix_class(scalar, sz), scalar * np.identity(sz))

        if AUTOGRAD_AVAILABLE:

            def param_func(param, matrix):
                return param * anp.eye(matrix.shape[0])

            def get_param(matrix):
                return matrix._scalar

        else:
            param_func, get_param = None, None

        super().__init__(
            matrix_pairs, get_param, param_func, rng) 
Example #6
Source File: test_linalg.py    From autograd with MIT License
def test_solve_arg1_1d():
    D = 8
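    # the 10*I shift pushes eigenvalues away from zero, keeping A well-conditioned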
    A = npr.randn(D, D) + 10.0 * np.eye(D)
    B = npr.randn(D)
    def fun(a): return np.linalg.solve(a, B)
    check_grads(fun)(A) 
Example #7
Source File: RecurrentNeuralNetworks.py    From DeepLearningTutorial with MIT License
def initialize_RNN(self):
        hyp = np.array([])
        Q = self.hidden_dim
            
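        # Glorot-uniform initialization: entries drawn from U(-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out)))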
        U = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b = np.zeros((1,Q))
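        # recurrent weights start as the identity (an IRNN-style initialization)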
        W = np.eye(Q)
        hyp = np.concatenate([hyp, U.ravel(), b.ravel(), W.ravel()])            
        
        V = -np.sqrt(6.0/(Q+self.Y_dim)) + 2.0*np.sqrt(6.0/(Q+self.Y_dim))*np.random.rand(Q,self.Y_dim)
        c = np.zeros((1,self.Y_dim))
        hyp = np.concatenate([hyp, V.ravel(), c.ravel()])
    
        return hyp 
Example #8
Source File: LongShortTermMemoryNetworks.py    From DeepLearningTutorial with MIT License
def initialize_LSTM(self):
        hyp = np.array([])
        Q = self.hidden_dim
        
        # Forget Gate
        U_f = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b_f = np.zeros((1,Q))
        W_f = np.eye(Q)
        hyp = np.concatenate([hyp, U_f.ravel(), b_f.ravel(), W_f.ravel()])
        
        # Input Gate
        U_i = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b_i = np.zeros((1,Q))
        W_i = np.eye(Q)
        hyp = np.concatenate([hyp, U_i.ravel(), b_i.ravel(), W_i.ravel()])

        # Update Cell State
        U_s = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b_s = np.zeros((1,Q))
        W_s = np.eye(Q)
        hyp = np.concatenate([hyp, U_s.ravel(), b_s.ravel(), W_s.ravel()])

        # Output Gate
        U_o = -np.sqrt(6.0/(self.X_dim+Q)) + 2.0*np.sqrt(6.0/(self.X_dim+Q))*np.random.rand(self.X_dim,Q)
        b_o = np.zeros((1,Q))
        W_o = np.eye(Q)
        hyp = np.concatenate([hyp, U_o.ravel(), b_o.ravel(), W_o.ravel()])

        V = -np.sqrt(6.0/(Q+self.Y_dim)) + 2.0*np.sqrt(6.0/(Q+self.Y_dim))*np.random.rand(Q,self.Y_dim)
        c = np.zeros((1,self.Y_dim))
        hyp = np.concatenate([hyp, V.ravel(), c.ravel()])
    
        return hyp 
Example #9
Source File: density.py    From kernel-gof with MIT License
def log_normalized_den(self, X):
        d = self.dim()
        return stats.multivariate_normal.logpdf(X, mean=self.mean, cov=self.variance*np.eye(d)) 
Example #10
Source File: run_synthetic_example.py    From ParetoMTL with MIT License
def get_d_moomtl(grads):
    """
    calculate the gradient direction for MOO-MTL 
    """
    
    nobj, dim = grads.shape
    
#    # use cvxopt to solve QP
#    P = np.dot(grads , grads.T)
#    
#    q = np.zeros(nobj)
#    
#    G =  - np.eye(nobj)
#    h = np.zeros(nobj)
#    
#    
#    A = np.ones(nobj).reshape(1,2)
#    b = np.ones(1)
#       
#    cvxopt.solvers.options['show_progress'] = False
#    sol = cvxopt_solve_qp(P, q, G, h, A, b)
    
    # use MinNormSolver to solve QP
    sol, nd = MinNormSolver.find_min_norm_element(grads)
    
    return sol 
Example #11
Source File: test_linalg.py    From autograd with MIT License
def test_eigvalh_upper_broadcasting():
    def fun(x):
        w, v = np.linalg.eigh(x, 'U')
        return tuple((w, v))
    D = 6
    mat = npr.randn(2, 3, D, D) + 10 * np.eye(D)[None,None,...]
    hmat = broadcast_dot_transpose(mat, mat)
    check_grads(fun)(hmat)

# For complex-valued matrices, the eigenvectors could have arbitrary phases (gauge)
# which makes it impossible to compare to numerical derivatives. So we take the 
# absolute value to get rid of that phase. 
Example #12
Source File: test_linalg.py    From autograd with MIT License
def test_slogdet_3d():
    fun = lambda x: np.sum(np.linalg.slogdet(x)[1])
    mat = np.concatenate([(rand_psd(5) + 5*np.eye(5))[None,...] for _ in range(3)])
    check_grads(fun)(mat) 
Example #13
Source File: test_linalg.py    From autograd with MIT License
def test_solve_arg1_3d_3d():
    D = 4
    A = npr.randn(D+1, D, D) + 5*np.eye(D)
    B = npr.randn(D+1, D, D+2)
    fun = lambda A: np.linalg.solve(A, B)
    check_grads(fun)(A) 
Example #14
Source File: test_linalg.py    From autograd with MIT License
def test_solve_arg1_3d():
    D = 4
    A = npr.randn(D+1, D, D) + 5*np.eye(D)
    B = npr.randn(D+1, D)
    fun = lambda A: np.linalg.solve(A, B)
    check_grads(fun)(A) 
Example #15
Source File: test_linalg.py    From autograd with MIT License
def test_solve_arg2():
    D = 6
    A = npr.randn(D, D) + 1.0 * np.eye(D)
    B = npr.randn(D, D - 1)
    def fun(b): return np.linalg.solve(A, b)
    check_grads(fun)(B) 
Example #16
Source File: test_linalg.py    From autograd with MIT License
def test_inv_3d():
    fun = lambda x: np.linalg.inv(x)

    D = 4
    mat = npr.randn(D, D, D) + 5*np.eye(D)
    check_grads(fun)(mat)

    mat = npr.randn(D, D, D, D) + 5*np.eye(D)
    check_grads(fun)(mat) 
Example #17
Source File: test_linalg.py    From autograd with MIT License
def test_inv():
    def fun(x): return np.linalg.inv(x)
    D = 8
    mat = npr.randn(D, D)
    mat = np.dot(mat, mat) + 1.0 * np.eye(D)
    check_grads(fun)(mat) 
Example #18
Source File: test_scipy.py    From autograd with MIT License
def make_psd(mat): return np.dot(mat.T, mat) + np.eye(mat.shape[0]) 
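Since mat.T @ mat is symmetric positive semi-definite, adding the identity bounds the eigenvalues below by 1, making the result strictly positive definite. A quick check, following the test file's np/npr conventions:

mat = npr.randn(4, 4)
evals = np.linalg.eigvalsh(make_psd(mat))
assert np.all(evals >= 1.0 - 1e-8)  # eig(mat.T mat) >= 0, plus 1 from the identity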
Example #19
Source File: gmm.py    From autograd with MIT License
def init_gmm_params(num_components, D, scale, rs=npr.RandomState(0)):
    return {'log proportions': rs.randn(num_components) * scale,
            'means':           rs.randn(num_components, D) * scale,
            'lower triangles': np.zeros((num_components, D, D)) + np.eye(D)} 
Example #20
Source File: gaussian_process.py    From autograd with MIT License
def make_gp_funs(cov_func, num_cov_params):
    """Functions that perform Gaussian process regression.
       cov_func has signature (cov_params, x, x')"""

    def unpack_kernel_params(params):
        mean        = params[0]
        cov_params  = params[2:]
        noise_scale = np.exp(params[1]) + 0.0001
        return mean, cov_params, noise_scale

    def predict(params, x, y, xstar):
        """Returns the predictive mean and covariance at locations xstar,
           of the latent function value f (without observation noise)."""
        mean, cov_params, noise_scale = unpack_kernel_params(params)
        cov_f_f = cov_func(cov_params, xstar, xstar)
        cov_y_f = cov_func(cov_params, x, xstar)
        cov_y_y = cov_func(cov_params, x, x) + noise_scale * np.eye(len(y))
        pred_mean = mean +   np.dot(solve(cov_y_y, cov_y_f).T, y - mean)
        pred_cov = cov_f_f - np.dot(solve(cov_y_y, cov_y_f).T, cov_y_f)
        return pred_mean, pred_cov

    def log_marginal_likelihood(params, x, y):
        mean, cov_params, noise_scale = unpack_kernel_params(params)
        cov_y_y = cov_func(cov_params, x, x) + noise_scale * np.eye(len(y))
        prior_mean = mean * np.ones(len(y))
        return mvn.logpdf(y, prior_mean, cov_y_y)

    return num_cov_params + 2, predict, log_marginal_likelihood

# Define an example covariance function. 
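The source file follows this comment with an RBF kernel; a sketch consistent with the cov_func signature assumed above:

def rbf_covariance(cov_params, x, xp):
    output_scale = np.exp(cov_params[0])
    lengthscales = np.exp(cov_params[1:])
    diffs = np.expand_dims(x / lengthscales, 1) - np.expand_dims(xp / lengthscales, 0)
    return output_scale * np.exp(-0.5 * np.sum(diffs ** 2, axis=2))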
Example #21
Source File: performance.py    From AeroSandbox with MIT License
def compute_rotation_matrix_wind_to_geometry(self):
        # Computes the 3x3 rotation matrix required to go from wind axes to geometry axes.
        sinalpha = np.sin(np.radians(self.alpha))
        cosalpha = np.cos(np.radians(self.alpha))
        sinbeta = np.sin(np.radians(self.beta))
        cosbeta = np.cos(np.radians(self.beta))

        # r=-1*np.array([
        #     [cosbeta*cosalpha, -sinbeta, cosbeta*sinalpha],
        #     [sinbeta*cosalpha, cosbeta, sinbeta*sinalpha],
        #     [-sinalpha, 0, cosalpha]
        # ])

        eye = np.eye(3)

        alpharotation = np.array([
            [cosalpha, 0, -sinalpha],
            [0, 1, 0],
            [sinalpha, 0, cosalpha]
        ])

        betarotation = np.array([
            [cosbeta, -sinbeta, 0],
            [sinbeta, cosbeta, 0],
            [0, 0, 1]
        ])

        axesflip = np.array([
            [-1, 0, 0],
            [0, 1, 0, ],
            [0, 0, -1]
        ])  # Since in geometry axes, X is downstream by convention, while in wind axes, X is upstream by convention. Same with Z being up/down, respectively.

        r = axesflip @ alpharotation @ betarotation @ eye  # where "@" is the matrix multiplication operator

        return r 
Example #22
Source File: geometry.py    From AeroSandbox with MIT License
def angle_axis_rotation_matrix(angle, axis, axis_already_normalized=False):
    # Gives the rotation matrix from an angle and an axis.
    # An implementation of https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle
    # Inputs:
    #   * angle: can be one angle or a vector (1d ndarray) of angles. Given in radians.
    #   * axis: a 1d numpy array of length 3 (x,y,z). Represents the axis of rotation.
    #   * axis_already_normalized: boolean, skips normalization for speed if you flag this true.
    # Outputs:
    #   * If angle is a scalar, returns a 3x3 rotation matrix.
    #   * If angle is a vector, returns a 3x3xN rotation matrix.
    if not axis_already_normalized:
        axis = axis / np.linalg.norm(axis)

    sintheta = np.sin(angle)
    costheta = np.cos(angle)
    cpm = np.array(
        [[0, -axis[2], axis[1]],
         [axis[2], 0, -axis[0]],
         [-axis[1], axis[0], 0]]
    )  # The cross product matrix of the rotation axis vector
    outer_axis = np.outer(axis, axis)

    angle = np.array(angle)  # make sure angle is a ndarray
    if len(angle.shape) == 0:  # is a scalar
        rot_matrix = costheta * np.eye(3) + sintheta * cpm + (1 - costheta) * outer_axis
        return rot_matrix
    else:  # angle is assumed to be a 1d ndarray
        rot_matrix = costheta * np.expand_dims(np.eye(3), 2) + sintheta * np.expand_dims(cpm, 2) + (
                1 - costheta) * np.expand_dims(outer_axis, 2)
        return rot_matrix 
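A sanity-check sketch: rotating the x-axis by 90 degrees about z should give the y-axis (values chosen for illustration):

R = angle_axis_rotation_matrix(np.pi / 2, np.array([0.0, 0.0, 1.0]))
print(R @ np.array([1.0, 0.0, 0.0]))  # approximately [0, 1, 0]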
Example #23
Source File: weights.py    From AeroSandbox with MIT License
def get_inertia_tensor_about_point(self, point):
        # Returns the inertia tensor about an arbitrary point.
        # Using https://en.wikipedia.org/wiki/Parallel_axis_theorem#Tensor_generalization

        R = point - self.xyz_cg
        I = self.get_inertia_tensor()
        m = self.mass
        J = I + m * (np.dot(R, R) * np.eye(3) - np.outer(R, R))

        return J 
Example #24
Source File: parametric_GP.py    From ParametricGP with MIT License
def likelihood(self, hyp): 
    M = self.M
    Z = self.Z
    m = self.m
    S = self.S
    X_batch = self.X_batch
    y_batch = self.y_batch
    jitter = self.jitter
    jitter_cov = self.jitter_cov
    N = X_batch.shape[0]

        logsigma_n = hyp[-1]
        sigma_n = np.exp(logsigma_n)
        
        # Compute K_u_inv
        K_u = kernel(Z, Z, hyp[:-1])    
        # K_u_inv = np.linalg.solve(K_u + np.eye(M)*jitter_cov, np.eye(M))
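    # for the SPD matrix K_u, the Cholesky-based inverse below is cheaper and more stable than the generic solve above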
        L = np.linalg.cholesky(K_u + np.eye(M)*jitter_cov)    
        K_u_inv = np.linalg.solve(L.T, np.linalg.solve(L,np.eye(M)))
        
        self.K_u_inv = K_u_inv
          
        # Compute mu
        psi = kernel(Z, X_batch, hyp[:-1])    
        K_u_inv_m = np.matmul(K_u_inv,m)   
        MU = np.matmul(psi.T,K_u_inv_m)
        
        # Compute cov
        Alpha = np.matmul(K_u_inv,psi)
        COV = kernel(X_batch, X_batch, hyp[:-1]) - np.matmul(psi.T, np.matmul(K_u_inv,psi)) + \
                np.matmul(Alpha.T, np.matmul(S,Alpha))
        
        COV_inv = np.linalg.solve(COV  + np.eye(N)*sigma_n + np.eye(N)*jitter, np.eye(N))
        # L = np.linalg.cholesky(COV  + np.eye(N)*sigma_n + np.eye(N)*jitter) 
        # COV_inv = np.linalg.solve(np.transpose(L), np.linalg.solve(L,np.eye(N)))
        
        # Compute cov(Z, X)
        cov_ZX = np.matmul(S,Alpha)
        
        # Update m and S
        alpha = np.matmul(COV_inv, cov_ZX.T)
        m = m + np.matmul(cov_ZX, np.matmul(COV_inv, y_batch-MU))    
        S = S - np.matmul(cov_ZX, alpha)
        
        self.m = m
        self.S = S
        
        # Compute NLML                
        K_u_inv_m = np.matmul(K_u_inv,m)
        NLML = 0.5*np.matmul(m.T,K_u_inv_m) + np.sum(np.log(np.diag(L))) + 0.5*np.log(2.*np.pi)*M
        
        return NLML[0,0] 
Example #25
Source File: run_synthetic_example.py    From ParetoMTL with MIT License
def get_d_paretomtl(grads,value,weights,i):
    # calculate the gradient direction for Pareto MTL
    nobj, dim = grads.shape
    
    # check active constraints
    normalized_current_weight = weights[i] / np.linalg.norm(weights[i])
    normalized_rest_weights = np.delete(weights, (i), axis=0) / np.linalg.norm(np.delete(weights, (i), axis=0), axis=1, keepdims=True)
    w = normalized_rest_weights - normalized_current_weight

    # solve QP
    gx = np.dot(w, value / np.linalg.norm(value))
    idx = gx > 0

    vec = np.concatenate((grads, np.dot(w[idx], grads)), axis=0)
    
#    # use cvxopt to solve QP
#    
#    P = np.dot(vec , vec.T)
#    
#    q = np.zeros(nobj + np.sum(idx))
#    
#    G =  - np.eye(nobj + np.sum(idx) )
#    h = np.zeros(nobj + np.sum(idx))
#    
#
#    
#    A = np.ones(nobj + np.sum(idx)).reshape(1,nobj + np.sum(idx))
#    b = np.ones(1)
 
#    cvxopt.solvers.options['show_progress'] = False
#    sol = cvxopt_solve_qp(P, q, G, h, A, b)
  
    # use MinNormSolver to solve QP
    sol, nd = MinNormSolver.find_min_norm_element(vec)
   
    
    # reformulate ParetoMTL as a linear scalarization method and return the weights
    weight0 = sol[0] + np.sum(np.array([sol[j] * w[idx][j - 2, 0] for j in np.arange(2, 2 + np.sum(idx))]))
    weight1 = sol[1] + np.sum(np.array([sol[j] * w[idx][j - 2, 1] for j in np.arange(2, 2 + np.sum(idx))]))
    weight = np.stack([weight0, weight1])
   

    return weight 
Example #26
Source File: optimizers.py    From momi2 with GNU General Public License v3.0
def _find_minimum(f, start_params, optimizer, bounds=None,
                  callback=None,
                  opt_kwargs={}, **kwargs):
    fixed_params = []
    if bounds is not None:
        bounds = [(None, None) if b is None else b for b in bounds]
        for i, b in enumerate(bounds):
            try:
                if b[0] == b[1] and b[0] is not None:
                    fixed_params += [(i, b[0])]
            except (TypeError, IndexError) as e:
                fixed_params += [(i, b)]

        if any(start_params[i] != b for i, b in fixed_params):
            raise ValueError(
                "start_params does not agree with fixed parameters in bounds")

        if fixed_params:
            fixed_idxs, fixed_offset = list(
                map(np.array, list(zip(*fixed_params))))

            fixed_idxs = np.array([(i in fixed_idxs)
                                   for i in range(len(start_params))])
            proj0 = np.eye(len(fixed_idxs))[:, fixed_idxs]
            proj1 = np.eye(len(fixed_idxs))[:, ~fixed_idxs]

            fixed_offset = np.dot(proj0, fixed_offset)
            get_x = lambda x0: np.dot(proj1, x0) + fixed_offset

            def restricted(fun):
                if fun is None:
                    return None
                def new_fun(x0, *fargs, **fkwargs):
                    return fun(get_x(x0), *fargs, **fkwargs)
                return wraps(fun)(new_fun)
            f = restricted(f)
            callback = restricted(callback)

            start_params = np.array(
                [s for (fxd, s) in zip(fixed_idxs, start_params) if not fxd])
            bounds = [b for (fxd, b) in zip(fixed_idxs, bounds) if not fxd]

    opt_kwargs = dict(opt_kwargs)
    assert all([k not in opt_kwargs for k in ['bounds', 'callback']])
    if callback:
        opt_kwargs['callback'] = callback
    if bounds is not None and not all([l is None and h is None for (l, h) in bounds]):
        opt_kwargs['bounds'] = bounds

    ret = _find_minimum_helper(
        f, start_params, optimizer, opt_kwargs, **kwargs)
    if fixed_params:
        ret.x = get_x(ret.x)
    return ret 
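The np.eye column selection above is a compact way to build projection matrices: proj1 embeds the free parameters into the full parameter vector and proj0 embeds the fixed offsets. A standalone sketch of the trick (all names and values illustrative):

import numpy as np

fixed = np.array([False, True, False])   # pin parameter 1
proj0 = np.eye(3)[:, fixed]              # (3, 1), embeds fixed values
proj1 = np.eye(3)[:, ~fixed]             # (3, 2), embeds free values
full = np.dot(proj1, [10.0, 30.0]) + np.dot(proj0, [20.0])
print(full)                              # [10. 20. 30.]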
Example #27
Source File: dr.py    From POT with MIT License
def fda(X, y, p=2, reg=1e-16):
    """Fisher Discriminant Analysis

    Parameters
    ----------
    X : ndarray, shape (n, d)
        Training samples.
    y : ndarray, shape (n,)
        Labels for training samples.
    p : int, optional
        Size of dimensionality reduction.
    reg : float, optional
        Regularization term >0 (ridge regularization)

    Returns
    -------
    Popt : ndarray, shape (d, p)
        Projection matrix spanning the p most discriminant directions
    proj : callable
        projection function including mean centering
    """

    mx = np.mean(X, 0)  # per-feature mean (axis=0), so centering subtracts a length-d vector
    X -= mx.reshape((1, -1))

    # data split between classes
    d = X.shape[1]
    xc = split_classes(X, y)
    nc = len(xc)

    p = min(nc - 1, p)

    Cw = 0
    for x in xc:
        Cw += np.cov(x, rowvar=False)
    Cw /= nc

    mxc = np.zeros((d, nc))

    for i in range(nc):
        mxc[:, i] = np.mean(xc[i], 0)  # per-class feature means

    mx0 = np.mean(mxc, 1)
    Cb = 0
    for i in range(nc):
        Cb += (mxc[:, i] - mx0).reshape((-1, 1)) * \
            (mxc[:, i] - mx0).reshape((1, -1))

    w, V = linalg.eig(Cb, Cw + reg * np.eye(d))

    idx = np.argsort(w.real)

    Popt = V[:, idx[-p:]]

    def proj(X):
        return (X - mx.reshape((1, -1))).dot(Popt)

    return Popt, proj
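A hypothetical call on toy two-class data (split_classes and scipy's linalg are assumed to be imported in the module):

X = np.vstack([np.random.randn(50, 4), np.random.randn(50, 4) + 2.0])
y = np.array([0] * 50 + [1] * 50)
Popt, proj = fda(X, y, p=1)
Xp = proj(X)   # shape (100, 1): projection onto the most discriminant direction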