Python autograd.numpy.ones() Examples

The following code examples show how to use autograd.numpy.ones(). They are drawn from open source Python projects.
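Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing that autograd.numpy.ones() behaves like numpy.ones() and that arrays built with it can appear inside functions that autograd differentiates:

import autograd.numpy as np
from autograd import grad

def scaled_sum(w):
    # np.ones(3) is a constant factor; only w is differentiated
    return np.sum(w * np.ones(3))

print(scaled_sum(np.array([1.0, 2.0, 3.0])))        # 6.0
print(grad(scaled_sum)(np.array([1.0, 2.0, 3.0])))  # [1. 1. 1.]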

Example 1
Project: ReducedVarianceReparamGradients   Author: andymiller   File: bbvi_mvn_diag.py    MIT License
def __init__(self, lnpdf, D, glnpdf=None, lnpdf_is_vectorized=False):
        """
        Implements MCVI --- exposes elbo gradient and sampling methods.
        This class breaks the gradient down into parts

            dg/dlam = dlnpdf(z)/dz * dz/dlam - dlnq(z)/dz * dz/dlam - dlnq(z)/dlam

        Parameterizes with mean and log-std! (not variance!)
            lam = [mean, log-std]
        """
        # base class sets up the gradient function organization
        super(DiagMvnBBVI, self).__init__(lnpdf, D, glnpdf, lnpdf_is_vectorized)

        # we note that the second two terms, with probability one, 
        # create the vector [0, 0, 0, ..., 0, 1., 1., ..., 1.]
        self.mask = np.concatenate([np.zeros(D), np.ones(D)])
        self.num_variational_params = 2*D
        self.D = D

    #####################################################################
    # Methods for various types of gradients of the ELBO                #
    #    -- that can be plugged into FilteredOptimization routines      #
    ##################################################################### 
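To make the mask above concrete, here is a small illustrative sketch (not part of the project) with D = 3: the mask zeroes out the mean block of lam = [mean, log-std] and keeps the log-std block.

import autograd.numpy as np

D = 3
mask = np.concatenate([np.zeros(D), np.ones(D)])      # [0. 0. 0. 1. 1. 1.]
lam = np.concatenate([np.array([0.1, 0.2, 0.3]),      # mean block
                      np.array([-1.0, -0.5, 0.0])])   # log-std block
print(mask * lam)  # zeroes the mean block, keeps the log-std block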
Example 2
Project: momi2   Author: popgenmethods   File: compute_sfs.py    GNU General Public License v3.0
def expected_tmrca(demography, sampled_pops=None, sampled_n=None):
    """
    The expected time to the most recent common ancestor of the sample.

    Parameters
    ----------
    demography : Demography

    Returns
    -------
    tmrca : float-like

    See Also
    --------
    expected_deme_tmrca : tmrca of subsample within a deme
    expected_sfs_tensor_prod : compute general class of summary statistics
    """
    vecs = [np.ones(n + 1) for n in demography.sampled_n]
    n0 = len(vecs[0]) - 1.0
    vecs[0] = np.arange(n0 + 1) / n0
    return np.squeeze(expected_sfs_tensor_prod(vecs, demography)) 
Example 3
Project: momi2   Author: popgenmethods   File: likelihood.py    GNU General Public License v3.0
def _mut_factor_het(sfs, demo, mut_rate, vector, p_missing):
    mut_rate = mut_rate * np.ones(sfs.n_loci)
    E_het = expected_heterozygosity(
        demo,
        restrict_to_pops=np.array(
            sfs.sampled_pops)[sfs.ascertainment_pop])

    p_missing = p_missing * np.ones(len(sfs.ascertainment_pop))
    p_missing = p_missing[sfs.ascertainment_pop]
    lambd = np.einsum("i,j->ij", mut_rate, E_het * (1.0 - p_missing))

    counts = sfs.avg_pairwise_hets[:, sfs.ascertainment_pop]
    ret = -lambd + counts * np.log(lambd) - scipy.special.gammaln(counts + 1)
    ret = ret * sfs.sampled_n[sfs.ascertainment_pop] / float(
        np.sum(sfs.sampled_n[sfs.ascertainment_pop]))
    if not vector:
        ret = np.sum(ret)
    else:
        ret = np.sum(ret, axis=1)
    return ret 
Example 4
Project: eye_hand_calibration   Author: MobileManipulation   File: full_calib.py    BSD 3-Clause "New" or "Revised" License
def __init__(self, grid_size, extrinsics, intrinsics):
        self.extrinsics = extrinsics
        self.intrinsics = intrinsics

        # Build up testing data in a grid
        z = np.linspace(-0.2, 0.2, num=grid_size)
        y = np.linspace(-0.2+0.025, 0.2+0.025, num=grid_size)
        x = np.linspace(0.5, 1.2, num=grid_size)
        g = np.meshgrid(x, y, z)
        cam = np.stack(map(np.ravel, g))
        cam = np.concatenate([cam, np.ones([1, cam.shape[1]])])  # append a homogeneous-coordinate row of ones

        # Compute all truth data in various frames
        self.camera_truth = cam
        self.optical_truth = opt = toCameraFrame(cam, extrinsics)
        self.pixel_truth = cameraProjection(opt, intrinsics) 
Example 5
Project: pilco-learner   Author: cryscan   File: util.py    MIT License
def gaussian_sin(m, v, i, e=None):
    d = len(m)
    L = len(i)
    e = np.ones((1, L)) if e is None else np.atleast_2d(e)

    mi = np.atleast_2d(m[i])
    vi = v[np.ix_(i, i)]
    vii = np.atleast_2d(np.diag(vi))
    M = e * exp(-vii / 2) * sin(mi)
    M = M.flatten()

    lq = -(vii.T + vii) / 2
    q = exp(lq)
    V = ((exp(lq + vi) - q) * cos(mi.T - mi) -
         (exp(lq - vi) - q) * cos(mi.T + mi))
    V = np.dot(e.T, e) * V / 2

    C = np.diag((e * exp(-vii / 2) * cos(mi)).flatten())
    C = fill_mat(C, np.zeros((d, L)), i, None)

    return M, V, C 
Example 6
Project: ceviche   Author: fancompute   File: utils.py    MIT License
def make_IO_matrices(indices, N):
    """ Makes matrices that relate the sparse matrix entries to their locations in the matrix
            The kth column of I is a 'one hot' vector specifying the k-th entry's row index into A
            The kth column of J is a 'one hot' vector specifying the k-th entry's column index into A
            O = J^T is for notational convenience.
            Armed with a vector of M entries 'a', we can construct the sparse matrix 'A' as:
                A = I @ diag(a) @ O
            where 'diag(a)' is a (MxM) matrix with vector 'a' along its diagonal.
            In index notation:
                A_ij = I_ik * a_k * O_kj
            Conversely, given a sparse matrix 'A', we can strip out the entries `a` using the IO matrices as follows:
                a = diag(I^T @ A @ O^T)
            In index notation:
                a_k = I_ik * A_ij * O_kj
    """
    M = indices.shape[1]                                 # number of indices in the matrix
    entries_1 = npa.ones(M)                              # M entries of all 1's
    ik, jk = indices                                     # separate i and j components of the indices
    indices_I = npa.vstack((ik, npa.arange(M)))          # indices into the I matrix
    indices_J = npa.vstack((jk, npa.arange(M)))          # indices into the J matrix
    I = make_sparse(entries_1, indices_I, shape=(N, M))  # construct the I matrix
    J = make_sparse(entries_1, indices_J, shape=(N, M))  # construct the J matrix
    O = J.T                                              # make O = J^T matrix for consistency with my notes.
    return I, O 
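The identity A = I @ diag(a) @ O from the docstring can be checked on a toy example with plain scipy.sparse matrices; this sketch is independent of ceviche's make_sparse helper and only illustrates the construction.

import numpy as np
import scipy.sparse as sp

N = 4
indices = np.array([[0, 1, 3],   # row indices i_k
                    [2, 1, 0]])  # column indices j_k
a = np.array([5.0, 7.0, 9.0])    # the M sparse-matrix entries
M = indices.shape[1]

I = sp.csr_matrix((np.ones(M), (indices[0], np.arange(M))), shape=(N, M))
J = sp.csr_matrix((np.ones(M), (indices[1], np.arange(M))), shape=(N, M))
O = J.T

A = I @ sp.diags(a) @ O
print(A.toarray())                 # entries a_k sit at positions (i_k, j_k)
print((I.T @ A @ O.T).diagonal())  # recovers a = [5. 7. 9.]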
Example 7
Project: ceviche   Author: fancompute   File: fdfd.py    MIT License
def _make_A(self, eps_vec):

        eps_vec_xx, eps_vec_yy = self._grid_average_2d(eps_vec)
        eps_vec_xx_inv = 1 / (eps_vec_xx + 1e-5)  # the 1e-5 is for numerical stability
        eps_vec_yy_inv = 1 / (eps_vec_yy + 1e-5)  # autograd throws 'divide by zero' errors.

        indices_diag = npa.vstack((npa.arange(self.N), npa.arange(self.N)))

        entries_DxEpsy,   indices_DxEpsy   = spsp_mult(self.entries_Dxb, self.indices_Dxb, eps_vec_yy_inv, indices_diag, self.N)
        entries_DxEpsyDx, indices_DxEpsyDx = spsp_mult(entries_DxEpsy, indices_DxEpsy, self.entries_Dxf, self.indices_Dxf, self.N)

        entries_DyEpsx,   indices_DyEpsx   = spsp_mult(self.entries_Dyb, self.indices_Dyb, eps_vec_xx_inv, indices_diag, self.N)
        entries_DyEpsxDy, indices_DyEpsxDy = spsp_mult(entries_DyEpsx, indices_DyEpsx, self.entries_Dyf, self.indices_Dyf, self.N)

        entries_d = 1 / EPSILON_0 * npa.hstack((entries_DxEpsyDx, entries_DyEpsxDy))
        indices_d = npa.hstack((indices_DxEpsyDx, indices_DyEpsxDy))

        entries_diag = MU_0 * self.omega**2 * npa.ones(self.N)

        entries_a = npa.hstack((entries_d, entries_diag))
        indices_a = npa.hstack((indices_d, indices_diag))

        return entries_a, indices_a 
Example 8
Project: autoptim   Author: pierreablin   File: test_minimize.py    MIT License
def test_preconditioning():
    def f(x, y, z, a, b):
        return np.sum(x ** 2) + np.sum((y - 3) ** 2) + np.sum((z + a) ** 4)

    a = 2
    b = 5
    shapes = [(2, 3), (2, 2), (3,)]
    optim_vars_init = [np.ones(shape) for shape in shapes]

    def precon_fwd(x, y, z, a, b):
        return 3 * x, y / 2, z * 4

    def precon_bwd(x, y, z, a, b):
        return x / 3, 2 * y, z / 4

    optim_vars, res = minimize(f, optim_vars_init, args=(a, b),
                               precon_fwd=precon_fwd, precon_bwd=precon_bwd)
    assert res['success']
    assert [var.shape for var in optim_vars] == shapes
    for var, target in zip(optim_vars, [0, 3, -a]):
        assert_allclose(var, target, atol=1e-1) 
Example 9
Project: kernel-gof   Author: wittawatj   File: data.py    MIT License
def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a one-dimensional length-k array of variances
        pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        """
        k, d = means.shape
        if k != len(variances):
            raise ValueError('Number of components in means and variances does not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances 
Example 10
Project: kernel-gof   Author: wittawatj   File: data.py    MIT License
def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a k x d x d numpy array containing k covariance matrices,
            one for each component.
        pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        """
        k, d = means.shape
        if k != variances.shape[0]:
            raise ValueError('Number of components in means and variances does not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances 
Example 11
Project: kernel-gof   Author: wittawatj   File: density.py    MIT License
def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a one-dimensional length-k array of variances
        pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        """
        k, d = means.shape
        if k != len(variances):
            raise ValueError('Number of components in means and variances does not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances 
Example 12
Project: kernel-gof   Author: wittawatj   File: density.py    MIT License
def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a k x d x d numpy array containing a stack of k covariance
            matrices, one for each mixture component.
        pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        """
        k, d = means.shape
        if k != variances.shape[0]:
            raise ValueError('Number of components in means and variances does not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances 
Example 13
Project: bosonic   Author: steinbrecher   File: qonn.py    MIT License
def var_nonlin_diag_lossy(thetas, n, m):
    assert len(thetas) == m
    N = lossy_basis_size(n, m)
    D = np.ones((N, 1), dtype=complex)
    count = 0
    nn = n
    while nn > 0:
        NN = basis_size(nn, m)
        basis = fock_basis_array(nn, m)
        coeffs = basis * (basis - 1) / 2.0
        DD = np.sum(
            np.multiply(np.reshape(thetas, (m, 1)), coeffs.T).T, axis=1)
        DD = np.exp(1j * DD)
        D[count:count+NN, 0] = DD
        nn -= 1
        count += NN
    return D 
Example 14
Project: prediction-constrained-topic-models   Author: dtak   File: util_differentiable_transform__2D_rows_sum_to_one.py    MIT License
def to_diffable_arr(topics_KV, min_eps=MIN_EPS, do_force_safe=False):
    ''' Transform normalized topics to unconstrained space.

    Args
    ----
    topics_KV : 2D array, size K x V
        minimum value of any entry must be min_eps
        each row should sum to 1.0

    Returns
    -------
    log_topics_vec : 2D array, size K x (V-1)
        unconstrained real values

    Examples
    --------
    >>> topics_KV = np.eye(3) + np.ones((3,3))
    >>> topics_KV /= topics_KV.sum(axis=1)[:,np.newaxis]
    >>> log_topics_vec = to_diffable_arr(topics_KV)
    >>> out_KV = to_common_arr(log_topics_vec)
    >>> np.allclose(out_KV, topics_KV)
    True
    '''
    if do_force_safe:
        topics_KV = to_safe_common_arr(topics_KV, min_eps)
    K, V = topics_KV.shape
    log_topics_KV = np.log(topics_KV)
    log_topics_KVm1 = log_topics_KV[:, :-1]
    log_topics_KVm1 = log_topics_KVm1 - log_topics_KV[:, -1][:,np.newaxis]
    return log_topics_KVm1 + np.log1p(-V * min_eps) 
Example 15
Project: Efficient_Augmentation   Author: mkuchnik   File: LinearSVM.py    MIT License
def _X_b(X):
        """bias trick pad X"""
        X_b = np.concatenate((X,
                              np.ones((len(X), 1))),
                             axis=1)
        return X_b 
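A quick illustration of the bias trick with a hypothetical 2 x 2 input: the appended column of ones lets a single weight vector absorb the intercept term.

import autograd.numpy as np

X = np.array([[1.0, 2.0],
              [3.0, 4.0]])
X_b = np.concatenate((X, np.ones((len(X), 1))), axis=1)
print(X_b)
# [[1. 2. 1.]
#  [3. 4. 1.]]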
Example 16
Project: Efficient_Augmentation   Author: mkuchnik   File: LinearSVM.py    MIT License
def retrain_LOO_loss(self, include_reg=False):
        """
        Non-sklearn function.
        Get the Leave One Out (LOO) influence of all the points in the training
        set by retraining. This function is slow!
        """
        X = self.X_
        y = self.y_
        LOO_infs = np.zeros(len(X))
        assert len(X) == len(y)
        assert not include_reg
        curr_losses = self.sum_pred_losses(X=X, y=y)
        # TODO can use multiprocessing
        for i in range(len(X)):
            mask = np.ones(len(X), dtype=bool)
            mask[i] = False
            X_without_i = X[mask]
            y_without_i = y[mask]
            self.fit(X_without_i, y_without_i)
            curr_losses_i = self.sum_pred_losses(X_without_i, y_without_i)
            LOO_inf = curr_losses - curr_losses_i
            LOO_infs[i] = LOO_inf

        self.fit(X, y)

        return LOO_infs 
Example 17
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def _X_b(X):
        """bias trick pad X"""
        X_b = np.concatenate((X,
                              np.ones((len(X), 1))),
                             axis=1)
        return X_b 
Example 18
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def retrain_LOO_loss(self, include_reg=False):
        """
        Non-sklearn function.
        Get the Leave One Out (LOO) influence of all the points in the training
        set by retraining. This function is slow!
        """
        X = self.X_
        y = self.y_
        LOO_infs = np.zeros(len(X))
        assert len(X) == len(y)
        assert not include_reg
        curr_losses = self.sum_pred_losses(X=X, y=y)
        # TODO can use multiprocessing
        for i in range(len(X)):
            mask = np.ones(len(X), dtype=bool)
            mask[i] = False
            X_without_i = X[mask]
            y_without_i = y[mask]
            self.fit(X_without_i, y_without_i)
            curr_losses_i = self.sum_pred_losses(X_without_i, y_without_i)
            LOO_inf = curr_losses - curr_losses_i
            LOO_infs[i] = LOO_inf

        self.fit(X, y)

        return LOO_infs 
Example 19
Project: lightML   Author: jfzhang95   File: LinearClassification.py    MIT License
def fit(self, X, y):
        # def loss function
        def calc_loss(W):
            y_pred = logistic_predictions(W, XMat)
            label_probabilities = y_pred * yMat + (1 - y_pred) * (1 - yMat)
            if self.reg is None:
                return -np.sum(np.log(label_probabilities))
            elif self.reg == 'l1':
                return -np.sum(np.log(label_probabilities))+np.sum(self.alpha*(np.abs(W[0:-1])))
            elif self.reg == 'l2':
                return -np.sum(np.log(label_probabilities))+np.sum(self.alpha*W[0:-1]*W[0:-1])
            else:
                raise ValueError("reg can only be None, 'l1' or 'l2'")

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features+1, n_outdim) * 0.1
        for it in range(max_iters + 1):
            loss = calc_loss(self.W)
            grad_params = grad_fun(self.W)
            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 20
Project: lightML   Author: jfzhang95   File: LinearClassification.py    MIT License
def predict(self, X):
        """Predict function"""
        XMat = np.array(X)
        n_samples = XMat.shape[0]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])
        ypred = logistic_predictions(self.W, XMat)
        for i in range(n_samples):
            if ypred[i] > 0.5:
                ypred[i] = 1
            else:
                ypred[i] = 0

        return ypred 
Example 21
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def fit(self, X, y):
        # def loss function
        def calc_linear_loss(W):
            y_pred = np.dot(XMat, W)
            return np.sqrt((np.power(yMat - y_pred, 2))).mean()

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = grad(calc_linear_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features+1, n_outdim)
        for it in range(max_iters+1):
            loss = calc_linear_loss(self.W)
            grad_params = grad_fun(self.W)

            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 22
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def predict(self, X):
        """Predict function"""
        XMat = np.array(X)
        n_samples = XMat.shape[0]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])
        ypred = np.dot(XMat, self.W)

        return ypred 
Example 23
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def fit(self, X, y):
        # def loss function
        def calc_linear_loss(W):
            y_pred = np.dot(XMat, W)
            return np.sqrt((np.power(yMat - y_pred, 2))).mean() \
                        + np.sum(self.alpha * W[0:-1] * W[0:-1])

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_linear_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features+1, n_outdim)
        for it in range(max_iters+1):
            loss = calc_linear_loss(self.W)
            grad_params = grad_fun(self.W)

            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 24
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def fit(self, X, y):
        # def loss function
        def calc_linear_loss(W):
            y_pred = np.dot(XMat, W)
            return np.sqrt((np.power(yMat - y_pred, 2))).mean() \
                   + np.sum(self.alpha * np.abs(W[0:-1]))

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_linear_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features + 1, n_outdim)
        for it in range(max_iters+1):
            loss = calc_linear_loss(self.W)
            grad_params = grad_fun(self.W)

            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 25
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def predict(self, X):
        """Predict function"""
        XMat = np.array(X)
        n_samples = XMat.shape[0]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])
        ypred = np.dot(XMat, self.W)
        return ypred 
Example 26
Project: ReducedVarianceReparamGradients   Author: andymiller   File: misc.py    MIT License
def mvn_fisher_info(params):
    """ returns the fisher information matrix (diagonal) for a multivariate
    normal distribution with params = [mu, ln sigma] """
    D = len(params) // 2  # integer half-length so the slices below use int indices
    mean, log_std = params[:D], params[D:]
    return np.concatenate([np.exp(-2.*log_std),
                           2*np.ones(D)]) 
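A hypothetical usage of the function above: with params = [mu, log_std], the diagonal Fisher information is 1/sigma^2 for each mean coordinate and the constant 2 for each log-std coordinate.

import autograd.numpy as np

params = np.concatenate([np.zeros(2),                    # mu
                         np.log(np.array([0.5, 2.0]))])  # log_std
print(mvn_fisher_info(params))  # approximately [4.  0.25  2.  2.]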
Example 27
Project: momi2   Author: popgenmethods   File: compute_sfs.py    GNU General Public License v3.0
def expected_deme_tmrca(demography, deme, sampled_pops=None, sampled_n=None):
    """
    The expected time to the most recent common ancestor of the samples
    within a particular deme.

    Parameters
    ----------
    demography : Demography
    deme : the deme

    Returns
    -------
    tmrca : float

    See Also
    --------
    expected_tmrca : the tmrca of the whole sample
    expected_sfs_tensor_prod : compute general class of summary statistics
    """
    deme = list(demography.sampled_pops).index(deme)
    vecs = [np.ones(n + 1) for n in demography.sampled_n]

    n = len(vecs[deme]) - 1
    vecs[deme] = np.arange(n + 1) / (1.0 * n)
    vecs[deme][-1] = 0.0

    return np.squeeze(expected_sfs_tensor_prod(vecs, demography)) 
Example 28
Project: momi2   Author: popgenmethods   File: compute_sfs.py    GNU General Public License v3.0
def _process_leaf_likelihood(self, event):
        (pop, idx), = self.demo._parent_pops(event)
        if idx == 0:
            self._rename_pop(pop, (pop, idx))
        else:
            # ghost population
            batch_size = self.likelihood_list[0].liks.shape[0]
            self.likelihood_list.append(LikelihoodTensor(
                np.ones((batch_size, 1)), 0,
                [(pop, idx)]
            )) 
Example 29
Project: momi2   Author: popgenmethods   File: demography.py    GNU General Public License v3.0
def simulate_trees(self, **kwargs):
        sampled_t = self.sampled_t
        if sampled_t is None:
            sampled_t = 0.0
        sampled_t = np.array(sampled_t) * np.ones(len(self.sampled_pops))

        pops = {p: i for i, p in enumerate(self.sampled_pops)}

        demographic_events = []
        for e in self._G.graph["events"]:
            e = e.get_msprime_event(self._G.graph["params"], pops)
            if e is not None:
                demographic_events.append(e)

        return msprime.simulate(
            population_configurations=[
                msprime.PopulationConfiguration()
                for _ in range(len(pops))],
            Ne=self.default_N / 4,
            demographic_events=demographic_events,
            samples=[
                msprime.Sample(population=pops[p], time=t)
                for p, t, n in zip(
                        self.sampled_pops, self.sampled_t,
                        self.sampled_n)
                for _ in range(n)],
            **kwargs) 
Example 30
Project: momi2   Author: popgenmethods   File: configurations.py    GNU General Public License v3.0
def count_subsets(self, derived_weights_dict, total_counts_dict):
        """
        For each config, count number of subsets with given sample
        size, weighted according to the number of derived alleles.

        Parameters
        total_counts_dict: dict mapping pop to n_pop
        derived_weights_dict: dict mapping pop to list of floats
           with length n_pop+1, giving the weight for each
           derived allele count

        Returns
        numpy.ndarray of weighted counts for each config
        """
        assert (set(derived_weights_dict.keys())
                <= set(total_counts_dict.keys()))

        ret = np.ones(len(self))
        for p, n in total_counts_dict.items():
            i = self.sampled_pops.index(p)
            if p in derived_weights_dict:
                curr = np.zeros(len(self))
                assert len(derived_weights_dict[p]) == n+1
                for d, w in enumerate(derived_weights_dict[p]):
                    if w == 0:
                        continue
                    a = n - d
                    curr += w * (comb(self.value[:, i, 0], a)
                                 * comb(self.value[:, i, 1], d))
                ret *= curr
            else:
                ret *= comb(self.value[:, i, :].sum(axis=1), n)
        return ret 
Example 31
Project: momi2   Author: popgenmethods   File: configurations.py    GNU General Public License v3.0
def _vecs_and_idxs(self, folded):
        augmented_configs = self._augmented_configs(folded)
        augmented_idxs = self._augmented_idxs(folded)

        # construct the vecs
        vecs = [np.zeros((len(augmented_configs), n + 1))
                for n in self.sampled_n]

        for i in range(len(vecs)):
            n = self.sampled_n[i]
            derived = np.einsum(
                "i,j->ji", np.ones(len(augmented_configs)), np.arange(n + 1))
            curr = scipy.stats.hypergeom.pmf(
                    k=augmented_configs[:, i, 1],
                    M=n,
                    n=derived,
                    N=augmented_configs[:, i].sum(1)
                )
            assert not np.any(np.isnan(curr))
            vecs[i] = np.transpose(curr)

        # copy augmented_idxs to make it safe
        return vecs, dict(augmented_idxs)

    # def _config_str_iter(self):
    #     for c in self.value:
    #         yield _config2hashable(c) 
Example 32
Project: momi2   Author: popgenmethods   File: math_functions.py    GNU General Public License v3.0
def hypergeom_mat(N, n):
    K = np.outer(np.ones(n + 1), np.arange(N + 1))
    k = np.outer(np.arange(n + 1), np.ones(N + 1))
    ret = log_binom(K, k)
    ret = ret + ret[::-1, ::-1]
    ret = ret - log_binom(N, n)
    return np.exp(ret) 
Example 33
Project: momi2   Author: popgenmethods   File: size_history.py    GNU General Public License v3.0
def etjj(self, n):
        j = np.arange(2, n + 1)
        jChoose2 = binom(j, 2)

        ret = np.zeros(len(j))
        noCoalProb = np.ones(len(j))
        for pop in self.pieces:
            ret = ret + noCoalProb * pop.etjj(n)
            if pop.scaled_time != float('inf'):
                noCoalProb = noCoalProb * exp(- pop.scaled_time * jChoose2)
            else:
                assert pop is self.pieces[-1]
        return ret 
Example 34
Project: momi2   Author: popgenmethods   File: sfs_stats.py    GNU General Public License v3.0
def ordered_prob(self, subsample_dict,
                     fold=False):
        # The ordered probability for the subsample given by
        # subsample_dict.
        #
        # Parameters:
        # subsample_dict: dict of list
        #    dict mapping population to a list of 0s and 1s giving the
        #       ordered subsample within that population.

        if fold:
            rev_subsample = {p: 1 - np.array(s)
                             for p, s in subsample_dict.items()}

            return (self.ordered_prob(subsample_dict)
                    + self.ordered_prob(rev_subsample))

        derived_weights_dict = {}
        for pop, pop_subsample in subsample_dict.items():
            n = self.sampled_n_dict[pop]
            arange = np.arange(n+1)

            cnts = co.Counter(pop_subsample)

            prob = np.ones(n+1)
            for i in range(cnts[0]):
                prob *= (n - arange - i)
            for i in range(cnts[1]):
                prob *= (arange - i)
            for i in range(cnts[0] + cnts[1]):
                prob /= float(n - i)

            derived_weights_dict[pop] = prob

        return self.tensor_prod(derived_weights_dict) / self.denom 
Example 35
Project: momi2   Author: popgenmethods   File: likelihood.py    GNU General Public License v3.0
def _get_stochastic_pieces(self, pieces, rgen):
        sfs_pieces = _subsfs_list(self.sfs, pieces, rgen)
        if self.mut_rate is None:
            mut_rate = None
        else:
            mut_rate = self.mut_rate * np.ones(self.sfs.n_loci)
            mut_rate = np.sum(mut_rate) / float(pieces)

        return [SfsLikelihoodSurface(sfs, demo_func=self.demo_func, mut_rate=None,
                                     folded=self.folded, error_matrices=self.error_matrices,
                                     truncate_probs=self.truncate_probs, batch_size=self.batch_size)
                for sfs in sfs_pieces] 
Example 36
Project: momi2   Author: popgenmethods   File: likelihood.py    GNU General Public License v3.0
def _mut_factor_total(sfs, demo, mut_rate, vector):
    mut_rate = mut_rate * np.ones(sfs.n_loci)

    if sfs.configs.has_missing_data:
        raise ValueError(
            "Expected total branch length not implemented for missing data; set use_pairwise_diffs=True to scale total mutations by the pairwise differences instead.")
    E_total = expected_total_branch_len(
        demo, sampled_pops=sfs.sampled_pops, sampled_n=sfs.sampled_n, ascertainment_pop=sfs.ascertainment_pop)
    lambd = mut_rate * E_total

    counts = sfs.n_snps(vector=True)
    ret = -lambd + counts * np.log(lambd) - scipy.special.gammaln(counts + 1)
    if not vector:
        ret = np.sum(ret)
    return ret 
Example 37
Project: momi2   Author: popgenmethods   File: test_subsample.py    GNU General Public License v3.0
def test_count_subsets():
    demo = simple_admixture_demo()
    #data = momi.simulate_ms(ms_path, demo.demo_hist,
    #                        sampled_pops=demo.pops,
    #                        sampled_n=demo.n,
    #                        num_loci=1000, mut_rate=1.0)
    num_bases = 1000
    mu = 1.0
    num_replicates = 100
    data = demo.simulate_data(
        muts_per_gen=mu/num_bases,
        recoms_per_gen=0,
        length=num_bases,
        num_replicates=num_replicates,
        sampled_n_dict={"b":2,"a":3}).extract_sfs(None)

    subconfig = []
    for n in data.sampled_n:
        sub_n = random.randrange(n+1)
        d = random.randrange(sub_n+1)
        subconfig.append([sub_n-d, d])

    subconfig_probs = np.ones(len(data.configs))
    for i, (a, d) in enumerate(subconfig):
        #subconfig_probs *= scipy.special.comb(
        #    data.configs.value[:, i, :], [a, d]).prod(axis=1)
        #subconfig_probs /= scipy.special.comb(
        #    data.configs.value[:, i, :].sum(axis=1), a+d)
        subconfig_probs *= scipy.stats.hypergeom.pmf(
            d, data.configs.value[:, i, :].sum(axis=1),
            data.configs.value[:, i, 1], a+d)

    assert np.allclose(subconfig_probs,
                       data.configs.subsample_probs(subconfig)) 
Example 38
Project: SyntheticStatistics   Author: BlissChapman   File: data.py    MIT License
def __init__(self, pbs, qbs, pmix=None, qmix=None):
        """
        pbs: a 2d numpy array (cx x 2) of lower/upper bounds.
            pbs[i, 0] and pbs[i, 1] specifies the lower and upper bound
            for the ith mixture component of P.
        qbs: a 2d numpy array (cy x 2) of lower/upper bounds.
        pmix: The mixture weight vector for P. A numpy array of length cx.
            Automatically set to uniform weights if unspecified.
        qmix: The mixture weight vector for Q. A numpy array of length cy.
        """
        cx = pbs.shape[0]
        cy = qbs.shape[0]
        if pbs.shape[1] != 2:
            raise ValueError('pbs must have 2 columns')
        if qbs.shape[1] != 2:
            raise ValueError('qbs must have 2 columns')
        if pmix is None:
            pmix = old_div(np.ones(cx),float(cx))
        if qmix is None:
            qmix = old_div(np.ones(cy),float(cy))

        if len(pmix) != cx:
            raise ValueError('Length of pmix does not match #rows of pbs')
        if len(qmix) != cy:
            raise ValueError('Length of qmix does not match #rows of qbs')

        if np.abs(np.sum(pmix) - 1) > 1e-6:
            raise ValueError('mixture weights pmix has to sum to 1')
        if np.abs(np.sum(qmix) - 1) > 1e-6:
            raise ValueError('mixture weights qmix has to sum to 1')

        if not np.all(pbs[:, 1] - pbs[:, 0] > 0):
            raise ValueError('Require upper - lower to be positive. False for p')

        if not np.all(qbs[:, 1] - qbs[:, 0] > 0):
            raise ValueError('Require upper - lower to be positive. False for q')
        self.pbs = pbs
        self.qbs = qbs
        self.pmix = pmix
        self.qmix = qmix 
Example 39
Project: SyntheticStatistics   Author: BlissChapman   File: data.py    MIT License
def sample(self, n, seed):
        rstate = np.random.get_state()
        np.random.seed(seed)

        d = self.d
        std_y = np.diag(np.hstack((np.sqrt(2.0), np.ones(d-1) )))
        X = np.random.randn(n, d)
        Y = np.random.randn(n, d).dot(std_y)
        np.random.set_state(rstate)
        return TSTData(X, Y, label='gvd') 
Example 40
Project: bayesian-coresets   Author: trevorcampbell   File: test_gaussian.py    MIT License
def weighted_post_KL(th0, Sig0inv, Siginv, x, w, reverse=True):
  muw, Sigw = weighted_post(th0, Sig0inv, Siginv, x, w)
  mup, Sigp = weighted_post(th0, Sig0inv, Siginv, x, np.ones(x.shape[0]))
  if reverse:
    return gaussian_KL(muw, Sigw, mup, np.linalg.inv(Sigp))
  else:
    return gaussian_KL(mup, Sigp, muw, np.linalg.inv(Sigw))

#NB: without constant terms
#E[Log N(x; mu, Sig)] under mu ~ N(muw, Sigw) 
Example 41
Project: private-pgm   Author: ryan112358   File: dual_query.py    Apache License 2.0
def max_sum_ve(factors, domain = None, elim = None):
    """ run max-product variable elimination on the factors
    return the most likely assignment as a dictionary where
        keys are attributes
        values are elements of the domain
    """
    # step 0: choose an elimination order
    if domain is None:
        domain = reduce(Domain.merge, [F.domain for F in factors])

    if elim is None:
        cliques = [F.domain.attrs for F in factors]
        elim = graphical_model.greedy_order(domain, cliques, domain.attrs)

    # step 1: variable elimination
    k = len(factors)
    phi = dict(zip(range(k), factors))
    psi = {}
    for z in elim:
        phi2 = [phi.pop(i) for i in list(phi.keys()) if z in phi[i].domain]
        psi[z] = sum(phi2, Factor.ones(domain.project(z)))
        phi[k] = psi[z].max([z])
        k += 1

    value = phi[k-1]

    # step 2: traceback-MAP
    x = { }
    for z in reversed(elim):
        x[z] = psi[z].condition(x).values.argmax()

    # step 3 convert to a Dataset object
    df = pd.DataFrame(x, index=[0])
    return Dataset(df, domain) 
Example 42
Project: pilco-learner   Author: cryscan   File: util.py    MIT License
def gaussian_trig(m, v, i, e=None):
    d = len(m)
    L = len(i)
    e = np.ones((1, L)) if e is None else np.atleast_2d(e)
    ee = np.vstack([e, e]).reshape(1, -1, order='F')

    mi = np.atleast_2d(m[i])
    vi = v[np.ix_(i, i)]
    vii = np.atleast_2d(np.diag(vi))

    M = np.vstack([e * exp(-vii / 2) * sin(mi), e * exp(-vii / 2) * cos(mi)])
    M = M.flatten(order='F')

    lq = -(vii.T + vii) / 2
    q = exp(lq)

    U1 = (exp(lq + vi) - q) * sin(mi.T - mi)
    U2 = (exp(lq - vi) - q) * sin(mi.T + mi)
    U3 = (exp(lq + vi) - q) * cos(mi.T - mi)
    U4 = (exp(lq - vi) - q) * cos(mi.T + mi)

    V = np.vstack(
        [np.hstack([U3 - U4, U1 + U2]),
         np.hstack([(U1 + U2).T, U3 + U4])])
    V = np.vstack([
        np.hstack([V[::2, ::2], V[::2, 1::2]]),
        np.hstack([V[1::2, ::2], V[1::2, 1::2]])
    ])
    V = np.dot(ee.T, ee) * V / 2

    C = np.hstack([np.diag(M[1::2]), -np.diag(M[::2])])
    C = np.hstack([C[:, ::2], C[:, 1::2]])
    C = fill_mat(C, np.zeros((d, 2 * L)), i, None)

    return M, V, C 
Example 43
Project: ParetoMTL   Author: Xi-L   File: run_synthetic_example.py    MIT License
def get_d_moomtl(grads):
    """
    calculate the gradient direction for MOO-MTL 
    """
    
    nobj, dim = grads.shape
    
#    # use cvxopt to solve QP
#    P = np.dot(grads , grads.T)
#    
#    q = np.zeros(nobj)
#    
#    G =  - np.eye(nobj)
#    h = np.zeros(nobj)
#    
#    
#    A = np.ones(nobj).reshape(1,2)
#    b = np.ones(1)
#       
#    cvxopt.solvers.options['show_progress'] = False
#    sol = cvxopt_solve_qp(P, q, G, h, A, b)
    
    # use MinNormSolver to solve QP
    sol, nd = MinNormSolver.find_min_norm_element(grads)
    
    return sol 
Example 44
Project: ParetoMTL   Author: Xi-L   File: run_synthetic_example.py    MIT License
def get_d_paretomtl_init(grads,value,weights,i):
    # calculate the gradient direction for Pareto MTL initialization
    nobj, dim = grads.shape
    
    # check active constraints
    normalized_current_weight = weights[i]/np.linalg.norm(weights[i])
    normalized_rest_weights = np.delete(weights, (i), axis=0) / np.linalg.norm(np.delete(weights, (i), axis=0), axis = 1,keepdims = True)
    w = normalized_rest_weights - normalized_current_weight
    
    gx =  np.dot(w,value/np.linalg.norm(value))
    idx = gx >  0
    
    if np.sum(idx) <= 0:
        return np.zeros(nobj)
    if np.sum(idx) == 1:
        sol = np.ones(1)
    else:
        vec =  np.dot(w[idx],grads)
        sol, nd = MinNormSolver.find_min_norm_element(vec)

    # calculate the weights
    weight0 =  np.sum(np.array([sol[j] * w[idx][j ,0] for j in np.arange(0, np.sum(idx))]))
    weight1 =  np.sum(np.array([sol[j] * w[idx][j ,1] for j in np.arange(0, np.sum(idx))]))
    weight = np.stack([weight0,weight1])
   

    return weight 
Example 45
Project: autoptim   Author: pierreablin   File: test_minimize.py    MIT License
def test_multiple_shapes():
    def f(x, y, z, a):
        return np.sum(x ** 2) + np.sum((y - 3) ** 2) + np.sum((z + a) ** 4)

    a = 2
    shapes = [(2, 3), (2, 2), (3,)]
    optim_vars_init = [np.ones(shape) for shape in shapes]
    optim_vars, res = minimize(f, optim_vars_init, args=(a,))
    assert res['success']
    assert [var.shape for var in optim_vars] == shapes
    for var, target in zip(optim_vars, [0, 3, -a]):
        assert_allclose(var, target, atol=1e-1) 
Example 46
Project: kernel-gof   Author: wittawatj   File: data.py    MIT License
def const(self, X):
        return np.ones(len(X))*8 
Example 47
Project: POT   Author: rflamary   File: dr.py    MIT License
def sinkhorn(w1, w2, M, reg, k):
    """Sinkhorn algorithm with fixed number of iteration (autograd)
    """
    K = np.exp(-M / reg)
    ui = np.ones((M.shape[0],))
    vi = np.ones((M.shape[1],))
    for i in range(k):
        vi = w2 / (np.dot(K.T, ui))
        ui = w1 / (np.dot(K, vi))
    G = ui.reshape((M.shape[0], 1)) * K * vi.reshape((1, M.shape[1]))
    return G 
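A small hypothetical usage of the routine above: coupling two 3-bin histograms under a squared-distance cost. After enough iterations the row and column sums of G approximate w1 and w2.

import autograd.numpy as np

w1 = np.array([0.5, 0.3, 0.2])
w2 = np.array([0.2, 0.3, 0.5])
x = np.arange(3.0)
M = (x.reshape(-1, 1) - x.reshape(1, -1)) ** 2  # squared-distance cost matrix

G = sinkhorn(w1, w2, M, reg=0.1, k=200)
print(G.sum(axis=1))  # close to w1
print(G.sum(axis=0))  # close to w2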
Example 48
Project: bosonic   Author: steinbrecher   File: qonn.py    MIT License
def build_phi_layer(phis, m, offset):
    d = np.ones((m, 1), dtype=complex)
    for i, j in enumerate(range(offset, m-1, 2)):
        d[j, 0] = np.exp(1j*phis[i])
    return d 
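A hypothetical check of the helper above with m = 4 and offset = 0: phases land on modes 0 and 2, while the remaining entries stay 1.

import autograd.numpy as np

phis = np.array([np.pi, np.pi / 2])
print(build_phi_layer(phis, m=4, offset=0).ravel())
# approximately [-1.+0.j  1.+0.j  0.+1.j  1.+0.j]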
Example 49
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def get_calibration(t_test, mu_test, sigma_test, mu_w, cov_w, mu_shift):

    n_y = numpy.shape(mu_test)[0]

    n_t = numpy.shape(t_test)[1]

    q_hat = numpy.zeros((n_y, n_t))

    s_hat = numpy.zeros((n_y, n_t))

    for i in range(0, n_y):

        ln_s = scipy.stats.norm.logpdf(x=t_test, loc=mu_test[i, :], scale=sigma_test[i, :]).reshape(-1, 1)

        feature_q = numpy.hstack([scipy.stats.norm.logcdf(x=t_test, loc=mu_test[i, :],
                                                          scale=sigma_test[i, :]).reshape(-1, 1),
                                  scipy.stats.norm.logsf(x=t_test, loc=mu_test[i, :],
                                                         scale=sigma_test[i, :]).reshape(-1, 1),
                                  numpy.ones((n_t, 1))])

        w_sample = scipy.stats.multivariate_normal.rvs(size=1024, mean=mu_w[i, :], cov=cov_w[i, :, :])

        w_sample[:, 0] = -numpy.exp(w_sample[:, 0] / mu_shift[0] + mu_shift[1])

        w_sample[:, 1] = numpy.exp(w_sample[:, 1] / mu_shift[2] + mu_shift[3])

        w_sample[:, 2] = w_sample[:, 2] / mu_shift[4] + mu_shift[5]

        raw_prod = numpy.matmul(feature_q, w_sample.transpose())

        MAX = raw_prod.copy()

        MAX[MAX < 0] = 0

        q_hat[i, :] = numpy.mean(numpy.exp(-MAX) / (numpy.exp(-MAX) + numpy.exp(raw_prod - MAX)), axis=1).ravel()

        tmp_de = numpy.where(raw_prod <= 0,
                             2 * numpy.log(1 + numpy.exp(raw_prod)),
                             2 * (raw_prod + numpy.log(1 + 1 / numpy.exp(raw_prod))))

        ln_s_hat = (raw_prod + numpy.log((w_sample[:, 0] + w_sample[:, 1]) * numpy.exp(feature_q[:, 0].reshape(-1, 1)) -
                                         w_sample[:, 0]) - feature_q[:, 0].reshape(-1, 1) -
                    feature_q[:, 1].reshape(-1, 1) - tmp_de) + ln_s

        mc_s_hat = numpy.exp(ln_s_hat)

        mc_s_hat[numpy.isnan(mc_s_hat)] = 0

        mc_s_hat[numpy.isinf(mc_s_hat)] = 0

        s_hat[i, :] = numpy.mean(mc_s_hat, axis=1).ravel()

    return s_hat, q_hat 
Example 50
Project: momi2   Author: popgenmethods   File: compute_sfs.py    GNU General Public License v3.0
def expected_total_branch_len(demography, error_matrices=None, ascertainment_pop=None,
                              sampled_pops=None, sampled_n=None):
    """
    The expected total branch length of the sample genealogy.
    Equivalently, the expected number of observed mutations when
    mutation rate=1.

    Parameters
    ----------
    demography : Demography

    Returns
    -------
    total : float-like
         the total expected number of SNPs/branch length

    Other Parameters
    ----------------
    error_matrices : optional, sequence of 2-dimensional numpy.ndarray
         length-D sequence, where D = number of demes in demography.
         error_matrices[i] describes the sampling error in deme i as:
         error_matrices[i][j,k] = P(observe j mutants in deme i | k mutants in deme i)
         If error_matrices is not None, then the returned value is adjusted
         to account for this sampling error, in particular the effect it
         has on the total number of observed mutations.

    See Also
    --------
    expected_sfs : individual SFS entries
    expected_tmrca, expected_deme_tmrca : other interesting statistics
    expected_sfs_tensor_prod : compute general class of summary statistics
    """
    if ascertainment_pop is None:
        ascertainment_pop = [True] * len(demography.sampled_n)
    ascertainment_pop = np.array(ascertainment_pop)

    vecs = [[np.ones(n + 1), [1] + [0] * n, [0] * n + [1]]
            if asc else np.ones((3, n + 1), dtype=float)
            for asc, n in zip(ascertainment_pop, demography.sampled_n)]
    if error_matrices is not None:
        vecs = _apply_error_matrices(vecs, error_matrices)

    ret = expected_sfs_tensor_prod(vecs, demography)
    return ret[0] - ret[1] - ret[2]