Python autograd.numpy.arange() Examples

The following code examples show how to use autograd.numpy.arange(). They are drawn from open-source Python projects.
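
One point of orientation before the examples: autograd treats the output of arange() as a constant integer array, so in the snippets below it supplies indices or fixed coefficients inside functions being differentiated, rather than anything gradients flow through. A minimal sketch, independent of the projects listed here:

import autograd.numpy as np
from autograd import grad

def weighted_sum(x):
    # np.arange supplies the constant weights 0, 1, ..., len(x) - 1
    return np.sum(np.arange(len(x)) * x)

print(grad(weighted_sum)(np.ones(4)))  # [0. 1. 2. 3.]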

Example 1
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def prior_error(mu_shift, w, n_u):

    a = -numpy.abs(w[:, numpy.arange(0, n_u)*3] + mu_shift[1])

    b = numpy.abs(w[:, numpy.arange(0, n_u)*3+1] + mu_shift[3])

    c = w[:, numpy.arange(0, n_u)*3+2] + mu_shift[5]

    q = numpy.linspace(1e-8, 1 - 1e-8, 128)

    # q = q.ravel()

    q_hat = numpy.mean(1 / (1 + numpy.exp(numpy.log(q)[None, None, :] * a[:, :, None] +
                                          numpy.log(1 - q)[None, None, :] * b[:, :, None] +
                                          c[:, :, None])), axis=0)

    return numpy.mean((q - q_hat) ** 2) 
Example 2
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def get_link_hessian_g(w, q, ln_q, ln_1_q, ln_s):

    w = w.reshape(-1, 3)

    n = numpy.shape(w)[0]

    # h = numpy.zeros((n * 3, n * 3, n * 3))

    h = numpy.zeros((n, 3, 3, 3))

    for i in range(0, 3):
        for j in range(0, 3):
            for k in range(0, 3):
                tmp_grad = autograd.elementwise_grad(
                    autograd.elementwise_grad(autograd.elementwise_grad(e_link_log_lik, i), j), k)
                # h[numpy.arange(0, n) * 3 + i, numpy.arange(0, n) * 3 + j, numpy.arange(0, n) * 3 + k] = \
                h[:, i, j, k] = \
                    tmp_grad(w[:, 0].reshape(-1, 1), w[:, 1].reshape(-1, 1), w[:, 2].reshape(-1, 1),
                             q, ln_q, ln_1_q, ln_s).ravel()

    return h 
Example 3
Project: momi2   Author: popgenmethods   File: compute_sfs.py    GNU General Public License v3.0
def expected_tmrca(demography, sampled_pops=None, sampled_n=None):
    """
    The expected time to most recent common ancestor of the sample.

    Parameters
    ----------
    demography : Demography

    Returns
    -------
    tmrca : float-like

    See Also
    --------
    expected_deme_tmrca : tmrca of subsample within a deme
    expected_sfs_tensor_prod : compute general class of summary statistics
    """
    vecs = [np.ones(n + 1) for n in demography.sampled_n]
    n0 = len(vecs[0]) - 1.0
    vecs[0] = np.arange(n0 + 1) / n0
    return np.squeeze(expected_sfs_tensor_prod(vecs, demography)) 
Example 4
Project: momi2   Author: popgenmethods   File: sfs.py    GNU General Public License v3.0
def resample(self):
        """Create a new SFS by resampling blocks with replacement.

        Note the resampled SFS is assumed to have the same length in base pairs \
        as the original SFS, which may be a poor assumption if the blocks are not of equal length.

        :returns: Resampled SFS
        :rtype: :class:`Sfs`
        """
        loci = np.random.choice(
            np.arange(self.n_loci), size=self.n_loci, replace=True)
        mat = self.freqs_matrix[:, loci]
        to_keep = np.asarray(mat.sum(axis=1) > 0).squeeze()
        to_keep = np.arange(len(self.configs))[to_keep]
        mat = mat[to_keep, :]
        configs = _ConfigList_Subset(self.configs, to_keep)

        return self.from_matrix(mat, configs, self.folded, self.length) 
Example 5
Project: momi2   Author: popgenmethods   File: size_history.py    GNU General Public License v3.0
def sfs(self, n):
        if n == 0:
            return np.array([0.])
        Et_jj = self.etjj(n)
        #assert np.all(Et_jj[:-1] - Et_jj[1:] >= 0.0) and np.all(Et_jj >= 0.0) and np.all(Et_jj <= self.tau)

        ret = np.sum(Et_jj[:, None] * Wmatrix(n), axis=0)

        before_tmrca = self.tau - np.sum(ret * np.arange(1, n) / n)
        # ignore branch length above untruncated TMRCA
        if self.tau == float('inf'):
            before_tmrca = 0.0

        ret = np.concatenate((np.array([0.0]), ret, np.array([before_tmrca])))
        return ret

    # def transition_prob(self, v, axis=0):
    #     return moran_model.moran_action(self.scaled_time, v, axis=axis) 
Example 6
Project: ISM_supervised_DR   Author: chieh-neu   File: kernel_lib.py    MIT License
def Allocation_2_Y(allocation):
	N = np.size(allocation)
	unique_elements = np.unique(allocation)
	num_of_classes = len(unique_elements)
	class_ids = np.arange(num_of_classes)

	i = 0
	Y = np.zeros(num_of_classes)
	for m in allocation:
		class_label = np.where(unique_elements == m)[0]
		a_row = np.zeros(num_of_classes)
		a_row[class_label] = 1
		Y = np.hstack((Y, a_row))

	Y = np.reshape(Y, (N+1,num_of_classes))
	Y = np.delete(Y, 0, 0)

	return Y 
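
The loop above assembles the one-hot matrix one row at a time; the same encoding can be produced with a single broadcast comparison against the unique labels. A minimal equivalent sketch (illustrative, not part of the project):

import autograd.numpy as np

def allocation_to_one_hot(allocation):
    # Each row of the boolean comparison is the one-hot encoding of a label.
    unique_elements = np.unique(allocation)
    return (np.asarray(allocation).reshape(-1, 1) == unique_elements).astype(float)

print(allocation_to_one_hot([2, 0, 2, 1]))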
Example 7
Project: vittles   Author: rgiordan   File: test_sensitivity_lib.py    Apache License 2.0
def get_test_fun(self, dim1, dim2, eqdim):
        a1 = np.random.random(dim1)
        a2 = np.random.random(dim2)

        # Typically, we will be using the derivative array with a gradient.
        # But, to make sure we are using the dimensions correctly, we will
        # test it with a vector function whose dimension does not match either
        # regressor.
        def g(x1, x2):
            val = np.sin(np.dot(a1, x1) + np.dot(a2, x2))
            return (0.5 + np.arange(eqdim)) * val

        x1 = np.random.random(dim1)
        x2 = np.random.random(dim2)

        return g, x1, x2 
Example 8
Project: networked-popularity   Author: avalanchesiqi   File: predictors.py    MIT License
def evaluate(self):
        true = self.true_test_output
        pred = self.pred_test_output
        return smape(true, pred)[0]

    # def plot(self):
    #     fig, ax1 = plt.subplots(nrows=1, ncols=1)
    #     ax1.plot(np.arange(1, len(self.ts_data) + 1), self.ts_data, 'k--', label='true')
    #     train_smape, _ = smape(self.true_train_output, self.pred_train_output)
    #     ax1.plot(np.arange(1 + self.num_input, len(self.ts_data) + 1 - self.num_output), self.pred_train_output, 'b-o', label='train LSTM: {0:.2f}'.format(train_smape))
    #     test_smape, _ = smape(self.true_test_output, self.pred_test_output)
    #     ax1.plot(np.arange(len(self.ts_data) + 1 - self.num_output, len(self.ts_data) + 1), self.pred_test_output, 'r-o', label='test LSTM: {0:.2f}'.format(test_smape))
    #     for i in range(1, 9):
    #         ax1.axvline(x=7 * i + .5, color='g', linestyle='--', lw=1.5, zorder=30)
    #     ax1.set_xlabel('time index')
    #     ax1.set_ylabel('views')
    #     ax1.legend(frameon=False)
    #
    #     # ax2.plot(self.history.history['loss'], label='loss')
    #     # ax2.plot(self.history.history['val_loss'], label='val_loss')
    #     # ax2.set_xlabel('epoch')
    #     # ax2.set_ylabel('loss')
    #     # ax2.legend(frameon=False)
    #
    #     plt.show() 
Example 9
Project: pilco-learner   Author: cryscan   File: util.py    MIT License
def fill_mat(m, n, i=None, j=None):
    """
    Fill a matrix m of shape [a, b] into a larger matrix n of shape [p, q],
    at the rows given by i and the columns given by j.
    """
    m, n = np.atleast_2d(m), np.atleast_2d(n)
    a, b = m.shape
    p, q = n.shape
    i = np.arange(a) if i is None else np.atleast_1d(i)
    j = np.arange(b) if j is None else np.atleast_1d(j)

    if a > p or b > q:
        raise ValueError("Shape error!")
    if len(i) != a or len(j) != b:
        raise ValueError("Index lengths do not match!")
    if not (max(i) < p and max(j) < q):
        raise ValueError("Indices out of bounds!")

    Ti = np.zeros((p, a))
    Tj = np.zeros((b, q))
    for u, v in enumerate(i):
        Ti[v, u] = 1
    for u, v in enumerate(j):
        Tj[u, v] = 1
    return Ti @ m @ Tj + n 
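
A small usage sketch of fill_mat as defined above (the values are chosen here just for illustration): a 2 x 2 block is scattered into a 4 x 4 zero matrix at rows [1, 2] and columns [0, 3].

import autograd.numpy as np

m = np.array([[10., 20.], [30., 40.]])
n = np.zeros((4, 4))
print(fill_mat(m, n, i=[1, 2], j=[0, 3]))
# row 1 becomes [10, 0, 0, 20] and row 2 becomes [30, 0, 0, 40]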
Example 10
Project: pilco-learner   Author: cryscan   File: control.py    MIT License
def concat(con, sat, policy, m, s):
    max_u = policy.max_u
    E = len(max_u)
    D = len(m)

    F = D + E
    i, j = np.arange(D), np.arange(D, F)
    M = m
    S = fill_mat(s, np.zeros((F, F)))

    m, s, c = con(policy, m, s)
    M = np.hstack([M, m])
    S = fill_mat(s, S, j, j)
    q = np.matmul(S[np.ix_(i, i)], c)
    S = fill_mat(q, S, i, j)
    S = fill_mat(q.T, S, j, i)

    M, S, R = sat(M, S, j, max_u)
    C = np.hstack([np.eye(D), c]) @ R
    return M, S, C 
Example 11
Project: tree-regularization-public   Author: dtak   File: train.py    MIT License
def average_path_length(tree, X):
    """Compute average path length: cost of simulating the average
    example; this is used in the objective function.

    @param tree: DecisionTreeClassifier instance
    @param X: NumPy array (N x D)
              N := number of examples
              D := number of dimensions
    @return path_length: float
                         average path length
    """
    leaf_indices = tree.apply(X)
    # minlength pads the counts so they align with every node index below
    leaf_counts = np.bincount(leaf_indices, minlength=tree.tree_.node_count)
    leaf_i = np.arange(tree.tree_.node_count)
    path_length = np.dot(leaf_i, leaf_counts) / float(X.shape[0])
    return path_length 
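
A hedged usage sketch of average_path_length with a small scikit-learn tree (the data and tree depth here are arbitrary):

import numpy as np
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(0)
X = rng.rand(50, 2)                       # N x D
y = (X[:, 0] > 0.5).astype(int)
tree = DecisionTreeClassifier(max_depth=3).fit(X, y)
print(average_path_length(tree, X))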
Example 12
Project: tree-regularization-public   Author: dtak   File: train.py    MIT License
def get_ith_minibatch_ixs_fences(b_i, batch_size, fences):
    """Split timeseries data of uneven sequence lengths into batches.
    This is how we handle different sized sequences.
    
    @param b_i: integer
                iteration index
    @param batch_size: integer
                       size of batch
    @param fences: list of integers
                   fence-post indices marking where each sequence starts
    @return idx: slice object
                 slice selecting the sequences in this minibatch
    @return batch_slice: integer array
                         indices of the timesteps in this minibatch
    """
    num_data = len(fences) - 1
    num_minibatches = num_data // batch_size + ((num_data % batch_size) > 0)
    b_i = b_i % num_minibatches
    idx = slice(b_i * batch_size, (b_i+1) * batch_size)
    batch_i = np.arange(num_data)[idx]
    batch_slice = np.concatenate([range(i, j) for i, j in 
                                  zip(fences[batch_i], fences[batch_i+1])])
    return idx, batch_slice 
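
For intuition, suppose three sequences of lengths 3, 1, and 4 are concatenated, so the fence posts are [0, 3, 4, 8]. With batch_size=2, iteration 0 selects the first two sequences and their timesteps:

import numpy as np

fences = np.array([0, 3, 4, 8])
idx, batch_slice = get_ith_minibatch_ixs_fences(0, 2, fences)
print(idx)          # slice(0, 2) -> sequences 0 and 1
print(batch_slice)  # [0 1 2 3]  -> their timesteps in the flat array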
Example 13
Project: pylqr   Author: navigator8972   File: pylqr_test.py    GNU General Public License v3.0
def plot_ilqr_result(self):
        if self.res is not None:
            #draw cost evolution and phase chart
            fig = plt.figure(figsize=(16, 8), dpi=80)
            ax_cost = fig.add_subplot(121)
            n_itrs = len(self.res['J_hist'])
            ax_cost.plot(np.arange(n_itrs), self.res['J_hist'], 'r', linewidth=3.5)
            ax_cost.set_xlabel('Number of Iterations', fontsize=20)
            ax_cost.set_ylabel('Trajectory Cost')

            ax_phase = fig.add_subplot(122)
            theta = self.res['x_array_opt'][:, 0]
            theta_dot = self.res['x_array_opt'][:, 1]
            ax_phase.plot(theta, theta_dot, 'k', linewidth=3.5)
            ax_phase.set_xlabel('theta (rad)', fontsize=20)
            ax_phase.set_ylabel('theta_dot (rad/s)', fontsize=20)
            ax_phase.set_title('Phase Plot', fontsize=20)

            ax_phase.plot([theta[-1]], [theta_dot[-1]], 'b*', markersize=16)

            plt.show()

        return 
Example 14
Project: ceviche   Author: fancompute   File: sources.py    MIT License
def compute_f(theta, lambda0, dL, shape):
    """ Compute the 'vacuum' field vector """

    # get plane wave k vector components (in units of grid cells)
    k0 = 2 * npa.pi / lambda0 * dL
    kx =  k0 * npa.sin(theta)
    ky = -k0 * npa.cos(theta)  # negative because downwards

    # array to write into
    f_src = npa.zeros(shape, dtype=npa.complex128)

    # get coordinates
    Nx, Ny = shape
    xpoints = npa.arange(Nx)
    ypoints = npa.arange(Ny)
    xv, yv = npa.meshgrid(xpoints, ypoints, indexing='ij')

    # compute values and insert into array
    x_PW = npa.exp(1j * xpoints * kx)[:, None]
    y_PW = npa.exp(1j * ypoints * ky)[:, None]

    f_src[xv, yv] = npa.outer(x_PW, y_PW)

    return f_src.flatten() 
Example 15
Project: ceviche   Author: fancompute   File: utils.py    MIT License
def make_IO_matrices(indices, N):
    """ Makes matrices that relate the sparse matrix entries to their locations in the matrix
            The kth column of I is a 'one hot' vector specifying the k-th entry's row index into A
            The kth column of J is a 'one hot' vector specifying the k-th entry's column index into A
            O = J^T is for notational convenience.
            Armed with a vector of M entries 'a', we can construct the sparse matrix 'A' as:
                A = I @ diag(a) @ O
            where 'diag(a)' is a (MxM) matrix with vector 'a' along its diagonal.
            In index notation:
                A_ij = I_ik * a_k * O_kj
            Conversely, given the sparse matrix 'A' we can strip out the entries `a` using the IO matrices as follows:
                a = diag(I^T @ A @ O^T)
            In index notation:
                a_k = I_ik * A_ij * O_kj
    """
    M = indices.shape[1]                                 # number of indices in the matrix
    entries_1 = npa.ones(M)                              # M entries of all 1's
    ik, jk = indices                                     # separate i and j components of the indices
    indices_I = npa.vstack((ik, npa.arange(M)))          # indices into the I matrix
    indices_J = npa.vstack((jk, npa.arange(M)))          # indices into the J matrix
    I = make_sparse(entries_1, indices_I, shape=(N, M))  # construct the I matrix
    J = make_sparse(entries_1, indices_J, shape=(N, M))  # construct the J matrix
    O = J.T                                              # make O = J^T matrix for consistency with my notes.
    return I, O 
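
The identities in the docstring are easy to verify with plain scipy.sparse standing in for ceviche's make_sparse helper (that substitution is an assumption of this sketch):

import numpy as np
import scipy.sparse as sp

ik = np.array([0, 2, 1])          # row indices of the entries
jk = np.array([1, 0, 1])          # column indices of the entries
a = np.array([3.0, 5.0, 7.0])     # the entries themselves
N, M = 3, len(a)

I = sp.coo_matrix((np.ones(M), (ik, np.arange(M))), shape=(N, M))
J = sp.coo_matrix((np.ones(M), (jk, np.arange(M))), shape=(N, M))
O = J.T

A = I @ sp.diags(a) @ O                                # A = I diag(a) O
A_direct = sp.coo_matrix((a, (ik, jk)), shape=(N, N))
assert np.allclose(A.toarray(), A_direct.toarray())

a_back = (I.T @ A @ O.T).diagonal()                    # a = diag(I^T A O^T)
assert np.allclose(a_back, a)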
Example 16
Project: ceviche   Author: fancompute   File: fdfd.py    MIT License
def _make_A(self, eps_vec):

        eps_vec_xx, eps_vec_yy = self._grid_average_2d(eps_vec)
        eps_vec_xx_inv = 1 / (eps_vec_xx + 1e-5)  # the 1e-5 is for numerical stability
        eps_vec_yy_inv = 1 / (eps_vec_yy + 1e-5)  # autograd throws 'divide by zero' errors.

        indices_diag = npa.vstack((npa.arange(self.N), npa.arange(self.N)))

        entries_DxEpsy,   indices_DxEpsy   = spsp_mult(self.entries_Dxb, self.indices_Dxb, eps_vec_yy_inv, indices_diag, self.N)
        entries_DxEpsyDx, indices_DxEpsyDx = spsp_mult(entries_DxEpsy, indices_DxEpsy, self.entries_Dxf, self.indices_Dxf, self.N)

        entries_DyEpsx,   indices_DyEpsx   = spsp_mult(self.entries_Dyb, self.indices_Dyb, eps_vec_xx_inv, indices_diag, self.N)
        entries_DyEpsxDy, indices_DyEpsxDy = spsp_mult(entries_DyEpsx, indices_DyEpsx, self.entries_Dyf, self.indices_Dyf, self.N)

        entries_d = 1 / EPSILON_0 * npa.hstack((entries_DxEpsyDx, entries_DyEpsxDy))
        indices_d = npa.hstack((indices_DxEpsyDx, indices_DyEpsxDy))

        entries_diag = MU_0 * self.omega**2 * npa.ones(self.N)

        entries_a = npa.hstack((entries_d, entries_diag))
        indices_a = npa.hstack((indices_d, indices_diag))

        return entries_a, indices_a 
Example 17
Project: paragami   Author: rgiordan   File: psdmatrix_patterns.py    Apache License 2.0
def flat_indices(self, folded_bool, free=None):
        # If no indices are specified, save time and return an empty array.
        if not np.any(folded_bool):
            return np.array([], dtype=int)

        free = self._free_with_default(free)
        shape_ok, err_msg = self._validate_folded_shape(folded_bool)
        if not shape_ok:
            raise ValueError(err_msg)
        if not free:
            folded_indices = self.fold(
                np.arange(self.flat_length(False), dtype=int),
                validate_value=False, free=False)
            return folded_indices[folded_bool]
        else:
            # This indicates that each folded value depends on each
            # free value.  I think this is not true, but getting the exact
            # pattern may be complicated and will
            # probably not make much of a difference in practice.
            if np.any(folded_bool):
                return np.arange(self.flat_length(True), dtype=int)
            else:
                return np.array([]) 
Example 18
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def mc_link_lik(w, mu_shift, q, ln_q, ln_1_q, ln_s):

    n = numpy.shape(q)[0]

    w_a = w[:, numpy.arange(0, n)*3]

    w_b = w[:, numpy.arange(0, n)*3+1]

    a = -numpy.exp(w_a / mu_shift[0] + mu_shift[1])

    b = numpy.exp(w_b / mu_shift[2] + mu_shift[3])

    c = w[:, numpy.arange(0, n)*3+2] / mu_shift[4] + mu_shift[5]

    tmp_sum = a * ln_q.ravel() + b * ln_1_q.ravel() + c

    tmp_de = numpy.where(tmp_sum <= 0,
                         2 * numpy.log(1 + numpy.exp(tmp_sum)),
                         2 * (tmp_sum + numpy.log(1 + 1 / (numpy.exp(tmp_sum)))))

    ln_s_hat = (tmp_sum + numpy.log((a + b) * q.ravel() - a) - ln_q.ravel() - ln_1_q.ravel() - tmp_de) + ln_s.ravel()

    mean_exp = numpy.mean(numpy.exp(ln_s_hat), axis=0)

    ln_mean_s_hat = numpy.where(mean_exp > 0, numpy.log(mean_exp), numpy.log(1e-16))

    link_ll = numpy.sum(ln_mean_s_hat)

    return link_ll 
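
The tmp_de branch above is the standard numerically stable evaluation of 2 * log(1 + exp(x)): for large positive x, the identity log(1 + exp(x)) = x + log(1 + exp(-x)) avoids overflowing exp. A standalone sketch of the trick:

import numpy as np

def log1p_exp(x):
    # Equivalent to np.logaddexp(0.0, x).  Like the source above, np.where
    # evaluates both branches (NumPy may warn about overflow in exp), but
    # the selected values are always finite and correct.
    return np.where(x <= 0, np.log1p(np.exp(x)), x + np.log1p(np.exp(-x)))

print(log1p_exp(np.array([-1000.0, 0.0, 1000.0])))  # [0., log(2), 1000.]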
Example 19
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def get_link_h(w, q, ln_q, ln_1_q, ln_s):

    w = w.reshape(-1, 3)

    n = numpy.shape(w)[0]

    h = numpy.zeros((n*3, n*3))

    for i in range(0, 3):
        for j in range(0, 3):
            tmp_grad = autograd.elementwise_grad(autograd.elementwise_grad(e_link_log_lik, i), j)
            h[numpy.arange(0, n)*3 + i, numpy.arange(0, n)*3 + j] = \
                tmp_grad(w[:, 0].reshape(-1, 1), w[:, 1].reshape(-1, 1), w[:, 2].reshape(-1, 1),
                         q, ln_q, ln_1_q, ln_s).ravel()

    h_u = numpy.zeros((n*3, n*3))

    h_v = numpy.zeros((n*3, n*3))

    for i in range(0, n):
        tmp_u, tmp_s, tmp_v = scipy.linalg.svd(a=-h[i*3:(i+1)*3, i*3:(i+1)*3])

        tmp_s = tmp_s ** 0.5

        h_u[i*3:(i+1)*3, i*3:(i+1)*3] = numpy.matmul(tmp_u, numpy.diag(tmp_s))

        h_v[i*3:(i+1)*3, i*3:(i+1)*3] = numpy.matmul(numpy.diag(tmp_s), tmp_v)

    return -h, h_u, h_v 
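
The per-block SVD above splits each negated 3 x 3 Hessian block -H into factors h_u = U sqrt(S) and h_v = sqrt(S) V^T with h_u @ h_v = -H, a square-root-style factorization. A standalone check of the identity:

import numpy as np
import scipy.linalg

A = np.array([[2.0, 0.5],
              [0.5, 1.0]])          # any (here symmetric PSD) block
U, s, Vt = scipy.linalg.svd(A)
h_u = U @ np.diag(np.sqrt(s))
h_v = np.diag(np.sqrt(s)) @ Vt
assert np.allclose(h_u @ h_v, A)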
Example 20
Project: ReducedVarianceReparamGradients   Author: andymiller   File: opt.py    MIT License
def plot_gradients(self, dims=[1, 10, 50]):
        import matplotlib.pyplot as plt
        import seaborn as sns; sns.set_style("white")
        noisy_grads    = np.array(self.grad_trace)
        true_grads     = np.array(self.true_grad_trace)
        filt_grads     = np.array([g[0] for g in self.filtered_grad_trace])
        filt_grads_var = np.array([g[1] for g in self.filtered_grad_trace])
        tgrid = np.arange(true_grads.shape[0])
        fig, axarr = plt.subplots(len(dims), 1, figsize=(12, 3*len(dims)))
        for d, ax in zip(dims, axarr.flatten()):
            ax.plot(tgrid, true_grads[:,d], label="true")
            ax.plot(tgrid, filt_grads[:,d], label="filtered")
            ax.fill_between(tgrid, filt_grads[:,d]-2*np.sqrt(filt_grads_var[:,d]),
                                   filt_grads[:,d]+2*np.sqrt(filt_grads_var[:,d]),
                                   alpha = .25)
            ax.scatter(tgrid, noisy_grads[:,d], s=3)
            #yrange = (np.max([filt_grads[:,d], true_grads[:,d]]),
            #          np.min([filt_grads[:,d], true_grads[:,d]]))
            #ax.set_ylim(yrange)
            ax.set_xlim((tgrid[0], tgrid[-1]))
        ax.legend()
        print "Adam average grad deviation:", \
            np.sqrt(np.mean((filt_grads - true_grads)**2))
        print "sample average deviation: ", \
            np.sqrt(np.mean((noisy_grads - true_grads)**2))
        return fig, axarr 
Example 21
Project: differentiable-atomistic-potentials   Author: google   File: neighborlist.py    Apache License 2.0
def get_neighbors(i, distances, offsets, oneway=False):
  """Get the indices and distances of neighbors to atom i.

  Parameters
  ----------

  i: int, index of the atom to get neighbors for.
  distances: the distances returned from `get_distances`.
  offsets: the unit-cell offsets returned from `get_distances`.

  Returns
  -------
  indices: indices of the neighbors in the original atom list.
  offsets: unit-cell offsets that generate the position of each neighbor.
  """

  di = distances[i]

  within_cutoff = di > 0.0

  indices = np.arange(len(distances))

  inds = indices[np.where(within_cutoff)[0]]
  offs = offsets[np.where(within_cutoff)[1]]

  return inds, np.int_(offs) 
Example 22
Project: momi2   Author: popgenmethods   File: compute_sfs.py    GNU General Public License v3.0
def expected_deme_tmrca(demography, deme, sampled_pops=None, sampled_n=None):
    """
    The expected time to most recent common ancestor, of the samples within
    a particular deme.

    Parameters
    ----------
    demography : Demography
    deme : the deme

    Returns
    -------
    tmrca : float

    See Also
    --------
    expected_tmrca : the tmrca of the whole sample
    expected_sfs_tensor_prod : compute general class of summary statistics
    """
    deme = list(demography.sampled_pops).index(deme)
    vecs = [np.ones(n + 1) for n in demography.sampled_n]

    n = len(vecs[deme]) - 1
    vecs[deme] = np.arange(n + 1) / (1.0 * n)
    vecs[deme][-1] = 0.0

    return np.squeeze(expected_sfs_tensor_prod(vecs, demography)) 
Example 23
Project: momi2   Author: popgenmethods   File: demography.py    GNU General Public License v3.0
def _admixture_prob_helper(self, admixture_node):
        '''
        Array with dim [n_admixture_node+1, n_parent1_node+1, n_parent2_node+1],
        giving probability of derived counts in child, given derived counts in parents
        '''
        n_node = self._n_at_node(admixture_node)

        # admixture node must have two parents
        edge1, edge2 = sorted(self._G.in_edges(
            [admixture_node], data=True), key=lambda x: str(x[:2]))
        parent1, parent2 = [e[0] for e in (edge1, edge2)]
        prob1, prob2 = [e[2]['prob'] for e in (edge1, edge2)]
        assert prob1 + prob2 == 1.0

        #n_from_1 = np.arange(n_node + 1)
        #n_from_2 = n_node - n_from_1
        #binom_coeffs = (prob1**n_from_1) * (prob2**n_from_2) * \
        #    scipy.special.comb(n_node, n_from_1)
        #ret = par_einsum(_der_in_admixture_node(n_node), list(range(4)),
        #                 binom_coeffs, [0],
        #                 [1, 2, 3])
        ret = np.transpose(admixture_operator(n_node, prob1))
        assert ret.shape == tuple([n_node + 1] * 3)

        assert [admixture_node, parent1,
                parent2] == self._admixture_prob_idxs(admixture_node)
        return ret 
Example 24
Project: momi2   Author: popgenmethods   File: configurations.py    GNU General Public License v3.0
def _vecs_and_idxs(self, folded):
        augmented_configs = self._augmented_configs(folded)
        augmented_idxs = self._augmented_idxs(folded)

        # construct the vecs
        vecs = [np.zeros((len(augmented_configs), n + 1))
                for n in self.sampled_n]

        for i in range(len(vecs)):
            n = self.sampled_n[i]
            derived = np.einsum(
                "i,j->ji", np.ones(len(augmented_configs)), np.arange(n + 1))
            curr = scipy.stats.hypergeom.pmf(
                    k=augmented_configs[:, i, 1],
                    M=n,
                    n=derived,
                    N=augmented_configs[:, i].sum(1)
                )
            assert not np.any(np.isnan(curr))
            vecs[i] = np.transpose(curr)

        # copy augmented_idxs to make it safe
        return vecs, dict(augmented_idxs)

    # def _config_str_iter(self):
    #     for c in self.value:
    #         yield _config2hashable(c) 
Example 25
Project: momi2   Author: popgenmethods   File: math_functions.py    GNU General Public License v3.0
def binom_coeffs(n):
    return scipy.special.comb(n, np.arange(n + 1)) 
Example 26
Project: momi2   Author: popgenmethods   File: math_functions.py    GNU General Public License v3.0
def hypergeom_mat(N, n):
    K = np.outer(np.ones(n + 1), np.arange(N + 1))
    k = np.outer(np.arange(n + 1), np.ones(N + 1))
    ret = log_binom(K, k)
    ret = ret + ret[::-1, ::-1]
    ret = ret - log_binom(N, n)
    return np.exp(ret) 
Example 27
Project: momi2   Author: popgenmethods   File: size_history.py    GNU General Public License v3.0
def etjj(self, n):
        j = np.arange(2, n + 1)
        jChoose2 = binom(j, 2)

        pow0, pow1 = self.N_bottom / jChoose2 / 2.0, self.total_growth
        ret = -transformed_expi(pow0 * self.growth_rate / exp(pow1))
        ret = ret * exp(-expm1d(pow1) * self.tau / pow0 - pow1)
        ret = ret + transformed_expi(pow0 * self.growth_rate)
        ret = ret * pow0

        return ret 
Example 28
Project: momi2   Author: popgenmethods   File: size_history.py    GNU General Public License v3.0
def etjj(self, n):
        j = np.arange(2, n + 1)
        jChoose2 = binom(j, 2)

        ret = np.zeros(len(j))
        noCoalProb = np.ones(len(j))
        for pop in self.pieces:
            ret = ret + noCoalProb * pop.etjj(n)
            if pop.scaled_time != float('inf'):
                noCoalProb = noCoalProb * exp(- pop.scaled_time * jChoose2)
            else:
                assert pop is self.pieces[-1]
        return ret 
Example 29
Project: momi2   Author: popgenmethods   File: sfs_stats.py    GNU General Public License v3.0
def ordered_prob(self, subsample_dict,
                     fold=False):
        # The ordered probability for the subsample given by
        # subsample_dict.
        #
        # Parameters:
        # subsample_dict: dict of list
        #    dict mapping population to a list of 0s and 1s giving the
        #       ordered subsample within that population.

        if fold:
            rev_subsample = {p: 1 - np.array(s)
                             for p, s in subsample_dict.items()}

            return (self.ordered_prob(subsample_dict)
                    + self.ordered_prob(rev_subsample))

        derived_weights_dict = {}
        for pop, pop_subsample in subsample_dict.items():
            n = self.sampled_n_dict[pop]
            arange = np.arange(n+1)

            cnts = co.Counter(pop_subsample)

            prob = np.ones(n+1)
            for i in range(cnts[0]):
                prob *= (n - arange - i)
            for i in range(cnts[1]):
                prob *= (arange - i)
            for i in range(cnts[0] + cnts[1]):
                prob /= float(n - i)

            derived_weights_dict[pop] = prob

        return self.tensor_prod(derived_weights_dict) / self.denom 
Example 30
Project: momi2   Author: popgenmethods   File: moran_model.py    GNU General Public License v3.0
def rate_matrix(n, sparse_format="csr"):
    i = np.arange(n + 1)
    diag = i * (n - i) / 2.
    diags = [diag[:-1], -2 * diag, diag[1:]]
    M = scipy.sparse.diags(
        diags, [1, 0, -1], (n + 1, n + 1), format=sparse_format)
    return M 
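
For n = 3 this yields a 4 x 4 tridiagonal generator: the boundary states (0 or n copies of the allele) are absorbing, and each interior row sums to zero, as a rate matrix must:

print(rate_matrix(3).toarray())
# [[ 0.  0.  0.  0.]
#  [ 1. -2.  1.  0.]
#  [ 0.  1. -2.  1.]
#  [ 0.  0.  0.  0.]]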
Example 31
Project: momi2   Author: popgenmethods   File: likelihood.py    GNU General Public License v3.0
def _raw_log_lik(cache, G, data, truncate_probs, folded, error_matrices, vector=False):
    def wrapped_fun(cache):
        demo = Demography(G, cache=cache)
        return _composite_log_likelihood(data, demo, truncate_probs=truncate_probs, folded=folded, error_matrices=error_matrices, vector=vector)
    if vector:
        return ag.checkpoint(wrapped_fun)(cache)
    else:
        ## avoids second forward pass, and has proper
        ## checkpointing for hessian,
        ## but doesn't work for vectorized output
        return rearrange_dict_grad(wrapped_fun)(cache)


#def _build_sfs_batches(sfs, batch_size):
#    counts = sfs._total_freqs
#    sfs_len = len(counts)
#
#    if sfs_len <= batch_size:
#        return [sfs]
#
#    batch_size = int(batch_size)
#    slices = []
#    prev_idx, next_idx = 0, batch_size
#    while prev_idx < sfs_len:
#        slices.append(slice(prev_idx, next_idx))
#        prev_idx = next_idx
#        next_idx += batch_size
#
#    assert len(slices) == int(np.ceil(sfs_len / float(batch_size)))
#
#    idxs = np.arange(sfs_len, dtype=int)
#    idx_list = [idxs[s] for s in slices]
#    return [_sub_sfs(sfs.configs, counts[idx], subidxs=idx) for idx in idx_list] 
Example 32
Project: variational-smc   Author: blei-lab   File: variational_smc.py    MIT License
def resampling(w, rs):
    """
    Stratified resampling with "nograd_primitive" to ensure autograd 
    takes no derivatives through it.
    """
    N = w.shape[0]
    bins = np.cumsum(w)
    ind = np.arange(N)
    u = (ind  + rs.rand(N))/N
    
    return np.digitize(u, bins) 
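
The stratification is visible with fixed weights: one uniform draw lands in each of the N equal sub-intervals of [0, 1), and np.digitize maps each point to the particle whose cumulative-weight bin contains it:

import numpy as np

rng = np.random.RandomState(0)
w = np.array([0.1, 0.2, 0.3, 0.4])
bins = np.cumsum(w)                                  # [0.1, 0.3, 0.6, 1.0]
u = (np.arange(len(w)) + rng.rand(len(w))) / len(w)  # one draw per stratum
print(np.digitize(u, bins))                          # resampled particle indices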
Example 33
Project: vittles   Author: rgiordan   File: sensitivity_lib.py    Apache License 2.0
def __init__(self, eps_order, eta_orders, prefactor):
        """
        Parameters
        -------------
        eps_order:
            The total number of epsilon partial derivatives of g.
        eta_orders:
            A vector of length order - 1.  Entry i contains the number
            of terms :math:`d\\eta^{i + 1} / d\\epsilon^{i + 1}`.
        prefactor:
            The constant multiple in front of this term.
        """
        # Base properties.
        self.eps_order = eps_order
        self.eta_orders = eta_orders
        self.prefactor = prefactor

        # Derived quantities.

        # The total number of eta partials.
        self.total_eta_order = np.sum(self.eta_orders)

        # The order is the total number of epsilon derivatives.
        self._order = int(
            self.eps_order + \
            np.sum(self.eta_orders * np.arange(1, len(self.eta_orders) + 1)))

        # Sanity checks.
        # The rules of differentiation require that these assertions be true
        # -- that is, if terms are generated using the differentiate()
        # method from other well-defined terms, these assertions should always
        # be satisfied.
        assert isinstance(self.eps_order, int)
        assert len(self.eta_orders) == self._order
        assert self.eps_order >= 0 # Redundant
        for eta_order in self.eta_orders:
            assert eta_order >= 0
            assert isinstance(eta_order, int) 
Example 34
Project: vittles   Author: rgiordan   File: sensitivity_lib.py    Apache License 2.0
def deriv_arrays(self, order1, order2):
        if self._swapped:
            orig_array = self._rmda.deriv_arrays(order2, order1)
            # Derivatives with respect to the second variable come first
            # (after the axis of the base function), and need to be at the end.
            axes2 = np.arange(order2) + 1
            return np.moveaxis(orig_array, axes2, -1 * axes2)
        else:
            return self._rmda.deriv_arrays(order1, order2) 
Example 35
Project: vittles   Author: rgiordan   File: test_sensitivity_lib.py    Apache License 2.0
def _test_max_order(self, eta_order, eps_order, test_order):
        # TODO: test this with reverse mode and swapping.

        # Partial derivative of the gradient higher than eta_order and
        # eps_order are zero.
        def objective(eta, eps):
            eta_sum = np.sum(eta)
            eps_sum = np.sum(eps)
            return (eta_sum ** (eta_order + 1)) * (eps_sum ** eps_order)

        # These need to be nonzero for the test to be valid.
        # Note that this test doesn't require an actual optimum,
        # nor does it require the real Hessian.
        eta0 = 0.01 * np.arange(2)
        eps0 = 0.02 * np.arange(3)
        eps1 = eps0 + 1

        # We don't actually need the real Hessian for this test.
        hess0 = np.diag(np.array([2.1, 4.5]))

        taylor_expansion_truth = \
            ParametricSensitivityTaylorExpansion.optimization_objective(
                objective_function=objective,
                input_val0=eta0,
                hyper_val0=eps0,
                hess0=hess0,
                order=test_order)

        taylor_expansion_test = \
            ParametricSensitivityTaylorExpansion.optimization_objective(
                objective_function=objective,
                input_val0=eta0,
                hyper_val0=eps0,
                hess0=hess0,
                max_input_order=eta_order,
                max_hyper_order=eps_order,
                order=test_order)

        assert_array_almost_equal(
            taylor_expansion_truth.evaluate_taylor_series(eps1),
            taylor_expansion_test.evaluate_taylor_series(eps1)) 
Example 36
Project: pilco-learner   Author: cryscan   File: cart_pole.py    MIT License
def loss_cp(self, m, s):
    D0 = np.size(s, 1)
    D1 = D0 + 2 * len(self.angle)
    M = m
    S = s

    ell = self.p
    Q = np.dot(np.vstack([1, ell]), np.array([[1, ell]]))
    Q = fill_mat(Q, np.zeros((D1, D1)), [0, D0], [0, D0])
    Q = fill_mat(ell**2, Q, [D0 + 1], [D0 + 1])

    target = gaussian_trig(self.target, 0 * s, self.angle)[0]
    target = np.hstack([self.target, target])
    i = np.arange(D0)
    m, s, c = gaussian_trig(M, S, self.angle)
    q = np.dot(S[np.ix_(i, i)], c)
    M = np.hstack([M, m])
    S = np.vstack([np.hstack([S, q]), np.hstack([q.T, s])])

    w = self.width if hasattr(self, "width") else [1]
    L = np.array([0])
    S2 = np.array(0)
    for i in range(len(w)):
        self.z = target
        self.W = Q / w[i]**2
        r, s2, c = self.loss_sat(M, S)
        L = L + r
        S2 = S2 + s2

    return L / len(w) 
Example 37
Project: pilco-learner   Author: cryscan   File: cart_pole.py    MIT License
def draw_rollout(latent):
    x0 = latent[:, 0]
    y0 = np.zeros_like(x0)
    x1 = x0 + 0.6 * sin(latent[:, 3])
    y1 = -0.6 * cos(latent[:, 3])

    fig = plt.figure()
    ax = fig.add_subplot(111, autoscale_on=False, xlim=(-2, 2), ylim=(-2, 2))
    ax.set_aspect("equal")
    ax.grid()

    line, = ax.plot([], [], 'o-', lw=2)
    time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)

    def animate(i):
        linex = [x0[i], x1[i]]
        liney = [y0[i], y1[i]]
        line.set_data(linex, liney)
        trail = math.floor(i / (H + 1))
        time_text.set_text("trail %d, time = %.1fs" % (trail, i * dt))
        return line, time_text

    interval = math.ceil(T / dt)
    ani = animation.FuncAnimation(
        fig, animate, np.arange(len(latent)), interval=interval, blit=True)
    ani.save('cart_pole.mp4', fps=20)
    plt.show() 
Example 38
Project: pilco-learner   Author: cryscan   File: base.py    MIT License
def propagate(m, s, plant, dynmodel, policy):
    angi = plant.angi
    poli = plant.poli
    dyni = plant.dyni
    difi = plant.difi

    D0 = len(m)
    D1 = D0 + 2 * len(angi)
    D2 = D1 + len(policy.max_u)
    M = np.array(m)
    S = s

    i, j = np.arange(D0), np.arange(D0, D1)
    m, s, c = gaussian_trig(M[i], S[np.ix_(i, i)], angi)
    q = np.matmul(S[np.ix_(i, i)], c)
    M = np.hstack([M, m])
    S = np.vstack([np.hstack([S, q]), np.hstack([q.T, s])])

    i, j = poli, np.arange(D1)
    m, s, c = policy.fcn(M[i], S[np.ix_(i, i)])
    q = np.matmul(S[np.ix_(j, i)], c)
    M = np.hstack([M, m])
    S = np.vstack([np.hstack([S, q]), np.hstack([q.T, s])])

    i, j = np.hstack([dyni, np.arange(D1, D2)]), np.arange(D2)
    m, s, c = dynmodel.fcn(M[i], S[np.ix_(i, i)])
    q = np.matmul(S[np.ix_(j, i)], c)
    M = np.hstack([M, m])
    S = np.vstack([np.hstack([S, q]), np.hstack([q.T, s])])

    P = np.hstack([np.zeros((D0, D2)), np.eye(D0)])
    P = fill_mat(np.eye(len(difi)), P, difi, difi)
    M_next = np.matmul(P, M[:, newaxis]).flatten()
    S_next = P @ S @ P.T
    S_next = (S_next + S_next.T) / 2
    return M_next, S_next 
Example 39
Project: ParetoMTL   Author: Xi-L   File: run_synthetic_example.py    MIT License
def get_d_paretomtl_init(grads,value,weights,i):
    # calculate the gradient direction for Pareto MTL initialization
    nobj, dim = grads.shape
    
    # check active constraints
    normalized_current_weight = weights[i] / np.linalg.norm(weights[i])
    normalized_rest_weights = np.delete(weights, (i), axis=0) / \
        np.linalg.norm(np.delete(weights, (i), axis=0), axis=1, keepdims=True)
    w = normalized_rest_weights - normalized_current_weight

    gx = np.dot(w, value / np.linalg.norm(value))
    idx = gx > 0
    
    if np.sum(idx) <= 0:
        return np.zeros(nobj)
    if np.sum(idx) == 1:
        sol = np.ones(1)
    else:
        vec =  np.dot(w[idx],grads)
        sol, nd = MinNormSolver.find_min_norm_element(vec)

    # calculate the weights
    weight0 = np.sum(np.array([sol[j] * w[idx][j, 0] for j in np.arange(0, np.sum(idx))]))
    weight1 = np.sum(np.array([sol[j] * w[idx][j, 1] for j in np.arange(0, np.sum(idx))]))
    weight = np.stack([weight0, weight1])

    return weight 
Example 40
Project: ceviche   Author: fancompute   File: utils.py    MIT License
def get_spectrum(series, dt):
    """ Get FFT of series """

    steps = len(series)
    times = np.arange(steps) * dt

    # reshape to be able to multiply by hamming window
    series = series.reshape((steps, -1))

    # multiply by a Hamming window to reduce spectral leakage
    hamming_window = np.hamming(steps).reshape((steps, 1))
    signal_f = my_fft(hamming_window * series)

    freqs = np.fft.fftfreq(steps, d=dt)
    return freqs, signal_f 
Example 41
Project: ceviche   Author: fancompute   File: fdfd.py    MIT License
def _make_A(self, eps_vec):

        C = - 1 / MU_0 * self.Dxf.dot(self.Dxb) \
            - 1 / MU_0 * self.Dyf.dot(self.Dyb)
        entries_c, indices_c = get_entries_indices(C)

        # indices into the diagonal of a sparse matrix
        entries_diag = - EPSILON_0 * self.omega**2 * eps_vec
        indices_diag = npa.vstack((npa.arange(self.N), npa.arange(self.N)))

        entries_a = npa.hstack((entries_diag, entries_c))
        indices_a = npa.hstack((indices_diag, indices_c))

        return entries_a, indices_a 
Example 42
Project: Stein-Density-Ratio-Estimation   Author: anewgithubname   File: data.py    MIT License
def load_mnist():
    partial_flatten = lambda x : np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
    one_hot = lambda x, k: np.array(x[:,None] == np.arange(k)[None, :], dtype=int)
    train_images, train_labels, test_images, test_labels = data_mnist.mnist()
    train_images = partial_flatten(train_images) / 255.0
    test_images  = partial_flatten(test_images)  / 255.0
    train_labels = one_hot(train_labels, 10)
    test_labels = one_hot(test_labels, 10)
    N_data = train_images.shape[0]

    return N_data, train_images, train_labels, test_images, test_labels 
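
The one_hot lambda above uses the same arange-versus-labels broadcast seen elsewhere on this page: comparing labels of shape (N, 1) against np.arange(k) of shape (1, k) yields the N x k indicator matrix. For example:

import numpy as np

labels = np.array([0, 2, 1])
print(np.array(labels[:, None] == np.arange(3)[None, :], dtype=int))
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]]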
Example 43
Project: Stein-Density-Ratio-Estimation   Author: anewgithubname   File: data.py    MIT License
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
                  rs=npr.RandomState(0)):
    """Based on code by Ryan P. Adams."""
    rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)

    features = rs.randn(num_classes*num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:,0])
    rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return np.einsum('ti,tij->tj', features, rotations) 
Example 44
Project: paragami   Author: rgiordan   File: simplex_patterns.py    Apache License 2.0
def flat_indices(self, folded_bool, free=None):
        # If no indices are specified, save time and return an empty array.
        if not np.any(folded_bool):
            return np.array([], dtype=int)

        free = self._free_with_default(free)
        shape_ok, err_msg = self._validate_folded_shape(folded_bool)
        if not shape_ok:
            raise ValueError(err_msg)
        if not free:
            folded_indices = self.fold(
                np.arange(self.flat_length(False), dtype=int),
                validate_value=False, free=False)
            return folded_indices[folded_bool]
        else:
            # Every element of a particular simplex depends on all
            # the free values for that simplex.

            # The simplex is the last index, which moves the fastest.
            indices = []
            offset = 0
            free_simplex_length = self.__simplex_size - 1
            array_ranges = (range(n) for n in self.__array_shape)
            for ind in itertools.product(*array_ranges):
                if np.any(folded_bool[ind]):
                    free_inds = np.arange(
                        offset * free_simplex_length,
                        (offset + 1) * free_simplex_length,
                        dtype=int)
                    indices.append(free_inds)
                offset += 1
            if len(indices) > 0:
                return np.hstack(indices)
            else:
                return np.array([]) 
Example 45
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def get_sample_w(u, C_u, C_wu, C_diag_w, raw_sample_w, n_u, n_y):

    A_u = u[:3*n_u]

    L_u = u[3*n_u:]

    sample_size_w = numpy.shape(raw_sample_w)[0]

    sample_w = numpy.zeros((sample_size_w, 3*n_y))

    Q_w = []

    A_u_g = numpy.zeros((sample_size_w*3*n_y, len(A_u)))

    L_u_g = numpy.zeros((sample_size_w*3*n_y, len(L_u)))

    C_u_g = numpy.zeros((sample_size_w*3*n_y, len(C_u)))

    C_wu_g = numpy.zeros((sample_size_w*3*n_y, len(C_wu)))

    C_diag_w_g = numpy.zeros((sample_size_w*3*n_y, len(C_diag_w)))

    for i in range(0, n_y):

        Q_w.append(get_Q_w(L_u, C_u, C_wu[i*(9*n_u):(i+1)*(9*n_u)], C_diag_w[i*9:(i+1)*9], n_u))

        sample_w[:, i*3:(i+1)*3] = \
            get_sample_w_step(A_u, Q_w[-1].ravel(), C_wu[i*(9*n_u):(i+1)*(9*n_u)],
                              raw_sample_w[:, i*3:(i+1)*3]).reshape(-1, 3)

        tmp_idx = ((numpy.arange(0, sample_size_w) * (3*n_y)).reshape(-1, 1).repeat(3, axis=1) +
                   (numpy.array([0, 1, 2]) + 3*i).ravel()).ravel()

        A_u_g[tmp_idx, :] = \
            A_u_grad(A_u, Q_w[-1], C_wu[i*(9*n_u):(i+1)*(9*n_u)], raw_sample_w[:, i*3:(i+1)*3])

        L_u_g[tmp_idx, :] = \
            numpy.matmul(Q_w_grad(A_u, Q_w[-1], C_wu[i*(9*n_u):(i+1)*(9*n_u)], raw_sample_w[:, i*3:(i+1)*3]),
                         L_u_grad(L_u, C_u, C_wu[i*(9*n_u):(i+1)*(9*n_u)], C_diag_w[i*9:(i+1)*9], n_u))

        C_u_g[tmp_idx, :] = \
            numpy.matmul(Q_w_grad(A_u, Q_w[-1], C_wu[i*(9*n_u):(i+1)*(9*n_u)], raw_sample_w[:, i*3:(i+1)*3]),
                         C_u_grad(L_u, C_u, C_wu[i*(9*n_u):(i+1)*(9*n_u)], C_diag_w[i*9:(i+1)*9], n_u))

        C_diag_w_g[tmp_idx, i*9:(i+1)*9] = \
            numpy.matmul(Q_w_grad(A_u, Q_w[-1], C_wu[i*(9*n_u):(i+1)*(9*n_u)], raw_sample_w[:, i*3:(i+1)*3]),
                         C_diag_w_grad(L_u, C_u, C_wu[i*(9*n_u):(i+1)*(9*n_u)], C_diag_w[i*9:(i+1)*9], n_u))

        C_wu_g[tmp_idx, i*(9*n_u):(i+1)*(9*n_u)] = \
            C_wu_grad(A_u, Q_w[-1], C_wu[i*(9*n_u):(i+1)*(9*n_u)], raw_sample_w[:, i*3:(i+1)*3]) + \
            numpy.matmul(Q_w_grad(A_u, Q_w[-1], C_wu[i*(9*n_u):(i+1)*(9*n_u)], raw_sample_w[:, i*3:(i+1)*3]),
                         C_wu_grad_Q_w(L_u, C_u, C_wu[i*(9*n_u):(i+1)*(9*n_u)], C_diag_w[i*9:(i+1)*9], n_u))

    return sample_w, A_u_g, L_u_g, C_u_g, C_diag_w_g, C_wu_g 
Example 46
Project: momi2   Author: popgenmethods   File: demography.py    GNU General Public License v3.0
def admixture_operator(n_node, p):
    # axis0=n_from_parent, axis1=der_from_parent, axis2=der_in_parent
    der_in_parent = np.tile(np.arange(n_node + 1), (n_node + 1, n_node + 1, 1))
    n_from_parent = np.transpose(der_in_parent, [2, 0, 1])
    der_from_parent = np.transpose(der_in_parent, [0, 2, 1])

    anc_in_parent = n_node - der_in_parent
    anc_from_parent = n_from_parent - der_from_parent

    x = comb(der_in_parent, der_from_parent) * comb(
        anc_in_parent, anc_from_parent) / comb(n_node, n_from_parent)
    # rearrange so axis0=1, axis1=der_in_parent, axis2=der_from_parent, axis3=n_from_parent
    x = np.transpose(x)
    x = np.reshape(x, [1] + list(x.shape))

    n = np.arange(n_node+1)
    B = comb(n_node, n)

    # the two arrays to convolve_sum_axes
    x1 = (x * B * ((1-p)**n) * (p**(n[::-1])))
    x2 = x[:, :, :, ::-1]

    # reduce array size; approximate low probability events with 0
    mu = n_node * (1-p)
    sigma = np.sqrt(n_node * p * (1-p))
    n_sd = 4
    lower = np.max((0, np.floor(mu - n_sd*sigma)))
    upper = np.min((n_node, np.ceil(mu + n_sd*sigma))) + 1
    lower, upper = int(lower), int(upper)

    ##x1 = x1[:,:,:upper,lower:upper]
    ##x2 = x2[:,:,:(n_node-lower+1),lower:upper]

    ret = convolve_sum_axes(x1, x2)
    # axis0=der_in_parent1, axis1=der_in_parent2, axis2=der_in_child
    ret = np.reshape(ret, ret.shape[1:])
    if ret.shape[2] < (n_node+1):
        ret = np.pad(ret, [(0,0),(0,0),(0,n_node+1-ret.shape[2])], "constant")
    return ret[:, :, :(n_node+1)]


#@memoize
#def _der_in_admixture_node(n_node):
#    '''
#    returns 4d-array, [n_from_parent1, der_in_child, der_in_parent1, der_in_parent2].
#    Used by Demography._admixture_prob_helper
#    '''
#    # axis0=n_from_parent, axis1=der_from_parent, axis2=der_in_parent
#    der_in_parent = np.tile(np.arange(n_node + 1), (n_node + 1, n_node + 1, 1))
#    n_from_parent = np.transpose(der_in_parent, [2, 0, 1])
#    der_from_parent = np.transpose(der_in_parent, [0, 2, 1])
#
#    anc_in_parent = n_node - der_in_parent
#    anc_from_parent = n_from_parent - der_from_parent
#
#    x = scipy.special.comb(der_in_parent, der_from_parent) * scipy.special.comb(
#        anc_in_parent, anc_from_parent) / scipy.special.comb(n_node, n_from_parent)
#
#    ret, labels = convolve_axes(
#        x, x[::-1, ...], [[c for c in 'ijk'], [c for c in 'ilm']], ['j', 'l'], 'n')
#    return np.einsum('%s->inkm' % ''.join(labels), ret[..., :(n_node + 1)]) 
Example 47
Project: momi2   Author: popgenmethods   File: confidence_region.py    GNU General Public License v3.0
def _long_score_cov(params, seg_sites, demo_func, **kwargs):
    if "mut_rate" in kwargs:
        raise NotImplementedError(
            "Currently only implemented for multinomial composite likelihood")
    params = np.array(params)

    configs = seg_sites.sfs.configs
    #_,snp_counts = seg_sites.sfs._idxs_counts(None)
    snp_counts = seg_sites.sfs._total_freqs
    weights = snp_counts / float(np.sum(snp_counts))

    def snp_log_probs(x):
        ret = np.log(expected_sfs(
            demo_func(*x), configs, normalized=True, **kwargs))
        return ret - np.sum(weights * ret)  # subtract off mean

    # g_out = sum(autocov(einsum("ij,ik->ikj",jacobian(idx_series), jacobian(idx_series))))
    # computed in roundabout way, in case jacobian is slow for many snps
    # autocovariance is truncated at sqrt(len(idx_series)), to avoid
    # statistical/numerical issues
    def g_out_antihess(y):
        lp = snp_log_probs(y)
        ret = 0.0
        for l in seg_sites._get_likelihood_sequences(lp):
            L = len(l)
            lc = make_constant(l)

            fft = np.fft.fft(l)
            # (assumes l is REAL)
            assert np.all(np.imag(l) == 0.0)
            fft_rev = np.conj(fft) * np.exp(2 * np.pi *
                                            1j * np.arange(L) / float(L))

            curr = 0.5 * (fft * fft_rev - fft *
                          make_constant(fft_rev) - make_constant(fft) * fft_rev)
            curr = np.fft.ifft(curr)[(L - 1)::-1]

            # make real
            assert np.allclose(np.imag(curr / L), 0.0)
            curr = np.real(curr)
            curr = curr[0] + 2.0 * np.sum(curr[1:int(np.sqrt(L))])
            ret = ret + curr
        return ret
    g_out = autograd.hessian(g_out_antihess)(params)
    g_out = 0.5 * (g_out + np.transpose(g_out))
    return g_out 
Example 48
Project: ParetoMTL   Author: Xi-L   File: run_synthetic_example.py    MIT License
def get_d_paretomtl(grads,value,weights,i):
    # calculate the gradient direction for Pareto MTL
    nobj, dim = grads.shape
    
    # check active constraints
    normalized_current_weight = weights[i] / np.linalg.norm(weights[i])
    normalized_rest_weights = np.delete(weights, (i), axis=0) / \
        np.linalg.norm(np.delete(weights, (i), axis=0), axis=1, keepdims=True)
    w = normalized_rest_weights - normalized_current_weight

    # solve QP
    gx = np.dot(w, value / np.linalg.norm(value))
    idx = gx > 0

    vec = np.concatenate((grads, np.dot(w[idx], grads)), axis=0)
    
#    # use cvxopt to solve QP
#    
#    P = np.dot(vec , vec.T)
#    
#    q = np.zeros(nobj + np.sum(idx))
#    
#    G =  - np.eye(nobj + np.sum(idx) )
#    h = np.zeros(nobj + np.sum(idx))
#    
#
#    
#    A = np.ones(nobj + np.sum(idx)).reshape(1,nobj + np.sum(idx))
#    b = np.ones(1)
 
#    cvxopt.solvers.options['show_progress'] = False
#    sol = cvxopt_solve_qp(P, q, G, h, A, b)
  
    # use MinNormSolver to solve QP
    sol, nd = MinNormSolver.find_min_norm_element(vec)
   
    
    # reformulate ParetoMTL as a linear scalarization; return the weights
    weight0 = sol[0] + np.sum(np.array([sol[j] * w[idx][j - 2, 0] for j in np.arange(2, 2 + np.sum(idx))]))
    weight1 = sol[1] + np.sum(np.array([sol[j] * w[idx][j - 2, 1] for j in np.arange(2, 2 + np.sum(idx))]))
    weight = np.stack([weight0, weight1])

    return weight 
Example 49
Project: paragami   Author: rgiordan   File: numeric_array_patterns.py    Apache License 2.0
def __init__(self, shape,
                 lb=-float("inf"), ub=float("inf"),
                 default_validate=True, free_default=None):
        """
        Parameters
        -------------
        shape: `tuple` of `int`
            The shape of the array.
        lb: `float`
            The (inclusive) lower bound for the entries of the array.
        ub: `float`
            The (inclusive) upper bound for the entries of the array.
        default_validate: `bool`, optional
            Whether or not the array is checked by default to lie within the
            specified bounds.
        free_default: `bool`, optional
            Whether the pattern is free by default.
        """
        self.default_validate = default_validate
        self._shape = tuple(shape)
        self._lb = lb
        self._ub = ub
        assert lb >= -float('inf')
        assert ub <= float('inf')
        if lb >= ub:
            raise ValueError(
                'Upper bound ub must strictly exceed lower bound lb')

        free_flat_length = flat_length = int(np.prod(self._shape))

        super().__init__(flat_length, free_flat_length,
                         free_default=free_default)

        # Cache arrays of indices for flat_indices
        # TODO: not sure this is a good idea or much of a speedup.
        self.__free_folded_indices = self.fold(
            np.arange(self.flat_length(free=True), dtype=int),
            validate_value=False, free=False)

        self.__nonfree_folded_indices = self.fold(
            np.arange(self.flat_length(free=False), dtype=int),
            validate_value=False, free=False) 
Example 50
Project: paragami   Author: rgiordan   File: test_autograd_supplement_lib.py    Apache License 2.0
def test_get_differentiable_solver(self):
        dim = 5
        z = np.eye(dim)
        z[0, 1] = 0.2
        z[1, 0] = 0.2
        z[0, dim - 1] = 0.05
        z[dim - 1, 0] = 0.1

        z_sp = osp.sparse.csc_matrix(z)

        a = np.random.random(dim)

        # Check with simple lambda functions.
        z_solve, zt_solve = autograd_supplement_lib.get_differentiable_solver(
            lambda b: osp.sparse.linalg.spsolve(z_sp, b),
            lambda b: osp.sparse.linalg.spsolve(z_sp.T, b))

        assert_array_almost_equal(
            osp.sparse.linalg.spsolve(z_sp, a), z_solve(a))
        assert_array_almost_equal(
            osp.sparse.linalg.spsolve(z_sp.T, a), zt_solve(a))

        check_grads(z_solve)(a)
        check_grads(zt_solve)(a)

        # Check with factorized matrices.
        z_factorized = osp.sparse.linalg.factorized(z_sp)
        zt_factorized = osp.sparse.linalg.factorized(z_sp.T)
        z_solve_fac, zt_solve_fac = \
            autograd_supplement_lib.get_differentiable_solver(
                z_factorized, zt_factorized)

        assert_array_almost_equal(z_factorized(a), z_solve_fac(a))
        assert_array_almost_equal(zt_factorized(a), zt_solve_fac(a))

        check_grads(z_solve_fac)(a)
        check_grads(zt_solve_fac)(a)

        # # Test with a Cholesky decomposition.
        # z_chol = cholesky(z_sp)
        # zt_chol = cholesky(z_sp.T)
        #
        # # Make sure that we are testing the fill-reducing permutation.
        # self.assertTrue(not np.all(z_chol.P() == np.arange(dim)))
        # z_solve_chol, zt_solve_chol = \
        #     autograd_supplement_lib.get_differentiable_solver(
        #         z_chol, zt_chol)
        #
        # # TODO(why is Cholesky not working?)
        # # For some reason, ``z_chol(a)`` doesn't work unless ``a``
        # # is the same (square) dimension as ``z``.  This appears to be
        # # a problem with the Cholesky decomposition, not the sparse solver.
        # z_chol(a) # Fails
        # assert_array_almost_equal(z_chol(a), z_solve_chol(a))
        # assert_array_almost_equal(zt_chol(a), zt_solve_chol(a))
        #
        # check_grads(z_solve_chol)(a)
        # check_grads(zt_solve_chol)(a)