Python autograd.numpy.array() Examples

The following are 30 code examples of autograd.numpy.array(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
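As a quick orientation before the examples: autograd.numpy.array() behaves like numpy.array() but participates in autograd's differentiation tracing, so arrays built from traced values stay differentiable. A minimal standalone sketch (not taken from any of the projects below):

import autograd.numpy as np
from autograd import grad

def f(params):
    # building an array from traced values keeps the result differentiable
    A = np.array([[params[0], 0.0],
                  [0.0, params[1]]])
    return np.sum(A ** 2)

print(grad(f)(np.array([1.0, 2.0])))  # -> array([2., 4.])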
Example #1
Source File: configurations.py    From momi2 with GNU General Public License v3.0
def _build_old_new_idxs(self, folded):
        idxs = self.full_configs._augmented_idxs(folded)

        denom_idx_key = 'denom_idx'
        denom_idx = idxs[denom_idx_key]
        idxs = {k: v[self.sub_idxs]
                for k, v in list(idxs.items()) if k != denom_idx_key}

        old_idxs = np.array(
            list(set(sum(map(list, idxs.values()), [denom_idx]))))
        old_2_new_idxs = {old_id: new_id for new_id,
                          old_id in enumerate(old_idxs)}

        idxs = {k: np.array([old_2_new_idxs[old_id]
                             for old_id in v], dtype=int)
                for k, v in list(idxs.items())}
        idxs[denom_idx_key] = old_2_new_idxs[denom_idx]
        return old_idxs, idxs 
Example #2
Source File: demography.py    From momi2 with GNU General Public License v3.0
def get_treeseq_configs(treeseq, sampled_n):
    mat = np.zeros((len(sampled_n), sum(sampled_n)), dtype=int)
    j = 0
    for i, n in enumerate(sampled_n):
        for _ in range(n):
            mat[i, j] = 1
            j += 1
    mat = scipy.sparse.csr_matrix(mat)

    def get_config(genos):
        derived_counts = mat.dot(genos)
        return np.array([
            sampled_n - derived_counts,
            derived_counts
        ]).T

    for v in treeseq.variants():
        yield get_config(v.genotypes) 
Example #3
Source File: fft.py    From scarlet with MIT License
def fast_zero_pad(arr, pad_width):
    """Fast version of numpy.pad when `mode="constant"`

    Executing `numpy.pad` with mode="constant" is ~1000 times slower
    because it does not use `numpy.zeros` to allocate the padded array.

    Parameters
    ----------
    arr: array
        The array to pad
    pad_width: tuple
        Number of values padded to the edges of each axis.
        See numpy docs for more.

    Returns
    -------
    result: array
        The input array padded with zeros
    """
    newshape = tuple([a+ps[0]+ps[1] for a, ps in zip(arr.shape, pad_width)])

    result = np.zeros(newshape, dtype=arr.dtype)
    slices = tuple([slice(start, s-end) for s, (start, end) in zip(result.shape, pad_width)])
    result[slices] = arr
    return result 
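A quick usage check of fast_zero_pad against numpy.pad (a sketch assuming the function above is in scope; pad_width follows numpy.pad's per-axis (before, after) convention):

import numpy as np

a = np.arange(6.0).reshape(2, 3)
pw = ((1, 2), (0, 1))  # (before, after) padding for each axis
assert np.array_equal(fast_zero_pad(a, pw), np.pad(a, pw, mode="constant"))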
Example #4
Source File: fft.py    From scarlet with MIT License
def __init__(self, image, image_fft=None):
        """Initialize the object

        Parameters
        ----------
        image: array
            The real space image.
        image_fft: dict
            A dictionary of {shape: fft_value} for which each different
            shape has a precalculated FFT.
        """
        if image_fft is None:
            self._fft = {}
        else:
            self._fft = image_fft
        self._image = image 
Example #5
Source File: sfs.py    From momi2 with GNU General Public License v3.0
def _entropy(self):
        counts = self._total_freqs
        n_snps = float(self.n_snps())
        p = counts / n_snps
        # return np.sum(p * np.log(p))
        ret = np.sum(p * np.log(p))

        # correct for missing data
        sampled_n = np.sum(self.configs.value, axis=2)
        sampled_n_counts = co.Counter()
        assert len(counts) == len(sampled_n)
        for c, n in zip(counts, sampled_n):
            n = tuple(n)
            sampled_n_counts[n] += c
        sampled_n_counts = np.array(
            list(sampled_n_counts.values()), dtype=float)

        ret = ret + np.sum(sampled_n_counts / n_snps *
                           np.log(n_snps / sampled_n_counts))
        assert not np.isnan(ret)
        return ret 
Example #6
Source File: observation.py    From scarlet with MIT License
def get_loss(self, model):
        """Computes the loss/fidelity of a given model wrt to the observation
        Parameters
        ----------
        model: array
            A model from `Blend`
        Returns
        -------
        loss: float
            Loss of the model
        """
        model_ = self.render(model)
        images_ = self.images
        weights_ = self.weights

        # properly normalized likelihood
        log_sigma = np.zeros(weights_.shape, dtype=weights_.dtype)
        cuts = weights_ > 0
        log_sigma[cuts] = np.log(1 / weights_[cuts])
        log_norm = (
                np.prod(images_.shape) / 2 * np.log(2 * np.pi)
                + np.sum(log_sigma) / 2
        )

        return log_norm + 0.5 * np.sum(weights_ * (model_ - images_) ** 2) 
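For reference, with per-pixel weights w_i = 1/sigma_i^2 the value returned is the Gaussian negative log-likelihood

    -log p(images | model) = (N/2) log(2*pi) + (1/2) sum_i log sigma_i^2
                             + (1/2) sum_i w_i (model_i - images_i)^2,

where N = np.prod(images_.shape); zero-weight (masked) pixels drop out of the log-sigma and residual sums, though they still count toward N in this implementation.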
Example #7
Source File: configurations.py    From momi2 with GNU General Public License v3.0
def build_config_list(sampled_pops, counts, sampled_n=None, ascertainment_pop=None):
    """
    if sampled_n is not None, counts is the derived allele counts

    if sampled_n is None, counts has an extra trailing axis:
      counts[...,0] is ancestral allele count,
      counts[...,1] is derived allele count
    """
    if sampled_n is not None:
        sampled_n = np.array(sampled_n, dtype=int)
        counts1 = np.array(counts, dtype=int, ndmin=2)
        counts0 = sampled_n - counts1
        counts = np.array([counts0, counts1], dtype=int)
        counts = np.transpose(counts, axes=[1, 2, 0])
    counts = np.array(counts, ndmin=3, dtype=int)
    assert counts.shape[1:] == (len(sampled_pops), 2)
    counts.setflags(write=False)
    return ConfigList(sampled_pops, counts, sampled_n, ascertainment_pop) 
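A minimal sketch of the array shaping build_config_list performs when sampled_n is given, with hypothetical values (standalone, only needs autograd.numpy):

import autograd.numpy as np

sampled_n = np.array([4, 6], dtype=int)                   # haploid samples per population
counts1 = np.array([[1, 3], [0, 6]], dtype=int, ndmin=2)  # derived counts, one row per SNP
counts0 = sampled_n - counts1                             # ancestral counts
counts = np.transpose(np.array([counts0, counts1], dtype=int), axes=[1, 2, 0])
print(counts.shape)  # (n_snps, n_pops, 2): [..., 0] ancestral, [..., 1] derived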
Example #8
Source File: configurations.py    From momi2 with GNU General Public License v3.0
def build_full_config_list(sampled_pops, sampled_n, ascertainment_pop=None):
    sampled_n = np.array(sampled_n)
    if ascertainment_pop is None:
        ascertainment_pop = [True] * len(sampled_pops)
    ascertainment_pop = np.array(ascertainment_pop)

    ranges = [list(range(n + 1)) for n in sampled_n]
    config_list = []
    for x in it.product(*ranges):
        x = np.array(x, dtype=int)
        if not (np.all(x[ascertainment_pop] == 0) or np.all(
                x[ascertainment_pop] == sampled_n[ascertainment_pop])):
            config_list.append(x)
    return build_config_list(
        sampled_pops, np.array(config_list, dtype=int), sampled_n,
        ascertainment_pop=ascertainment_pop) 
Example #9
Source File: observation.py    From scarlet with MIT License
def get_loss(self, model):
        """Computes the loss/fidelity of a given model wrt to the observation

        Parameters
        ----------
        model: array
            The model from `Blend`

        Returns
        -------
        result: array
            Scalar tensor with the likelihood of the model
            given the image data
        """
        model_ = self.render(model)
        if self.frame != self.model_frame:
            images_ = self.images[self.slices_for_images]
            weights_ = self.weights[self.slices_for_images]
        else:
            images_ = self.images
            weights_ = self.weights

        return self.log_norm + np.sum(weights_ * (model_ - images_) ** 2) / 2 
Example #10
Source File: configurations.py    From momi2 with GNU General Public License v3.0
def subsample_probs(self, subconfig):
        """
        Returns the probability of subsampling subconfig
        from each config.
        """
        subconfig = np.array(subconfig)
        total_counts_dict = {p: n for p, n in zip(self.sampled_pops,
                                                  subconfig.sum(axis=1))
                             if n > 0}
        derived_counts_dict = {p: [0]*(n+1)
                               for p, n in total_counts_dict.items()}
        for p, d in zip(self.sampled_pops, subconfig[:, 1]):
            if p in derived_counts_dict:
                derived_counts_dict[p][d] = 1

        num = self.count_subsets(derived_counts_dict, total_counts_dict)
        denom = self.count_subsets({}, total_counts_dict)

        # avoid 0/0
        assert np.all(num[denom == 0] == 0)
        denom[denom == 0] = 1
        return num / denom

    # TODO: remove this method (and self.sampled_n attribute) 
Example #11
Source File: observation.py    From scarlet with MIT License
def render(self, model):
        """Convolve a model to the observation frame

        Parameters
        ----------
        model: array
            The model from `Blend`

        Returns
        -------
        image_model: array
            `model` mapped into the observation frame
        """
        if self._diff_kernels is not None:
            model_images = self.convolve(model)
        else:
            model_images = model
        return model_images[self.slices_for_model] 
Example #12
Source File: confidence_region.py    From momi2 with GNU General Public License v3.0
def _many_score_cov(params, data, demo_func, **kwargs):
    params = np.array(params)

    def f_vec(x):
        ret = _composite_log_likelihood(
            data, demo_func(*x), vector=True, **kwargs)
        # centralize
        return ret - np.mean(ret)

    # g_out = einsum('ij,ik', jacobian(f_vec)(params), jacobian(f_vec)(params))
    # but computed in a roundabout way because jacobian implementation is slow
    def _g_out_antihess(x):
        l = f_vec(x)
        lc = make_constant(l)
        return np.sum(0.5 * (l**2 - l * lc - lc * l))
    return autograd.hessian(_g_out_antihess)(params) 
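Why the roundabout computation works: lc is a constant (non-differentiated) copy of l, so for h(x) = sum_i (l_i^2 - l_i*lc_i - lc_i*l_i)/2 the Hessian is sum_i [(grad l_i)(grad l_i)^T + (l_i - lc_i) * (hess l_i)], and because lc_i equals l_i at the evaluation point the second term vanishes, leaving exactly the score covariance einsum('ij,ik', J, J).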
Example #13
Source File: size_history.py    From momi2 with GNU General Public License v3.0
def sfs(self, n):
        if n == 0:
            return np.array([0.])
        Et_jj = self.etjj(n)
        #assert np.all(Et_jj[:-1] - Et_jj[1:] >= 0.0) and np.all(Et_jj >= 0.0) and np.all(Et_jj <= self.tau)

        ret = np.sum(Et_jj[:, None] * Wmatrix(n), axis=0)

        before_tmrca = self.tau - np.sum(ret * np.arange(1, n) / n)
        # ignore branch length above untruncated TMRCA
        if self.tau == float('inf'):
            before_tmrca = 0.0

        ret = np.concatenate((np.array([0.0]), ret, np.array([before_tmrca])))
        return ret

    # def transition_prob(self, v, axis=0):
    #     return moran_model.moran_action(self.scaled_time, v, axis=axis) 
Example #14
Source File: standard_models.py    From pyhawkes with MIT License
def G(self):
        full_W = np.array([node.w for node in self.nodes])
        WB = full_W[:,1:].reshape((self.K,self.K, self.B))

        # Weight matrix is summed over impulse response functions
        WT = WB.sum(axis=2)

        # Impulse response weights are normalized weights
        GT = WB / WT[:,:,None]

        # Then we transpose so that the impulse matrix is (outgoing x incoming x basis)
        G = np.transpose(GT, [1,0,2])

        # TODO: Decide if this is still necessary
        for k1 in range(self.K):
            for k2 in range(self.K):
                if G[k1,k2,:].sum() < 1e-2:
                    G[k1,k2,:] = 1.0/self.B
        return G 
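A quick sanity check of that normalization with random weights (a standalone sketch with hypothetical shapes):

import numpy as np

K, B = 3, 4
WB = np.random.rand(K, K, B)
GT = WB / WB.sum(axis=2)[:, :, None]     # normalize over the basis axis
G = np.transpose(GT, [1, 0, 2])          # (outgoing x incoming x basis)
assert np.allclose(G.sum(axis=2), 1.0)   # each impulse response sums to 1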
Example #15
Source File: demography.py    From momi2 with GNU General Public License v3.0
def simulate_data(self, length, num_replicates=1, **kwargs):
        treeseq = self.simulate_trees(length=length, num_replicates=num_replicates,
                                      **kwargs)
        try:
            treeseq.variants
        except AttributeError:
            # no .variants attribute: already an iterable of tree sequences
            pass
        else:
            # single tree sequence: wrap it so the loop below can iterate loci
            treeseq = [treeseq]

        mat = np.zeros((len(self.sampled_n), sum(self.sampled_n)), dtype=int)
        j = 0
        for i, n in enumerate(self.sampled_n):
            for _ in range(n):
                mat[i, j] = 1
                j += 1
        mat = scipy.sparse.csr_matrix(mat)

        def get_config(genos):
            derived_counts = mat.dot(genos)
            return np.array([
                self.sampled_n - derived_counts,
                derived_counts
            ]).T

        #chrom = []
        chrom = _CompressedList()
        pos = []
        compressed_counts = _CompressedHashedCounts(len(self.sampled_pops))

        for c, locus in enumerate(treeseq):
            for v in locus.variants():
                compressed_counts.append(get_config(v.genotypes))
                chrom.append(c)
                pos.append(v.position)

        return SnpAlleleCounts(
            chrom, pos, compressed_counts.compressed_allele_counts(),
            self.sampled_pops, use_folded_sfs=False,
            non_ascertained_pops=[], length=length*num_replicates,
            n_read_snps=len(compressed_counts), n_excluded_snps=0) 
Example #16
Source File: confidence_region.py    From momi2 with GNU General Public License v3.0
def wald_intervals(self, lower=.025, upper=.975):
        """
        Marginal Wald-type confidence intervals.
        """
        conf_lower, conf_upper = scipy.stats.norm.interval(upper - lower,
                                                           loc=self.point,
                                                           scale=np.sqrt(np.diag(self.godambe(inverse=True))))
        return np.array([conf_lower, conf_upper]).T 
Example #17
Source File: math_functions.py    From momi2 with GNU General Public License v3.0
def expm1d(x, eps=1e-6):
    x = np.array(x)
    abs_x = np.abs(x)
    if x.shape:
        # FIXME: don't require abs_x to be increasing
        assert np.all(abs_x[1:] >= abs_x[:-1])
        small = abs_x < eps
        big = ~small
        return np.concatenate([expm1d_taylor(x[small]),
                               expm1d_naive(x[big])])
    elif abs_x < eps:
        return expm1d_taylor(x)
    else:
        return expm1d_naive(x) 
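expm1d computes (e^x - 1)/x, switching to a Taylor expansion near 0 to avoid 0/0. The two helpers it calls are not shown above; plausible implementations (a sketch, not momi2's actual code) would be:

import autograd.numpy as np

def expm1d_naive(x):
    # accurate as long as |x| is not tiny
    return np.expm1(x) / x

def expm1d_taylor(x):
    # Taylor series of (e^x - 1)/x about 0: 1 + x/2 + x^2/6 + O(x^3)
    return 1.0 + x / 2.0 + x ** 2 / 6.0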
Example #18
Source File: optimizers.py    From momi2 with GNU General Public License v3.0
def callback(self, x, fx, i):
        # msg = "Optimization step, {{'it': {i}, 'fun': {fx}, 'x': {x}}}".format(
        #     i=i, x=list(x), fx=fx)
        # logger.info(msg)
        if self.user_callback:
            x = np.array(x)
            x = x.view(LoggingCallbackArray)
            x.iteration = i
            x.fun = fx
            self.user_callback(x)

# special array subclass for LoggingCallback 
Example #19
Source File: sfs.py    From momi2 with GNU General Public License v3.0
def from_matrix(cls, mat, configs, *args, **kwargs):
        # convert to csc
        mat = scipy.sparse.csc_matrix(mat)
        indptr = mat.indptr
        loci = []
        for i in range(mat.shape[1]):
            loci.append(np.array([
                mat.indices[indptr[i]:indptr[i+1]],
                mat.data[indptr[i]:indptr[i+1]]]))

        return cls(loci, configs, *args, **kwargs) 
Example #20
Source File: optimizers.py    From momi2 with GNU General Public License v3.0
def sgd(fun, x0, fun_and_jac, pieces, stepsize, num_iters, bounds=None, callback=None, iter_per_output=10, rgen=np.random):
    x0 = np.array(x0)

    if callback is None:
        callback = lambda *a, **kw: None

    if bounds is None:
        bounds = [(None, None) for _ in x0]
    lower, upper = zip(*bounds)
    lower = [-float('inf') if l is None else l
             for l in lower]
    upper = [float('inf') if u is None else u
             for u in upper]

    def truncate(x):
        return np.maximum(np.minimum(x, upper), lower)

    x = x0
    for nit in range(num_iters):
        i = rgen.randint(pieces)
        f_x, g_x = fun_and_jac(x, i)
        x = truncate(x - stepsize * g_x)
        if nit % iter_per_output == 0:
            callback(x, f_x, nit)

    return scipy.optimize.OptimizeResult({'x': x, 'fun': f_x, 'jac': g_x}) 
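A toy run of sgd on a quadratic objective split into pieces (a usage sketch assuming sgd above is in scope; the fun argument is unused by this implementation, so None is passed):

import autograd.numpy as np
from autograd import value_and_grad

targets = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])  # one target per piece

def piece(x, i):
    return np.sum((x - targets[i]) ** 2)

res = sgd(None, np.zeros(2), value_and_grad(piece), pieces=3,
          stepsize=0.1, num_iters=200)
print(res.x)  # hovers near the mean of the targets, [2., 3.]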
Example #21
Source File: sfs_stats.py    From momi2 with GNU General Public License v3.0
def ordered_prob(self, subsample_dict,
                     fold=False):
        # The ordered probability for the subsample given by
        # subsample_dict.
        #
        # Parameters:
        # subsample_dict: dict of list
        #    dict mapping population to a list of 0s and 1s giving the
        #       ordered subsample within that population.

        if fold:
            rev_subsample = {p: 1 - np.array(s)
                             for p, s in subsample_dict.items()}

            return (self.ordered_prob(subsample_dict)
                    + self.ordered_prob(rev_subsample))

        derived_weights_dict = {}
        for pop, pop_subsample in subsample_dict.items():
            n = self.sampled_n_dict[pop]
            arange = np.arange(n+1)

            cnts = co.Counter(pop_subsample)

            prob = np.ones(n+1)
            for i in range(cnts[0]):
                prob *= (n - arange - i)
            for i in range(cnts[1]):
                prob *= (arange - i)
            for i in range(cnts[0] + cnts[1]):
                prob /= float(n - i)

            derived_weights_dict[pop] = prob

        return self.tensor_prod(derived_weights_dict) / self.denom 
Example #22
Source File: demography.py    From momi2 with GNU General Public License v3.0
def simulate_trees(self, **kwargs):
        sampled_t = self.sampled_t
        if sampled_t is None:
            sampled_t = 0.0
        sampled_t = np.array(sampled_t) * np.ones(len(self.sampled_pops))

        pops = {p: i for i, p in enumerate(self.sampled_pops)}

        demographic_events = []
        for e in self._G.graph["events"]:
            e = e.get_msprime_event(self._G.graph["params"], pops)
            if e is not None:
                demographic_events.append(e)

        return msprime.simulate(
            population_configurations=[
                msprime.PopulationConfiguration()
                for _ in range(len(pops))],
            Ne=self.default_N / 4,
            demographic_events=demographic_events,
            samples=[
                msprime.Sample(population=pops[p], time=t)
                for p, t, n in zip(
                        self.sampled_pops, sampled_t,
                        self.sampled_n)
                for _ in range(n)],
            **kwargs) 
Example #23
Source File: sfs.py    From momi2 with GNU General Public License v3.0
def sampled_pops(self):
        """Names of sampled populations

        :returns: 1-d array of strings
        :rtype: :class:`numpy.ndarray`
        """
        return self.configs.sampled_pops 
Example #24
Source File: demography.py    From momi2 with GNU General Public License v3.0
def sampled_t(self):
        """
        An array of times at which each population was sampled
        """
        return np.array(tuple(self._G.node[(l, 0)]['sizes'][0]['t'] for l in self.sampled_pops)) 
Example #25
Source File: compute_sfs.py    From momi2 with GNU General Public License v3.0
def admix_trailing_pop(self, admix_probs_3tensor,
                           newpop_name):
        """
        admix_probs_3tensor should be array returned by demography._admixture_prob_helper
        """
        self.matmul_last_axis(admix_probs_3tensor)
        self.pop_labels.append(newpop_name) 
Example #26
Source File: util.py    From momi2 with GNU General Public License v3.0
def set0(x, indices):
    y = np.array(x)
    y[indices] = 0
    return y 
Example #27
Source File: util.py    From momi2 with GNU General Public License v3.0
def check_psd(X, **tol_kwargs):
    X = check_symmetric(X)
    d, U = np.linalg.eigh(X)
    d = truncate0(d, **tol_kwargs)
    ret = np.dot(U, np.dot(np.diag(d), np.transpose(U)))
    #assert np.allclose(ret, X)
    return np.array(ret, ndmin=2) 
Example #28
Source File: standard_models.py    From pyhawkes with MIT License
def add_data(self, S, F=None):
        """
        Add a data set to the list of observations.
        First, filter the data with the impulse response basis,
        then instantiate a set of parents for this data set.

        :param S: a TxK matrix of event counts for each time bin
                  and each process.
        """
        assert isinstance(S, np.ndarray) and S.ndim == 2 and S.shape[1] == self.K \
               and np.amin(S) >= 0 and np.issubdtype(S.dtype, np.integer), \
               "Data must be a TxK array of event counts"

        T = S.shape[0]

        if F is None:
            # Filter the data into a TxKxB array
            Ftens = self.basis.convolve_with_basis(S)

            # Flatten this into a T x (KxB) matrix
            # [F00, F01, F02, F10, F11, ... F(K-1)0, F(K-1)(B-1)]
            F = Ftens.reshape((T, self.K * self.B))
            assert np.allclose(F[:,0], Ftens[:,0,0])
            if self.B > 1:
                assert np.allclose(F[:,1], Ftens[:,0,1])
            if self.K > 1:
                assert np.allclose(F[:,self.B], Ftens[:,1,0])

            # Prepend a column of ones
            F = np.hstack((np.ones((T,1)), F))

        for k,node in enumerate(self.nodes):
            node.add_data(F, S[:,k]) 
Example #29
Source File: standard_models.py    From pyhawkes with MIT License
def bias(self):
        full_W = np.array([node.w for node in self.nodes])
        return full_W[:,0] 
Example #30
Source File: pressure_vessel.py    From pymoo with Apache License 2.0
def __init__(self):
        super().__init__(n_var=4, n_obj=1, n_constr=4, type_var=anp.double)
        self.xl = anp.array([1, 1, 10.0, 10.0])
        self.xu = anp.array([99, 99, 200.0, 200.0])