Python autograd.numpy.any() Examples

The following are 10 code examples of autograd.numpy.any(), drawn from open-source projects. The source file and project for each example are noted above it. You may also want to check out the other available functions and classes of the autograd.numpy module.
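As a quick refresher before the examples: autograd.numpy.any() behaves exactly like numpy.any(). It returns a boolean, so it carries no gradient of its own and is typically used for input validation or control flow inside differentiated code. A minimal illustrative snippet (not taken from any of the projects below):

import autograd.numpy as np

x = np.array([0.0, -1.0, 2.0])
print(np.any(x > 0))   # True: at least one element is positive
print(np.any(x > 5))   # False: no element exceeds 5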
Example #1
Source File: compute_sfs.py    From momi2 with GNU General Public License v3.0
def _expected_sfs(demography, configs, folded, error_matrices):
    if np.any(configs.sampled_n != demography.sampled_n) or np.any(configs.sampled_pops != demography.sampled_pops):
        raise ValueError(
            "configs and demography must have same sampled_n, sampled_pops. Use Demography.copy() or ConfigList.copy() to make a copy with different sampled_n.")

    vecs, idxs = configs._vecs_and_idxs(folded)

    if error_matrices is not None:
        vecs = _apply_error_matrices(vecs, error_matrices)

    vals = expected_sfs_tensor_prod(vecs, demography)

    sfs = vals[idxs['idx_2_row']]
    if folded:
        sfs = sfs + vals[idxs['folded_2_row']]

    denom = vals[idxs['denom_idx']]
    for i in (0, 1):
        denom = denom - vals[idxs[("corrections_2_denom", i)]]

    #assert np.all(np.logical_or(vals >= 0.0, np.isclose(vals, 0.0)))

    return sfs, denom 
Example #2
Source File: sfs.py    From momi2 with GNU General Public License v3.0
def _get_subsample_counts(configs, n):
    subconfigs, weights = [], []
    for pop_comb in it.combinations_with_replacement(configs.sampled_pops, n):
        subsample_n = co.Counter(pop_comb)
        subsample_n = np.array([subsample_n[pop]
                                for pop in configs.sampled_pops], dtype=int)
        if np.any(subsample_n > configs.sampled_n):
            continue

        for sfs_entry in it.product(*(range(sub_n + 1)
                                      for sub_n in subsample_n)):
            sfs_entry = np.array(sfs_entry, dtype=int)
            if np.all(sfs_entry == 0) or np.all(sfs_entry == subsample_n):
                # monomorphic
                continue

            sfs_entry = np.transpose([subsample_n - sfs_entry, sfs_entry])
            cnt_vec = configs.subsample_probs(sfs_entry)
            if not np.all(cnt_vec == 0):
                subconfigs.append(sfs_entry)
                weights.append(cnt_vec)

    return np.array(subconfigs), np.array(weights) 
Example #3
Source File: sfs.py    From momi2 with GNU General Public License v3.0
def __init__(self, loci, configs, folded, length):
        self.folded = folded
        self._length = length

        self.configs = configs

        self.loc_idxs, self.loc_counts = [], []
        for loc in loci:
            if len(loc) == 0:
                self.loc_idxs.append(np.array([], dtype=int))
                self.loc_counts.append(np.array([], dtype=float))
            else:
                try:
                    loc.items()
                except AttributeError:
                    # not dict-like: treat loc as an array of config indices,
                    # or a 2 x L array of (index, count) rows
                    loc = np.array(loc)
                    if len(loc.shape) == 2:
                        assert loc.shape[0] == 2
                        idxs, cnts = loc[0, :], loc[1, :]
                    else:
                        idxs, cnts = np.unique(loc, return_counts=True)
                else:
                    idxs, cnts = zip(*loc.items())
                self.loc_idxs.append(np.array(idxs, dtype=int))
                self.loc_counts.append(np.array(cnts, dtype=float))

        if len(self.loc_idxs) > 1:
            self._total_freqs = self.freqs_matrix.dot(np.ones(self.n_loci))
            assert self._total_freqs.shape == (self.freqs_matrix.shape[0],)
        else:
            # avoid costly building of frequency matrix, when there are many
            # Sfs's of a single locus (e.g. in many stochastic minibatches)
            idxs, = self.loc_idxs
            cnts, = self.loc_counts
            self._total_freqs = np.zeros(len(self.configs))
            self._total_freqs[idxs] = cnts

        assert not np.any(self._total_freqs == 0) 
Example #4
Source File: configurations.py    From momi2 with GNU General Public License v3.0
def __init__(self, sampled_pops, conf_arr, sampled_n=None,
                 ascertainment_pop=None):
        """Use build_config_list() instead of calling this constructor directly"""
        # If sampled_n=None, ConfigList.sampled_n will be the max number of
        # observed individuals/alleles per population.
        self.sampled_pops = tuple(sampled_pops)
        self.value = conf_arr

        if ascertainment_pop is None:
            ascertainment_pop = [True] * len(sampled_pops)
        self.ascertainment_pop = np.array(ascertainment_pop)
        self.ascertainment_pop.setflags(write=False)
        if all(not a for a in self.ascertainment_pop):
            raise ValueError(
                "At least one of the populations must be used for "
                "ascertainment of polymorphic sites")

        max_n = np.max(np.sum(self.value, axis=2), axis=0)

        if sampled_n is None:
            sampled_n = max_n
        sampled_n = np.array(sampled_n)
        if np.any(sampled_n < max_n):
            raise ValueError("config greater than sampled_n")
        self.sampled_n = sampled_n
        if not np.sum(sampled_n[self.ascertainment_pop]) >= 2:
            raise ValueError("The total sample size of the ascertainment "
                             "populations must be >= 2")

        config_sampled_n = np.sum(self.value, axis=2)
        self.has_missing_data = np.any(config_sampled_n != self.sampled_n)

        if np.any(np.sum(self.value[:, self.ascertainment_pop, :], axis=1)
                  == 0):
            raise ValueError("Monomorphic sites not allowed. In addition, all"
                             " sites must be polymorphic when restricted to"
                             " the ascertainment populations") 
Example #5
Source File: wrapper.py    From pennylane with Apache License 2.0
def extract_tensors(x):
    """Iterate through an iterable, and extract any PennyLane
    tensors that appear.
    """
    if isinstance(x, tensor):
        # If the item is a tensor, return it
        yield x
    elif isinstance(x, Sequence) and not isinstance(x, (str, bytes)):
        # If the item is a sequence, recursively look through its
        # elements for tensors.
        # NOTE: we choose to branch on Sequence here and not Iterable,
        # as NumPy arrays are not Sequences.
        for item in x:
            yield from extract_tensors(item) 
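A short usage sketch for extract_tensors (illustrative; it assumes the function and the tensor class are importable from pennylane.numpy, where this source file lives):

from pennylane import numpy as np

t1 = np.tensor([1.0, 2.0], requires_grad=True)
t2 = np.tensor([3.0, 4.0], requires_grad=False)
nested = [t1, (0.5, [t2, "label"])]

# Tensors are yielded in order; the float and the string are skipped.
print([t.requires_grad for t in np.extract_tensors(nested)])
# [True, False]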
Example #6
Source File: test_systematic.py    From autograd with MIT License
def test_max():  stat_check(np.max)
# def test_all():  stat_check(np.all)
# def test_any():  stat_check(np.any) 
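The all/any tests are presumably commented out because both functions return booleans, which have no meaningful gradient for stat_check to verify numerically. np.any can still appear inside differentiated functions as long as it only steers control flow, as in this illustrative sketch (not part of the autograd test suite):

import autograd.numpy as np
from autograd import grad

def clipped_sum(x):
    # np.any only selects the branch; the gradient flows
    # through whichever branch is actually taken.
    if np.any(x < 0):
        return np.sum(x ** 2)
    return np.sum(x)

print(grad(clipped_sum)(np.array([-1.0, 2.0])))  # [-2.  4.]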
Example #7
Source File: density.py    From kernel-gof with MIT License
def __init__(self, mean, cov):
        """
        mean: a numpy array of length d.
        cov: d x d numpy array for the covariance.
        """
        self.mean = mean 
        self.cov = cov
        assert mean.shape[0] == cov.shape[0]
        assert cov.shape[0] == cov.shape[1]
        E, V = np.linalg.eigh(cov)
        if np.any(np.abs(E) <= 1e-7):
            raise ValueError('covariance matrix is not full rank.')
        # The precision matrix
        self.prec = np.dot(np.dot(V, np.diag(old_div(1.0,E))), V.T)
        #print self.prec 
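The np.any() check above rejects a rank-deficient covariance before the precision matrix is formed. Here is the same guard in isolation, as a standalone sketch using plain numpy, independent of the class above:

import numpy as np

cov = np.array([[1.0, 1.0],
                [1.0, 1.0]])  # rank 1: one eigenvalue is exactly 0
E, V = np.linalg.eigh(cov)
print(np.any(np.abs(E) <= 1e-7))  # True, so the constructor would raise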
Example #8
Source File: source.py    From scarlet with MIT License
def __init__(self, model_frame, sky_coord, observations):
        """Source intialized with a single pixel

        Parameters
        ----------
        frame: `~scarlet.Frame`
            The frame of the full model
        sky_coord: tuple
            Center of the source
        observations: instance or list of `~scarlet.Observation`
            Observation(s) to initialize this source
        """
        C, Ny, Nx = model_frame.shape
        self.center = np.array(model_frame.get_pixel(sky_coord), dtype="float")

        # initialize SED from sky_coord
        try:
            iter(observations)
        except TypeError:
            observations = [observations]

        # determine initial SED from peak position
        # SED in the frame for source detection
        seds = []
        for obs in observations:
            _sed = get_psf_sed(sky_coord, obs, model_frame)
            seds.append(_sed)
        sed = np.concatenate(seds).reshape(-1)

        if np.any(sed <= 0):
            # If the flux in all channels is <= 0,
            # the new sed will be filled with NaN values,
            # which will cause the code to crash later
            msg = "Zero or negative SED {} at y={}, x={}".format(sed, *sky_coord)
            if np.all(sed <= 0):
                logger.warning(msg)
            else:
                logger.info(msg)

        # set up parameters
        sed = Parameter(
            sed,
            name="sed",
            step=partial(relative_step, factor=1e-2),
            constraint=PositivityConstraint(),
        )
        center = Parameter(self.center, name="center", step=1e-1)

        # define bbox
        pixel_center = tuple(np.round(center).astype("int"))
        front, back = 0, C
        bottom = pixel_center[0] - model_frame.psf.shape[1] // 2
        top = pixel_center[0] + model_frame.psf.shape[1] // 2
        left = pixel_center[1] - model_frame.psf.shape[2] // 2
        right = pixel_center[1] + model_frame.psf.shape[2] // 2
        bbox = Box.from_bounds((front, back), (bottom, top), (left, right))

        super().__init__(model_frame, bbox, sed, center, self._psf_wrapper) 
Example #9
Source File: demography.py    From momi2 with GNU General Public License v3.0
def simulate_vcf(self, out_prefix, mutation_rate,
                     recombination_rate, length,
                     chrom_name=1, ploidy=1, random_seed=None,
                     force=False, print_aa=True):
        out_prefix = os.path.expanduser(out_prefix)
        vcf_name = out_prefix + ".vcf"
        bed_name = out_prefix + ".bed"
        for fname in (vcf_name, bed_name):
            if not force and os.path.isfile(fname):
                raise FileExistsError(
                    "{} exists and force=False".format(fname))

        if np.any(self.sampled_n % ploidy != 0):
            raise ValueError("Sampled alleles per population must be"
                             " integer multiple of ploidy")

        with open(bed_name, "w") as bed_f:
            print(chrom_name, 0, length, sep="\t", file=bed_f)

        with open(vcf_name, "w") as vcf_f:
            treeseq = self.simulate_trees(
                mutation_rate=mutation_rate,
                recombination_rate=recombination_rate,
                length=length, num_replicates=1,
                random_seed=random_seed)

            print("##fileformat=VCFv4.2", file=vcf_f)
            print('##source="VCF simulated by momi2 using'
                  ' msprime backend"', file=vcf_f)
            print("##contig=<ID={chrom_name},length={length}>".format(
                chrom_name=chrom_name, length=length), file=vcf_f)
            print('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">',
                  file=vcf_f)
            print('##INFO=<ID=AA,Number=1,Type=String,Description="Ancestral Allele">',
                  file=vcf_f)

            n_samples = int(np.sum(self.sampled_n) / ploidy)
            fields = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL",
                      "FILTER", "INFO", "FORMAT"]
            for pop, n in zip(self.sampled_pops, self.sampled_n):
                for i in range(int(n / ploidy)):
                    fields.append("{}_{}".format(pop, i))
            print(*fields, sep="\t", file=vcf_f)

            loc = next(treeseq)
            if print_aa:
                info_str = "AA=A"
            else:
                info_str = "."

            for v in loc.variants():
                gt = np.reshape(v.genotypes, (n_samples, ploidy))
                print(chrom_name, int(np.floor(v.position)),
                      ".", "A", "T", ".", ".", info_str, "GT",
                      *["|".join(map(str, sample)) for sample in gt],
                      sep="\t", file=vcf_f)

        pysam.tabix_index(vcf_name, preset="vcf", force=force) 
Example #10
Source File: wrapper.py    From pennylane with Apache License 2.0
def tensor_wrapper(obj):
    """Decorator that wraps callable objects and classes so that they both accept
    a ``requires_grad`` keyword argument, as well as returning a PennyLane
    :class:`~.tensor`.

    Only if the decorated object returns an ``ndarray`` is the
    output converted to a :class:`~.tensor`; this avoids superfluous conversion
    of scalars and other native-Python types.

    Args:
        obj: a callable object or class
    """

    @functools.wraps(obj)
    def _wrapped(*args, **kwargs):
        """Wrapped NumPy function"""

        tensor_kwargs = {}

        if "requires_grad" in kwargs:
            tensor_kwargs["requires_grad"] = kwargs.pop("requires_grad")
        else:
            tensor_args = list(extract_tensors(args))

            if tensor_args:
                # Unless the user specifies otherwise, if all tensors in the argument
                # list are non-trainable, the output is also non-trainable.
                # Equivalently: if any tensor is trainable, the output is also trainable.
                # NOTE: Use of Python's ``any`` results in an infinite recursion,
                # and I'm not sure why. Using ``np.any`` works fine.
                tensor_kwargs["requires_grad"] = _np.any([i.requires_grad for i in tensor_args])

        # evaluate the original object
        res = obj(*args, **kwargs)

        if isinstance(res, _np.ndarray):
            # only if the output of the object is a ndarray,
            # then convert to a PennyLane tensor
            res = tensor(res, **tensor_kwargs)

        return res

    return _wrapped
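A hedged usage sketch for tensor_wrapper (it assumes the decorator is importable from pennylane.numpy, where this source file lives): wrapping a vanilla NumPy function so that its ndarray output becomes a PennyLane tensor whose trainability follows its tensor inputs, via exactly the np.any() logic above:

import numpy as onp
from pennylane import numpy as np

wrapped_sin = np.tensor_wrapper(onp.sin)

# All tensor arguments are non-trainable, so the output is too.
x = np.tensor([0.0, 1.0], requires_grad=False)
y = wrapped_sin(x)
print(type(y).__name__, y.requires_grad)  # tensor False

# Any trainable tensor among the arguments makes the output trainable.
z = wrapped_sin(np.tensor([0.0, 1.0], requires_grad=True))
print(z.requires_grad)  # True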