Python numpy.lib.recfunctions.stack_arrays() Examples

The following are 10 code examples of numpy.lib.recfunctions.stack_arrays(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the module numpy.lib.recfunctions, or try the search function.
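Before looking at the project examples, here is a minimal, self-contained sketch of the basic call (field names and values are invented for illustration): stack_arrays() concatenates structured arrays row-wise, and usemask=False returns a plain structured ndarray instead of a masked array.

import numpy as np
from numpy.lib import recfunctions as rfn

# Two structured arrays sharing the same dtype (illustrative fields).
a = np.array([(1, 0.5), (2, 1.5)], dtype=[('id', 'i8'), ('x', 'f8')])
b = np.array([(3, 2.5)], dtype=[('id', 'i8'), ('x', 'f8')])

# Concatenate row-wise; usemask=False gives a plain structured array
# rather than a numpy.ma.MaskedArray.
stacked = rfn.stack_arrays([a, b], usemask=False)

print(stacked.dtype.names)  # ('id', 'x')
print(len(stacked))         # 3
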
Example #1
Source File: utils.py    From diogenes with MIT License
def stack_rows(args):
    """Returns a structured array containing all the rows in its arguments
    
    Each argument must be a structured array with the same column names
    and column types. Similar to SQL UNION
    """
    if len(args) > 0:
        M0 = check_sa(args[0], argument_name='args[0]')
        dtype0 = M0.dtype
        checked_args = [M0]
        for idx, M in enumerate(args[1:]):
            M = check_sa(M)
            if dtype0 != M.dtype:
                raise ValueError('args[{}] does not have the same dtype as '
                                 'args[0]'.format(idx + 1))
            checked_args.append(M)
        args = checked_args
    return nprf.stack_arrays(args, usemask=False) 
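A note on why stack_rows compares dtypes itself: stack_arrays() does not insist on identical dtypes. If one input carries an extra field, it quietly widens the result to the union of the fields and fills the gaps rather than raising, which would break the strict SQL-UNION semantics the wrapper promises. The following sketch (invented field names, independent of diogenes) shows that behaviour:

import numpy as np
from numpy.lib import recfunctions as rfn

same = np.array([(1, 0.1)], dtype=[('id', 'i8'), ('x', 'f8')])
extra = np.array([(2, 0.2, 9)], dtype=[('id', 'i8'), ('x', 'f8'), ('y', 'i8')])

# No error: the result dtype is the union of the fields, and the row
# coming from `same` gets a fill value in the missing 'y' column.
merged = rfn.stack_arrays([same, extra], usemask=False)
print(merged.dtype.names)  # ('id', 'x', 'y')
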
Example #2
Source File: cpnest.py    From cpnest with MIT License
def get_nested_samples(self, filename='nested_samples.dat'):
        """
        returns nested sampling chain
        Parameters
        ----------
        filename : string
                   If given, file to save nested samples to

        Returns
        -------
        pos : :obj:`numpy.ndarray`
        """
        import numpy.lib.recfunctions as rfn
        self.nested_samples = rfn.stack_arrays(
            [s.asnparray() for s in self.NS.nested_samples],
            usemask=False)
        if filename:
            np.savetxt(os.path.join(
                self.NS.output_folder,'nested_samples.dat'),
                self.nested_samples.ravel(),
                header=' '.join(self.nested_samples.dtype.names),
                newline='\n',delimiter=' ')
        return self.nested_samples 
Example #3
Source File: simpletable.py    From pyphot with MIT License
def stack(self, r, *args, **kwargs):
        """
        Superposes arrays field by field, in place or into a new Table

        t.stack(t1, t2, t3, defaults=None, inplace=True)

        Parameters
        ----------
        r: Table
        """
        if not hasattr(r, 'data'):
            raise AttributeError('r should be a Table object')
        defaults = kwargs.get('defaults', None)
        inplace = kwargs.get('inplace', False)

        data = [self.data, r.data] + [k.data for k in args]
        sdata = recfunctions.stack_arrays(data, defaults, usemask=False,
                                          asrecarray=True)

        if inplace:
            self.data = sdata
        else:
            t = self.__class__(self)
            t.data = sdata
            return t 
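The defaults and asrecarray=True arguments used above are worth spelling out. In this short sketch (invented field names), defaults supplies the value used to fill fields that are absent from one of the stacked arrays, and asrecarray=True returns a recarray so the stacked fields stay attribute-accessible, matching Table-style access.

import numpy as np
from numpy.lib import recfunctions as rfn

t1 = np.array([(1.0, 2.0)], dtype=[('flux', 'f8'), ('err', 'f8')])
t2 = np.array([(3.0,)], dtype=[('flux', 'f8')])  # no 'err' field

# 'err' is missing from t2, so its entries are filled from `defaults`;
# asrecarray=True makes the result a recarray with attribute access.
sdata = rfn.stack_arrays([t1, t2], defaults={'err': np.nan},
                         usemask=False, asrecarray=True)
print(sdata.flux)  # [1. 3.]
print(sdata.err)   # [ 2. nan]
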
Example #4
Source File: simpletable.py    From TheCannon with MIT License
def stack(self, r, defaults=None):
        """
        Superposes arrays field by field in place

        Parameters
        ----------
        r: Table
        """
        if not hasattr(r, 'data'):
            raise AttributeError('r should be a Table object')
        self.data = recfunctions.stack_arrays([self.data, r.data], defaults,
                                              usemask=False, asrecarray=True) 
Example #5
Source File: sampler.py    From cpnest with MIT License
def reset(self):
        """
        Initialise the sampler by generating :int:`poolsize` `cpnest.parameter.LivePoint`
        and distributing them according to :obj:`cpnest.model.Model.log_prior`
        """
        np.random.seed(seed=self.seed)
        for n in tqdm(range(self.poolsize), desc='SMPLR {} init draw'.format(self.thread_id),
                disable= not self.verbose, position=self.thread_id, leave=False):
            while True: # Generate an in-bounds sample
                p = self.model.new_point()
                p.logP = self.model.log_prior(p)
                if np.isfinite(p.logP): break
            p.logL=self.model.log_likelihood(p)
            if p.logL is None or not np.isfinite(p.logL):
                self.logger.warning("Received non-finite logL value {0} with parameters {1}".format(str(p.logL), str(p)))
                self.logger.warning("You may want to check your likelihood function to improve sampling")
            self.evolution_points.append(p)

        self.proposal.set_ensemble(self.evolution_points)

        # Now, run evolution so samples are drawn from actual prior
        for k in tqdm(range(self.poolsize), desc='SMPLR {} init evolve'.format(self.thread_id),
                disable= not self.verbose, position=self.thread_id, leave=False):
            _, p = next(self.yield_sample(-np.inf))
        if self.verbose >= 3:
            # save the poolsize as prior samples
            
            prior_samples = []
            for k in tqdm(range(self.maxmcmc), desc='SMPLR {} generating prior samples'.format(self.thread_id),
                disable= not self.verbose, position=self.thread_id, leave=False):
                _, p = next(self.yield_sample(-np.inf))
                prior_samples.append(p)
            prior_samples = rfn.stack_arrays(
                [s.asnparray() for s in prior_samples], usemask=False)
            np.savetxt(os.path.join(self.output,'prior_samples_%s.dat'%os.getpid()),
                       prior_samples.ravel(),header=' '.join(prior_samples.dtype.names),
                       newline='\n',delimiter=' ')
            self.logger.critical("Sampler process {0!s}: saved {1:d} prior samples in {2!s}".format(os.getpid(),self.maxmcmc,'prior_samples_%s.dat'%os.getpid()))
            self.prior_samples = prior_samples
        self.proposal.set_ensemble(self.evolution_points)
        self.initialised=True 
Example #6
Source File: average3.py    From picasso with MIT License
def centerofmass_all(self):
        # Align all by center of mass
        n_channels = len(self.locs)

        out_locs_x = []
        out_locs_y = []
        out_locs_z = []
        for j in range(n_channels):
            sel_locs_x = []
            sel_locs_y = []
            sel_locs_z = []

            # stack arrays
            sel_locs_x = self.locs[j].x
            sel_locs_y = self.locs[j].y
            sel_locs_z = self.locs[j].z
            out_locs_x.append(sel_locs_x)
            out_locs_y.append(sel_locs_y)
            out_locs_z.append(sel_locs_z)

        out_locs_x = stack_arrays(out_locs_x, asrecarray=True, usemask=False)
        out_locs_y = stack_arrays(out_locs_y, asrecarray=True, usemask=False)
        out_locs_z = stack_arrays(out_locs_z, asrecarray=True, usemask=False)

        mean_x = np.mean(out_locs_x)
        mean_y = np.mean(out_locs_y)
        mean_z = np.mean(out_locs_z)

        for j in range(n_channels):
            self.locs[j].x -= mean_x
            self.locs[j].y -= mean_y
            self.locs[j].z -= mean_z 
Example #7
Source File: sampleset.py    From dimod with Apache License 2.0
def concatenate(samplesets, defaults=None):
    """Combine sample sets.

    Args:
        samplesets (iterable[:obj:`.SampleSet`]):
            Iterable of sample sets.

        defaults (dict, optional):
            Dictionary mapping data vector names to the corresponding default values.

    Returns:
        :obj:`.SampleSet`: A sample set with the same vartype and variable order as the first
        given in `samplesets`.

    Examples:
        >>> a = dimod.SampleSet.from_samples(([-1, +1], 'ab'), dimod.SPIN, energy=-1)
        >>> b = dimod.SampleSet.from_samples(([-1, +1], 'ba'), dimod.SPIN, energy=-1)
        >>> ab = dimod.concatenate((a, b))
        >>> ab.record.sample
        array([[-1,  1],
               [ 1, -1]], dtype=int8)

    """

    itertup = iter(samplesets)

    try:
        first = next(itertup)
    except StopIteration:
        raise ValueError("samplesets must contain at least one SampleSet")

    vartype = first.vartype
    variables = first.variables

    records = [first.record]
    records.extend(_iter_records(itertup, vartype, variables))

    # dev note: I was able to get ~2x performance boost when trying to
    # implement the same functionality here by hand (I didn't know that
    # this function existed then). However I think it is better to use
    # numpy's function and rely on their testing etc. If however this becomes
    # a performance bottleneck in the future, it might be worth changing.
    record = recfunctions.stack_arrays(records, defaults=defaults,
                                       asrecarray=True, usemask=False)

    return SampleSet(record, variables, {}, vartype) 
Example #8
Source File: cpnest.py    From cpnest with MIT License
def get_posterior_samples(self, filename='posterior.dat'):
        """
        Returns posterior samples

        Parameters
        ----------
        filename : string
                   If given, file to save posterior samples to

        Returns
        -------
        pos : :obj:`numpy.ndarray`
        """
        import numpy as np
        import os
        from .nest2pos import draw_posterior_many
        nested_samples     = self.get_nested_samples()
        posterior_samples  = draw_posterior_many([nested_samples],[self.nlive],verbose=self.verbose)
        posterior_samples  = np.array(posterior_samples)
        self.prior_samples = {n:None for n in self.user.names}
        self.mcmc_samples  = {n:None for n in self.user.names}
        # if we run with full verbose, read in and output
        # the mcmc thinned posterior samples
        if self.verbose >= 3:
            from .nest2pos import resample_mcmc_chain
            from numpy.lib.recfunctions import stack_arrays

            prior_samples = []
            mcmc_samples  = []
            for file in os.listdir(self.NS.output_folder):
                if 'prior_samples' in file:
                    prior_samples.append(np.genfromtxt(os.path.join(self.NS.output_folder,file), names = True))
                    os.system('rm {0}'.format(os.path.join(self.NS.output_folder,file)))
                elif 'mcmc_chain' in file:
                    mcmc_samples.append(resample_mcmc_chain(np.genfromtxt(os.path.join(self.NS.output_folder,file), names = True)))
                    os.system('rm {0}'.format(os.path.join(self.NS.output_folder,file)))

            # first deal with the prior samples
            self.prior_samples = stack_arrays([p for p in prior_samples])
            if filename:
                np.savetxt(os.path.join(
                           self.NS.output_folder,'prior.dat'),
                           self.prior_samples.ravel(),
                           header=' '.join(self.prior_samples.dtype.names),
                           newline='\n',delimiter=' ')
            # now stack all the mcmc chains
            self.mcmc_samples = stack_arrays([p for p in mcmc_samples])
            if filename:
                np.savetxt(os.path.join(
                           self.NS.output_folder,'mcmc.dat'),
                           self.mcmc_samples.ravel(),
                           header=' '.join(self.mcmc_samples.dtype.names),
                           newline='\n',delimiter=' ')
        # TODO: Replace with something to output samples in whatever format
        if filename:
            np.savetxt(os.path.join(
                self.NS.output_folder,'posterior.dat'),
                posterior_samples.ravel(),
                header=' '.join(posterior_samples.dtype.names),
                newline='\n',delimiter=' ')
        return posterior_samples 
Example #9
Source File: postprocess.py    From picasso with MIT License
def calculate_fret(acc_locs, don_locs):
    """
    Calculate the FRET efficiency in picked regions; this is for one trace
    """
    fret_dict = {}
    if len(acc_locs) == 0:
        max_frames = _np.max(don_locs["frame"])
    elif len(don_locs) == 0:
        max_frames = _np.max(acc_locs["frame"])
    else:
        max_frames = _np.max(
            [_np.max(acc_locs["frame"]), _np.max(don_locs["frame"])]
        )

    # Initialize a vector filled with zeros for the duration of the movie
    xvec = _np.arange(max_frames + 1)
    yvec = xvec[:] * 0
    acc_trace = yvec.copy()
    don_trace = yvec.copy()
    # Fill vector with the photon numbers of events that happened
    acc_trace[acc_locs["frame"]] = acc_locs["photons"] - acc_locs["bg"]
    don_trace[don_locs["frame"]] = don_locs["photons"] - don_locs["bg"]

    # Calculate the FRET efficiency
    fret_trace = acc_trace / (acc_trace + don_trace)
    # Only select FRET values between 0 and 1
    selector = _np.logical_and(fret_trace > 0, fret_trace < 1)

    # Select the final fret events based on the 0 to 1 range
    fret_events = fret_trace[selector]
    fret_timepoints = _np.arange(len(fret_trace))[selector]

    f_locs = []
    if len(fret_timepoints) > 0:
        # Calculate FRET locs: Select the locs when FRET happens
        sel_locs = []
        for element in fret_timepoints:
            sel_locs.append(don_locs[don_locs["frame"] == element])

        f_locs = stack_arrays(sel_locs, asrecarray=True, usemask=False)
        f_locs = _lib.append_to_rec(f_locs, _np.array(fret_events), "fret")

    fret_dict["fret_events"] = _np.array(fret_events)
    fret_dict["fret_timepoints"] = fret_timepoints
    fret_dict["acc_trace"] = acc_trace
    fret_dict["don_trace"] = don_trace
    fret_dict["frames"] = xvec
    fret_dict["maxframes"] = max_frames

    return fret_dict, f_locs 
Example #10
Source File: average3.py    From picasso with MIT License
def centerofmass(self):
        print("Aligning by center of mass.. ", end="", flush=True)
        n_groups = self.n_groups
        n_channels = len(self.locs)
        progress = lib.ProgressDialog(
            "Aligning by center of mass", 0, n_groups, self
        )
        progress.set_value(0)

        for i in range(n_groups):
            out_locs_x = []
            out_locs_y = []
            out_locs_z = []
            for j in range(n_channels):
                sel_locs_x = []
                sel_locs_y = []
                sel_locs_z = []
                index = self.group_index[j][i, :].nonzero()[1]
                # stack arrays
                sel_locs_x = self.locs[j].x[index]
                sel_locs_y = self.locs[j].y[index]
                sel_locs_z = self.locs[j].z[index]

                out_locs_x.append(sel_locs_x)
                out_locs_y.append(sel_locs_y)
                out_locs_z.append(sel_locs_z)
                progress.set_value(i + 1)

            out_locs_x = stack_arrays(
                out_locs_x, asrecarray=True, usemask=False
            )
            out_locs_y = stack_arrays(
                out_locs_y, asrecarray=True, usemask=False
            )
            out_locs_z = stack_arrays(
                out_locs_z, asrecarray=True, usemask=False
            )

            mean_x = np.mean(out_locs_x)
            mean_y = np.mean(out_locs_y)
            mean_z = np.mean(out_locs_z)

            for j in range(n_channels):
                index = self.group_index[j][i, :].nonzero()[1]
                self.locs[j].x[index] -= mean_x
                self.locs[j].y[index] -= mean_y
                self.locs[j].z[index] -= mean_z

        self.calculate_radii()
        self.updateLayout()

        print("Complete.")