Python numpy.uint32() Examples

The following are 30 code examples of numpy.uint32(), drawn from open source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the numpy module, or try the search function.
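Before the examples, a minimal sketch of the dtype itself: numpy.uint32 is a fixed-width, 4-byte unsigned integer type, so array arithmetic wraps around modulo 2**32 (the values below are chosen purely for illustration).

import numpy as np

x = np.uint32(7)                       # a 4-byte unsigned scalar
print(x.dtype, x.nbytes)               # uint32 4

a = np.array([0, 1, 2**32 - 1], dtype=np.uint32)
print(a + np.uint32(1))                # unsigned arithmetic wraps: [1 2 0]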
Example #1
Source File: test_dtype.py    From recruit with Apache License 2.0
def test_union_with_struct_packed(self):
        class Struct(ctypes.Structure):
            _pack_ = 1
            _fields_ = [
                ('one', ctypes.c_uint8),
                ('two', ctypes.c_uint32)
            ]

        class Union(ctypes.Union):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
                ('c', ctypes.c_uint32),
                ('d', Struct),
            ]
        expected = np.dtype(dict(
            names=['a', 'b', 'c', 'd'],
            formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
            offsets=[0, 0, 0, 0],
            itemsize=ctypes.sizeof(Union)
        ))
        self.check(Union, expected) 
Example #2
Source File: test_function_base.py    From recruit with Apache License 2.0
def test_basic(self):
        ba = [1, 2, 10, 11, 6, 5, 4]
        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                      np.float32, np.float64, np.complex64, np.complex128]:
            a = np.array(ba, ctype)
            a2 = np.array(ba2, ctype)
            if ctype in ['1', 'b']:
                assert_raises(ArithmeticError, np.cumprod, a)
                assert_raises(ArithmeticError, np.cumprod, a2, 1)
                assert_raises(ArithmeticError, np.cumprod, a)
            else:
                assert_array_equal(np.cumprod(a, axis=-1),
                                   np.array([1, 2, 20, 220,
                                             1320, 6600, 26400], ctype))
                assert_array_equal(np.cumprod(a2, axis=0),
                                   np.array([[1, 2, 3, 4],
                                             [5, 12, 21, 36],
                                             [50, 36, 84, 180]], ctype))
                assert_array_equal(np.cumprod(a2, axis=-1),
                                   np.array([[1, 2, 6, 24],
                                             [5, 30, 210, 1890],
                                             [10, 30, 120, 600]], ctype)) 
Example #3
Source File: pfilter.py    From pfilter with MIT License
def residual_resample(weights):
    n = len(weights)
    indices = np.zeros(n, np.uint32)
    # take int(N*w) copies of each weight
    num_copies = (n * weights).astype(np.uint32)
    k = 0
    for i in range(n):
        for _ in range(num_copies[i]):  # make num_copies[i] copies of index i
            indices[k] = i
            k += 1
    # use multinomial resampling on the residual to fill up the rest.
    residual = n * weights - num_copies  # fractional part of n * w
    residual /= np.sum(residual)
    cumsum = np.cumsum(residual)
    cumsum[-1] = 1
    indices[k:n] = np.searchsorted(cumsum, np.random.uniform(0, 1, n - k))
    return indices 
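A minimal usage sketch for residual_resample above; the weight values are made up for illustration and are assumed to be normalized to sum to 1.

import numpy as np

weights = np.array([0.5, 0.25, 0.125, 0.125])   # normalized particle weights
idx = residual_resample(weights)                # indices into the particle array
print(idx.dtype, idx.shape)                     # uint32 (4,)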
Example #4
Source File: test_dtype.py    From recruit with Apache License 2.0
def test_union_packed(self):
        class Struct(ctypes.Structure):
            _fields_ = [
                ('one', ctypes.c_uint8),
                ('two', ctypes.c_uint32)
            ]
            _pack_ = 1
        class Union(ctypes.Union):
            _pack_ = 1
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
                ('c', ctypes.c_uint32),
                ('d', Struct),
            ]
        expected = np.dtype(dict(
            names=['a', 'b', 'c', 'd'],
            formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
            offsets=[0, 0, 0, 0],
            itemsize=ctypes.sizeof(Union)
        ))
        self.check(Union, expected) 
Example #5
Source File: pfilter.py    From pfilter with MIT License
def create_indices(positions, weights):
    n = len(weights)
    indices = np.zeros(n, np.uint32)
    cumsum = np.cumsum(weights)
    i, j = 0, 0
    while i < n:
        if positions[i] < cumsum[j]:
            indices[i] = j
            i += 1
        else:
            j += 1

    return indices


### end rlabbe's resampling functions 
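A small sketch of how create_indices might be driven for systematic resampling; the evenly spaced positions and the weight values are illustrative assumptions, not part of the pfilter API.

import numpy as np

weights = np.array([0.5, 0.25, 0.125, 0.125])                 # cumsum ends exactly at 1.0
positions = (np.arange(len(weights)) + 0.5) / len(weights)    # evenly spaced points in [0, 1)
print(create_indices(positions, weights))                     # [0 0 1 3], dtype uint32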
Example #6
Source File: test_dtype.py    From recruit with Apache License 2.0
def test_large_packed_structure(self):
        class PackedStructure(ctypes.Structure):
            _pack_ = 2
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
                ('c', ctypes.c_uint8),
                ('d', ctypes.c_uint16),
                ('e', ctypes.c_uint32),
                ('f', ctypes.c_uint32),
                ('g', ctypes.c_uint8)
                ]
        expected = np.dtype(dict(
            formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],
            offsets=[0, 2, 4, 6, 8, 12, 16],
            names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
            itemsize=18))
        self.check(PackedStructure, expected) 
Example #7
Source File: test_stata.py    From recruit with Apache License 2.0
def test_bool_uint(self, byteorder, version):
        s0 = Series([0, 1, True], dtype=np.bool)
        s1 = Series([0, 1, 100], dtype=np.uint8)
        s2 = Series([0, 1, 255], dtype=np.uint8)
        s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
        s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
        s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
        s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)

        original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
                              's4': s4, 's5': s5, 's6': s6})
        original.index.name = 'index'
        expected = original.copy()
        expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
                          np.int32, np.float64)
        for c, t in zip(expected.columns, expected_types):
            expected[c] = expected[c].astype(t)

        with tm.ensure_clean() as path:
            original.to_stata(path, byteorder=byteorder, version=version)
            written_and_read_again = self.read_dta(path)
            written_and_read_again = written_and_read_again.set_index('index')
            tm.assert_frame_equal(written_and_read_again, expected) 
Example #8
Source File: test_utils.py    From me-ica with GNU Lesser General Public License v2.1
def test_can_cast():
    tests = ((np.float32, np.float32, True, True, True),
             (np.float64, np.float32, True, True, True),
             (np.complex128, np.float32, False, False, False),
             (np.float32, np.complex128, True, True, True),
             (np.float32, np.uint8, False, True, True),
             (np.uint32, np.complex128, True, True, True),
             (np.int64, np.float32, True, True, True),
             (np.complex128, np.int16, False, False, False),
             (np.float32, np.int16, False, True, True),
             (np.uint8, np.int16, True, True, True),
             (np.uint16, np.int16, False, True, True),
             (np.int16, np.uint16, False, False, True),
             (np.int8, np.uint16, False, False, True),
             (np.uint16, np.uint8, False, True, True),
             )
    for intype, outtype, def_res, scale_res, all_res in tests:
        assert_equal(def_res, can_cast(intype, outtype))
        assert_equal(scale_res, can_cast(intype, outtype, False, True))
        assert_equal(all_res, can_cast(intype, outtype, True, True)) 
Example #9
Source File: test_arraysetops.py    From recruit with Apache License 2.0
def test_setdiff1d(self):
        a = np.array([6, 5, 4, 7, 1, 2, 7, 4])
        b = np.array([2, 4, 3, 3, 2, 1, 5])

        ec = np.array([6, 7])
        c = setdiff1d(a, b)
        assert_array_equal(c, ec)

        a = np.arange(21)
        b = np.arange(19)
        ec = np.array([19, 20])
        c = setdiff1d(a, b)
        assert_array_equal(c, ec)

        assert_array_equal([], setdiff1d([], []))
        a = np.array((), np.uint32)
        assert_equal(setdiff1d(a, []).dtype, np.uint32) 
Example #10
Source File: test_function_base.py    From recruit with Apache License 2.0
def test_basic(self):
        ba = [1, 2, 10, 11, 6, 5, 4]
        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                      np.float32, np.float64, np.complex64, np.complex128]:
            a = np.array(ba, ctype)
            a2 = np.array(ba2, ctype)
            if ctype in ['1', 'b']:
                assert_raises(ArithmeticError, np.prod, a)
                assert_raises(ArithmeticError, np.prod, a2, 1)
            else:
                assert_equal(a.prod(axis=0), 26400)
                assert_array_equal(a2.prod(axis=0),
                                   np.array([50, 36, 84, 180], ctype))
                assert_array_equal(a2.prod(axis=-1),
                                   np.array([24, 1890, 600], ctype)) 
Example #11
Source File: test_ctypeslib.py    From recruit with Apache License 2.0
def test_padded_union(self):
        dt = np.dtype(dict(
            names=['a', 'b'],
            offsets=[0, 0],
            formats=[np.uint16, np.uint32],
            itemsize=5,
        ))

        ct = np.ctypeslib.as_ctypes_type(dt)
        assert_(issubclass(ct, ctypes.Union))
        assert_equal(ctypes.sizeof(ct), dt.itemsize)
        assert_equal(ct._fields_, [
            ('a', ctypes.c_uint16),
            ('b', ctypes.c_uint32),
            ('', ctypes.c_char * 5),  # padding
        ]) 
Example #12
Source File: header.py    From baseband with GNU General Public License v3.0
def stream2words(stream, track=None):
    """Convert a stream of integers to uint32 header words.

    Parameters
    ----------
    stream : `~numpy.array` of int
        For each int, every bit corresponds to a particular track.
    track : int, array, or None, optional
        The track to extract.  If `None` (default), extract all tracks that
        the type of int in the stream can hold.
    """
    if track is None:
        track = np.arange(stream.dtype.itemsize * 8, dtype=stream.dtype)

    track_sel = ((stream.reshape(-1, 32, 1) >> track) & 1).astype(np.uint32)
    track_sel <<= np.arange(31, -1, -1, dtype=np.uint32).reshape(-1, 1)
    words = np.bitwise_or.reduce(track_sel, axis=1)
    return words.squeeze() 
Example #13
Source File: header.py    From baseband with GNU General Public License v3.0
def words2stream(words):
    """Convert a set of uint32 header words to a stream of integers.

    Parameters
    ----------
    words : `~numpy.array` of uint32

    Returns
    -------
    stream : `~numpy.array` of int
        For each int, every bit corresponds to a particular track.
    """
    ntrack = words.shape[1]
    dtype = MARK4_DTYPES[ntrack]
    nbits = words.dtype.itemsize * 8
    bit = np.arange(nbits - 1, -1, -1, dtype=words.dtype).reshape(-1, 1)

    bit_sel = ((words[:, np.newaxis, :] >> bit) & 1).astype(dtype[1:])
    bit_sel <<= np.arange(ntrack, dtype=dtype[1:])
    words = np.empty(bit_sel.shape[:2], dtype)
    words = np.bitwise_or.reduce(bit_sel, axis=2, out=words)
    return words.ravel() 
Example #14
Source File: avazu.py    From pytorch-fm with MIT License
def __yield_buffer(self, path, feat_mapper, defaults, buffer_size=int(1e5)):
        item_idx = 0
        buffer = list()
        with open(path) as f:
            f.readline()
            pbar = tqdm(f, mininterval=1, smoothing=0.1)
            pbar.set_description('Create avazu dataset cache: setup lmdb')
            for line in pbar:
                values = line.rstrip('\n').split(',')
                if len(values) != self.NUM_FEATS + 2:
                    continue
                np_array = np.zeros(self.NUM_FEATS + 1, dtype=np.uint32)
                np_array[0] = int(values[1])
                for i in range(1, self.NUM_FEATS + 1):
                    np_array[i] = feat_mapper[i].get(values[i+1], defaults[i])
                buffer.append((struct.pack('>I', item_idx), np_array.tobytes()))
                item_idx += 1
                if item_idx % buffer_size == 0:
                    yield buffer
                    buffer.clear()
            yield buffer 
Example #15
Source File: model.py    From modelforge with Apache License 2.0
def squeeze_bits(arr: numpy.ndarray) -> numpy.ndarray:
    """Return a copy of an integer numpy array with the minimum bitness."""
    assert arr.dtype.kind in ("i", "u")
    if arr.size == 0:
        return arr
    if arr.dtype.kind == "i":
        assert arr.min() >= 0
    mlbl = int(arr.max()).bit_length()
    if mlbl <= 8:
        dtype = numpy.uint8
    elif mlbl <= 16:
        dtype = numpy.uint16
    elif mlbl <= 32:
        dtype = numpy.uint32
    else:
        dtype = numpy.uint64
    return arr.astype(dtype) 
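A quick usage sketch for squeeze_bits; the values are chosen only to land in the uint32 case.

import numpy

arr = numpy.array([0, 255, 70000], dtype=numpy.int64)
print(squeeze_bits(arr).dtype)   # uint32, since 70000 needs 17 bits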
Example #16
Source File: nanduri2012.py    From pulse2percept with BSD 3-Clause "New" or "Revised" License
def _predict_temporal(self, stim, t_percept):
        """Predict the temporal response"""
        # Pass the stimulus as a 2D NumPy array to the fast Cython function:
        stim_data = stim.data.reshape((-1, len(stim.time)))
        # Calculate at which simulation time steps we need to output a percept.
        # This is basically t_percept/self.dt, but we need to beware of
        # floating point rounding errors! 29.999 will be rounded down to 29 by
        # np.uint32, so we need to np.round it first:
        idx_percept = np.uint32(np.round(t_percept / self.dt))
        if np.unique(idx_percept).size < t_percept.size:
            raise ValueError("All times 't_percept' must be distinct multiples "
                             "of `dt`=%.2e" % self.dt)
        # Cython returns a 2D (space x time) NumPy array:
        return temporal_fast(stim_data.astype(np.float32),
                             stim.time.astype(np.float32),
                             idx_percept,
                             self.dt, self.tau1, self.tau2, self.tau3,
                             self.asymptote, self.shift, self.slope, self.eps,
                             self.thresh_percept) 
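The rounding caveat in the comment above is easy to reproduce in isolation: casting a float with np.uint32 truncates toward zero, so the value must be rounded first.

import numpy as np

print(np.uint32(29.999))             # 29 -- the fractional part is truncated
print(np.uint32(np.round(29.999)))   # 30 -- round first, then cast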
Example #17
Source File: horsager2009.py    From pulse2percept with BSD 3-Clause "New" or "Revised" License
def _predict_temporal(self, stim, t_percept):
        """Predict the temporal response"""
        # Pass the stimulus as a 2D NumPy array to the fast Cython function:
        stim_data = stim.data.reshape((-1, len(stim.time)))
        # Calculate at which simulation time steps we need to output a percept.
        # This is basically t_percept/self.dt, but we need to beware of
        # floating point rounding errors! 29.999 will be rounded down to 29 by
        # np.uint32, so we need to np.round it first:
        idx_percept = np.uint32(np.round(t_percept / self.dt))
        if np.unique(idx_percept).size < t_percept.size:
            raise ValueError("All times 't_percept' must be distinct multiples "
                             "of `dt`=%.2e" % self.dt)
        # Cython returns a 2D (space x time) NumPy array:
        return temporal_fast(stim_data.astype(np.float32),
                             stim.time.astype(np.float32),
                             idx_percept,
                             self.dt, self.tau1, self.tau2, self.tau3,
                             self.eps, self.beta, self.thresh_percept) 
Example #18
Source File: beyeler2019.py    From pulse2percept with BSD 3-Clause "New" or "Revised" License
def _predict_spatial(self, earray, stim):
        """Predicts the brightness at specific times ``t``"""
        # This does the expansion of a compact stimulus and a list of
        # electrodes to activation values at X,Y grid locations:
        assert isinstance(earray, ElectrodeArray)
        assert isinstance(stim, Stimulus)
        return fast_axon_map(stim.data,
                             np.array([earray[e].x for e in stim.electrodes],
                                      dtype=np.float32),
                             np.array([earray[e].y for e in stim.electrodes],
                                      dtype=np.float32),
                             self.axon_contrib,
                             self.axon_idx_start.astype(np.uint32),
                             self.axon_idx_end.astype(np.uint32),
                             self.rho,
                             self.thresh_percept) 
Example #19
Source File: test_random.py    From recruit with Apache License 2.0
def test_repeatability(self):
        import hashlib
        # We use an md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but bool, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
               'int16': '1b7741b80964bb190c50d541dca1cac1',
               'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'int64': '17db902806f448331b5a758d7d2ee672',
               'int8': '27dd30c4e08a797063dffac2490b0be6',
               'uint16': '1b7741b80964bb190c50d541dca1cac1',
               'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'uint64': '17db902806f448331b5a758d7d2ee672',
               'uint8': '27dd30c4e08a797063dffac2490b0be6'}

        for dt in self.itype[1:]:
            np.random.seed(1234)

            # view as little endian for hash
            if sys.byteorder == 'little':
                val = self.rfunc(0, 6, size=1000, dtype=dt)
            else:
                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()

            res = hashlib.md5(val.view(np.int8)).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)

        # bools do not depend on endianness
        np.random.seed(1234)
        val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
        res = hashlib.md5(val).hexdigest()
        assert_(tgt[np.dtype(bool).name] == res) 
Example #20
Source File: test_ctypeslib.py    From recruit with Apache License 2.0
def test_structure(self):
        dt = np.dtype([
            ('a', np.uint16),
            ('b', np.uint32),
        ])

        ct = np.ctypeslib.as_ctypes_type(dt)
        assert_(issubclass(ct, ctypes.Structure))
        assert_equal(ctypes.sizeof(ct), dt.itemsize)
        assert_equal(ct._fields_, [
            ('a', ctypes.c_uint16),
            ('b', ctypes.c_uint32),
        ]) 
Example #21
Source File: pq.py    From navec with MIT License
def from_file(cls, file):
        buffer = file.read(4 * 4)
        vectors, dim, qdim, centroids = np.frombuffer(buffer, np.uint32)
        buffer = file.read(vectors * qdim)
        indexes = np.frombuffer(buffer, np.uint8).reshape(vectors, qdim)
        buffer = file.read()
        codes = np.frombuffer(buffer, np.float32).reshape(qdim, centroids, -1)
        return cls(vectors, dim, qdim, centroids, indexes, codes) 
Example #22
Source File: pq.py    From navec with MIT License
def as_bytes(self):
        meta = self.vectors, self.dim, self.qdim, self.centroids
        meta = np.array(meta).astype(np.uint32).tobytes()
        indexes = self.indexes.astype(np.uint8).tobytes()
        codes = self.codes.astype(np.float32).tobytes()
        return meta + indexes + codes 
Example #23
Source File: vocab.py    From navec with MIT License
def from_file(cls, file):
        file = GzipFile(mode='rb', fileobj=file)

        buffer = file.read(4)
        size, = np.frombuffer(buffer, np.uint32)

        buffer = file.read(4 * size)
        counts = np.frombuffer(buffer, np.uint32).tolist()

        text = file.read().decode('utf8')
        words = text.splitlines()

        return cls(words, counts) 
Example #24
Source File: test_regression.py    From recruit with Apache License 2.0
def test_zerosize_accumulate(self):
        "Ticket #1733"
        x = np.array([[42, 0]], dtype=np.uint32)
        assert_equal(np.add.accumulate(x[:-1, 0]), []) 
Example #25
Source File: vocab.py    From navec with MIT License
def as_bytes(self):
        meta = [len(self.counts)]
        meta = np.array(meta).astype(np.uint32).tobytes()

        words = '\n'.join(self.words)
        words = words.encode('utf8')

        counts = np.array(self.counts, dtype=np.uint32).tobytes()
        return compress(meta + counts + words) 
Example #26
Source File: longtermmean.py    From yatsm with MIT License
def _fit_prep(self, model):
        if self.evi_index:
            if not isinstance(self.evi_scale, float):
                raise ValueError('Must provide scale factor for EVI')
            self.evi = model.Y[self.evi_index, :] * self.evi_scale
        else:
            self.evi = EVI(model.Y[self.red_index, :] * self.scale,
                           model.Y[self.nir_index, :] * self.scale,
                           model.Y[self.blue_index, :] * self.scale)

        self.ordinal = model.dates.astype(np.uint32)
        self.yeardoy = ordinal2yeardoy(self.ordinal)

        # Mask based on unusual EVI values
        valid_evi = np.where((self.evi >= 0) & (self.evi <= 1))[0]
        self.evi = self.evi[valid_evi]
        self.ordinal = self.ordinal[valid_evi]
        self.yeardoy = self.yeardoy[valid_evi, :]

        self.pheno = np.zeros(self.model.record.shape, dtype=[
            ('spring_doy', 'u2'),
            ('autumn_doy', 'u2'),
            ('pheno_cor', 'f4'),
            ('peak_evi', 'f4'),
            ('peak_doy', 'u2'),
            ('spline_evi', 'f8', 366),
            ('pheno_nobs', 'u2')
        ]) 
Example #27
Source File: test_function.py    From recruit with Apache License 2.0
def test_lower_int_prec_count():
    df = DataFrame({'a': np.array(
        [0, 1, 2, 100], np.int8),
        'b': np.array(
        [1, 2, 3, 6], np.uint32),
        'c': np.array(
        [4, 5, 6, 8], np.int16),
        'grp': list('ab' * 2)})
    result = df.groupby('grp').count()
    expected = DataFrame({'a': [2, 2],
                          'b': [2, 2],
                          'c': [2, 2]}, index=pd.Index(list('ab'),
                                                       name='grp'))
    tm.assert_frame_equal(result, expected) 
Example #28
Source File: stata.py    From recruit with Apache License 2.0
def __init__(self, df, columns, version=117, byteorder=None):
        if version not in (117, 118, 119):
            raise ValueError('Only dta versions 117, 118 and 119 supported')
        self._dta_ver = version

        self.df = df
        self.columns = columns
        self._gso_table = OrderedDict((('', (0, 0)),))
        if byteorder is None:
            byteorder = sys.byteorder
        self._byteorder = _set_endianness(byteorder)

        gso_v_type = 'I'  # uint32
        gso_o_type = 'Q'  # uint64
        self._encoding = 'utf-8'
        if version == 117:
            o_size = 4
            gso_o_type = 'I'  # 117 used uint32
            self._encoding = 'latin-1'
        elif version == 118:
            o_size = 6
        else:  # version == 119
            o_size = 5
        self._o_offet = 2 ** (8 * (8 - o_size))
        self._gso_o_type = gso_o_type
        self._gso_v_type = gso_v_type 
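For reference, the struct format codes used above ('I' and 'Q') pack fixed-width unsigned integers; a small sketch assuming explicit little-endian, standard-size packing.

import struct

assert struct.calcsize('<I') == 4       # 'I' -> uint32
assert struct.calcsize('<Q') == 8       # 'Q' -> uint64
print(struct.pack('<I', 2**32 - 1))     # b'\xff\xff\xff\xff'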
Example #29
Source File: task.py    From ibllib with MIT License
def _get_spike_counts_in_bins(spike_times, spike_clusters, intervals):
    """
    Return the number of spikes in a sequence of time intervals, for each neuron.

    Parameters
    ----------
    spike_times : 1D array
        spike times (in seconds)
    spike_clusters : 1D array
        cluster ids corresponding to each event in `spikes`
    intervals : 2D array of shape (n_events, 2)
        the start and end times of the events

    Returns
    ---------
    counts : 2D array of shape (n_neurons, n_events)
        the spike counts of all neurons for all events;
        value (i, j) is the number of spikes of neuron `neurons[i]` in interval #j
    cluster_ids : 1D array
        list of cluster ids
    """

    # Check input
    assert intervals.ndim == 2
    assert intervals.shape[1] == 2

    # For each neuron and each interval, the number of spikes in the interval.
    cluster_ids = np.unique(spike_clusters)
    n_neurons = len(cluster_ids)
    n_intervals = intervals.shape[0]
    counts = np.zeros((n_neurons, n_intervals), dtype=np.uint32)
    for j in range(n_intervals):
        t0, t1 = intervals[j, :]
        # Count the number of spikes in the window, for each neuron.
        x = np.bincount(
            spike_clusters[(t0 <= spike_times) & (spike_times < t1)],
            minlength=cluster_ids.max() + 1)
        counts[:, j] = x[cluster_ids]
    return counts, cluster_ids 
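A tiny, made-up usage sketch for _get_spike_counts_in_bins: four spikes from two clusters, counted over two intervals.

import numpy as np

spike_times = np.array([0.10, 0.20, 0.60, 1.10])      # seconds
spike_clusters = np.array([0, 1, 0, 0])               # cluster id of each spike
intervals = np.array([[0.0, 0.5], [0.5, 1.5]])        # (n_events, 2) start/end times
counts, cluster_ids = _get_spike_counts_in_bins(spike_times, spike_clusters, intervals)
print(counts)        # [[1 2]
                     #  [1 0]]  -- uint32, shape (n_neurons, n_events)
print(cluster_ids)   # [0 1]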
Example #30
Source File: test_regression.py    From recruit with Apache License 2.0
def test_nonzero_byteswap(self):
        a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
        a.dtype = np.float32
        assert_equal(a.nonzero()[0], [1])
        a = a.byteswap().newbyteorder()
        assert_equal(a.nonzero()[0], [1])  # [0] if nonzero() ignores swap