Python numpy.int8() Examples

The following are 30 code examples showing how to use numpy.int8(). They are extracted from open source projects; you can go to the original project or source file by following the links above each example.

You may also want to check out the related API usage, or browse all available functions and classes of the numpy module.
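Before diving into the project code, here is a short, self-contained illustration of the dtype itself: numpy.int8 is NumPy's signed 8-bit integer type, covering -128 to 127, and casting to it wraps out-of-range values rather than clipping them. (This snippet is a minimal sketch and is not taken from any of the projects below.)

import numpy as np

# The representable range of a signed 8-bit integer.
info = np.iinfo(np.int8)
print(info.min, info.max)        # -128 127

# astype() casts are "unsafe" by default: out-of-range values wrap
# modulo 256 instead of being clipped.
arr = np.array([0, 127, 128, 200], dtype=np.int16)
print(arr.astype(np.int8))       # [   0  127 -128  -56]

# np.int8 can also be passed anywhere a dtype is expected.
mask = np.zeros(4, dtype=np.int8)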

Example 1
Project: deep-learning-note   Author: wdxtub   File: utils.py    License: MIT License
def parse_data(path, dataset, flatten):
    if dataset != 'train' and dataset != 't10k':
        raise NameError('dataset must be train or t10k')

    label_file = os.path.join(path, dataset + '-labels-idx1-ubyte')
    with open(label_file, 'rb') as file:
        _, num = struct.unpack(">II", file.read(8))
        labels = np.fromfile(file, dtype=np.int8)  # int8
        new_labels = np.zeros((num, 10))
        new_labels[np.arange(num), labels] = 1

    img_file = os.path.join(path, dataset + '-images-idx3-ubyte')
    with open(img_file, 'rb') as file:
        _, num, rows, cols = struct.unpack(">IIII", file.read(16))
        imgs = np.fromfile(file, dtype=np.uint8).reshape(num, rows, cols)  # uint8
        imgs = imgs.astype(np.float32) / 255.0
        if flatten:
            imgs = imgs.reshape([num, -1])

    return imgs, new_labels 
Example 2
Project: DDPAE-video-prediction   Author: jthsieh   File: metrics.py    License: MIT License
def find_match(self, pred, gt):
    '''
    Match component to balls.
    '''
    batch_size, n_frames_input, n_components, _ = pred.shape
    diff = pred.reshape(batch_size, n_frames_input, n_components, 1, 2) - \
               gt.reshape(batch_size, n_frames_input, 1, n_components, 2)
    diff = np.sum(np.sum(diff ** 2, axis=-1), axis=1)
    # Direct indices
    indices = np.argmin(diff, axis=2)
    ambiguous = np.zeros(batch_size, dtype=np.int8)
    for i in range(batch_size):
      _, counts = np.unique(indices[i], return_counts=True)
      if not np.all(counts == 1):
        ambiguous[i] = 1
    return indices, ambiguous 
Example 3
def test_quantize_float32_to_int8():
    shape = rand_shape_nd(4)
    data = rand_ndarray(shape, 'default', dtype='float32')
    min_range = mx.nd.min(data)
    max_range = mx.nd.max(data)
    qdata, min_val, max_val = mx.nd.contrib.quantize(data, min_range, max_range, out_type='int8')
    data_np = data.asnumpy()
    min_range = min_range.asscalar()
    max_range = max_range.asscalar()
    real_range = np.maximum(np.abs(min_range), np.abs(max_range))
    quantized_range = 127.0
    scale = quantized_range / real_range
    assert qdata.dtype == np.int8
    assert min_val.dtype == np.float32
    assert max_val.dtype == np.float32
    assert same(min_val.asscalar(), -real_range)
    assert same(max_val.asscalar(), real_range)
    qdata_np = (np.sign(data_np) * np.minimum(np.abs(data_np) * scale + 0.5, quantized_range)).astype(np.int8)
    assert_almost_equal(qdata.asnumpy(), qdata_np, atol = 1) 
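The assertions above spell out the symmetric int8 quantization rule: take the larger of |min| and |max| as the real range, map it to 127, and round half away from zero while saturating at 127. A minimal NumPy-only sketch of the same formula (the function name is illustrative, not part of the MXNet API):

import numpy as np

def quantize_symmetric_int8(x):
    # Use a symmetric range so that 0.0 maps exactly to 0.
    real_range = max(abs(float(x.min())), abs(float(x.max())))
    scale = 127.0 / real_range
    # Scale, round half away from zero (+0.5 before truncation), saturate at 127.
    q = np.sign(x) * np.minimum(np.abs(x) * scale + 0.5, 127.0)
    return q.astype(np.int8), np.float32(-real_range), np.float32(real_range)

data = np.random.uniform(-1, 1, size=(3, 4)).astype(np.float32)
qdata, qmin, qmax = quantize_symmetric_int8(data)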
Example 4
def test_quantized_flatten():
    def check_quantized_flatten(shape, qdtype):
        if qdtype == 'uint8':
            data_low = 0.0
            data_high = 127.0
        else:
            data_low = -127.0
            data_high = 127.0
        qdata = mx.nd.random.uniform(low=data_low, high=data_high, shape=shape).astype(qdtype)
        min_data = mx.nd.array([-1023.343], dtype='float32')
        max_data = mx.nd.array([2343.324275], dtype='float32')
        qoutput, min_output, max_output = mx.nd.contrib.quantized_flatten(qdata, min_data, max_data)
        assert qoutput.ndim == 2
        assert qoutput.shape[0] == qdata.shape[0]
        assert qoutput.shape[1] == np.prod(qdata.shape[1:])
        assert same(qdata.asnumpy().flatten(), qoutput.asnumpy().flatten())
        assert same(min_data.asnumpy(), min_output.asnumpy())
        assert same(max_data.asnumpy(), max_output.asnumpy())

    for qdtype in ['int8', 'uint8']:
        check_quantized_flatten((10,), qdtype)
        check_quantized_flatten((10, 15), qdtype)
        check_quantized_flatten((10, 15, 18), qdtype)
        check_quantized_flatten((3, 4, 23, 23), qdtype) 
Example 5
Project: Kaggler   Author: jeongyoonlee   File: test_ftrl.py    License: MIT License
def main():
    print('create y...')
    y = np.random.randint(2, size=N_OBS)
    print('create X...')
    row = np.random.randint(N_OBS, size=N_VALUE)
    col = np.random.randint(N_FEATURE, size=N_VALUE)
    data = np.ones(N_VALUE)
    X = sparse.csr_matrix((data, (row, col)), dtype=np.int8)

    print('train...')
    profiler = cProfile.Profile(subcalls=True, builtins=True, timeunit=0.001,)
    clf = FTRL(interaction=False)
    profiler.enable()
    clf.fit(X, y)
    profiler.disable()
    profiler.print_stats()

    p = clf.predict(X)
    print('AUC: {:.4f}'.format(auc(y, p)))

    assert auc(y, p) > .5 
Example 6
Project: pyscf   Author: pyscf   File: numpy_helper.py    License: Apache License 2.0
def frompointer(pointer, count, dtype=float):
    '''Interpret a buffer that the pointer refers to as a 1-dimensional array.

    Args:
        pointer : int or ctypes pointer
            address of a buffer
        count : int
            Number of items to read.
        dtype : data-type, optional
            Data-type of the returned array; default: float.

    Examples:

    >>> s = numpy.ones(3, dtype=numpy.int32)
    >>> ptr = s.ctypes.data
    >>> frompointer(ptr, count=6, dtype=numpy.int16)
    [1, 0, 1, 0, 1, 0]
    '''
    dtype = numpy.dtype(dtype)
    count *= dtype.itemsize
    buf = (ctypes.c_char * count).from_address(pointer)
    a = numpy.ndarray(count, dtype=numpy.int8, buffer=buf)
    return a.view(dtype) 
Example 7
Project: me-ica   Author: ME-ICA   File: test_arraywriters.py    License: GNU Lesser General Public License v2.1
def test_no_offset_scale():
    # Specific tests of no-offset scaling
    SAW = SlopeArrayWriter
    # Floating point
    for data in ((-128, 127),
                  (-128, 126),
                  (-128, -127),
                  (-128, 0),
                  (-128, -1),
                  (126, 127),
                  (-127, 127)):
        aw = SAW(np.array(data, dtype=np.float32), np.int8)
        assert_equal(aw.slope, 1.0)
    aw = SAW(np.array([-126, 127 * 2.0], dtype=np.float32), np.int8)
    assert_equal(aw.slope, 2)
    aw = SAW(np.array([-128 * 2.0, 127], dtype=np.float32), np.int8)
    assert_equal(aw.slope, 2)
    # Test that nasty abs behavior does not upset us
    n = -2**15
    aw = SAW(np.array([n, n], dtype=np.int16), np.uint8)
    assert_array_almost_equal(aw.slope, n / 255.0, 5) 
Example 8
Project: me-ica   Author: ME-ICA   File: test_scaling.py    License: GNU Lesser General Public License v2.1
def test_calculate_scale():
    # Test for special cases in scale calculation
    npa = np.array
    # Here the offset handles it
    res = calculate_scale(npa([-2, -1], dtype=np.int8), np.uint8, True)
    assert_equal(res, (1.0, -2.0, None, None))
    # Not having offset not a problem obviously
    res = calculate_scale(npa([-2, -1], dtype=np.int8), np.uint8, 0)
    assert_equal(res, (-1.0, 0.0, None, None))
    # Case where offset handles scaling
    res = calculate_scale(npa([-1, 1], dtype=np.int8), np.uint8, 1)
    assert_equal(res, (1.0, -1.0, None, None))
    # Can't work for no offset case
    assert_raises(ValueError,
                  calculate_scale, npa([-1, 1], dtype=np.int8), np.uint8, 0)
    # Offset trick can't work when max is out of range
    res = calculate_scale(npa([-1, 255], dtype=np.int16), np.uint8, 1)
    assert_not_equal(res, (1.0, -1.0, None, None)) 
Example 9
Project: me-ica   Author: ME-ICA   File: test_utils.py    License: GNU Lesser General Public License v2.1
def test_a2f_min_max():
    str_io = BytesIO()
    for in_dt in (np.float32, np.int8):
        for out_dt in (np.float32, np.int8):
            arr = np.arange(4, dtype=in_dt)
            # min thresholding
            data_back = write_return(arr, str_io, out_dt, 0, 0, 1, 1)
            assert_array_equal(data_back, [1, 1, 2, 3])
            # max thresholding
            data_back = write_return(arr, str_io, out_dt, 0, 0, 1, None, 2)
            assert_array_equal(data_back, [0, 1, 2, 2])
            # min max thresholding
            data_back = write_return(arr, str_io, out_dt, 0, 0, 1, 1, 2)
            assert_array_equal(data_back, [1, 1, 2, 2])
    # Check that works OK with scaling and intercept
    arr = np.arange(4, dtype=np.float32)
    data_back = write_return(arr, str_io, np.int, 0, -1, 0.5, 1, 2)
    assert_array_equal(data_back * 0.5 - 1, [1, 1, 2, 2])
    # Even when scaling is negative
    data_back = write_return(arr, str_io, np.int, 0, 1, -0.5, 1, 2)
    assert_array_equal(data_back * -0.5 + 1, [1, 1, 2, 2]) 
Example 10
Project: me-ica   Author: ME-ICA   File: test_utils.py    License: GNU Lesser General Public License v2.1
def test_can_cast():
    tests = ((np.float32, np.float32, True, True, True),
             (np.float64, np.float32, True, True, True),
             (np.complex128, np.float32, False, False, False),
             (np.float32, np.complex128, True, True, True),
             (np.float32, np.uint8, False, True, True),
             (np.uint32, np.complex128, True, True, True),
             (np.int64, np.float32, True, True, True),
             (np.complex128, np.int16, False, False, False),
             (np.float32, np.int16, False, True, True),
             (np.uint8, np.int16, True, True, True),
             (np.uint16, np.int16, False, True, True),
             (np.int16, np.uint16, False, False, True),
             (np.int8, np.uint16, False, False, True),
             (np.uint16, np.uint8, False, True, True),
             )
    for intype, outtype, def_res, scale_res, all_res in tests:
        assert_equal(def_res, can_cast(intype, outtype))
        assert_equal(scale_res, can_cast(intype, outtype, False, True))
        assert_equal(all_res, can_cast(intype, outtype, True, True)) 
Example 11
Project: hsds   Author: HDFGroup   File: hdf5dtypeTest.py    License: Apache License 2.0
def testEnumArrayTypeItem(self):
        mapping = {'RED': 0, 'GREEN': 1, 'BLUE': 2}
        dt_enum = special_dtype(enum=(np.int8, mapping))
        typeItem = hdf5dtype.getTypeItem(dt_enum)
        dt_array = np.dtype('(2,3)'+dt_enum.str, metadata=dict(dt_enum.metadata))

        typeItem = hdf5dtype.getTypeItem(dt_array)

        self.assertEqual(typeItem['class'], 'H5T_ARRAY')
        self.assertTrue("dims" in typeItem)
        self.assertEqual(typeItem["dims"], (2,3))
        baseItem = typeItem['base']
        self.assertEqual(baseItem['class'], 'H5T_ENUM')
        self.assertTrue('mapping' in baseItem)
        self.assertEqual(baseItem['mapping']['GREEN'], 1)
        self.assertTrue("base" in baseItem)
        basePrim = baseItem["base"]
        self.assertEqual(basePrim["class"], 'H5T_INTEGER')
        self.assertEqual(basePrim['base'], 'H5T_STD_I8LE')
        typeSize = hdf5dtype.getItemSize(typeItem)
        self.assertEqual(typeSize, 6)  # one-byte for base enum type * shape of (2,3) 
Example 12
Project: baseband   Author: mhvk   File: test_base.py    License: GNU General Public License v3.0
def test_payload_getitem_setitem(self, item):
        data = self.payload.data
        sel_data = data[item]
        assert np.all(self.payload[item] == sel_data)
        payload = self.Payload(self.payload.words.copy(), sample_shape=(2,),
                               bps=8, complex_data=False)
        assert payload == self.payload
        payload[item] = 1 - sel_data
        check = self.payload.data
        check[item] = 1 - sel_data
        assert np.all(payload[item] == 1 - sel_data)
        assert np.all(payload.data == check)
        assert np.all(payload[:]
                      == payload.words.view(np.int8).reshape(-1, 2))
        assert payload != self.payload
        payload[item] = sel_data
        assert np.all(payload[item] == sel_data)
        assert payload == self.payload
        payload = self.Payload.fromdata(data + 1j * data, bps=8)
        sel_data = payload.data[item]
        assert np.all(payload[item] == sel_data)
        payload[item] = 1 - sel_data
        check = payload.data
        check[item] = 1 - sel_data
        assert np.all(payload.data == check) 
Example 13
Project: baseband   Author: mhvk   File: test_vlbi_base.py    License: GNU General Public License v3.0
def test_payload_getitem_setitem(self, item):
        data = self.payload.data
        sel_data = data[item]
        assert np.all(self.payload[item] == sel_data)
        payload = self.Payload(self.payload.words.copy(), sample_shape=(2,),
                               bps=8, complex_data=False)
        assert payload == self.payload
        payload[item] = 1 - sel_data
        check = self.payload.data
        check[item] = 1 - sel_data
        assert np.all(payload[item] == 1 - sel_data)
        assert np.all(payload.data == check)
        assert np.all(payload[:]
                      == payload.words.view(np.int8).reshape(-1, 2))
        assert payload != self.payload
        payload[item] = sel_data
        assert np.all(payload[item] == sel_data)
        assert payload == self.payload
        payload = self.Payload.fromdata(data + 1j * data, bps=8)
        sel_data = payload.data[item]
        assert np.all(payload[item] == sel_data)
        payload[item] = 1 - sel_data
        check = payload.data
        check[item] = 1 - sel_data
        assert np.all(payload.data == check) 
Example 14
Project: vergeml   Author: mme   File: env.py    License: MIT License
def _convert(self, vals):
        res = {}
        for k, v in vals.items():
            if isinstance(v, (np.int, np.int8, np.int16, np.int32, np.int64)):
                v = int(v)
            elif isinstance(v, (np.float, np.float16, np.float32, np.float64)):
                v = float(v)
            elif isinstance(v, Labels):
                v = list(v)
            elif isinstance(v, np.ndarray):
                v = v.tolist()
            elif isinstance(v, dict):
                v = self._convert(v)
            res[k] = v
        return res 
Example 15
Project: vergeml   Author: mme   File: env.py    License: MIT License
def _toscalar(v):
    if isinstance(v, (np.float16, np.float32, np.float64,
                      np.uint8, np.uint16, np.uint32, np.uint64,
                      np.int8, np.int16, np.int32, np.int64)):
        return np.asscalar(v)
    else:
        return v 
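np.asscalar was deprecated in NumPy 1.16 and has since been removed; on current NumPy the same conversion is written with .item(). A sketch of an equivalent helper, assuming only NumPy itself:

import numpy as np

def to_scalar(v):
    # Any NumPy scalar type (np.int8, np.float32, ...) derives from np.generic,
    # and .item() returns the corresponding native Python scalar.
    if isinstance(v, np.generic):
        return v.item()
    return v

assert to_scalar(np.int8(7)) == 7
assert isinstance(to_scalar(np.int8(7)), int)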
Example 16
Project: spectrum_painter   Author: polygon   File: radios.py    License: MIT License
def convert(self, complex_iq):
        intlv = self._interleave(complex_iq)
        clipped = self._clip(intlv)
        converted = 127. * clipped
        hackrf_out = converted.astype(np.int8)
        return hackrf_out 
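The _interleave and _clip helpers are not shown in this excerpt. As a rough, standalone illustration of the overall idea (hypothetical names; it assumes the complex samples are already normalised to roughly unit magnitude), the pipeline could be sketched as:

import numpy as np

def complex_iq_to_int8(complex_iq):
    # Interleave the real (I) and imaginary (Q) parts: I0, Q0, I1, Q1, ...
    iq = np.empty(2 * len(complex_iq), dtype=np.float64)
    iq[0::2] = np.real(complex_iq)
    iq[1::2] = np.imag(complex_iq)
    # Clip to [-1, 1], scale into the signed 8-bit range, and cast.
    return (127.0 * np.clip(iq, -1.0, 1.0)).astype(np.int8)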
Example 17
Project: models   Author: kipoi   File: dataloader_m.py    License: MIT License
def annotate(anno_file, chromo, pos):
    #anno_file = dat.GzipFile(anno_file, 'r')
    anno_file = get_fh(anno_file, 'r')
    anno = pd.read_table(anno_file, header=None, usecols=[0, 1, 2],
                         dtype={0: 'str', 1: 'int32', 2: 'int32'})
    anno_file.close()
    anno.columns = ['chromo', 'start', 'end']
    anno.chromo = anno.chromo.str.upper().str.replace('CHR', '')
    anno = anno.loc[anno.chromo == chromo]
    anno.sort_values('start', inplace=True)
    start, end = an.join_overlapping(anno.start.values, anno.end.values)
    anno = np.array(an.is_in(pos, start, end), dtype='int8')
    return anno 
Example 18
Project: neuropythy   Author: noahbenson   File: images.py    License: GNU Affero General Public License v3.0
def parse_type(self, hdat, dataobj=None):
        dtype = super(MGHImageType, self).parse_type(hdat, dataobj=dataobj)
        if   np.issubdtype(dtype, np.floating): dtype = np.float32
        elif np.issubdtype(dtype, np.int8):     dtype = np.int8
        elif np.issubdtype(dtype, np.int16):    dtype = np.int16
        elif np.issubdtype(dtype, np.integer):  dtype = np.int32
        else: raise ValueError('Could not deduce appropriate MGH type for dtype %s' % dtype)
        return dtype 
Example 19
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: capsulenet.py    License: Apache License 2.0
def read_data(label_url, image_url):
    with gzip.open(download_data(label_url)) as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))
        label = np.fromstring(flbl.read(), dtype=np.int8)
    with gzip.open(download_data(image_url), 'rb') as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
    return label, image 
Example 20
def read_data(label, image):
    """
    download and read data into numpy
    """
    base_url = 'http://yann.lecun.com/exdb/mnist/'
    with gzip.open(download_file(base_url+label, os.path.join('data',label))) as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))
        label = np.fromstring(flbl.read(), dtype=np.int8)
    with gzip.open(download_file(base_url+image, os.path.join('data',image)), 'rb') as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
    return (label, image) 
Example 21
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_utils.py    License: Apache License 2.0
def get_mnist():
    """Download and load the MNIST dataset

    Returns
    -------
    dict
        A dict containing the data
    """
    def read_data(label_url, image_url):
        with gzip.open(mx.test_utils.download(label_url)) as flbl:
            struct.unpack(">II", flbl.read(8))
            label = np.frombuffer(flbl.read(), dtype=np.int8)
        with gzip.open(mx.test_utils.download(image_url), 'rb') as fimg:
            _, _, rows, cols = struct.unpack(">IIII", fimg.read(16))
            image = np.frombuffer(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
            image = image.reshape(image.shape[0], 1, 28, 28).astype(np.float32)/255
        return (label, image)

    # changed to mxnet.io for more stable hosting
    # path = 'http://yann.lecun.com/exdb/mnist/'
    path = 'http://data.mxnet.io/data/mnist/'
    (train_lbl, train_img) = read_data(
        path+'train-labels-idx1-ubyte.gz', path+'train-images-idx3-ubyte.gz')
    (test_lbl, test_img) = read_data(
        path+'t10k-labels-idx1-ubyte.gz', path+'t10k-images-idx3-ubyte.gz')
    return {'train_data':train_img, 'train_label':train_lbl,
            'test_data':test_img, 'test_label':test_lbl} 
Example 22
def test_ndarray_elementwise():
    nrepeat = 10
    maxdim = 4
    all_type = [np.float32, np.float64, np.float16, np.uint8, np.int8, np.int32, np.int64]
    real_type = [np.float32, np.float64, np.float16]
    for repeat in range(nrepeat):
        for dim in range(1, maxdim):
            check_with_uniform(lambda x, y: x + y, 2, dim, type_list=all_type)
            check_with_uniform(lambda x, y: x - y, 2, dim, type_list=all_type)
            check_with_uniform(lambda x, y: x * y, 2, dim, type_list=all_type)
            check_with_uniform(lambda x, y: x / y, 2, dim, type_list=real_type)
            check_with_uniform(lambda x, y: x / y, 2, dim, rmin=1, type_list=all_type)
            check_with_uniform(mx.nd.sqrt, 1, dim, np.sqrt, rmin=0)
            check_with_uniform(mx.nd.square, 1, dim, np.square, rmin=0)
            check_with_uniform(lambda x: mx.nd.norm(x).asscalar(), 1, dim, np.linalg.norm) 
Example 23
Project: DOTA_models   Author: ringringyi   File: skip_thoughts_encoder.py    License: Apache License 2.0
def _pad(seq, target_len):
  """Pads a sequence of word embeddings up to the target length.

  Args:
    seq: Sequence of word embeddings.
    target_len: Desired padded sequence length.

  Returns:
    embeddings: Input sequence padded with zero embeddings up to the target
      length.
    mask: A 0/1 vector with zeros corresponding to padded embeddings.

  Raises:
    ValueError: If len(seq) is not in the interval (0, target_len].
  """
  seq_len = len(seq)
  if seq_len <= 0 or seq_len > target_len:
    raise ValueError("Expected 0 < len(seq) <= %d, got %d" % (target_len,
                                                              seq_len))

  emb_dim = seq[0].shape[0]
  padded_seq = np.zeros(shape=(target_len, emb_dim), dtype=seq[0].dtype)
  mask = np.zeros(shape=(target_len,), dtype=np.int8)
  for i in range(seq_len):
    padded_seq[i] = seq[i]
    mask[i] = 1
  return padded_seq, mask 
Example 24
Project: DOTA_models   Author: ringringyi   File: gen_synthetic_single.py    License: Apache License 2.0
def GenerateSample(filename, code_shape, layer_depth):
  # {0, +1} binary codes.
  # No conversion since the output file is expected to store
  # codes using {0, +1} codes (and not {-1, +1}).
  code = synthetic_model.GenerateSingleCode(code_shape)
  code = np.round(code)

  # Reformat the code so as to be compatible with what is generated
  # by the image encoder.
  # The image encoder generates a tensor of size:
  # iteration_count x batch_size x height x width x iteration_depth.
  # Here: batch_size = 1
  if code_shape[-1] % layer_depth != 0:
    raise ValueError('Number of layers is not an integer')
  height = code_shape[0]
  width = code_shape[1]
  code = code.reshape([1, height, width, -1, layer_depth])
  code = np.transpose(code, [3, 0, 1, 2, 4])

  int_codes = code.astype(np.int8)
  exported_codes = np.packbits(int_codes.reshape(-1))

  output = io.BytesIO()
  np.savez_compressed(output, shape=int_codes.shape, codes=exported_codes)
  with tf.gfile.FastGFile(filename, 'wb') as code_file:
    code_file.write(output.getvalue()) 
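Because the codes are restricted to {0, +1}, np.packbits stores eight of them per byte. Reading such a file back is the reverse operation, np.unpackbits plus the saved shape; a minimal round trip (variable names are illustrative):

import numpy as np

codes = np.random.randint(0, 2, size=(1, 4, 4, 2), dtype=np.int8)
packed = np.packbits(codes.reshape(-1))        # uint8, eight codes per byte
restored = np.unpackbits(packed)[:codes.size].reshape(codes.shape).astype(np.int8)
assert np.array_equal(codes, restored)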
Example 25
Project: fine-lm   Author: akzaidi   File: text_encoder.py    License: MIT License
def decode(self, ids, strip_extraneous=False):
    del strip_extraneous
    label_id = ids
    if isinstance(label_id, np.ndarray):
      label_id = np.squeeze(label_id).astype(np.int8).tolist()
    assert isinstance(label_id, list)
    assert len(label_id) == self.vocab_size
    return self._class_labels[label_id.index(1)] 
Example 26
Project: Recipes   Author: Lasagne   File: cifar10.py    License: MIT License
def load_dataset(path):
    download_dataset(path)

    # training data
    data = [np.load(os.path.join(path, 'cifar-10-batches-py',
                                 'data_batch_%d' % (i + 1))) for i in range(5)]
    X_train = np.vstack([d['data'] for d in data])
    y_train = np.hstack([np.asarray(d['labels'], np.int8) for d in data])

    # test data
    data = np.load(os.path.join(path, 'cifar-10-batches-py', 'test_batch'))
    X_test = data['data']
    y_test = np.asarray(data['labels'], np.int8)

    # reshape
    X_train = X_train.reshape(-1, 3, 32, 32)
    X_test = X_test.reshape(-1, 3, 32, 32)

    # normalize
    try:
        mean_std = np.load(os.path.join(path, 'cifar-10-mean_std.npz'))
        mean = mean_std['mean']
        std = mean_std['std']
    except IOError:
        mean = X_train.mean(axis=(0, 2, 3), keepdims=True).astype(np.float32)
        std = X_train.std(axis=(0, 2, 3), keepdims=True).astype(np.float32)
        np.savez(os.path.join(path, 'cifar-10-mean_std.npz'),
                 mean=mean, std=std)
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std

    return X_train, y_train, X_test, y_test 
Example 27
Project: dataflow   Author: tensorpack   File: parallel_map.py    License: Apache License 2.0
def _create_shared_arr(self):
        TYPE = {
            np.float32: ctypes.c_float,
            np.float64: ctypes.c_double,
            np.uint8: ctypes.c_uint8,
            np.int8: ctypes.c_int8,
            np.int32: ctypes.c_int32,
        }
        ctype = TYPE[self.output_dtype]
        arr = mp.RawArray(ctype, int(np.prod(self.output_shape)))
        return arr 
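The RawArray returned here is plain shared memory; consumers usually wrap it back into a NumPy view before reading or writing. A minimal sketch of that step (not part of the dataflow excerpt above):

import ctypes
import multiprocessing as mp
import numpy as np

shape = (4, 3)
arr = mp.RawArray(ctypes.c_int8, int(np.prod(shape)))
# np.frombuffer creates a zero-copy NumPy view over the shared buffer.
view = np.frombuffer(arr, dtype=np.int8).reshape(shape)
view[:] = -1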
Example 28
Project: me-ica   Author: ME-ICA   File: casting.py    License: GNU Lesser General Public License v2.1
def int_abs(arr):
    """ Absolute values of array taking care of max negative int values

    Parameters
    ----------
    arr : array-like

    Returns
    -------
    abs_arr : array
        array the same shape as `arr` in which all negative numbers have been
        changed to positive numbers with the magnitude.

    Examples
    --------
    This kind of thing is confusing in base numpy:

    >>> import numpy as np
    >>> np.abs(np.int8(-128))
    -128

    ``int_abs`` fixes that:

    >>> int_abs(np.int8(-128))
    128
    >>> int_abs(np.array([-128, 127], dtype=np.int8))
    array([128, 127], dtype=uint8)
    >>> int_abs(np.array([-128, 127], dtype=np.float32))
    array([ 128.,  127.], dtype=float32)
    """
    arr = np.array(arr, copy=False)
    dt = arr.dtype
    if dt.kind == 'u':
        return arr
    if dt.kind != 'i':
        return np.absolute(arr)
    out = arr.astype(np.dtype(dt.str.replace('i', 'u')))
    return np.choose(arr < 0, (arr, arr * -1), out=out) 
Example 29
Project: me-ica   Author: ME-ICA   File: casting.py    License: GNU Lesser General Public License v2.1
def able_int_type(values):
    """ Find the smallest integer numpy type to contain sequence `values`

    Prefers uint to int if minimum is >= 0

    Parameters
    ----------
    values : sequence
        sequence of integer values

    Returns
    -------
    itype : None or numpy type
        numpy integer type or None if no integer type holds all `values`

    Examples
    --------
    >>> able_int_type([0, 1]) == np.uint8
    True
    >>> able_int_type([-1, 1]) == np.int8
    True
    """
    if any([v % 1 for v in values]):
        return None
    mn = min(values)
    mx = max(values)
    if mn >= 0:
        for ityp in np.sctypes['uint']:
            if mx <= np.iinfo(ityp).max:
                return ityp
    for ityp in np.sctypes['int']:
        info = np.iinfo(ityp)
        if mn >= info.min and mx <= info.max:
            return ityp
    return None 
Example 30
Project: me-ica   Author: ME-ICA   File: arraywriters.py    License: GNU Lesser General Public License v2.1
def __init__(self, array, out_dtype=None, calc_scale=True):
        """ Initialize array writer

        Parameters
        ----------
        array : array-like
            array-like object
        out_dtype : None or dtype
            dtype with which `array` will be written.  For this class,
`out_dtype` needs to be the same as the dtype of the input `array`
            or a swapped version of the same.
        \*\*kwargs : keyword arguments

        Examples
        --------
        >>> arr = np.array([0, 255], np.uint8)
        >>> aw = ArrayWriter(arr)
        >>> aw = ArrayWriter(arr, np.int8) #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        WriterError: Scaling needed but cannot scale
        """
        self._array = np.asanyarray(array)
        arr_dtype = self._array.dtype
        if out_dtype is None:
            out_dtype = arr_dtype
        else:
            out_dtype = np.dtype(out_dtype)
        self._out_dtype = out_dtype
        self._finite_range = None
        if self.scaling_needed():
            raise WriterError("Scaling needed but cannot scale")