Python numpy.savez() Examples

The following are 30 code examples showing how to use numpy.savez(), extracted from open source projects. The source project, author, file, and license are noted above each example where available.
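
Before the examples, here is a minimal round-trip sketch of the API (the file name is illustrative): np.savez() writes one or more arrays into a single uncompressed .npz archive, storing positional arguments as arr_0, arr_1, ... and keyword arguments under their keyword names, while np.load() returns a dict-like NpzFile for reading them back.

import numpy as np

x = np.arange(10)
y = np.sin(x)

# Keyword arguments become the array names inside the archive;
# positional arguments would be stored as arr_0, arr_1, ...
np.savez('example.npz', x=x, y=y)

# np.load returns a lazy, dict-like NpzFile.
with np.load('example.npz') as data:
    print(data['x'])            # [0 1 2 3 4 5 6 7 8 9]
    print(sorted(data.keys()))  # ['x', 'y']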

Example 1
def test_consistency(dump=False):
    shape = (299, 299)
    _get_model()
    _get_data(shape)
    if dump:
        _dump_images(shape)
        gt = None
    else:
        gt = {n: mx.nd.array(a) for n, a in np.load('data/inception-v3-dump.npz').items()}
    data = np.load('data/test_images_%d_%d.npy'%shape)
    sym, arg_params, aux_params = mx.model.load_checkpoint('model/Inception-7', 1)
    arg_params['data'] = data
    arg_params['softmax_label'] = np.random.randint(low=1, high=1000, size=(data.shape[0],))
    ctx_list = [{'ctx': mx.gpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}},
                {'ctx': mx.cpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}}]
    gt = check_consistency(sym, ctx_list, arg_params=arg_params, aux_params=aux_params,
                           tol=1e-3, grad_req='null', raise_on_err=False, ground_truth=gt)
    if dump:
        np.savez('data/inception-v3-dump.npz', **{n: a.asnumpy() for n, a in gt.items()}) 
Example 2
Project: robosuite   Author: StanfordVL   File: data_collection_wrapper.py    License: MIT License
def _flush(self):
        """
        Method to flush internal state to disk.
        """
        t1, t2 = str(time.time()).split(".")
        state_path = os.path.join(self.ep_directory, "state_{}_{}.npz".format(t1, t2))
        if hasattr(self.env, "unwrapped"):
            env_name = self.env.unwrapped.__class__.__name__
        else:
            env_name = self.env.__class__.__name__
        np.savez(
            state_path,
            states=np.array(self.states),
            action_infos=self.action_infos,
            env=env_name,
        )
        self.states = []
        self.action_infos = [] 
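
Note that states is converted to an ndarray, but action_infos is left as a Python list of dicts, which np.savez pickles into an object array. Since NumPy 1.16.3, np.load rejects pickled data by default, so such an archive must be read back with allow_pickle=True. A minimal sketch of the read side (the file name is illustrative):

import numpy as np

with np.load('state_1600000000_123456.npz', allow_pickle=True) as data:
    states = data['states']                       # regular ndarray
    action_infos = data['action_infos'].tolist()  # object array -> list of dicts
    env_name = str(data['env'])                   # 0-d string array -> str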
Example 3
Project: scanorama   Author: brianhie   File: process.py    License: MIT License
def process_tab(fname, min_trans=MIN_TRANSCRIPTS):
    X, cells, genes = load_tab(fname)

    gt_idx = [ i for i, s in enumerate(np.sum(X != 0, axis=1))
               if s >= min_trans ]
    X = X[gt_idx, :]
    cells = cells[gt_idx]
    if len(gt_idx) == 0:
        print('Warning: 0 cells passed QC in {}'.format(fname))
    if fname.endswith('.txt'):
        cache_prefix = '.'.join(fname.split('.')[:-1])
    elif fname.endswith('.txt.gz'):
        cache_prefix = '.'.join(fname.split('.')[:-2])
    elif fname.endswith('.tsv'):
        cache_prefix = '.'.join(fname.split('.')[:-1])
    elif fname.endswith('.tsv.gz'):
        cache_prefix = '.'.join(fname.split('.')[:-2])
    else:
        sys.stderr.write('Tab files should end with ".txt" or ".tsv"\n')
        exit(1)
        
    cache_fname = cache_prefix + '.npz'
    np.savez(cache_fname, X=X, genes=genes)

    return X, cells, genes 
Example 4
Project: DeepLab_v3   Author: leimao   File: utils.py    License: MIT License
def save_load_means(means_filename, image_filenames, recalculate=False):
    '''
    Calculate and save the means of RGB channels in image dataset if the mean file does not exist.
    Otherwise read the means directly from the mean file.
    means_filename: npz filename for image channel means
    image_filenames: list of image filenames
    recalculate: recalculate image channel means regardless the existence of mean file
    '''

    if (not os.path.isfile(means_filename)) or recalculate:
        print('Calculating pixel means for each channel of images...')
        channel_means = image_channel_means(image_filenames=image_filenames)
        np.savez(means_filename, channel_means=channel_means)
    else:
        channel_means = np.load(means_filename)['channel_means']

    return channel_means 
Example 5
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_savez_filename_clashes(self):
        # Test that issue #852 is fixed
        # and savez works in a multithreaded environment

        def writer(error_list):
            with temppath(suffix='.npz') as tmp:
                arr = np.random.randn(500, 500)
                try:
                    np.savez(tmp, arr=arr)
                except OSError as err:
                    error_list.append(err)

        errors = []
        threads = [threading.Thread(target=writer, args=(errors,))
                   for j in range(3)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        if errors:
            raise AssertionError(errors) 
Example 6
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_closing_fid(self):
        # Test that issue #1517 (too many opened files) remains closed
        # It might be a "weak" test since failed to get triggered on
        # e.g. Debian sid of 2012 Jul 05 but was reported to
        # trigger the failure on Ubuntu 10.04:
        # http://projects.scipy.org/numpy/ticket/1517#comment:2
        with temppath(suffix='.npz') as tmp:
            np.savez(tmp, data='LOVELY LOAD')
            # We need to check if the garbage collector can properly close
            # numpy npz file returned by np.load when their reference count
            # goes to zero.  Python 3 running in debug mode raises a
            # ResourceWarning when file closing is left to the garbage
            # collector, so we catch the warnings.  Because ResourceWarning
            # is unknown in Python < 3.x, we take the easy way out and
            # catch all warnings.
            with suppress_warnings() as sup:
                sup.filter(Warning)  # TODO: specify exact message
                for i in range(1, 1025):
                    try:
                        np.load(tmp)["data"]
                    except Exception as e:
                        msg = "Failed to load data from a file: %s" % e
                        raise AssertionError(msg) 
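
The same concern can be sidestepped in user code: the NpzFile returned by np.load is a context manager, so the underlying file handle can be closed deterministically instead of being left to the garbage collector. A minimal sketch (the file name is illustrative):

import numpy as np

np.savez('tmp.npz', data='LOVELY LOAD')
with np.load('tmp.npz') as npz:   # handle is closed on exit
    value = npz['data']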
Example 7
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_npzfile_dict():
    s = BytesIO()
    x = np.zeros((3, 3))
    y = np.zeros((3, 3))

    np.savez(s, x=x, y=y)
    s.seek(0)

    z = np.load(s)

    assert_('x' in z)
    assert_('y' in z)
    assert_('x' in z.keys())
    assert_('y' in z.keys())

    for f, a in z.items():
        assert_(f in ['x', 'y'])
        assert_equal(a.shape, (3, 3))

    assert_(len(z.items()) == 2)

    for f in z:
        assert_(f in ['x', 'y'])

    assert_('x' in z.keys()) 
Example 8
Project: recruit   Author: Frank-qlu   File: test_format.py    License: Apache License 2.0
def test_large_archive():
    # Regression test for saving arrays whose total element count does not
    # fit in an int32.  See gh-7598 for details.
    try:
        a = np.empty((2**30, 2), dtype=np.uint8)
    except MemoryError:
        pytest.skip("Could not create large file")

    fname = os.path.join(tempdir, "large_archive")

    with open(fname, "wb") as f:
        np.savez(f, arr=a)

    with open(fname, "rb") as f:
        new_a = np.load(f)["arr"]

    assert_(a.shape == new_a.shape) 
Example 9
Project: adversarial-policies   Author: HumanCompatibleAI   File: wrappers.py    License: MIT License
def save(self, save_dir):
        """Save trajectories to save_dir in NumPy compressed-array format, per-agent.

        Our format consists of a dictionary with keys -- e.g. 'observations', 'actions'
        and 'rewards' -- containing lists of NumPy arrays, one for each episode.

        :param save_dir: (str) path to save trajectories; will create directory if needed.
        :return: (list<str>) the paths the trajectories were saved to.
        """
        os.makedirs(save_dir, exist_ok=True)

        save_paths = []
        for dict_idx, agent_idx in enumerate(self.agent_indices):
            agent_dicts = self.full_traj_dicts[dict_idx]
            dump_dict = {k: np.asarray(v) for k, v in agent_dicts.items()}

            save_path = os.path.join(save_dir, f"agent_{agent_idx}.npz")
            np.savez(save_path, **dump_dict)
            save_paths.append(save_path)
        return save_paths 
Example 10
Project: lambda-packs   Author: ryfeus   File: test_io.py    License: MIT License
def test_closing_fid(self):
        # Test that issue #1517 (too many opened files) remains closed
        # It might be a "weak" test since failed to get triggered on
        # e.g. Debian sid of 2012 Jul 05 but was reported to
        # trigger the failure on Ubuntu 10.04:
        # http://projects.scipy.org/numpy/ticket/1517#comment:2
        with temppath(suffix='.npz') as tmp:
            np.savez(tmp, data='LOVELY LOAD')
            # We need to check if the garbage collector can properly close
            # numpy npz file returned by np.load when their reference count
            # goes to zero.  Python 3 running in debug mode raises a
            # ResourceWarning when file closing is left to the garbage
            # collector, so we catch the warnings.  Because ResourceWarning
            # is unknown in Python < 3.x, we take the easy way out and
            # catch all warnings.
            with suppress_warnings() as sup:
                sup.filter(Warning)  # TODO: specify exact message
                for i in range(1, 1025):
                    try:
                        np.load(tmp)["data"]
                    except Exception as e:
                        msg = "Failed to load data from a file: %s" % e
                        raise AssertionError(msg) 
Example 11
Project: lambda-packs   Author: ryfeus   File: test_io.py    License: MIT License
def test_npzfile_dict():
    s = BytesIO()
    x = np.zeros((3, 3))
    y = np.zeros((3, 3))

    np.savez(s, x=x, y=y)
    s.seek(0)

    z = np.load(s)

    assert_('x' in z)
    assert_('y' in z)
    assert_('x' in z.keys())
    assert_('y' in z.keys())

    for f, a in z.items():
        assert_(f in ['x', 'y'])
        assert_equal(a.shape, (3, 3))

    assert_(len(z.items()) == 2)

    for f in z:
        assert_(f in ['x', 'y'])

    assert_('x' in z.keys()) 
Example 12
Project: lambda-packs   Author: ryfeus   File: test_io.py    License: MIT License
def test_load_refcount():
    # Check that objects returned by np.load are directly freed based on
    # their refcount, rather than needing the gc to collect them.

    f = BytesIO()
    np.savez(f, [1, 2, 3])
    f.seek(0)

    assert_(gc.isenabled())
    gc.disable()
    try:
        gc.collect()
        np.load(f)
        # gc.collect returns the number of unreachable objects in cycles that
        # were found -- we are checking that no cycles were created by np.load
        n_objects_in_cycles = gc.collect()
    finally:
        gc.enable()
    assert_equal(n_objects_in_cycles, 0) 
Example 13
Project: lambda-packs   Author: ryfeus   File: test_format.py    License: MIT License
def test_large_archive():
    # Regression test for saving arrays whose total element count does not
    # fit in an int32.  See gh-7598 for details.
    try:
        a = np.empty((2**30, 2), dtype=np.uint8)
    except MemoryError:
        raise SkipTest("Could not create large file")

    fname = os.path.join(tempdir, "large_archive")

    with open(fname, "wb") as f:
        np.savez(f, arr=a)

    with open(fname, "rb") as f:
        new_a = np.load(f)["arr"]

    assert_(a.shape == new_a.shape) 
Example 14
Project: lambda-packs   Author: ryfeus   File: npyio.py    License: MIT License
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    If keyword arguments are given, then filenames are taken from the keywords.
    If arguments are passed in with no keywords, then stored file names are
    arr_0, arr_1, etc.

    Parameters
    ----------
    file : str
        File name of ``.npz`` file.
    args : Arguments
        Arrays to save to the file. They are stored under the names
        ``arr_0``, ``arr_1``, etc.
    kwds : Keyword arguments
        Arrays to save to the file. Each array is stored under its
        keyword name.

    See Also
    --------
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.

    """
    _savez(file, args, kwds, True) 
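
savez_compressed is a drop-in replacement for savez when file size matters more than write speed: the signature is identical, each entry is zlib-compressed, and the archive is read back with np.load exactly as before. A minimal sketch:

import numpy as np

a = np.zeros((1000, 1000))              # highly compressible
np.savez('plain.npz', a=a)              # stored uncompressed
np.savez_compressed('small.npz', a=a)   # noticeably smaller on disk

with np.load('small.npz') as data:
    assert (data['a'] == a).all()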
Example 15
Project: auto-alt-text-lambda-api   Author: abhisuri97   File: test_io.py    License: MIT License
def test_savez_filename_clashes(self):
        # Test that issue #852 is fixed
        # and savez works in a multithreaded environment

        def writer(error_list):
            with temppath(suffix='.npz') as tmp:
                arr = np.random.randn(500, 500)
                try:
                    np.savez(tmp, arr=arr)
                except OSError as err:
                    error_list.append(err)

        errors = []
        threads = [threading.Thread(target=writer, args=(errors,))
                   for j in range(3)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        if errors:
            raise AssertionError(errors) 
Example 16
Project: sklearn-audio-transfer-learning   Author: jordipons   File: audio_transfer_learning.py    License: ISC License
def extract_features_wrapper(paths, path2gt, model='vggish', save_as=False):
    """Wrapper function for extracting features (MusiCNN, VGGish or OpenL3) per batch.
       If a save_as string argument is passed, the features will be saved in
       the specified file.
    """
    if model == 'vggish':
        feature_extractor = extract_vggish_features
    elif model == 'openl3' or model == 'musicnn':
        feature_extractor = extract_other_features
    else:
        raise NotImplementedError('Current implementation only supports MusiCNN, VGGish and OpenL3 features')

    batch_size = config['batch_size']
    first_batch = True
    for batch_id in tqdm(range(ceil(len(paths)/batch_size))):
        batch_paths = paths[(batch_id)*batch_size:(batch_id+1)*batch_size]
        [x, y, refs] = feature_extractor(batch_paths, path2gt, model)
        if first_batch:
            [X, Y, IDS] = [x, y, refs]
            first_batch = False
        else:
            X = np.concatenate((X, x), axis=0)
            Y = np.concatenate((Y, y), axis=0)
            IDS = np.concatenate((IDS, refs), axis=0)
    
    if save_as:  # save data to file
        # create a directory where to store the extracted training features
        audio_representations_folder = DATA_FOLDER + 'audio_representations/'
        if not os.path.exists(audio_representations_folder):
            os.makedirs(audio_representations_folder)
        np.savez(audio_representations_folder + save_as, X=X, Y=Y, IDS=IDS)
        print('Audio features stored: ', save_as)

    return [X, Y, IDS] 
Example 17
Project: Recipes   Author: Lasagne   File: cifar10.py    License: MIT License
def load_dataset(path):
    download_dataset(path)

    # training data
    data = [np.load(os.path.join(path, 'cifar-10-batches-py',
                                 'data_batch_%d' % (i + 1))) for i in range(5)]
    X_train = np.vstack([d['data'] for d in data])
    y_train = np.hstack([np.asarray(d['labels'], np.int8) for d in data])

    # test data
    data = np.load(os.path.join(path, 'cifar-10-batches-py', 'test_batch'))
    X_test = data['data']
    y_test = np.asarray(data['labels'], np.int8)

    # reshape
    X_train = X_train.reshape(-1, 3, 32, 32)
    X_test = X_test.reshape(-1, 3, 32, 32)

    # normalize
    try:
        mean_std = np.load(os.path.join(path, 'cifar-10-mean_std.npz'))
        mean = mean_std['mean']
        std = mean_std['std']
    except IOError:
        mean = X_train.mean(axis=(0, 2, 3), keepdims=True).astype(np.float32)
        std = X_train.std(axis=(0, 2, 3), keepdims=True).astype(np.float32)
        np.savez(os.path.join(path, 'cifar-10-mean_std.npz'),
                 mean=mean, std=std)
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std

    return X_train, y_train, X_test, y_test 
Example 18
Project: lirpg   Author: Hwhitetooth   File: run_mujoco.py    License: MIT License
def runner(env, policy_func, load_model_path, timesteps_per_batch, number_trajs,
           stochastic_policy, save=False, reuse=False):

    # Setup network
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space, ac_space, reuse=reuse)
    U.initialize()
    # Prepare for rollouts
    # ----------------------------------------
    U.load_state(load_model_path)

    obs_list = []
    acs_list = []
    len_list = []
    ret_list = []
    for _ in tqdm(range(number_trajs)):
        traj = traj_1_generator(pi, env, timesteps_per_batch, stochastic=stochastic_policy)
        obs, acs, ep_len, ep_ret = traj['ob'], traj['ac'], traj['ep_len'], traj['ep_ret']
        obs_list.append(obs)
        acs_list.append(acs)
        len_list.append(ep_len)
        ret_list.append(ep_ret)
    if stochastic_policy:
        print('stochastic policy:')
    else:
        print('deterministic policy:')
    if save:
        filename = load_model_path.split('/')[-1] + '.' + env.spec.id
        np.savez(filename, obs=np.array(obs_list), acs=np.array(acs_list),
                 lens=np.array(len_list), rets=np.array(ret_list))
    avg_len = sum(len_list)/len(len_list)
    avg_ret = sum(ret_list)/len(ret_list)
    print("Average length:", avg_len)
    print("Average return:", avg_ret)
    return avg_len, avg_ret


# Sample one trajectory (until trajectory end) 
Example 19
Project: post--memorization-in-rnns   Author: distillpub   File: generate.py    License: MIT License
def preprocess_generate(**kwargs):
    with ContentDir() as content:
        content.download('text8.zip', 'http://mattmahoney.net/dc/text8.zip')

    with ZipFile(content.filepath('text8.zip')) as zip_reader:
        with zip_reader.open('text8') as text8_file:
            text = io.TextIOWrapper(text8_file).read()
            dataset = build_dataset(text, **kwargs)
            train, valid, test = split_dataset(dataset, **kwargs)

            print('saving train data ...')
            save_tfrecord(content.filepath('generate.train.tfrecord'),
                          train,
                          verbose=True)

            print('saving valid data ...')
            save_tfrecord(content.filepath('generate.valid.tfrecord'),
                          valid,
                          verbose=True)

            print('saving test data ...')
            save_tfrecord(content.filepath('generate.test.tfrecord'),
                          test,
                          verbose=True)

            print('saving maps ...')
            np.savez(content.filepath('generate.map.npz'),
                     char_map=dataset['char_map'],
                     verbose=True)

            print('saving metadata ...')
            metadata = {
                'observations': {
                    'train': len(train['length']),
                    'valid': len(valid['length']),
                    'test': len(test['length'])
                }
            }
            with open(content.filepath('generate.meta.json'), 'w') as fp:
                json.dump(metadata, fp) 
Example 20
Project: post--memorization-in-rnns   Author: distillpub   File: autocomplete.py    License: MIT License
def preprocess_autocomplete(**kwargs):
    with ContentDir() as content:
        content.download('text8.zip', 'http://mattmahoney.net/dc/text8.zip')

    with ZipFile(content.filepath('text8.zip')) as zip_reader:
        with zip_reader.open('text8') as text8_file:
            text = io.TextIOWrapper(text8_file).read()
            dataset = build_dataset(text, **kwargs)
            train, valid, test = split_dataset(dataset, **kwargs)

            print('saving train data ...')
            save_tfrecord(content.filepath('autocomplete.train.tfrecord'),
                          train,
                          verbose=True)

            print('saving valid data ...')
            save_tfrecord(content.filepath('autocomplete.valid.tfrecord'),
                          valid,
                          verbose=True)

            print('saving test data ...')
            save_tfrecord(content.filepath('autocomplete.test.tfrecord'),
                          test,
                          verbose=True)

            print('saving maps ...')
            np.savez(content.filepath('autocomplete.map.npz'),
                     word_map=dataset['word_map'],
                     char_map=dataset['char_map'],
                     verbose=True)

            print('saving metadata ...')
            metadata = {
                'observations': {
                    'train': len(train['length']),
                    'valid': len(valid['length']),
                    'test': len(test['length'])
                }
            }
            with open(content.filepath('autocomplete.meta.json'), 'w') as fp:
                json.dump(metadata, fp) 
Example 21
Project: HardRLWithYoutube   Author: MaxSobolMark   File: run_mujoco.py    License: MIT License
def runner(env, policy_func, load_model_path, timesteps_per_batch, number_trajs,
           stochastic_policy, save=False, reuse=False):

    # Setup network
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space, ac_space, reuse=reuse)
    U.initialize()
    # Prepare for rollouts
    # ----------------------------------------
    U.load_state(load_model_path)

    obs_list = []
    acs_list = []
    len_list = []
    ret_list = []
    for _ in tqdm(range(number_trajs)):
        traj = traj_1_generator(pi, env, timesteps_per_batch, stochastic=stochastic_policy)
        obs, acs, ep_len, ep_ret = traj['ob'], traj['ac'], traj['ep_len'], traj['ep_ret']
        obs_list.append(obs)
        acs_list.append(acs)
        len_list.append(ep_len)
        ret_list.append(ep_ret)
    if stochastic_policy:
        print('stochastic policy:')
    else:
        print('deterministic policy:')
    if save:
        filename = load_model_path.split('/')[-1] + '.' + env.spec.id
        np.savez(filename, obs=np.array(obs_list), acs=np.array(acs_list),
                 lens=np.array(len_list), rets=np.array(ret_list))
    avg_len = sum(len_list)/len(len_list)
    avg_ret = sum(ret_list)/len(ret_list)
    print("Average length:", avg_len)
    print("Average return:", avg_ret)
    return avg_len, avg_ret


# Sample one trajectory (until trajectory end) 
Example 22
Project: tartarus   Author: sergiooramas   File: load_w2v.py    License: MIT License
def save_sparse_csr(filename, array):
    np.savez(filename, data=array.data, indices=array.indices,
             indptr=array.indptr, shape=array.shape)
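
This idiom (repeated in the next two examples) caches a scipy.sparse CSR matrix by saving its component arrays. For completeness, here is a minimal matching loader; load_sparse_csr is an illustrative name, not from the source, and it assumes filename includes the '.npz' suffix that np.savez appends when it is missing:

import numpy as np
from scipy.sparse import csr_matrix

def load_sparse_csr(filename):
    # Rebuild the CSR matrix from the component arrays written
    # by save_sparse_csr.
    loader = np.load(filename)
    return csr_matrix((loader['data'], loader['indices'], loader['indptr']),
                      shape=loader['shape'])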
Example 23
Project: tartarus   Author: sergiooramas   File: load_vsm.py    License: MIT License
def save_sparse_csr(filename, array):
    np.savez(filename, data=array.data, indices=array.indices,
             indptr=array.indptr, shape=array.shape)
Example 24
Project: tartarus   Author: sergiooramas   File: create_patches.py    License: MIT License
def save_sparse_csr(filename, array):
    np.savez(filename, data=array.data, indices=array.indices,
             indptr=array.indptr, shape=array.shape)
Example 25
Project: jwalk   Author: jwplayer   File: io.py    License: Apache License 2.0
def save_graph(filename, csr_matrix, labels=None):
    np.savez(filename,
             data=csr_matrix.data,
             indices=csr_matrix.indices,
             indptr=csr_matrix.indptr,
             shape=csr_matrix.shape,
             labels=labels)
    return filename 
Example 26
Project: ocsvm-anomaly-detection   Author: hiram64   File: cae.py    License: MIT License
def main():
    """main function"""
    args = parse_args()
    data_path = args.data_path
    height = args.height
    width = args.width
    channel = args.channel
    num_epoch = args.num_epoch
    batch_size = args.batch_size
    output_path = args.output_path

    # load CIFAR-10 data from data directory
    all_image, all_label = load_data(data_path)

    # build model and train
    autoencoder = build_cae_model(height, width, channel)
    autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
    autoencoder.fit(all_image, all_image,
                    epochs=num_epoch,
                    batch_size=batch_size,
                    shuffle=True)

    # inference from encoder
    layer_name = 'enc'
    encoded_layer = Model(inputs=autoencoder.input, outputs=autoencoder.get_layer(layer_name).output)
    enc_out = encoded_layer.predict(all_image)

    # flat features for OC-SVM input
    enc_out = flat_feature(enc_out)

    # save cae output
    np.savez(output_path, ae_out=enc_out, labels=all_label) 
Example 27
Project: e2c   Author: ericjang   File: plane_data2.py    License: Apache License 2.0
def save(self):
    print("Saving P,U...")
    np.savez(self.cache, P=self.P, U=self.U) 
Example 28
Project: TreeGAN   Author: seowok   File: FPD.py    License: MIT License
def save_statistics(real_pointclouds, path, model, batch_size, dims, cuda):
    m, s = calculate_activation_statistics(real_pointclouds, model, batch_size,
                                         dims, cuda)
    np.savez(path, m=m, s=s)
    print('save done !!!') 
Example 29
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def roundtrip(self, *args, **kwargs):
        RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
        try:
            for n, arr in enumerate(self.arr):
                reloaded = self.arr_reloaded['arr_%d' % n]
                assert_equal(arr, reloaded)
                assert_equal(arr.dtype, reloaded.dtype)
                assert_equal(arr.flags.fnc, reloaded.flags.fnc)
        finally:
            # delete tempfile, must be done here on windows
            if self.arr_reloaded.fid:
                self.arr_reloaded.fid.close()
                os.remove(self.arr_reloaded.fid.name) 
Example 30
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_big_arrays(self):
        L = (1 << 31) + 100000
        a = np.empty(L, dtype=np.uint8)
        with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
            np.savez(tmp, a=a)
            del a
            npfile = np.load(tmp)
            a = npfile['a']  # Should succeed
            npfile.close()
            del a  # Avoid pyflakes unused variable warning.