Python numpy.savez() Examples

The following are 30 code examples of numpy.savez(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the module numpy.
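For orientation before the project excerpts, here is a minimal round-trip sketch (an editor's illustration, not taken from any of the projects below): np.savez() writes several arrays into one .npz archive, and np.load() returns a dict-like NpzFile for reading them back.

import numpy as np

x = np.arange(10)
y = np.zeros((3, 3))
np.savez('example.npz', x=x, y=y)      # keyword names become the stored array names
with np.load('example.npz') as data:   # the context manager closes the file handle
    assert (data['x'] == x).all()
    assert (data['y'] == y).all()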
Example #1
Source File: npyio.py    From lambda-packs with MIT License
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    If keyword arguments are given, then filenames are taken from the keywords.
    If arguments are passed in with no keywords, then stored file names are
    arr_0, arr_1, etc.

    Parameters
    ----------
    file : str
        File name of ``.npz`` file.
    args : Arguments
        Function arguments.
    kwds : Keyword arguments
        Keywords.

    See Also
    --------
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.

    """
    _savez(file, args, kwds, True) 
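A usage note on the function above (my own sketch, not part of the vendored source): savez_compressed accepts the same arguments as savez, so positional arrays are stored as arr_0, arr_1, etc., while keyword arrays keep their keyword names.

import numpy as np

a = np.ones(5)
b = np.eye(3)
np.savez_compressed('archive.npz', a, named=b)  # stored as 'arr_0' and 'named'
with np.load('archive.npz') as data:
    print(sorted(data.files))                   # ['arr_0', 'named']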
Example #2
Source File: data_collection_wrapper.py    From robosuite with MIT License
def _flush(self):
        """
        Method to flush internal state to disk.
        """
        t1, t2 = str(time.time()).split(".")
        state_path = os.path.join(self.ep_directory, "state_{}_{}.npz".format(t1, t2))
        if hasattr(self.env, "unwrapped"):
            env_name = self.env.unwrapped.__class__.__name__
        else:
            env_name = self.env.__class__.__name__
        np.savez(
            state_path,
            states=np.array(self.states),
            action_infos=self.action_infos,
            env=env_name,
        )
        self.states = []
        self.action_infos = [] 
Example #3
Source File: test_forward.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_consistency(dump=False):
    shape = (299, 299)
    _get_model()
    _get_data(shape)
    if dump:
        _dump_images(shape)
        gt = None
    else:
        gt = {n: mx.nd.array(a) for n, a in np.load('data/inception-v3-dump.npz').items()}
    data = np.load('data/test_images_%d_%d.npy'%shape)
    sym, arg_params, aux_params = mx.model.load_checkpoint('model/Inception-7', 1)
    arg_params['data'] = data
    arg_params['softmax_label'] = np.random.randint(low=1, high=1000, size=(data.shape[0],))
    ctx_list = [{'ctx': mx.gpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}},
                {'ctx': mx.cpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}}]
    gt = check_consistency(sym, ctx_list, arg_params=arg_params, aux_params=aux_params,
                           tol=1e-3, grad_req='null', raise_on_err=False, ground_truth=gt)
    if dump:
        np.savez('data/inception-v3-dump.npz', **{n: a.asnumpy() for n, a in gt.items()}) 
Example #4
Source File: process.py    From scanorama with MIT License
def process_tab(fname, min_trans=MIN_TRANSCRIPTS):
    X, cells, genes = load_tab(fname)

    gt_idx = [ i for i, s in enumerate(np.sum(X != 0, axis=1))
               if s >= min_trans ]
    X = X[gt_idx, :]
    cells = cells[gt_idx]
    if len(gt_idx) == 0:
        print('Warning: 0 cells passed QC in {}'.format(fname))
    if fname.endswith('.txt'):
        cache_prefix = '.'.join(fname.split('.')[:-1])
    elif fname.endswith('.txt.gz'):
        cache_prefix = '.'.join(fname.split('.')[:-2])
    elif fname.endswith('.tsv'):
        cache_prefix = '.'.join(fname.split('.')[:-1])
    elif fname.endswith('.tsv.gz'):
        cache_prefix = '.'.join(fname.split('.')[:-2])
    else:
        sys.stderr.write('Tab files should end with ".txt" or ".tsv"\n')
        exit(1)
        
    cache_fname = cache_prefix + '.npz'
    np.savez(cache_fname, X=X, genes=genes)

    return X, cells, genes 
Example #5
Source File: utils.py    From DeepLab_v3 with MIT License
def save_load_means(means_filename, image_filenames, recalculate=False):
    '''
    Calculate and save the means of the RGB channels of an image dataset if the
    mean file does not exist; otherwise, read the means directly from the mean file.
    means_filename: npz filename for image channel means
    image_filenames: list of image filenames
    recalculate: recalculate the image channel means regardless of whether the mean file exists
    '''

    if (not os.path.isfile(means_filename)) or recalculate:
        print('Calculating pixel means for each channel of images...')
        channel_means = image_channel_means(image_filenames=image_filenames)
        np.savez(means_filename, channel_means=channel_means)
    else:
        channel_means = np.load(means_filename)['channel_means']

    return channel_means 
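A hypothetical invocation of the helper above (image_paths stands in for a real list of image file paths):

channel_means = save_load_means('means.npz', image_filenames=image_paths)
# The first call computes and caches the means in the .npz file; later calls
# read the cache. Pass recalculate=True to force a rebuild.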
Example #6
Source File: test_io.py    From auto-alt-text-lambda-api with MIT License
def test_savez_filename_clashes(self):
        # Test that issue #852 is fixed
        # and savez functions in multithreaded environment

        def writer(error_list):
            with temppath(suffix='.npz') as tmp:
                arr = np.random.randn(500, 500)
                try:
                    np.savez(tmp, arr=arr)
                except OSError as err:
                    error_list.append(err)

        errors = []
        threads = [threading.Thread(target=writer, args=(errors,))
                   for j in range(3)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        if errors:
            raise AssertionError(errors) 
Example #7
Source File: test_format.py    From lambda-packs with MIT License
def test_large_archive():
    # Regression test for saving arrays whose total number of elements does not
    # fit in an int32.  See gh-7598 for details.
    try:
        a = np.empty((2**30, 2), dtype=np.uint8)
    except MemoryError:
        raise SkipTest("Could not create large file")

    fname = os.path.join(tempdir, "large_archive")

    with open(fname, "wb") as f:
        np.savez(f, arr=a)

    with open(fname, "rb") as f:
        new_a = np.load(f)["arr"]

    assert_(a.shape == new_a.shape) 
Example #8
Source File: test_io.py    From lambda-packs with MIT License
def test_load_refcount():
    # Check that objects returned by np.load are directly freed based on
    # their refcount, rather than needing the gc to collect them.

    f = BytesIO()
    np.savez(f, [1, 2, 3])
    f.seek(0)

    assert_(gc.isenabled())
    gc.disable()
    try:
        gc.collect()
        np.load(f)
        # gc.collect returns the number of unreachable objects in cycles that
        # were found -- we are checking that no cycles were created by np.load
        n_objects_in_cycles = gc.collect()
    finally:
        gc.enable()
    assert_equal(n_objects_in_cycles, 0) 
Example #9
Source File: test_io.py    From lambda-packs with MIT License
def test_npzfile_dict():
    s = BytesIO()
    x = np.zeros((3, 3))
    y = np.zeros((3, 3))

    np.savez(s, x=x, y=y)
    s.seek(0)

    z = np.load(s)

    assert_('x' in z)
    assert_('y' in z)
    assert_('x' in z.keys())
    assert_('y' in z.keys())

    for f, a in z.items():
        assert_(f in ['x', 'y'])
        assert_equal(a.shape, (3, 3))

    assert_(len(z.items()) == 2)

    for f in z:
        assert_(f in ['x', 'y'])

    assert_('x' in z.keys()) 
Example #10
Source File: test_io.py    From lambda-packs with MIT License
def test_closing_fid(self):
        # Test that issue #1517 (too many opened files) remains closed
        # It might be a "weak" test since failed to get triggered on
        # e.g. Debian sid of 2012 Jul 05 but was reported to
        # trigger the failure on Ubuntu 10.04:
        # http://projects.scipy.org/numpy/ticket/1517#comment:2
        with temppath(suffix='.npz') as tmp:
            np.savez(tmp, data='LOVELY LOAD')
            # We need to check if the garbage collector can properly close
            # numpy npz file returned by np.load when their reference count
            # goes to zero.  Python 3 running in debug mode raises a
            # ResourceWarning when file closing is left to the garbage
            # collector, so we catch the warnings.  Because ResourceWarning
            # is unknown in Python < 3.x, we take the easy way out and
            # catch all warnings.
            with suppress_warnings() as sup:
                sup.filter(Warning)  # TODO: specify exact message
                for i in range(1, 1025):
                    try:
                        np.load(tmp)["data"]
                    except Exception as e:
                        msg = "Failed to load data from a file: %s" % e
                        raise AssertionError(msg) 
Example #11
Source File: test_io.py    From recruit with Apache License 2.0
def test_savez_filename_clashes(self):
        # Test that issue #852 is fixed
        # and savez functions in multithreaded environment

        def writer(error_list):
            with temppath(suffix='.npz') as tmp:
                arr = np.random.randn(500, 500)
                try:
                    np.savez(tmp, arr=arr)
                except OSError as err:
                    error_list.append(err)

        errors = []
        threads = [threading.Thread(target=writer, args=(errors,))
                   for j in range(3)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        if errors:
            raise AssertionError(errors) 
Example #12
Source File: test_io.py    From recruit with Apache License 2.0
def test_closing_fid(self):
        # Test that issue #1517 (too many opened files) remains closed
        # It might be a "weak" test since failed to get triggered on
        # e.g. Debian sid of 2012 Jul 05 but was reported to
        # trigger the failure on Ubuntu 10.04:
        # http://projects.scipy.org/numpy/ticket/1517#comment:2
        with temppath(suffix='.npz') as tmp:
            np.savez(tmp, data='LOVELY LOAD')
            # We need to check if the garbage collector can properly close
            # numpy npz file returned by np.load when their reference count
            # goes to zero.  Python 3 running in debug mode raises a
            # ResourceWarning when file closing is left to the garbage
            # collector, so we catch the warnings.  Because ResourceWarning
            # is unknown in Python < 3.x, we take the easy way out and
            # catch all warnings.
            with suppress_warnings() as sup:
                sup.filter(Warning)  # TODO: specify exact message
                for i in range(1, 1025):
                    try:
                        np.load(tmp)["data"]
                    except Exception as e:
                        msg = "Failed to load data from a file: %s" % e
                        raise AssertionError(msg) 
Example #13
Source File: test_io.py    From recruit with Apache License 2.0
def test_npzfile_dict():
    s = BytesIO()
    x = np.zeros((3, 3))
    y = np.zeros((3, 3))

    np.savez(s, x=x, y=y)
    s.seek(0)

    z = np.load(s)

    assert_('x' in z)
    assert_('y' in z)
    assert_('x' in z.keys())
    assert_('y' in z.keys())

    for f, a in z.items():
        assert_(f in ['x', 'y'])
        assert_equal(a.shape, (3, 3))

    assert_(len(z.items()) == 2)

    for f in z:
        assert_(f in ['x', 'y'])

    assert_('x' in z.keys()) 
Example #14
Source File: test_format.py    From recruit with Apache License 2.0
def test_large_archive():
    # Regression test for saving arrays whose total number of elements does not
    # fit in an int32.  See gh-7598 for details.
    try:
        a = np.empty((2**30, 2), dtype=np.uint8)
    except MemoryError:
        pytest.skip("Could not create large file")

    fname = os.path.join(tempdir, "large_archive")

    with open(fname, "wb") as f:
        np.savez(f, arr=a)

    with open(fname, "rb") as f:
        new_a = np.load(f)["arr"]

    assert_(a.shape == new_a.shape) 
Example #15
Source File: wrappers.py    From adversarial-policies with MIT License
def save(self, save_dir):
        """Save trajectories to save_dir in NumPy compressed-array format, per-agent.

        Our format consists of a dictionary with keys -- e.g. 'observations', 'actions'
        and 'rewards' -- containing lists of NumPy arrays, one for each episode.

        :param save_dir: (str) path to save trajectories; will create directory if needed.
        :return: (list) the paths of the saved .npz files, one per agent.
        """
        os.makedirs(save_dir, exist_ok=True)

        save_paths = []
        for dict_idx, agent_idx in enumerate(self.agent_indices):
            agent_dicts = self.full_traj_dicts[dict_idx]
            dump_dict = {k: np.asarray(v) for k, v in agent_dicts.items()}

            save_path = os.path.join(save_dir, f"agent_{agent_idx}.npz")
            np.savez(save_path, **dump_dict)
            save_paths.append(save_path)
        return save_paths 
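Reading such an archive back might look like the following sketch (an assumption on my part, not from the project): because episodes can vary in length, np.asarray may produce object arrays, so allow_pickle=True can be required on load. The key 'observations' is one of the example keys the docstring mentions.

import numpy as np

with np.load('trajectories/agent_0.npz', allow_pickle=True) as data:
    episodes = data['observations']  # one entry per episode
    print(len(episodes))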
Example #16
Source File: test_io.py    From auto-alt-text-lambda-api with MIT License
def roundtrip(self, *args, **kwargs):
        RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
        try:
            for n, arr in enumerate(self.arr):
                reloaded = self.arr_reloaded['arr_%d' % n]
                assert_equal(arr, reloaded)
                assert_equal(arr.dtype, reloaded.dtype)
                assert_equal(arr.flags.fnc, reloaded.flags.fnc)
        finally:
            # delete tempfile, must be done here on windows
            if self.arr_reloaded.fid:
                self.arr_reloaded.fid.close()
                os.remove(self.arr_reloaded.fid.name) 
Example #17
Source File: test_io.py    From lambda-packs with MIT License
def test_savez_load(self):
        # Test that pathlib.Path instances can be used with savez.
        with temppath(suffix='.npz') as path:
            path = Path(path)
            np.savez(path, lab='place holder')
            with np.load(path) as data:
                assert_array_equal(data['lab'], 'place holder') 
Example #18
Source File: CUB_preprocess_token.py    From show-adapt-and-tell with MIT License
def clean_words(data):
    dict = {}
    freq = {}
    # start with 1
    idx = 1
    sentence_count = 0
    eliminate = 0
    max_w = 30
    for k in tqdm(range(len(data['caption']))):
        sen = data['caption'][k]
        filename = data['file_name'][k]
        # skip the no image description
        words = re.split(' ', sen)
        # pop the last u'.'
        n = len(words)
        if n <= max_w:
            sentence_count += 1
            for word in words:
                for p in string.punctuation:
                    if p in word:
                        word = word.replace(p,'')
                word = word.lower()
                if word not in dict.keys():
                    dict[word] = idx
                    idx += 1
                    freq[word] = 1
                else:
                    freq[word] += 1
        else:
            eliminate += 1
    print('Threshold(max_words) =', max_w)
    print('Eliminate =', eliminate)
    print('Total sentence_count =', sentence_count)
    print('Number of different words =', len(dict.keys()))
    print('Saving....')
    np.savez('cleaned_words', dict=dict, freq=freq)
    return dict, freq 
Example #19
Source File: preprocess_token.py    From show-adapt-and-tell with MIT License
def clean_words(data):
    dict = {}
    freq = {}
    # start with 1
    idx = 1
    sentence_count = 0
    eliminate = 0
    max_w = 30
    for k in tqdm(range(len(data['caption_entity']))):
        sen = data['caption_entity'][k]
        filename = data['file_name'][k]
        # skip the no image description
        words = re.split(' ', sen)
        # pop the last u'.'
        n = len(words)
        if "" in words:
            words.remove("")
        if n <= max_w:
            sentence_count += 1
            for word in words:
                if "\n" in word:
                    word = word.replace("\n", "")
                for p in string.punctuation:
                    if p in word:
                        word = word.replace(p, '')
                word = word.lower()
                if word not in dict.keys():
                    dict[word] = idx
                    idx += 1
                    freq[word] = 1
                else:
                    freq[word] += 1
        else:
            eliminate += 1
    print('Threshold(max_words) =', max_w)
    print('Eliminate =', eliminate)
    print('Total sentence_count =', sentence_count)
    print('Number of different words =', len(dict.keys()))
    print('Saving....')
    np.savez('K_cleaned_words', dict=dict, freq=freq)
    return dict, freq 
Example #20
Source File: run_mujoco.py    From rl_graph_generation with BSD 3-Clause "New" or "Revised" License
def runner(env, policy_func, load_model_path, timesteps_per_batch, number_trajs,
           stochastic_policy, save=False, reuse=False):

    # Setup network
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space, ac_space, reuse=reuse)
    U.initialize()
    # Prepare for rollouts
    # ----------------------------------------
    U.load_state(load_model_path)

    obs_list = []
    acs_list = []
    len_list = []
    ret_list = []
    for _ in tqdm(range(number_trajs)):
        traj = traj_1_generator(pi, env, timesteps_per_batch, stochastic=stochastic_policy)
        obs, acs, ep_len, ep_ret = traj['ob'], traj['ac'], traj['ep_len'], traj['ep_ret']
        obs_list.append(obs)
        acs_list.append(acs)
        len_list.append(ep_len)
        ret_list.append(ep_ret)
    if stochastic_policy:
        print('stochastic policy:')
    else:
        print('deterministic policy:')
    if save:
        filename = load_model_path.split('/')[-1] + '.' + env.spec.id
        np.savez(filename, obs=np.array(obs_list), acs=np.array(acs_list),
                 lens=np.array(len_list), rets=np.array(ret_list))
    avg_len = sum(len_list)/len(len_list)
    avg_ret = sum(ret_list)/len(ret_list)
    print("Average length:", avg_len)
    print("Average return:", avg_ret)
    return avg_len, avg_ret


# Sample one trajectory (until trajectory end) 
Example #21
Source File: test_io.py    From auto-alt-text-lambda-api with MIT License
def test_named_arrays(self):
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = BytesIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(a, l['file_a'])
        assert_equal(b, l['file_b']) 
Example #22
Source File: utils.py    From ngraph-onnx with Apache License 2.0
def save_results(dest_dir, filename, kwargs):
    # currently it is assumed that data is an array object with timings.
    try:
        os.makedirs(dest_dir, exist_ok=True)
    except OSError:
        pass
    np.savez(os.path.join(dest_dir, filename), **kwargs) 
Example #23
Source File: pretreatment.py    From 12306 with MIT License
def load_data(path='data.npz'):
    if not os.path.isfile(path):
        texts, imgs = pretreat()
        np.savez(path, texts=texts, images=imgs)
    f = np.load(path)
    return f['texts'], f['images'] 
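One design note on the pattern above: np.load on an .npz file keeps the underlying file handle open until the returned NpzFile is closed (the test_closing_fid examples earlier probe exactly this). A variant that closes the handle eagerly, as a sketch:

def load_data(path='data.npz'):
    if not os.path.isfile(path):
        texts, imgs = pretreat()
        np.savez(path, texts=texts, images=imgs)
    with np.load(path) as f:  # arrays are read into memory before the file closes
        return f['texts'], f['images']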
Example #24
Source File: char_encdec.py    From knmt with GNU General Public License v3.0
def do_generate_voc_table(args):
    voc_list = make_voc_list_from_text(args.filename, max_nb_ex = args.max_nb_ex, frequency_threshold = args.frequency_threshold)
    ced, charlist, chardict = load_encdec_from_config(args.config, args.model)
    if args.gpu is not None:
        chainer.cuda.Device(args.gpu).use()
        ced = ced.to_gpu(args.gpu)
    encodings = generate_voc_encodings(ced.enc, charlist, voc_list, mb_size=args.mb_size)
    json.dump(voc_list, open(args.dest + ".gen.voc_list", "w"))
    np.savez(args.dest + ".gen.encodings", enc=encodings) 
Example #25
Source File: test_io.py    From auto-alt-text-lambda-api with MIT License
def test_BagObj(self):
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = BytesIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
        assert_equal(a, l.f.file_a)
        assert_equal(b, l.f.file_b) 
Example #26
Source File: utils.py    From OpenQA with MIT License
def save_sparse_csr(filename, matrix, metadata=None):
    data = {
        'data': matrix.data,
        'indices': matrix.indices,
        'indptr': matrix.indptr,
        'shape': matrix.shape,
        'metadata': metadata,
    }
    np.savez(filename, **data) 
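The matching loader (a sketch assuming scipy.sparse is available; it is not shown in the excerpt) rebuilds the CSR matrix from the three component arrays:

import numpy as np
import scipy.sparse as sp

def load_sparse_csr(filename):
    loader = np.load(filename, allow_pickle=True)  # 'metadata' is stored as an object array
    matrix = sp.csr_matrix((loader['data'], loader['indices'], loader['indptr']),
                           shape=loader['shape'])
    return matrix, loader['metadata'].item()       # .item() unwraps the 0-d object array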
Example #27
Source File: utils.py    From tensornets with MIT License
def save(model, weights_path, sess):
    if sess is None:
        sess = tf.get_default_session()
        assert sess is not None, 'The default session should be given.'

    weights = get_weights(model)
    names = [w.name for w in weights]
    values = sess.run(weights)
    np.savez(weights_path, names=names, values=values) 
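Reading the archive back (my sketch; how the values are pushed into a model is framework-specific): since the weight tensors generally have different shapes, 'values' is stored as an object array and needs allow_pickle=True on load.

import numpy as np

data = np.load('weights.npz', allow_pickle=True)
for name, value in zip(data['names'], data['values']):
    print(name, value.shape)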
Example #28
Source File: test_format.py    From recruit with Apache License 2.0
def test_empty_npz():
    # Test for gh-9989
    fname = os.path.join(tempdir, "nothing.npz")
    np.savez(fname)
    np.load(fname) 
Example #29
Source File: prepare_dataset.py    From DepthNets with MIT License
def prepare_test():
    ids = []
    orientations = []
    with open("%s/list_valid_test.txt" % root_dir) as f:
        for line in f:
            line = line.rstrip().split(",")
            if line[1] == "test":
                ids.append(line[0])
                orientations.append(line[2])
    y_keypts, z_keypts = get_keypts_from_ids(ids, "valid") # yes, valid
    np.savez(file="%s/test" % root_dir,
             y_keypts=y_keypts,
             z_keypts=z_keypts,
             ids=ids,
             orientations=orientations) 
Example #30
Source File: knowledge_graph.py    From dgl with Apache License 2.0
def _save_sparse_csr(filename, array):
    np.savez(filename, data=array.data, indices=array.indices,
             indptr=array.indptr, shape=array.shape)