Python h5py.File() Examples

The following are 29 code examples of h5py.File(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module h5py, or try the search function.
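In current h5py (3.0 and later) the default mode for h5py.File() is 'r' (read-only); older releases defaulted to 'a' (read/write, create if missing), so code that writes should pass an explicit 'w' or 'a'. A minimal sketch of the read/write pattern, using a hypothetical example.h5:

import h5py
import numpy as np

# Write: 'w' creates the file, truncating it if it already exists.
with h5py.File('example.h5', 'w') as f:
    f.create_dataset('data', data=np.arange(10))

# Read: 'r' opens an existing file read-only.
with h5py.File('example.h5', 'r') as f:
    data = f['data'][:]  # slicing copies the dataset into a numpy array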
Example #1
Source File: data_prep_util.py    From pointnet-registration-framework with MIT License 6 votes
def save_h5_data_label_normal(h5_filename, data, label, normal,
		data_dtype='float32', label_dtype='uint8', normal_dtype='float32'):
    h5_fout = h5py.File(h5_filename, 'w')
    h5_fout.create_dataset(
            'data', data=data,
            compression='gzip', compression_opts=4,
            dtype=data_dtype)
    h5_fout.create_dataset(
            'normal', data=normal,
            compression='gzip', compression_opts=4,
            dtype=normal_dtype)
    h5_fout.create_dataset(
            'label', data=label,
            compression='gzip', compression_opts=1,
            dtype=label_dtype)
    h5_fout.close()


# Write numpy array data and label to h5_filename 
Example #2
Source File: leike_ensslin_2019.py    From dustmaps with GNU General Public License v2.0 6 votes
def __init__(self, map_fname=None):
        """
        Args:
            map_fname (Optional[str]): Filename of the map. Defaults
                to :obj:`None`, meaning that the default location
                is used.
        """

        if map_fname is None:
            map_fname = os.path.join(
                data_dir(),
                'leike_ensslin_2019',
                'simple_cube.h5'
            )

        self._data = {}

        with h5py.File(map_fname, 'r') as f:
            self._data['mean'] = f['mean'][:]
            self._data['std'] = f['std'][:]

        self._shape = self._data['mean'].shape 
Example #3
Source File: xianci.py    From pyscf with Apache License 2.0 6 votes
def write_integrals(xci, orb):
    mol = xci.mol
    orbsym = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, orb)
    h1e = reduce(numpy.dot, (orb.T, xci.get_hcore(), orb))
    norb = orb.shape[1]
    if xci._eri is not None:
        h2e = ao2mo.restore(1, ao2mo.full(xci._eri, orb), norb)
    else:
        h2e = ao2mo.restore(1, ao2mo.full(mol, orb), norb)

    with h5py.File(xci.integralfile, 'w') as f:
        f['h1e']    = h1e
        f['h2e']    = h2e
        f['norb'  ] = numpy.array(norb, dtype=numpy.int32)
        f['group' ] = mol.groupname
        f['orbsym'] = numpy.asarray(orbsym, dtype=numpy.int32)
        f['ecore' ] = mol.energy_nuc() 
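In this example, plain assignment such as f['h1e'] = h1e implicitly creates a dataset from the right-hand side. Reading the file back is symmetric; a hedged sketch (the decode step is an assumption about how the string round-trips, not part of pyscf):

with h5py.File(xci.integralfile, 'r') as f:
    h1e = f['h1e'][:]             # array datasets read back via slicing
    norb = int(f['norb'][()])     # scalar (0-d) datasets read back via [()]
    group = f['group'][()]
    if isinstance(group, bytes):  # h5py may return stored strings as bytes
        group = group.decode()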
Example #4
Source File: hcp.py    From neuropythy with GNU Affero General Public License v3.0 6 votes
def download(self, sid):
        '''
        ny.data['hcp'].download(sid) downloads all the data understood by neuropythy for the given
        HCP subject id; the data are downloaded from Amazon S3 into the path given by the
        'hcp_auto_path' config item, and a list of the downloaded files is returned.
        '''
        # we can do this in quite a sneaky way: get the subject, get their filemap, force all the
        # paths in the subject to be downloaded using the pseudo-path, return the cache path!
        sub   = self.subjects[sid]
        fmap  = sub.meta_data['file_map']
        ppath = fmap.path
        fls   = []
        logging.info('Downloading HCP subject %s structure data...' % (sid,))
        for fl in six.iterkeys(fmap.data_files):
            logging.info('  * Downloading file %s for subject %s' % (fl, sid))
            try:
                fls.append(ppath.local_path(fl))
            except ValueError as e:
                if len(e.args) != 1 or not e.args[0].startswith('getpath:'): raise
                else: logging.info('    (File %s not found for subject %s)' % (fl, sid))
        logging.info('Subject %s download complete!' % (sid,))
        return fls
# we wrap this in a lambda so that it gets loaded when requested (in case the config changes between
# when this gets run and when the dataset gets requested) 
Example #5
Source File: uintermediates_slow.py    From pyscf with Apache License 2.0 6 votes
def cc_Wvvvv(t1,t2,eris):
    tau = make_tau(t2,t1,t1)
    #eris_vovv = np.array(eris.ovvv).transpose(1,0,3,2)
    #tmp = einsum('mb,amef->abef',t1,eris_vovv)
    #Wabef = eris.vvvv - tmp + tmp.transpose(1,0,2,3)
    #Wabef += 0.25*einsum('mnab,mnef->abef',tau,eris.oovv)
    if t1.dtype == np.complex128: ds_type = 'c16'
    else: ds_type = 'f8'
    _tmpfile1 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
    fimd = h5py.File(_tmpfile1.name, 'w')
    nocc, nvir = t1.shape
    Wabef = fimd.create_dataset('vvvv', (nvir,nvir,nvir,nvir), ds_type)
    for a in range(nvir):
        Wabef[a] = eris.vvvv[a] 
        Wabef[a] -= einsum('mb,mfe->bef',t1,eris.ovvv[:,a,:,:]) 
        Wabef[a] += einsum('m,mbfe->bef',t1[:,a],eris.ovvv) 
        Wabef[a] += 0.25*einsum('mnb,mnef->bef',tau[:,:,a,:],eris.oovv)
    return Wabef 
Example #6
Source File: uintermediates_slow.py    From pyscf with Apache License 2.0 6 votes
def Wvvvv(t1,t2,eris):
    tau = make_tau(t2,t1,t1)
    #Wabef = cc_Wvvvv(t1,t2,eris) + 0.25*einsum('mnab,mnef->abef',tau,eris.oovv)
    if t1.dtype == np.complex128: ds_type = 'c16'
    else: ds_type = 'f8'
    _tmpfile1 = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
    fimd = h5py.File(_tmpfile1.name, 'w')
    nocc, nvir = t1.shape
    Wabef = fimd.create_dataset('vvvv', (nvir,nvir,nvir,nvir), ds_type)
    #_cc_Wvvvv = cc_Wvvvv(t1,t2,eris)
    for a in range(nvir):
        #Wabef[a] = _cc_Wvvvv[a]
        Wabef[a] = eris.vvvv[a] 
        Wabef[a] -= einsum('mb,mfe->bef',t1,eris.ovvv[:,a,:,:]) 
        Wabef[a] += einsum('m,mbfe->bef',t1[:,a],eris.ovvv) 
        #Wabef[a] += 0.25*einsum('mnb,mnef->bef',tau[:,:,a,:],eris.oovv)

        #Wabef[a] += 0.25*einsum('mnb,mnef->bef',tau[:,:,a,:],eris.oovv) 
        Wabef[a] += 0.5*einsum('mnb,mnef->bef',tau[:,:,a,:],eris.oovv) 
    return Wabef 
Example #7
Source File: tddft_iter.py    From pyscf with Apache License 2.0 6 votes
def load_kernel_method(self, kernel_fname, kernel_format="npy", kernel_path_hdf5=None, **kw):
      """ Loads from file and initializes .kernel field... Useful? Rewrite?"""
    
      if kernel_format == "npy":
          self.kernel = self.dtype(np.load(kernel_fname))
      elif kernel_format == "txt":
          self.kernel = np.loadtxt(kernel_fname, dtype=self.dtype)
      elif kernel_format == "hdf5":
          import h5py
          if kernel_path_hdf5 is None:
              raise ValueError("kernel_path_hdf5 not set while trying to read kernel from hdf5 file.")
          with h5py.File(kernel_fname, "r") as f:
              self.kernel = f[kernel_path_hdf5][()]
      else:
          raise ValueError("Wrong format for loading kernel, must be: npy, txt or hdf5, got " + kernel_format)

      if len(self.kernel.shape) > 1:
          raise ValueError("The kernel must be saved in packed format in order to be loaded!")
      
      assert self.nprod*(self.nprod+1)//2 == self.kernel.size, "wrong size for loaded kernel: %r %r "%(self.nprod*(self.nprod+1)//2, self.kernel.size)
      self.kernel_dim = self.nprod 
Example #8
Source File: m_restart.py    From pyscf with Apache License 2.0 6 votes
def write_rst_h5py(data, filename=None):
    import h5py
    if filename is None:
        filename = 'SCREENED_COULOMB.hdf5'

    with h5py.File(filename, 'w') as data_file:
        try:
            data_file.create_dataset('W_c', data=data)
        except Exception:
            print("failed writing data to SCREENED_COULOMB.hdf5")
            print(type(data))

    msg = 'Full matrix elements of screened interactions (W_c) stored in {}'.format(filename)
    return msg 
Example #9
Source File: bh.py    From dustmaps with GNU General Public License v2.0 6 votes
def __init__(self, bh_dir=None):
        """
        Args:
            bh_dir (Optional[str]): The directory containing the Burstein &
                Heiles dust map. Defaults to `None`, meaning that the default
                directory is used.
        """
        if bh_dir is None:
            bh_dir = os.path.join(data_dir_default, 'bh')

        f = h5py.File(os.path.join(bh_dir, 'bh.h5'), 'r')
        self._hinorth = f['hinorth'][:]
        self._hisouth = f['hisouth'][:]
        self._rednorth = f['rednorth'][:]
        self._redsouth = f['redsouth'][:]
        f.close() 
Example #10
Source File: test_io.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 6 votes
def test_NDArrayIter_h5py():
    if not h5py:
        return

    data, labels = _init_NDArrayIter_data('ndarray')

    try:
        os.remove('ndarraytest.h5')
    except OSError:
        pass
    with h5py.File('ndarraytest.h5', 'w') as f:
        f.create_dataset('data', data=data)
        f.create_dataset('label', data=labels)
        
        _test_last_batch_handle(f['data'], f['label'])
        _test_last_batch_handle(f['data'], [])
        _test_last_batch_handle(f['data'])
    try:
        os.remove("ndarraytest.h5")
    except OSError:
        pass 
Example #11
Source File: utils.py    From DOTA_models with Apache License 2.0 6 votes
def read_data(data_fname):
  """ Read saved data in HDF5 format.

  Args:
    data_fname: The filename of the file from which to read the data.
  Returns:
    A dictionary whose keys will vary depending on dataset (but should
    always contain the keys 'train_data' and 'valid_data') and whose
    values are numpy arrays.
  """

  try:
    with h5py.File(data_fname, 'r') as hf:
      data_dict = {k: np.array(v) for k, v in hf.items()}
      return data_dict
  except IOError:
    print("Cannot open %s for reading." % data_fname)
    raise 
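read_data() above pulls every dataset in the file into a dictionary of numpy arrays. A hypothetical inverse for writing such a dictionary back out (write_data is a sketch, not part of the original project):

def write_data(data_fname, data_dict):
  """Write a dictionary of numpy arrays to data_fname in HDF5 format (sketch)."""
  import h5py
  with h5py.File(data_fname, 'w') as hf:
    for k, v in data_dict.items():
      hf.create_dataset(k, data=v)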
Example #12
Source File: samplers.py    From cvpr2018-hnd with MIT License 6 votes
def shuffle(labels, num_epochs=50, path=None, start_time=time.time()):

    order_path = '{path}/order_{num_epochs}.h5' \
                       .format(path=path, num_epochs=num_epochs)
    if path is not None and os.path.isfile(order_path):
        with h5py.File(order_path, 'r') as f:
            order = f['order'][:]
    else:
        order = -np.ones([num_epochs, labels.size(0)], dtype=int)
        for epoch in range(num_epochs):
            order[epoch] = np.random.permutation(labels.size(0))
            print_freq = min([100, (num_epochs-1) // 5 + 1])
            print_me = (epoch == 0 or epoch == num_epochs-1 or (epoch+1) % print_freq == 0)
            if print_me:
                print('{epoch:4d}/{num_epochs:4d} e; '.format(epoch=epoch+1, num_epochs=num_epochs), end='')
                print('generate random order; {time:8.3f} s'.format(time=time.time()-start_time))
        
        if path is not None:
            with h5py.File(order_path, 'w') as f:
                f.create_dataset('order', data=order, compression='gzip', compression_opts=9)
    
    print('random order; {time:8.3f} s'.format(time=time.time()-start_time))
    return torch.from_numpy(order) 
Example #13
Source File: kccsd_rhf.py    From pyscf with Apache License 2.0 6 votes
def read_eom_amplitudes(vec_shape, filename="reom_amplitudes.hdf5", vec=None):
    task_list = generate_max_task_list(vec_shape)
    read_success = False
    return False, None  # TODO: find a way to make the amplitudes consistent
                        # with the signs of the eris/t-amplitudes when restarting
    print("attempting to read in eom amplitudes from file ", filename)
    if os.path.isfile(filename):
        print("reading eom amplitudes from file. shape=", vec_shape)
        feri = h5py.File(filename, 'r', driver='mpio', comm=MPI.COMM_WORLD)
        saved_v = feri['v']
        if vec is None:
            vec = np.empty(vec_shape,dtype=saved_v.dtype)
        assert(saved_v.shape == vec_shape)
        task_list = generate_max_task_list(vec.shape)
        for block in task_list:
            which_slice = [slice(*x) for x in block]
            vec[tuple(which_slice)] = saved_v[tuple(which_slice)]
        feri.close()
        read_success = True
    if vec is not None and vec_shape[-1] == 1:
        vec = vec.reshape(vec_shape[:-1])
    return read_success, vec 
Example #14
Source File: kccsd_rhf.py    From pyscf with Apache License 2.0 6 votes
def write_amplitudes(t1, t2, filename="t_amplitudes.hdf5"):
    task_list = generate_max_task_list(t2.shape)
    if rank == 0:
        print("writing t amplitudes to file")
        feri = h5py.File(filename, 'w')
        ds_type = t2.dtype
        out_t1  = feri.create_dataset('t1', t1.shape, dtype=ds_type)
        out_t2  = feri.create_dataset('t2', t2.shape, dtype=ds_type)

        task_list = generate_max_task_list(t1.shape)
        for block in task_list:
            which_slice = [slice(*x) for x in block]
            out_t1[tuple(which_slice)] = t1[tuple(which_slice)]
        task_list = generate_max_task_list(t2.shape)
        for block in task_list:
            which_slice = [slice(*x) for x in block]
            out_t2[tuple(which_slice)] = t2[tuple(which_slice)]
        feri.close()
    return 
Example #15
Source File: hdf5_loader.py    From SSGAN-Tensorflow with MIT License 6 votes
def __init__(self, path, ids, name='default',
                 max_examples=None, is_train=True):
        self._ids = list(ids)
        self.name = name
        self.is_train = is_train

        if max_examples is not None:
            self._ids = self._ids[:max_examples]

        filename = 'data.hdf5'

        file = os.path.join(path, filename)
        log.info("Reading %s ...", file)

        self.data = h5py.File(file, 'r')
        log.info("Reading Done: %s", file) 
Example #16
Source File: data_io.py    From Kaggler with MIT License 6 votes
def save_hdf5(X, y, path):
    """Save data as a HDF5 file.

    Args:
        X (numpy or scipy sparse matrix): Data matrix
        y (numpy array): Target vector.
        path (str): Path to the HDF5 file to save data.
    """

    with h5py.File(path, 'w') as f:
        is_sparse = 1 if sparse.issparse(X) else 0
        f['issparse'] = is_sparse
        f['target'] = y

        if is_sparse:
            if not sparse.isspmatrix_csr(X):
                X = X.tocsr()

            f['shape'] = np.array(X.shape)
            f['data'] = X.data
            f['indices'] = X.indices
            f['indptr'] = X.indptr
        else:
            f['data'] = X 
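A companion loader would reverse this layout: read the issparse flag, then rebuild the CSR matrix from its data/indices/indptr components. A minimal sketch (load_hdf5 here is an assumed helper name, not necessarily the project's actual API):

def load_hdf5(path):
    """Load data saved by save_hdf5() above (sketch)."""
    import h5py
    from scipy import sparse
    with h5py.File(path, 'r') as f:
        y = f['target'][:]
        if f['issparse'][()]:
            X = sparse.csr_matrix((f['data'][:], f['indices'][:], f['indptr'][:]),
                                  shape=tuple(f['shape'][:]))
        else:
            X = f['data'][:]
    return X, y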
Example #17
Source File: preprocessing.py    From IGMC with MIT License 6 votes
def load_matlab_file(path_file, name_field):
    """
    load '.mat' files
    inputs:
        path_file, string containing the file path
        name_field, string containing the field name (default='shape')
    warning:
        '.mat' files should be saved in the '-v7.3' format
    """
    db = h5py.File(path_file, 'r')
    ds = db[name_field]
    try:
        if 'ir' in ds.keys():
            data = np.asarray(ds['data'])
            ir = np.asarray(ds['ir'])
            jc = np.asarray(ds['jc'])
            out = sp.csc_matrix((data, ir, jc)).astype(np.float32)
    except AttributeError:
        # Transpose in case it is a dense matrix, because of the row- vs column-major ordering between Python and MATLAB
        out = np.asarray(ds).astype(np.float32).T

    db.close()

    return out 
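MATLAB's '-v7.3' format is HDF5 under the hood, which is why h5py can open these files directly; matrices saved in older formats need scipy.io.loadmat instead. A usage sketch with a hypothetical file and field name:

# Assumes a matrix saved from MATLAB with: save('ratings.mat', 'M', '-v7.3')
M = load_matlab_file('data/ratings.mat', 'M')  # scipy CSC matrix if sparse, numpy array if dense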
Example #18
Source File: cifar10.py    From Generative-Latent-Optimization-Tensorflow with MIT License 6 votes
def __init__(self, ids, name='default',
                 max_examples=None, is_train=True):
        self._ids = list(ids)
        self.name = name
        self.is_train = is_train

        if max_examples is not None:
            self._ids = self._ids[:max_examples]

        filename = 'data.hdf5'

        file = os.path.join(__PATH__, filename)
        log.info("Reading %s ...", file)

        try:
            self.data = h5py.File(file, 'r+')
        except (IOError, OSError):
            raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
        log.info("Reading Done: %s", file) 
Example #19
Source File: svhn.py    From Generative-Latent-Optimization-Tensorflow with MIT License 6 votes
def __init__(self, ids, name='default',
                 max_examples=None, is_train=True):
        self._ids = list(ids)
        self.name = name
        self.is_train = is_train

        if max_examples is not None:
            self._ids = self._ids[:max_examples]

        filename = 'data.hdf5'

        file = os.path.join(__PATH__, filename)
        log.info("Reading %s ...", file)

        try:
            self.data = h5py.File(file, 'r+')
        except (IOError, OSError):
            raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
        log.info("Reading Done: %s", file) 
Example #20
Source File: Forecaster.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License 6 votes
def __init__(self, n_pop=4, **specs):
        
        FortneyMarleyCahoyMix1.__init__(self, **specs)
        
        # number of category
        self.n_pop = int(n_pop)
        
        # read forecaster parameter file
        downloadsdir = get_downloads_dir()
        filename = 'fitting_parameters.h5'
        parampath = os.path.join(downloadsdir, filename)
        if not os.path.exists(parampath) and os.access(downloadsdir, os.W_OK|os.X_OK):
            fitting_url = 'https://raw.github.com/dsavransky/forecaster/master/fitting_parameters.h5'
            self.vprint("Fetching Forecaster fitting parameters from %s to %s" % (fitting_url, parampath))
            try:
                urlretrieve(fitting_url, parampath)
            except Exception:
                self.vprint("Error: Remote fetch failed. Fetch manually or see install instructions.")

        assert os.path.exists(parampath), 'fitting_parameters.h5 must exist in /.EXOSIMS/downloads'

        h5 = h5py.File(parampath, 'r')
        self.all_hyper = h5['hyper_posterior'][:]
        h5.close() 
Example #21
Source File: leike_ensslin_2019.py    From dustmaps with GNU General Public License v2.0 6 votes
def fetch(clobber=False):
    """
    Downloads the 3D dust map of Leike & Ensslin (2019).

    Args:
        clobber (Optional[bool]): If ``True``, any existing file will be
            overwritten, even if it appears to match. If ``False`` (the
            default), ``fetch()`` will attempt to determine if the dataset
            already exists. This determination is not 100% robust against data
            corruption.
    """
    dest_dir = os.path.join(data_dir(), 'leike_ensslin_2019')
    fname = os.path.join(dest_dir, 'simple_cube.h5')

    # Check if the HDF5 file already exists
    md5sum = 'f54e01c253453117e3770575bed35078'

    if (not clobber) and fetch_utils.check_md5sum(fname, md5sum):
        print('File appears to exist already. Call `fetch(clobber=True)` '
              'to force overwriting of existing file.')
        return

    # Download from the server
    url = 'https://zenodo.org/record/2577337/files/simple_cube.h5?download=1'
    fetch_utils.download_and_verify(url, md5sum, fname) 
Example #22
Source File: helper.py    From pointnet-registration-framework with MIT License 5 votes
def read_h5(file_name):
	import h5py
	f = h5py.File(file_name, 'r')
	templates = np.array(f.get('templates'))
	f.close()
	return templates 
Example #23
Source File: generate_dataset.py    From pointnet-registration-framework with MIT License 5 votes
def load_h5_data_label_seg(self, h5_filename):
		f = h5py.File(h5_filename, 'r')
		data = f['data'][:]
		label = f['label'][:]
		seg = f['pid'][:]
		return (data, label, seg) 
Example #24
Source File: data_prep_util.py    From pointnet-registration-framework with MIT License 5 votes
def load_h5(h5_filename):
    f = h5py.File(h5_filename, 'r')
    data = f['data'][:]
    label = f['label'][:]
    return (data, label)

# ----------------------------------------------------------------
# Following are the helper functions to load save/load PLY files
# ----------------------------------------------------------------

# Load PLY file 
Example #25
Source File: helper.py    From pointnet-registration-framework with MIT License 5 votes
def read_noise_data(data_dict):
	import h5py
	f = h5py.File(os.path.join('data',data_dict,'noise_data.h5'), 'r')
	templates = np.array(f.get('templates'))
	sources = np.array(f.get('sources'))
	f.close()
	return templates, sources 
Example #26
Source File: data_prep_util.py    From pointnet-registration-framework with MIT License 5 votes
def load_h5_data_label_seg(h5_filename):
    f = h5py.File(h5_filename, 'r')
    data = f['data'][:]
    label = f['label'][:]
    seg = f['pid'][:]
    return (data, label, seg)

# Read numpy array data and label from h5_filename 
Example #27
Source File: models.py    From gandlf with MIT License 5 votes
def save_model(model, filepath, overwrite=True):

    def get_json_type(obj):
        if hasattr(obj, 'get_config'):
            return {'class_name': obj.__class__.__name__,
                    'config': obj.get_config()}

        if type(obj).__module__ == np.__name__:
            return obj.item()

        if callable(obj) or type(obj).__name__ == type.__name__:
            return obj.__name__

        raise TypeError('Not JSON Serializable:', obj)

    import h5py
    from keras import __version__ as keras_version

    if not overwrite and os.path.isfile(filepath):
        proceed = keras.models.ask_to_proceed_with_overwrite(filepath)
        if not proceed:
            return

    f = h5py.File(filepath, 'w')
    f.attrs['keras_version'] = str(keras_version).encode('utf8')
    f.attrs['generator_config'] = json.dumps({
        'class_name': model.generator.__class__.__name__,
        'config': model.generator.get_config(),
    }, default=get_json_type).encode('utf8')
    f.attrs['discriminator_config'] = json.dumps({
        'class_name': model.discriminator.__class__.__name__,
        'config': model.discriminator.get_config(),
    }, default=get_json_type).encode('utf8')

    generator_weights_group = f.create_group('generator_weights')
    discriminator_weights_group = f.create_group('discriminator_weights')
    model.generator.save_weights_to_hdf5_group(generator_weights_group)
    model.discriminator.save_weights_to_hdf5_group(discriminator_weights_group)

    f.flush()
    f.close() 
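Here the model configs are stored as JSON strings in HDF5 attributes (f.attrs) rather than datasets, keeping metadata alongside the weight groups. A hedged sketch of reading them back (the file name is an assumption):

import json
import h5py

with h5py.File('gan_model.h5', 'r') as f:
    gen_config = json.loads(f.attrs['generator_config'])    # json.loads accepts str or bytes
    disc_config = json.loads(f.attrs['discriminator_config'])
    keras_version = f.attrs['keras_version']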
Example #28
Source File: m_restart.py    From pyscf with Apache License 2.0 5 votes
def read_rst_h5py(filename=None):
    import h5py, os
    if filename is None:
        path = os.getcwd()
        filename = find('*.hdf5', path)
    #filename = 'SCREENED_COULOMB.hdf5'
    with h5py.File(filename, 'r') as f:
        #print("Keys: %s" % f.keys())
        a_group_key = list(f.keys())[0]
        # Get the data
        data = list(f[a_group_key])
    msg = 'RESTART: Full matrix elements of screened interactions (W_c) were read from {}'.format(filename)
    return data, msg 
Example #29
Source File: generate_dataset.py    From pointnet-registration-framework with MIT License 5 votes
def store_h5(templates, dict_name):
	# templates:	Array of templates (BxNx3)
	# dict_name:	Dictionary to store data.
	if not os.path.exists(os.path.join('data',dict_name)): os.mkdir(os.path.join('data',dict_name))

	file_names_txt = open(os.path.join('data',dict_name,'files.txt'),'w')			# Store names of files in txt file to read data.
	file_name = os.path.join('data',dict_name,'templates.h5')
	file_names_txt.write(file_name)
	f = h5py.File(file_name,'w')
	f.create_dataset('templates',data=templates)
	f.close()
	file_names_txt.close()