Python numpy.save() Examples

The following are 30 code examples showing how to use numpy.save(). All of them are extracted from open source projects; the project, author, file, and license of each snippet are noted above it.

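Before the examples, a minimal round-trip sketch (file name and array contents here are illustrative). numpy.save writes one array to a binary .npy file, appending the .npy extension if the path lacks it, and numpy.load reads it back; np.save also accepts an open binary file object, a pattern several examples below rely on.

import io
import numpy as np

arr = np.arange(6).reshape(2, 3)

# np.save appends '.npy' when the path has no such suffix: this writes 'example.npy'
np.save('example', arr)

# np.load restores the array with shape and dtype intact
loaded = np.load('example.npy')
assert (loaded == arr).all()

# np.save also works with an open binary file object
buf = io.BytesIO()
np.save(buf, arr)
buf.seek(0)
assert (np.load(buf) == arr).all()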

Example 1
Project: pytorch-mri-segmentation-3D   Author: Achilleas   File: PP.py   License: MIT License
import glob
import os

import nibabel as nib

def generateImgSlicesFolder(data_folder='../Data/MS2017a/scans/'):
    scan_folders = glob.glob(data_folder + '*')

    for sf in scan_folders:
        slice_dir_path = os.path.join(sf, 'slices/')
        if not os.path.exists(slice_dir_path):
            print('Creating directory at:', slice_dir_path)
            os.makedirs(slice_dir_path)

        img = nib.load(os.path.join(sf, 'pre/FLAIR.nii.gz'))
        img_np = img.get_data()
        img_affine = img.affine
        print(sf)
        print('Number of slices:', img_np.shape[2])
        # Save every axial slice as its own single-slice NIfTI file
        for i in range(img_np.shape[2]):
            slice_img_np = img_np[:, :, i]
            nft_img = nib.Nifti1Image(slice_img_np, img_affine)
            nib.save(nft_img, slice_dir_path + 'FLAIR_' + str(i) + '.nii.gz')

            if os.path.basename(sf) == '0':
                slice_img = nib.load(slice_dir_path + 'FLAIR_' + str(i) + '.nii.gz').get_data() / 5
                print('Writing to', str(i) + '.jpg')
Example 2
Project: face-attendance-machine   Author: matiji66   File: encoding_images.py   License: Apache License 2.0
import os

import face_recognition
import numpy as np

# name_and_encoding, image_thread, known_face_names, known_face_encodings,
# KNOWN_FACE_ENCODINGS and KNOWN_FACE_NANE are module-level globals of the project.
def encoding_images(path):
    """
    Encode the images found in the subfolders of `path`.
    TODO:
        Compare each face against the face vectors already in the history store by
        Euclidean distance; when the distance falls below a threshold, notify:
        if the match is the same person, skip the record and report that it already
        exists, otherwise warn that two different faces are overly similar.
    :param path:
    :return:
    """
    with open(name_and_encoding, 'w') as f:
        subdirs = [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]
        for subdir in subdirs:
            print('process image name :', subdir)
            person_image_encoding = []
            for y in os.listdir(subdir):
                print("image name is ", y)
                _image = face_recognition.load_image_file(os.path.join(subdir, y))
                face_encodings = face_recognition.face_encodings(_image)
                name = os.path.split(subdir)[-1]
                if face_encodings and len(face_encodings) == 1:
                    if len(person_image_encoding) == 0:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        continue
                    # compare_faces checks the candidate against every stored encoding
                    # at once; a False entry means this image differs enough from one
                    # of them to count as a new feature.
                    matches = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                    if False in matches:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        print(name, " new feature")
                        f.write(name + ":" + str(face_encodings[0]) + "\n")
                    # face_encoding = face_recognition.face_encodings(_image)[0]
                    # face_recognition.compare_faces()
            known_face_encodings.extend(person_image_encoding)
            bb = np.array(known_face_encodings)
            print("--------")
    np.save(KNOWN_FACE_ENCODINGS, known_face_encodings)
    np.save(KNOWN_FACE_NANE, known_face_names) 
Example 3
Project: pytorch-mri-segmentation-3D   Author: Achilleas   File: PP.py   License: MIT License
import glob
import os

import nibabel as nib

def generateGTSlicesFolder(data_folder='../Data/MS2017a/scans/'):
    scan_folders = glob.glob(data_folder + '*')

    for sf in scan_folders:
        slice_dir_path = os.path.join(sf, 'gt_slices/')
        if not os.path.exists(slice_dir_path):
            print('Creating directory at:', slice_dir_path)
            os.makedirs(slice_dir_path)

        img = nib.load(os.path.join(sf, 'wmh.nii.gz'))
        img_np = img.get_data()
        img_affine = img.affine
        print(sf)
        print('Number of slices:', img_np.shape[2])
        # Save every axial ground-truth slice as its own single-slice NIfTI file
        for i in range(img_np.shape[2]):
            slice_img_np = img_np[:, :, i]
            nft_img = nib.Nifti1Image(slice_img_np, img_affine)
            nib.save(nft_img, slice_dir_path + 'wmh_' + str(i) + '.nii.gz')

            if os.path.basename(sf) == '0':
                slice_img = nib.load(slice_dir_path + 'wmh_' + str(i) + '.nii.gz').get_data() * 256
                #cv2.imwrite('temp/' + str(i) + '.jpg', slice_img)
Example 4
Project: vergeml   Author: mme   File: cache.py   License: MIT License
import io
import pickle

import lz4.frame
import numpy as np

# _BYTES, _NUMPY and _PICKLE are the cache's type-tag constants.
def _serialize_data(self, data):
    # Default to raw bytes
    type_ = _BYTES

    if isinstance(data, np.ndarray):
        # When the data is a numpy array, use the more compact native
        # numpy format.
        buf = io.BytesIO()
        np.save(buf, data)
        data = buf.getvalue()
        type_ = _NUMPY

    elif not isinstance(data, (bytearray, bytes)):
        # Everything else except byte data is serialized in pickle format.
        data = pickle.dumps(data)
        type_ = _PICKLE

    if self.compress:
        # Optional compression
        data = lz4.frame.compress(data)

    return type_, data
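The page only shows the write path. For orientation, a minimal sketch of what the matching read path could look like, assuming the same type tags and compress flag; the method name and structure here are illustrative, not vergeml's actual code:

def _deserialize_data(self, type_, data):
    # Undo the optional compression first, then dispatch on the type tag.
    if self.compress:
        data = lz4.frame.decompress(data)

    if type_ == _NUMPY:
        # np.load reads the .npy bytes back from a file-like object
        return np.load(io.BytesIO(data))
    elif type_ == _PICKLE:
        return pickle.loads(data)
    return data  # _BYTES: raw bytes pass through unchanged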
Example 5
Project: gated-graph-transformer-network   Author: hexahedria   File: ggtnn_train.py   License: MIT License
def visualize(m, story_buckets, wordlist, answerlist, output_format, outputdir, batch_size=1, seq_len=5, debugmode=False, snap=False):
    cur_bucket = random.choice(story_buckets)
    sampled_batch = sample_batch(cur_bucket, batch_size, len(answerlist), output_format)
    part_sampled_batch = sampled_batch[:3]
    with open(os.path.join(outputdir,'stories.txt'),'w') as f:
        ggtnn_graph_parse.print_batch(part_sampled_batch, wordlist, answerlist, file=f)
    with open(os.path.join(outputdir,'answer_list.txt'),'w') as f:
        f.write('\n'.join(answerlist) + '\n')
    if debugmode:
        args = sampled_batch
        fn = m.debug_test_fn
    else:
        args = part_sampled_batch[:2] + ((seq_len,) if output_format == model.ModelOutputFormat.sequence else ())
        fn = m.snap_test_fn if snap else m.fuzzy_test_fn
    results = fn(*args)
    for i,result in enumerate(results):
        np.save(os.path.join(outputdir,'result_{}.npy'.format(i)), result) 
Example 6
Project: dustmaps   Author: gregreen   File: json_serializers.py   License: GNU General Public License v2.0
def serialize_ndarray_npy(o):
    """
    Serializes a :obj:`numpy.ndarray` using numpy's built-in :obj:`save` function.
    This produces totally unreadable (and very un-JSON-like) results (in "npy"
    format), but it's basically guaranteed to work in 100% of cases.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    with io.BytesIO() as f:
        np.save(f, o)
        f.seek(0)
        serialized = json.dumps(f.read().decode('latin-1'))
    return dict(
        _type='np.ndarray',
        npy=serialized) 
Example 7
Project: dustmaps   Author: gregreen   File: json_serializers.py   License: GNU General Public License v2.0
def deserialize_ndarray_npy(d):
    """
    Deserializes a JSONified :obj:`numpy.ndarray` that was created using numpy's
    :obj:`save` function.

    Args:
        d (:obj:`dict`): A dictionary representation of an :obj:`ndarray` object, created
            using :obj:`numpy.save`.

    Returns:
        An :obj:`ndarray` object.
    """
    with io.BytesIO() as f:
        f.write(json.loads(d['npy']).encode('latin-1'))
        f.seek(0)
        return np.load(f) 
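Taken together, the two helpers give a lossless round trip; a short usage sketch (array contents illustrative):

import numpy as np

arr = np.random.rand(3, 4)

d = serialize_ndarray_npy(arr)        # {'_type': 'np.ndarray', 'npy': '...'}
restored = deserialize_ndarray_npy(d)

assert restored.dtype == arr.dtype
assert np.allclose(restored, arr)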
Example 8
Project: DOTA_models   Author: ringringyi   File: input.py   License: Apache License 2.0
import gzip

import numpy as np
import tensorflow as tf

def extract_mnist_data(filename, num_images, image_size, pixel_depth):
  """
  Extract the images into a 4D tensor [image index, y, x, channels].

  Values are rescaled from [0, 255] down to [-0.5, 0.5].
  """
  if not tf.gfile.Exists(filename+".npy"):
    with gzip.open(filename) as bytestream:
      bytestream.read(16)
      buf = bytestream.read(image_size * image_size * num_images)
      data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
      data = (data - (pixel_depth / 2.0)) / pixel_depth
      data = data.reshape(num_images, image_size, image_size, 1)
      # np.save appends '.npy', so this writes filename + ".npy"
      np.save(filename, data)
      return data
  else:
    # open in binary mode: .npy is a binary format
    with tf.gfile.Open(filename+".npy", mode='rb') as file_obj:
      return np.load(file_obj)
Example 9
Project: pytorch-mri-segmentation-3D   Author: Achilleas   File: PP.py   License: MIT License
def extractMeanDataStats(size = [200, 200, 100],
                         postfix = '_200x200x100orig',
                         main_folder_path = '../../Data/MS2017b/',
                         ):
    scan_folders = glob.glob(main_folder_path + 'scans/*')
    img_path = 'pre/FLAIR' + postfix + '.nii.gz'
    segm_path = 'wmh' + postfix + '.nii.gz'

    shape_ = [len(scan_folders), size[0], size[1], size[2]]
    arr = np.zeros(shape_)

    for i, sf in enumerate(scan_folders):
        arr[i, :, :, :] = numpyFromScan(os.path.join(sf, img_path)).squeeze()

    arr /= len(scan_folders)

    means = np.mean(arr)
    stds = np.std(arr, axis = 0)

    # np.save appends '.npy' to each output path
    np.save(main_folder_path + 'extra_data/std' + postfix, stds)
    np.save(main_folder_path + 'extra_data/mean' + postfix, means)
Example 10
Project: pytorch-mri-segmentation-3D   Author: Achilleas   File: PP.py   License: MIT License
def generateTrainValFile_Slices(train_fraction, main_folder = '../Data/MS2017a/'):
    # Use the caller's train_fraction (the original hardcoded 0.8 here,
    # leaving the parameter unused).
    train_folders, val_folders = splitTrainVal_Slices(train_fraction)

    train_folder_names = [train_folders[i].split(main_folder)[1] for i in range(len(train_folders))]
    val_folder_names = [val_folders[i].split(main_folder)[1] for i in range(len(val_folders))]

    with open(main_folder + 'train_slices.txt', 'w+') as f_train:
        for fn in train_folder_names:
            f_train.write(fn + '\n')

    with open(main_folder + 'val_slices.txt', 'w+') as f_val:
        for fn in val_folder_names:
            f_val.write(fn + '\n')
Example 11
def next_batch(self, whichSet='train'):
		if whichSet == 'train':
			self.trainBatchCnt += 1
			assert self.trainBatchCnt < self.trainMaxBatch
			return self.train[self.trainBatchCnt * self.batch_size: (self.trainBatchCnt + 1) * self.batch_size]
		elif whichSet == 'validation':
			self.validationBatchCnt += 1
			assert self.validationBatchCnt < self.validationMaxBatch
			return self.validation[self.validationBatchCnt * self.batch_size: (self.validationBatchCnt + 1) * self.batch_size]
		elif whichSet == 'test':
			self.testBatchCnt += 1
			assert self.testBatchCnt < self.testMaxBatch
			return self.test[self.testBatchCnt * self.batch_size: (self.testBatchCnt + 1) * self.batch_size]
		else:
			msg = 'Wrong set name!\n'+ \
				  'Should be train / validation / test.'
			raise Exception(msg)
	# Following code copied here:
	# https://stackoverflow.com/questions/17219481/save-to-file-and-load-an-instance-of-a-python-class-with-its-attributes 
Example 12
Project: argus-freesound   Author: lRomul   File: predict_folds.py   License: MIT License
def pred_test_fold(predictor, fold, test_data):
    fold_prediction_dir = PREDICTION_DIR / f'fold_{fold}' / 'test'
    fold_prediction_dir.mkdir(parents=True, exist_ok=True)

    fname_lst, images_lst = test_data
    pred_lst = []
    for fname, image in zip(fname_lst, images_lst):
        pred = predictor.predict(image)

        pred_path = fold_prediction_dir / f'{fname}.npy'
        np.save(pred_path, pred)

        pred = pred.mean(axis=0)
        pred_lst.append(pred)

    preds = np.stack(pred_lst, axis=0)
    subm_df = pd.DataFrame(data=preds,
                           index=fname_lst,
                           columns=config.classes)
    subm_df.index.name = 'fname'
    subm_df.to_csv(fold_prediction_dir / 'probs.csv') 
Example 13
Project: overhaul-distillation   Author: clovaai   File: calculate_weights.py   License: MIT License
def calculate_weigths_labels(dataset, dataloader, num_classes):
    # Accumulate per-class pixel counts over the whole data loader
    z = np.zeros((num_classes,))
    # Initialize tqdm
    tqdm_batch = tqdm(dataloader)
    print('Calculating classes weights')
    for sample in tqdm_batch:
        y = sample['label']
        y = y.detach().cpu().numpy()
        mask = (y >= 0) & (y < num_classes)
        labels = y[mask].astype(np.uint8)
        count_l = np.bincount(labels, minlength=num_classes)
        z += count_l
    tqdm_batch.close()
    total_frequency = np.sum(z)
    class_weights = []
    for frequency in z:
        class_weight = 1 / (np.log(1.02 + (frequency / total_frequency)))
        class_weights.append(class_weight)
    ret = np.array(class_weights)
    classes_weights_path = os.path.join(Path.db_root_dir(dataset), dataset+'_classes_weights.npy')
    np.save(classes_weights_path, ret)

    return ret 
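calculate_weigths_labels writes the weights next to the dataset so they can be reused. A hedged sketch of the typical caching pattern around it (the call site is not shown on this page):

import os

import numpy as np

classes_weights_path = os.path.join(Path.db_root_dir(dataset), dataset + '_classes_weights.npy')
if os.path.isfile(classes_weights_path):
    # reuse the cached weights written by np.save above
    weight = np.load(classes_weights_path)
else:
    weight = calculate_weigths_labels(dataset, dataloader, num_classes)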
Example 14
Project: pyscf   Author: pyscf   File: wmme.py   License: Apache License 2.0
def MakeBaseIntegrals(self, Smh=True, MakeS=False):
      """Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),
      Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb)"""

      # assemble arguments to integral generation program
      Args = []
      if Smh:
         Args.append("--orb-trafo=Smh")
         # ^- calculate integrals in symmetrically orthogonalized AO basis
      Outputs = []
      Outputs.append(("--save-coreh", "INT1E"))
      Outputs.append(("--save-fint2e", "INT2E"))
      Outputs.append(("--save-overlap", "OVERLAP"))

      CoreH, Int2e, Overlap = self._InvokeBfint(Args, Outputs)

      nOrb = CoreH.shape[0]
      Int2e = Int2e.reshape((Int2e.shape[0], nOrb, nOrb))
      CoreEnergy = self.Atoms.fCoreRepulsion()

      if MakeS:
         return CoreEnergy, CoreH, Int2e, Overlap
      else:
         return CoreEnergy, CoreH, Int2e 
Example 15
Project: pyscf   Author: pyscf   File: wmme.py   License: Apache License 2.0
def MakeOverlap(self, OrbBasis2=None):
      """calculate overlap within main orbital basis, and, optionally, between main
      orbital basis and a second basis, as described in OrbBasis2.
      Returns <1|1>, <1|2>, and <2|2> matrices."""
      Args = []
      Outputs = []
      Outputs.append(("--save-overlap", "OVERLAP_1"))
      if OrbBasis2 is not None:
         MoreBases = {'--basis-orb-2': OrbBasis2}
         Outputs.append(("--save-overlap-12", "OVERLAP_12"))
         Outputs.append(("--save-overlap-2", "OVERLAP_2"))
         return self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
      else:
         MoreBases = None
         Overlap, = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
         return Overlap 
Example 16
Project: pyscf   Author: pyscf   File: wmme.py   License: Apache License 2.0
def MakeRaw2eIntegrals(self, Smh=True, Kernel2e="coulomb"):
      """compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),
      where the fitting metric is *not* absorbed into the 2e integrals."""

      # assemble arguments to integral generation program
      Args = []
      if Smh:
         Args.append("--orb-trafo=Smh")
         # ^- calculate integrals in symmetrically orthogonalized AO basis
      Args.append("--kernel2e='%s'" % Kernel2e)
      Args.append("--solve-fitting-eq=false")
      Outputs = []
      Outputs.append(("--save-fint2e", "INT2E_3IX"))
      Outputs.append(("--save-fitting-metric", "INT2E_METRIC"))

      Int2e_Frs, Int2e_FG = self._InvokeBfint(Args, Outputs)

      nOrb = int(Int2e_Frs.shape[1]**.5 + .5)
      assert(nOrb**2 == Int2e_Frs.shape[1])
      Int2e_Frs = Int2e_Frs.reshape((Int2e_Frs.shape[0], nOrb, nOrb))
      assert(Int2e_Frs.shape[0] == Int2e_FG.shape[0])
      assert(Int2e_FG.shape[0] == Int2e_FG.shape[1])
      return Int2e_FG, Int2e_Frs 
Example 17
Project: SEDST   Author: AuCson   File: unsup_model.py   License: MIT License
def load_glove_embedding(self, freeze=False):
        initial_arr = self.m.u_encoder.embedding.weight.data.cpu().numpy()
        mat = get_glove_matrix(self.reader.vocab, initial_arr)
        # np.save('./data/embedding.npy',mat)
        # mat = np.load('./data/embedding.npy')
        embedding_arr = torch.from_numpy(mat)

        self.m.u_encoder.embedding.weight.data.copy_(embedding_arr)
        self.m.p_encoder.embedding.weight.data.copy_(embedding_arr)
        self.m.m_decoder.emb.weight.data.copy_(embedding_arr)
        self.m.p_decoder.emb.weight.data.copy_(embedding_arr)
        self.m.qz_decoder.mu.weight.data.copy_(embedding_arr.transpose(1, 0))
        self.m.pz_decoder.mu.weight.data.copy_(embedding_arr.transpose(1, 0))
        if freeze:
            self.freeze_module(self.m.u_encoder.embedding)
            self.freeze_module(self.m.m_e.embedding)
            self.freeze_module(self.m.m_decoder.emb) 
Example 18
Project: PathCon   Author: hwwang55   File: data_loader.py   License: MIT License
import os
import re

import numpy as np
from sklearn.feature_extraction.text import CountVectorizer

# `args` holds the project's parsed command-line arguments.
def read_relations(file_name):
    bow = []
    count_vec = CountVectorizer()

    d = {}
    file = open(file_name)
    for line in file:
        index, name = line.strip().split('\t')
        d[name] = int(index)

        if args.feature_type == 'bow' and not os.path.exists('../data/' + args.dataset + '/bow.npy'):
            tokens = re.findall('[a-z]{2,}', name)
            bow.append(' '.join(tokens))
    file.close()

    if args.feature_type == 'bow' and not os.path.exists('../data/' + args.dataset + '/bow.npy'):
        bow = count_vec.fit_transform(bow)
        np.save('../data/' + args.dataset + '/bow.npy', bow.toarray())

    return d 
Example 19
Project: ConvolutionalEmotion   Author: Zebreu   File: emotionclassification.py   License: MIT License
def getPeakFeatures():
    net = DecafNet()

    features = numpy.zeros((number_sequences,feature_length))
    labels = numpy.zeros((number_sequences,1))
    counter = 0
    # Maybe sort them
    for participant in os.listdir(os.path.join(data_dir,image_dir)):
        for sequence in os.listdir(os.path.join(data_dir,image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(os.listdir(os.path.join(data_dir,image_dir, participant,sequence)))
                image_file = image_files[-1]
                print(counter, image_file)
                imarray = cv2.imread(os.path.join(data_dir,image_dir, participant,sequence,image_file))
                imarray = cv2.cvtColor(imarray,cv2.COLOR_BGR2GRAY)
                scores = net.classify(imarray, center_only=True)
                features[counter] = net.feature(feature_level)#.flatten()
                label_file = open(os.path.join(data_dir,label_dir, participant,sequence,image_file[:-4]+"_emotion.txt"))
                # the label file holds a single number; float() is safer than eval()
                labels[counter] = float(label_file.read())
                label_file.close()
                counter += 1

    numpy.save("featuresPeak5",features)
    numpy.save("labelsPeak5",labels) 
Example 20
Project: Keras-GAN   Author: eriklindernoren   File: data_loader.py   License: MIT License
def setup_mnist(self, img_res):

        print ("Setting up MNIST...")

        if not os.path.exists('datasets/mnist_x.npy'):
            # Load the dataset
            (mnist_X, mnist_y), (_, _) = mnist.load_data()

            # Normalize and rescale images
            mnist_X = self.normalize(mnist_X)
            mnist_X = np.array([imresize(x, img_res) for x in mnist_X])
            mnist_X = np.expand_dims(mnist_X, axis=-1)
            mnist_X = np.repeat(mnist_X, 3, axis=-1)

            self.mnist_X, self.mnist_y = mnist_X, mnist_y

            # Save formatted images
            np.save('datasets/mnist_x.npy', self.mnist_X)
            np.save('datasets/mnist_y.npy', self.mnist_y)
        else:
            self.mnist_X = np.load('datasets/mnist_x.npy')
            self.mnist_y = np.load('datasets/mnist_y.npy')

        print ("+ Done.") 
Example 21
Project: sudoku   Author: Kyubyong   File: generate_sudoku.py   License: GNU General Public License v3.0
def main(num):
    '''
    Generates `num` games of Sudoku.
    '''
    quizzes = np.zeros((num, 9, 9), np.int32)
    solutions = np.zeros((num, 9, 9), np.int32)
    for i in range(num):
        all_results, solution = run(n=23, iter=10)
        quiz = best(all_results)
        
        quizzes[i] = quiz
        solutions[i] = solution

        if (i+1) % 1000 == 0:
            print(i+1)
            # np.save stores a single array; saving multiple named arrays to an
            # .npz archive requires np.savez.
            np.savez('data/sudoku.npz', quizzes=quizzes, solutions=solutions)
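Reading the archive back is symmetric; a brief sketch (file name from the snippet above):

import numpy as np

data = np.load('data/sudoku.npz')
quizzes, solutions = data['quizzes'], data['solutions']
print(quizzes.shape, solutions.shape)  # (num, 9, 9) and (num, 9, 9)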
Example 22
Project: insightface   Author: deepinsight   File: IJB_11.py   License: MIT License
def verification(template_norm_feats = None, unique_templates = None, p1 = None, p2 = None):
    # ==========================================================
    #         Compute set-to-set Similarity Score.
    # ==========================================================
    template2id = np.zeros((max(unique_templates)+1,1),dtype=int)
    for count_template, uqt in enumerate(unique_templates):
        template2id[uqt] = count_template
    
    score = np.zeros((len(p1),))   # cosine similarity score for each pair

    total_pairs = np.array(range(len(p1)))
    batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limitation
    sublists = [total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)]
    total_sublists = len(sublists)
    for c, s in enumerate(sublists):
        feat1 = template_norm_feats[template2id[p1[s]]]
        feat2 = template_norm_feats[template2id[p2[s]]]
        similarity_score = np.sum(feat1 * feat2, -1)
        score[s] = similarity_score.flatten()
        if c % 10 == 0:
            print('Finish {}/{} pairs.'.format(c, total_sublists))
    return score

Example 23
Project: EDeN   Author: fabriziocosta   File: util.py   License: MIT License
def store_matrix(matrix='',
                 output_dir_path='',
                 out_file_name='',
                 output_format=''):
    """store_matrix."""
    if not os.path.exists(output_dir_path):
        os.mkdir(output_dir_path)
    full_out_file_name = os.path.join(output_dir_path, out_file_name)
    if output_format == "MatrixMarket":
        if len(matrix.shape) == 1:
            raise Exception(
                "'MatrixMarket' format supports only 2-dimensional arrays, "
                "not vectors")
        else:
            io.mmwrite(full_out_file_name, matrix, precision=None)
    elif output_format == "numpy":
        np.save(full_out_file_name, matrix)
    elif output_format == "joblib":
        joblib.dump(matrix, full_out_file_name)
    elif output_format == "text":
        with open(full_out_file_name, "w") as f:
            if len(matrix.shape) == 1:
                for x in matrix:
                    f.write("%s\n" % (x))
            else:
                raise Exception(
                    "'text' format supports only 1-dimensional arrays, "
                    "not matrices")
    logger.info("Written file: %s" % full_out_file_name) 
Example 24
Project: DDPAE-video-prediction   Author: jthsieh   File: metrics.py   License: MIT License
def get_scores(self):
    # Save positions
    if self.save_path != '':
      positions = np.array([np.concatenate(self.pred_positions, axis=0),
                            np.concatenate(self.gt_positions, axis=0)])
      np.save(self.save_path, positions)

    masks = np.concatenate(self.masks, axis=0)
    cosine = np.concatenate(self.cosine_similarities, axis=0)
    rel_error = np.concatenate(self.relative_errors, axis=0)

    numel = np.sum(masks == 1, axis=(0,2))
    rel_error = np.sum(rel_error * masks, axis=(0,2)) / numel
    cosine = np.sum(cosine * masks, axis=(0,2)) / numel
    return {'relative_errors': rel_error, 'cosine_similarities': cosine} 
Example 25
Project: gated-graph-transformer-network   Author: hexahedria   File: convert_story.py   License: MIT License
def main(storyfile, outputdir):
    
    with gzip.open(storyfile,'rb') as f:
        story, sents, query, ans = pickle.load(f)

    # Create the output directory before writing anything into it
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)

    with open(os.path.join(outputdir,'story.txt'),'w') as f:
        f.write("{}\n{}\n{}".format("\n".join(" ".join(s) for s in sents), " ".join(query), " ".join(ans)))

    results = convert(story)
    for i,res in enumerate(results):
        np.save(os.path.join(outputdir,'result_{}.npy'.format(i)), res)
Example 26
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py   License: MIT License
def add_labels(self, labels):
        if self.print_progress:
            print('%-40s\r' % 'Saving labels...', end='', flush=True)
        assert labels.shape[0] == self.cur_images
        with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
            np.save(f, labels.astype(np.float32)) 
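Because the labels were written through an open file handle, reading them back goes through a file handle as well; a small illustrative sketch (tfr_prefix stands in for the writer's self.tfr_prefix above):

import numpy as np

with open(tfr_prefix + '-rxx.labels', 'rb') as f:
    labels = np.load(f)  # float32 array, one row per image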
Example 27
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py   License: MIT License
def extract(tfrecord_dir, output_dir):
    print('Loading dataset "%s"' % tfrecord_dir)
    tfutil.init_tf({'gpu_options.allow_growth': True})
    dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0)
    tfutil.init_uninited_vars()
    
    print('Extracting images to "%s"' % output_dir)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    idx = 0
    while True:
        if idx % 10 == 0:
            print('%d\r' % idx, end='', flush=True)
        try:
            images, labels = dset.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            break
        if images.shape[1] == 1:
            img = PIL.Image.fromarray(images[0][0], 'L')
        else:
            img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB')
        img.save(os.path.join(output_dir, 'img%08d.png' % idx))
        idx += 1
    print('Extracted %d images.' % idx)

#---------------------------------------------------------------------------- 
Example 28
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py   License: MIT License
import numpy as np
import scipy.io.wavfile as wav  # matches the wav.write(path, rate, data) call below

def save(path, bps, data):
    if data.dtype != np.int16:
        data = data.astype(np.int16)
    data = np.reshape(data, -1)
    wav.write(path, bps, data) 
Example 29
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py   License: MIT License
def pre_encode():
    import tqdm

    path = input('enter wave path...')
    ds = WaveDataset(path, -1, True)
    num = ds.max // dif

    imgs = [ds.get_example(i) for i in tqdm.tqdm(range(num))]    
    dst = np.concatenate(imgs, axis=1)
    print(dst.shape)

    np.save(path[:-3]+'npy', dst)
    print('encoded file saved at', path[:-3]+'npy') 
Example 30
Project: deep-learning-note   Author: wdxtub   File: preprocessing.py   License: MIT License
import datetime

import numpy as np
import tensorflow as tf

def main():
    start = datetime.datetime.now()
    print("start", start)
    with tf.Session() as sess:
        processed_data = create_image_lists(sess, TEST_PERCENTAGE, VALIDATION_PERCENTAGE)
        # Save the processed data in numpy format
        np.save(OUTPUT_FILE, processed_data)
    end = datetime.datetime.now()
    print("done", end)