Python numpy.save() Examples

The following are 30 code examples of numpy.save(), drawn from open-source projects. You can go to the original project or source file by following the links above each example.
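Before the project-specific examples, here is a minimal, self-contained sketch of the basic np.save / np.load round trip that they all build on (the file name and array below are purely illustrative):

import numpy as np

arr = np.arange(12, dtype=np.float32).reshape(3, 4)

# np.save writes a single array to a binary .npy file; the .npy
# extension is appended automatically if the path does not have one.
np.save('example_array.npy', arr)

# np.load reads the file back with shape and dtype preserved.
restored = np.load('example_array.npy')
assert restored.shape == (3, 4) and restored.dtype == np.float32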
Example #1
Source File: PP.py    From pytorch-mri-segmentation-3D with MIT License 10 votes
def generateImgSlicesFolder(data_folder = '../Data/MS2017a/scans/'):
	scan_folders = glob.glob(data_folder + '*')

	for sf in scan_folders:
		slice_dir_path = os.path.join(sf, 'slices/')
		if not os.path.exists(slice_dir_path):
			print('Creating directory at:' , slice_dir_path)
			os.makedirs(slice_dir_path)

		img = nib.load(os.path.join(sf, 'pre/FLAIR.nii.gz'))
		img_np = img.get_data()
		img_affine = img.affine
		print(sf)
		print('The img shape', img_np.shape[2])
		for i in range(img_np.shape[2]):
			slice_img_np = img_np[:,:,i]
			nft_img = nib.Nifti1Image(slice_img_np, img_affine)
			nib.save(nft_img, slice_dir_path + 'FLAIR_' + str(i) + '.nii.gz')

			if os.path.basename(sf) == '0':
				slice_img = nib.load(slice_dir_path + 'FLAIR_' + str(i) + '.nii.gz').get_data() / 5
				print('DID I GET HERE?')
				print('Writing to', str(i) + '.jpg') 
Example #2
Source File: encoding_images.py    From face-attendance-machine with Apache License 2.0 8 votes
def encoding_images(path):
    """
    对path路径下的子文件夹中的图片进行编码,
    TODO:
        对人脸数据进行历史库中的人脸向量进行欧式距离的比较,当距离小于某个阈值的时候提醒:
        如果相似的是本人,则跳过该条记录,并提醒已经存在,否则警告人脸过度相似问题,
    :param path:
    :return:
    """
    with open(name_and_encoding, 'w') as f:
        subdirs = [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]
        for subdir in subdirs:
            print('process image name :', subdir)
            person_image_encoding = []
            for y in os.listdir(subdir):
                print("image name is ", y)
                _image = face_recognition.load_image_file(os.path.join(subdir, y))
                face_encodings = face_recognition.face_encodings(_image)
                name = os.path.split(subdir)[-1]
                if face_encodings and len(face_encodings) == 1:
                    if len(person_image_encoding) == 0:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        continue
                    for i in range(len(person_image_encoding)):
                        distances = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                        if False in distances:
                            person_image_encoding.append(face_encodings[0])
                            known_face_names.append(name)
                            print(name, " new feature")
                            f.write(name + ":" + str(face_encodings[0]) + "\n")
                            break
                    # face_encoding = face_recognition.face_encodings(_image)[0]
                    # face_recognition.compare_faces()
            known_face_encodings.extend(person_image_encoding)
            bb = np.array(known_face_encodings)
            print("--------")
    np.save(KNOWN_FACE_ENCODINGS, known_face_encodings)
    np.save(KNOWN_FACE_NANE, known_face_names) 
Example #3
Source File: PP.py    From pytorch-mri-segmentation-3D with MIT License 7 votes
def generateGTSlicesFolder(data_folder = '../Data/MS2017a/scans/'):
	scan_folders = glob.glob(data_folder + '*')

	for sf in scan_folders:
		slice_dir_path = os.path.join(sf, 'gt_slices/')
		if not os.path.exists(slice_dir_path):
			print('Creating directory at:' , slice_dir_path)
			os.makedirs(slice_dir_path)

		img = nib.load(os.path.join(sf, 'wmh.nii.gz'))
		img_np = img.get_data()
		img_affine = img.affine
		print(sf)
		print('The img shape', img_np.shape[2])
		for i in range(img_np.shape[2]):
			slice_img_np = img_np[:,:,i]
			nft_img = nib.Nifti1Image(slice_img_np, img_affine)
			nib.save(nft_img, slice_dir_path + 'wmh_' + str(i) + '.nii.gz')

			if os.path.basename(sf) == '0':
				slice_img = nib.load(slice_dir_path + 'wmh_' + str(i) + '.nii.gz').get_data() * 256
				#cv2.imwrite('temp/' + str(i) + '.jpg', slice_img) 
Example #4
Source File: cache.py    From vergeml with MIT License 6 votes
def _serialize_data(self, data):

        # Default to raw bytes
        type_ = _BYTES

        if isinstance(data, np.ndarray):
            # When the data is a numpy array, use the more compact native
            # numpy format.
            buf = io.BytesIO()
            np.save(buf, data)
            data = buf.getvalue()
            type_ = _NUMPY

        elif not isinstance(data, (bytearray, bytes)):
            # Everything else except byte data is serialized in pickle format.
            data = pickle.dumps(data)
            type_ = _PICKLE

        if self.compress:
            # Optional compression
            data = lz4.frame.compress(data)

        return type_, data 
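The cache method above relies on np.save accepting any writable file-like object, not just a path. A minimal round-trip sketch of that in-memory pattern, independent of the vergeml cache class (variable names are illustrative):

import io

import numpy as np

arr = np.random.rand(4, 4)

# Serialize the array into an in-memory buffer instead of a file on disk.
buf = io.BytesIO()
np.save(buf, arr)
raw_bytes = buf.getvalue()   # raw .npy bytes, ready for a cache entry

# Deserialize by wrapping the bytes in a fresh buffer and calling np.load.
restored = np.load(io.BytesIO(raw_bytes))
assert np.array_equal(arr, restored)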
Example #5
Source File: predict_folds.py    From argus-freesound with MIT License 6 votes
def pred_test_fold(predictor, fold, test_data):
    fold_prediction_dir = PREDICTION_DIR / f'fold_{fold}' / 'test'
    fold_prediction_dir.mkdir(parents=True, exist_ok=True)

    fname_lst, images_lst = test_data
    pred_lst = []
    for fname, image in zip(fname_lst, images_lst):
        pred = predictor.predict(image)

        pred_path = fold_prediction_dir / f'{fname}.npy'
        np.save(pred_path, pred)

        pred = pred.mean(axis=0)
        pred_lst.append(pred)

    preds = np.stack(pred_lst, axis=0)
    subm_df = pd.DataFrame(data=preds,
                           index=fname_lst,
                           columns=config.classes)
    subm_df.index.name = 'fname'
    subm_df.to_csv(fold_prediction_dir / 'probs.csv') 
Example #6
Source File: calculate_weights.py    From overhaul-distillation with MIT License 6 votes
def calculate_weigths_labels(dataset, dataloader, num_classes):
    # Create an instance from the data loader
    z = np.zeros((num_classes,))
    # Initialize tqdm
    tqdm_batch = tqdm(dataloader)
    print('Calculating classes weights')
    for sample in tqdm_batch:
        y = sample['label']
        y = y.detach().cpu().numpy()
        mask = (y >= 0) & (y < num_classes)
        labels = y[mask].astype(np.uint8)
        count_l = np.bincount(labels, minlength=num_classes)
        z += count_l
    tqdm_batch.close()
    total_frequency = np.sum(z)
    class_weights = []
    for frequency in z:
        class_weight = 1 / (np.log(1.02 + (frequency / total_frequency)))
        class_weights.append(class_weight)
    ret = np.array(class_weights)
    classes_weights_path = os.path.join(Path.db_root_dir(dataset), dataset+'_classes_weights.npy')
    np.save(classes_weights_path, ret)

    return ret 
Example #7
Source File: input.py    From DOTA_models with Apache License 2.0 6 votes
def extract_mnist_data(filename, num_images, image_size, pixel_depth):
  """
  Extract the images into a 4D tensor [image index, y, x, channels].

  Values are rescaled from [0, 255] down to [-0.5, 0.5].
  """
  # if not os.path.exists(file):
  if not tf.gfile.Exists(filename+".npy"):
    with gzip.open(filename) as bytestream:
      bytestream.read(16)
      buf = bytestream.read(image_size * image_size * num_images)
      data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
      data = (data - (pixel_depth / 2.0)) / pixel_depth
      data = data.reshape(num_images, image_size, image_size, 1)
      np.save(filename, data)
      return data
  else:
    with tf.gfile.Open(filename+".npy", mode='r') as file_obj:
      return np.load(file_obj) 
Example #8
Source File: wmme.py    From pyscf with Apache License 2.0 6 votes
def MakeBaseIntegrals(self, Smh=True, MakeS=False):
      """Invoke bfint to calculate CoreEnergy (scalar), CoreH (nOrb x nOrb),
      Int2e_Frs (nFit x nOrb x nOrb), and overlap matrix (nOrb x nOrb)"""

      # assemble arguments to integral generation program
      Args = []
      if Smh:
         Args.append("--orb-trafo=Smh")
         # ^- calculate integrals in symmetrically orthogonalized AO basis
      Outputs = []
      Outputs.append(("--save-coreh", "INT1E"))
      Outputs.append(("--save-fint2e", "INT2E"))
      Outputs.append(("--save-overlap", "OVERLAP"))

      CoreH, Int2e, Overlap = self._InvokeBfint(Args, Outputs)

      nOrb = CoreH.shape[0]
      Int2e = Int2e.reshape((Int2e.shape[0], nOrb, nOrb))
      CoreEnergy = self.Atoms.fCoreRepulsion()

      if MakeS:
         return CoreEnergy, CoreH, Int2e, Overlap
      else:
         return CoreEnergy, CoreH, Int2e 
Example #9
Source File: wmme.py    From pyscf with Apache License 2.0 6 votes
def MakeOverlap(self, OrbBasis2=None):
      """calculate overlap within main orbital basis, and, optionally, between main
      orbital basis and a second basis, as described in OrbBasis2.
      Returns <1|1>, <1|2>, and <2|2> matrices."""
      Args = []
      Outputs = []
      Outputs.append(("--save-overlap", "OVERLAP_1"))
      if OrbBasis2 is not None:
         MoreBases = {'--basis-orb-2': OrbBasis2}
         Outputs.append(("--save-overlap-12", "OVERLAP_12"))
         Outputs.append(("--save-overlap-2", "OVERLAP_2"))
         return self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
      else:
         MoreBases = None
         Overlap, = self._InvokeBfint(Args, Outputs, MoreBases=MoreBases)
         return Overlap 
Example #10
Source File: data.py    From UROP-Adversarial-Feature-Matching-for-Text-Generation with GNU Affero General Public License v3.0 6 votes
def next_batch(self, whichSet='train'):
		if whichSet == 'train':
			self.trainBatchCnt += 1
			assert self.trainBatchCnt < self.trainMaxBatch
			return self.train[self.trainBatchCnt * self.batch_size: (self.trainBatchCnt + 1) * self.batch_size]
		elif whichSet == 'validation':
			self.validationBatchCnt += 1
			assert self.validationBatchCnt < self.validationMaxBatch
			return self.validation[self.validationBatchCnt * self.batch_size: (self.validationBatchCnt + 1) * self.batch_size]
		elif whichSet == 'test':
			self.testBatchCnt += 1
			assert self.testBatchCnt < self.testMaxBatch
			return self.test[self.testBatchCnt * self.batch_size: (self.testBatchCnt + 1) * self.batch_size]
		else:
			msg = 'Wrong set name!\n'+ \
				  'Should be train / validation / test.'
			raise Exception(msg)
	# Following code copied here:
	# https://stackoverflow.com/questions/17219481/save-to-file-and-load-an-instance-of-a-python-class-with-its-attributes 
Example #11
Source File: PP.py    From pytorch-mri-segmentation-3D with MIT License 6 votes
def extractMeanDataStats(size = [200, 200, 100], 
						postfix = '_200x200x100orig', 
						main_folder_path = '../../Data/MS2017b/', 
						):
	scan_folders = glob.glob(main_folder_path + 'scans/*')
	img_path = 'pre/FLAIR' + postfix + '.nii.gz'
	segm_path = 'wmh' + postfix + '.nii.gz'
	
	shape_ = [len(scan_folders), size[0], size[1], size[2]]
	arr = np.zeros(shape_)

	for i, sf in enumerate(scan_folders):
		arr[i, :,:,:] =  numpyFromScan(os.path.join(sf,img_path)).squeeze()

	arr /= len(scan_folders)

	means = np.mean(arr)
	stds = np.std(arr, axis = 0)

	np.save(main_folder_path + 'extra_data/std' + postfix, stds)
	np.save(main_folder_path + 'extra_data/mean' + postfix, means) 
Example #12
Source File: wmme.py    From pyscf with Apache License 2.0 6 votes
def MakeRaw2eIntegrals(self, Smh=True, Kernel2e="coulomb"):
      """compute Int2e_Frs (nFit x nOrb x nOrb) and fitting metric Int2e_FG (nFit x nFit),
      where the fitting metric is *not* absorbed into the 2e integrals."""

      # assemble arguments to integral generation program
      Args = []
      if Smh:
         Args.append("--orb-trafo=Smh")
         # ^- calculate integrals in symmetrically orthogonalized AO basis
      Args.append("--kernel2e='%s'" % Kernel2e)
      Args.append("--solve-fitting-eq=false")
      Outputs = []
      Outputs.append(("--save-fint2e", "INT2E_3IX"))
      Outputs.append(("--save-fitting-metric", "INT2E_METRIC"))

      Int2e_Frs, Int2e_FG = self._InvokeBfint(Args, Outputs)

      nOrb = int(Int2e_Frs.shape[1]**.5 + .5)
      assert(nOrb**2 == Int2e_Frs.shape[1])
      Int2e_Frs = Int2e_Frs.reshape((Int2e_Frs.shape[0], nOrb, nOrb))
      assert(Int2e_Frs.shape[0] == Int2e_FG.shape[0])
      assert(Int2e_FG.shape[0] == Int2e_FG.shape[1])
      return Int2e_FG, Int2e_Frs 
Example #13
Source File: unsup_model.py    From SEDST with MIT License 6 votes
def load_glove_embedding(self, freeze=False):
        initial_arr = self.m.u_encoder.embedding.weight.data.cpu().numpy()
        mat = get_glove_matrix(self.reader.vocab, initial_arr)
        # np.save('./data/embedding.npy',mat)
        # mat = np.load('./data/embedding.npy')
        embedding_arr = torch.from_numpy(mat)

        self.m.u_encoder.embedding.weight.data.copy_(embedding_arr)
        self.m.p_encoder.embedding.weight.data.copy_(embedding_arr)
        self.m.m_decoder.emb.weight.data.copy_(embedding_arr)
        self.m.p_decoder.emb.weight.data.copy_(embedding_arr)
        self.m.qz_decoder.mu.weight.data.copy_(embedding_arr.transpose(1, 0))
        self.m.pz_decoder.mu.weight.data.copy_(embedding_arr.transpose(1, 0))
        if freeze:
            self.freeze_module(self.m.u_encoder.embedding)
            self.freeze_module(self.m.m_e.embedding)
            self.freeze_module(self.m.m_decoder.emb) 
Example #14
Source File: data_loader.py    From PathCon with MIT License 6 votes
def read_relations(file_name):
    bow = []
    count_vec = CountVectorizer()

    d = {}
    file = open(file_name)
    for line in file:
        index, name = line.strip().split('\t')
        d[name] = int(index)

        if args.feature_type == 'bow' and not os.path.exists('../data/' + args.dataset + '/bow.npy'):
            tokens = re.findall('[a-z]{2,}', name)
            bow.append(' '.join(tokens))
    file.close()

    if args.feature_type == 'bow' and not os.path.exists('../data/' + args.dataset + '/bow.npy'):
        bow = count_vec.fit_transform(bow)
        np.save('../data/' + args.dataset + '/bow.npy', bow.toarray())

    return d 
Example #15
Source File: json_serializers.py    From dustmaps with GNU General Public License v2.0 6 votes
def deserialize_ndarray_npy(d):
    """
    Deserializes a JSONified :obj:`numpy.ndarray` that was created using numpy's
    :obj:`save` function.

    Args:
        d (:obj:`dict`): A dictionary representation of an :obj:`ndarray` object, created
            using :obj:`numpy.save`.

    Returns:
        An :obj:`ndarray` object.
    """
    with io.BytesIO() as f:
        f.write(json.loads(d['npy']).encode('latin-1'))
        f.seek(0)
        return np.load(f) 
Example #16
Source File: json_serializers.py    From dustmaps with GNU General Public License v2.0 6 votes
def serialize_ndarray_npy(o):
    """
    Serializes a :obj:`numpy.ndarray` using numpy's built-in :obj:`save` function.
    This produces totally unreadable (and very un-JSON-like) results (in "npy"
    format), but it's basically guaranteed to work in 100% of cases.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    with io.BytesIO() as f:
        np.save(f, o)
        f.seek(0)
        serialized = json.dumps(f.read().decode('latin-1'))
    return dict(
        _type='np.ndarray',
        npy=serialized) 
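Assuming both helpers from Examples #15 and #16 are in scope, the intended round trip through JSON would look roughly like this (a sketch, not part of the dustmaps API shown here):

import json

import numpy as np

arr = np.linspace(0.0, 1.0, 5)

# Serialize the array to a JSON-compatible dict, then to a JSON string.
payload = json.dumps(serialize_ndarray_npy(arr))

# Parse the JSON and rebuild the array from the embedded .npy bytes.
restored = deserialize_ndarray_npy(json.loads(payload))
assert np.allclose(arr, restored)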
Example #17
Source File: emotionclassification.py    From ConvolutionalEmotion with MIT License 6 votes
def getPeakFeatures():
    net = DecafNet()

    features = numpy.zeros((number_sequences,feature_length))
    labels = numpy.zeros((number_sequences,1))
    counter = 0
    # Maybe sort them
    for participant in os.listdir(os.path.join(data_dir,image_dir)):
        for sequence in os.listdir(os.path.join(data_dir,image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(os.listdir(os.path.join(data_dir,image_dir, participant,sequence)))
                image_file = image_files[-1]
                print(counter, image_file)
                imarray = cv2.imread(os.path.join(data_dir,image_dir, participant,sequence,image_file))
                imarray = cv2.cvtColor(imarray,cv2.COLOR_BGR2GRAY)
                scores = net.classify(imarray, center_only=True)
                features[counter] = net.feature(feature_level)#.flatten()
                label_file = open(os.path.join(data_dir,label_dir, participant,sequence,image_file[:-4]+"_emotion.txt"))
                labels[counter] = eval(label_file.read())
                label_file.close()
                counter += 1

    numpy.save("featuresPeak5",features)
    numpy.save("labelsPeak5",labels) 
Example #18
Source File: data_loader.py    From Keras-GAN with MIT License 6 votes
def setup_mnist(self, img_res):

        print ("Setting up MNIST...")

        if not os.path.exists('datasets/mnist_x.npy'):
            # Load the dataset
            (mnist_X, mnist_y), (_, _) = mnist.load_data()

            # Normalize and rescale images
            mnist_X = self.normalize(mnist_X)
            mnist_X = np.array([imresize(x, img_res) for x in mnist_X])
            mnist_X = np.expand_dims(mnist_X, axis=-1)
            mnist_X = np.repeat(mnist_X, 3, axis=-1)

            self.mnist_X, self.mnist_y = mnist_X, mnist_y

            # Save formatted images
            np.save('datasets/mnist_x.npy', self.mnist_X)
            np.save('datasets/mnist_y.npy', self.mnist_y)
        else:
            self.mnist_X = np.load('datasets/mnist_x.npy')
            self.mnist_y = np.load('datasets/mnist_y.npy')

        print ("+ Done.") 
Example #19
Source File: PP.py    From pytorch-mri-segmentation-3D with MIT License 6 votes
def generateTrainValFile_Slices(train_fraction, main_folder = '../Data/MS2017a/'):
	train_folders, val_folders = splitTrainVal_Slices(0.8)

	train_folder_names = [train_folders[i].split(main_folder)[1] for i in range(len(train_folders))]
	val_folder_names = [val_folders[i].split(main_folder)[1] for i in range(len(val_folders))]

	f_train = open(main_folder + 'train_slices.txt', 'w+')
	f_val = open(main_folder + 'val_slices.txt', 'w+')

	for fn in train_folder_names:
		f_train.write(fn + '\n')

	for fn in val_folder_names:
		f_val.write(fn + '\n')

	f_train.close()
	f_val.close()

#Use this to save the images quickly (for testing purposes) 
Example #20
Source File: ggtnn_train.py    From gated-graph-transformer-network with MIT License 6 votes
def visualize(m, story_buckets, wordlist, answerlist, output_format, outputdir, batch_size=1, seq_len=5, debugmode=False, snap=False):
    cur_bucket = random.choice(story_buckets)
    sampled_batch = sample_batch(cur_bucket, batch_size, len(answerlist), output_format)
    part_sampled_batch = sampled_batch[:3]
    with open(os.path.join(outputdir,'stories.txt'),'w') as f:
        ggtnn_graph_parse.print_batch(part_sampled_batch, wordlist, answerlist, file=f)
    with open(os.path.join(outputdir,'answer_list.txt'),'w') as f:
        f.write('\n'.join(answerlist) + '\n')
    if debugmode:
        args = sampled_batch
        fn = m.debug_test_fn
    else:
        args = part_sampled_batch[:2] + ((seq_len,) if output_format == model.ModelOutputFormat.sequence else ())
        fn = m.snap_test_fn if snap else m.fuzzy_test_fn
    results = fn(*args)
    for i,result in enumerate(results):
        np.save(os.path.join(outputdir,'result_{}.npy'.format(i)), result) 
Example #21
Source File: generate_sudoku.py    From sudoku with GNU General Public License v3.0 6 votes
def main(num):
    '''
    Generates `num` games of Sudoku.
    '''
    quizzes = np.zeros((num, 9, 9), np.int32)
    solutions = np.zeros((num, 9, 9), np.int32)
    for i in range(num):
        all_results, solution = run(n=23, iter=10)
        quiz = best(all_results)
        
        quizzes[i] = quiz
        solutions[i] = solution

        if (i+1) % 1000 == 0:
            print(i + 1)
            # np.savez stores multiple named arrays in a single .npz archive;
            # np.save itself accepts only one array and no keyword arrays.
            np.savez('data/sudoku.npz', quizzes=quizzes, solutions=solutions) 
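The archive written by np.savez above can be read back with np.load, which returns a dict-like NpzFile keyed by the keyword names (the path and keys follow the snippet above):

import numpy as np

data = np.load('data/sudoku.npz')
quizzes = data['quizzes']      # shape (num, 9, 9), dtype int32
solutions = data['solutions']  # shape (num, 9, 9), dtype int32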
Example #22
Source File: IJB_11.py    From insightface with MIT License 6 votes
def verification(template_norm_feats = None, unique_templates = None, p1 = None, p2 = None):
    # ==========================================================
    #         Compute set-to-set Similarity Score.
    # ==========================================================
    template2id = np.zeros((max(unique_templates)+1,1),dtype=int)
    for count_template, uqt in enumerate(unique_templates):
        template2id[uqt] = count_template
    
    score = np.zeros((len(p1),))   # save cosine distance between pairs 

    total_pairs = np.array(range(len(p1)))
    batchsize = 100000 # small batch size instead of all pairs in one batch due to the memory limitation
    sublists = [total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)]
    total_sublists = len(sublists)
    for c, s in enumerate(sublists):
        feat1 = template_norm_feats[template2id[p1[s]]]
        feat2 = template_norm_feats[template2id[p2[s]]]
        similarity_score = np.sum(feat1 * feat2, -1)
        score[s] = similarity_score.flatten()
        if c % 10 == 0:
            print('Finish {}/{} pairs.'.format(c, total_sublists))
    return score

# In[ ]: 
Example #23
Source File: pack_dataset.py    From medicaldetectiontoolkit with Apache License 2.0 5 votes
def convert_to_npy(npz_file):
    if not os.path.isfile(npz_file[:-3] + "npy"):
        a = np.load(npz_file)['data']
        np.save(npz_file[:-3] + "npy", a) 
Example #24
Source File: generate_toys.py    From medicaldetectiontoolkit with Apache License 2.0 5 votes
def multi_processing_create_image(inputs):


    out_dir, six, foreground_margin, class_diameters, mode = inputs
    print('processing {} {}'.format(out_dir, six))

    img = np.random.rand(320, 320)
    seg = np.zeros((320, 320)).astype('uint8')
    center_x = np.random.randint(foreground_margin, img.shape[0] - foreground_margin)
    center_y = np.random.randint(foreground_margin, img.shape[1] - foreground_margin)
    class_id = np.random.randint(0, 2)

    for y in range(img.shape[0]):
        for x in range(img.shape[0]):
            if ((x - center_x) ** 2 + (y - center_y) ** 2 - class_diameters[class_id] ** 2) < 0:
                img[y][x] += 0.2
                seg[y][x] = 1

    if 'donuts' in mode:
        whole_diameter = 4
        if class_id == 1:
            for y in range(img.shape[0]):
                for x in range(img.shape[0]):
                    if ((x - center_x) ** 2 + (y - center_y) ** 2 - whole_diameter ** 2) < 0:
                        img[y][x] -= 0.2
                        if mode == 'donuts_shape':
                            seg[y][x] = 0

    out = np.concatenate((img[None], seg[None]))
    out_path = os.path.join(out_dir, '{}.npy'.format(six))
    np.save(out_path, out)

    with open(os.path.join(out_dir, 'meta_info_{}.pickle'.format(six)), 'wb') as handle:
        pickle.dump([out_path, class_id, str(six)], handle) 
Example #25
Source File: data.py    From UROP-Adversarial-Feature-Matching-for-Text-Generation with GNU Affero General Public License v3.0 5 votes
def save(self, fileName):
		assert fileName is not None

		with open(self.paramSavePath + fileName, 'wb') as f:
			pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) 
Example #26
Source File: util.py    From EDeN with MIT License 5 votes
def store_matrix(matrix='',
                 output_dir_path='',
                 out_file_name='',
                 output_format=''):
    """store_matrix."""
    if not os.path.exists(output_dir_path):
        os.mkdir(output_dir_path)
    full_out_file_name = os.path.join(output_dir_path, out_file_name)
    if output_format == "MatrixMarket":
        if len(matrix.shape) == 1:
            raise Exception(
                "'MatrixMarket' format supports only 2D dimensional array\
                and not vectors")
        else:
            io.mmwrite(full_out_file_name, matrix, precision=None)
    elif output_format == "numpy":
        np.save(full_out_file_name, matrix)
    elif output_format == "joblib":
        joblib.dump(matrix, full_out_file_name)
    elif output_format == "text":
        with open(full_out_file_name, "w") as f:
            if len(matrix.shape) == 1:
                for x in matrix:
                    f.write("%s\n" % (x))
            else:
                raise Exception(
                    "'text' format supports only mono dimensional array\
                    and not matrices")
    logger.info("Written file: %s" % full_out_file_name) 
Example #27
Source File: demo.py    From RingNet with MIT License 5 votes
def main(config, template_mesh):
    sess = tf.Session()
    model = RingNet_inference(config, sess=sess)
    input_img, proc_param, img = preprocess_image(config.img_path)
    vertices, flame_parameters = model.predict(np.expand_dims(input_img, axis=0), get_parameters=True)
    cams = flame_parameters[0][:3]
    visualize(img, proc_param, vertices[0], cams, img_name=config.out_folder + '/images/' + config.img_path.split('/')[-1][:-4])

    if config.save_obj_file:
        if not os.path.exists(config.out_folder + '/mesh'):
            os.mkdir(config.out_folder + '/mesh')
        mesh = Mesh(v=vertices[0], f=template_mesh.f)
        mesh.write_obj(config.out_folder + '/mesh/' + config.img_path.split('/')[-1][:-4] + '.obj')

    if config.save_flame_parameters:
        if not os.path.exists(config.out_folder + '/params'):
            os.mkdir(config.out_folder + '/params')
        flame_parameters_ = {'cam':  flame_parameters[0][:3], 'pose': flame_parameters[0][3:3+config.pose_params], 'shape': flame_parameters[0][3+config.pose_params:3+config.pose_params+config.shape_params],
         'expression': flame_parameters[0][3+config.pose_params+config.shape_params:]}
        np.save(config.out_folder + '/params/' + config.img_path.split('/')[-1][:-4] + '.npy', flame_parameters_)

    if config.neutralize_expression:
        from util.using_flame_parameters import make_prdicted_mesh_neutral
        if not os.path.exists(config.out_folder + '/neutral_mesh'):
            os.mkdir(config.out_folder + '/neutral_mesh')
        neutral_mesh = make_prdicted_mesh_neutral(config.out_folder + '/params/' + config.img_path.split('/')[-1][:-4] + '.npy', config.flame_model_path)
        neutral_mesh.write_obj(config.out_folder + '/neutral_mesh/' + config.img_path.split('/')[-1][:-4] + '.obj') 
Example #28
Source File: nodclsgbt.py    From DeepLung with GNU General Public License v3.0 5 votes
def gbtfunc(dep):
	m = gbt(max_depth=dep, random_state=0)
	m.fit(traindata, trainlabel)
	predtrain = m.predict(traindata)
	predtest = m.predict_proba(testdata)
	# print predtest.shape, predtest[1,:]
	return np.sum(predtrain == trainlabel) / float(traindata.shape[0]), \
	    np.mean((predtest[:,1]>0.5).astype(int) == testlabel), predtest # / float(testdata.shape[0]),
# trainacc, testacc, predtest = gbtfunc(3)
# print trainacc, testacc
# np.save('pixradiustest.npy', predtest[:,1]) 
Example #29
Source File: dataloader_utils.py    From medicaldetectiontoolkit with Apache License 2.0 5 votes
def convert_to_npy(npz_file):
    identifier = os.path.split(npz_file)[1][:-4]
    if not os.path.isfile(npz_file[:-4] + ".npy"):
        a = np.load(npz_file)[identifier]
        np.save(npz_file[:-4] + ".npy", a) 
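A plausible reason for unpacking .npz members into plain .npy files, here and in Example #23, is that a standalone .npy file can be memory-mapped at load time, whereas members of an .npz archive cannot; a minimal sketch of that load path (the file name is illustrative):

import numpy as np

# Memory-map the converted file instead of reading it fully into RAM.
arr = np.load('case_0.npy', mmap_mode='r')
patch = arr[..., :64]   # only the accessed portion is paged in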
Example #30
Source File: emotionclassification.py    From ConvolutionalEmotion with MIT License 5 votes
def getPeakFaceFeatures():
    net = DecafNet()
    cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')

    features = numpy.zeros((number_sequences,feature_length))
    labels = numpy.zeros((number_sequences,1))
    counter = 0
    # Maybe sort them
    for participant in os.listdir(os.path.join(data_dir,image_dir)):
        for sequence in os.listdir(os.path.join(data_dir,image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(os.listdir(os.path.join(data_dir,image_dir, participant,sequence)))
                image_file = image_files[-1]
                print(counter, image_file)
                imarray = cv2.imread(os.path.join(data_dir,image_dir, participant,sequence,image_file))
                imarray = cv2.cvtColor(imarray,cv2.COLOR_BGR2GRAY)
                rects = cascade.detectMultiScale(imarray, 1.3, 3, cv2.cv.CV_HAAR_SCALE_IMAGE, (150,150))
                if len(rects) > 0:
                    facerect=rects[0]
                    imarray = imarray[facerect[1]:facerect[1]+facerect[3], facerect[0]:facerect[0]+facerect[2]]
                scores = net.classify(imarray, center_only=True)
                features[counter] = net.feature(feature_level).flatten()
                label_file = open(os.path.join(data_dir,label_dir, participant,sequence,image_file[:-4]+"_emotion.txt"))
                labels[counter] = eval(label_file.read())
                label_file.close()
                counter += 1

    numpy.save("featuresPeakFace5",features)
    numpy.save("labelsPeakFace5",labels)