Python scipy.io.savemat() Examples

The following are 30 code examples of scipy.io.savemat(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.io, or try the search function.
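For quick reference, here is a minimal, self-contained sketch of the basic call: savemat() takes a target (a filename or an open file-like object) and a dict mapping MATLAB variable names to values (the 'example.mat' filename below is just a placeholder).

import numpy as np
import scipy.io as sio

# Each dict key becomes a MATLAB variable name in the output file.
sio.savemat('example.mat', {'vect': np.arange(10), 'scalar': 3.14})

# Round trip: loadmat returns the saved variables (plus a few header keys).
data = sio.loadmat('example.mat')
print(data['vect'])  # note: arrays come back as at least 2-D, here shape (1, 10)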
Example #1
Source File: utils_sisr.py    From KAIR with MIT License    14 votes
def cal_pca_matrix(path='PCA_matrix.mat', ksize=15, l_max=12.0, dim_pca=15, num_samples=500):
    kernels = np.zeros([ksize*ksize, num_samples], dtype=np.float32)
    for i in range(num_samples):

        theta = np.pi*np.random.rand(1)
        l1    = 0.1+l_max*np.random.rand(1)
        l2    = 0.1+(l1-0.1)*np.random.rand(1)

        k = anisotropic_Gaussian(ksize=ksize, theta=theta[0], l1=l1[0], l2=l2[0])

        # util.imshow(k)

        kernels[:, i] = np.reshape(k, (-1), order="F")  # k.flatten(order='F')

    # io.savemat('k.mat', {'k': kernels})

    pca_matrix = get_pca_matrix(kernels, dim_pca=dim_pca)

    io.savemat(path, {'p': pca_matrix})

    return pca_matrix 
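Since each kernel is flattened with order='F' (column-major, matching MATLAB's memory layout), a column of the saved matrix can be restored to a 2-D kernel the same way; a minimal sketch, assuming kernels and ksize from above:

k_back = kernels[:, 0].reshape(ksize, ksize, order='F')  # undo the column-major flatten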
Example #2
Source File: make_data.py    From DCC with MIT License    8 votes
def make_mnist_data(path, isconv=False):
    X, Y = load_mnist(path, True)
    X = X.astype(np.float64)
    X2, Y2 = load_mnist(path, False)
    X2 = X2.astype(np.float64)
    X3 = np.concatenate((X, X2), axis=0)

    minmaxscale = MinMaxScaler().fit(X3)

    X = minmaxscale.transform(X)
    if isconv:
        X = X.reshape((-1, 1, 28, 28))

    sio.savemat(osp.join(path, 'traindata.mat'), {'X': X, 'Y': Y})

    X2 = minmaxscale.transform(X2)
    if isconv:
        X2 = X2.reshape((-1, 1, 28, 28))

    sio.savemat(osp.join(path, 'testdata.mat'), {'X': X2, 'Y': Y2}) 
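The saved files can be read back with scipy.io.loadmat. One detail worth knowing: loadmat returns arrays with at least two dimensions, so a saved 1-D label vector comes back with shape (1, n). A sketch of the read side, assuming the same path and osp alias:

import scipy.io as sio

d = sio.loadmat(osp.join(path, 'traindata.mat'))
X, Y = d['X'], d['Y'].ravel()  # ravel() undoes the 2-D promotion applied on load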
Example #3
Source File: feature_distribution(t-sne).py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License    6 votes
def get_feature():
    inputs = tf.placeholder("float", [None, 64, 64, 1])
    is_training = tf.placeholder("bool")
    _, feature = googlenet(inputs, is_training)
    feature = tf.squeeze(feature, [1, 2])
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    data = sio.loadmat("../data/dataset.mat")
    testdata = data["test"] / 127.5 - 1.0
    testlabels = data["testlabels"]
    saver.restore(sess, "../save_para/model.ckpt")
    nums_test = testdata.shape[0]
    FEATURE = np.zeros([nums_test, 1024])
    for i in range(nums_test // BATCH_SIZE):
        start, end = i * BATCH_SIZE, (i + 1) * BATCH_SIZE
        FEATURE[start:end] = sess.run(feature, feed_dict={inputs: testdata[start:end], is_training: False})
    # process the remaining samples that do not fill a whole batch
    tail = (nums_test // BATCH_SIZE) * BATCH_SIZE
    FEATURE[tail:] = sess.run(feature, feed_dict={inputs: testdata[tail:], is_training: False})
    sio.savemat("../data/feature.mat", {"feature": FEATURE, "testlabels": testlabels}) 
Example #4
Source File: bench_structarr.py    From Computable with MIT License    6 votes
def bench_run():
    str_io = BytesIO()
    print()
    print('Reading / writing matlab structs')
    print('='*60)
    print(' write |  read |   vars | fields | structs | compressed')
    print('-'*60)
    print()
    for n_vars, n_fields, n_structs in (
        (10, 10, 20), (20, 20, 40), (30, 30, 50)):
        var_dict = make_structarr(n_vars, n_fields, n_structs)
        for compression in (False, True):
            str_io = BytesIO()
            write_time = measure('sio.savemat(str_io, var_dict, do_compression=%r)' % compression)
            read_time = measure('sio.loadmat(str_io)')
            print('%.5f | %.5f | %5d | %5d | %5d | %r' % (
                write_time,
                read_time,
                n_vars,
                n_fields,
                n_structs,
                compression)) 
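As the benchmark shows, savemat() and loadmat() also accept open file-like objects, which allows in-memory round trips. A minimal sketch (rewinding the buffer between write and read):

from io import BytesIO

import numpy as np
import scipy.io as sio

buf = BytesIO()
sio.savemat(buf, {'a': np.eye(3)}, do_compression=True)
buf.seek(0)  # rewind so loadmat reads from the start of the buffer
out = sio.loadmat(buf)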
Example #5
Source File: createAccount.py    From Iris-Recognition with MIT License    6 votes
def createAccount(template, mask, name, exinfo):
	'''
	Description:
		Create an account in database based on extracted feature, and some
		extra information from the enroller.

	Input:
		template 	- Extracted template from the iris image
		mask		- Extracted mask from the iris image
		name		- Name of the enroller
		exinfo		- Extra information of the enroller
	'''
	# Get file name for the account
	files = []
	for file in os.listdir(temp_database_path):
	    if file.endswith(".mat"):
	        files.append(file)
	filename = str(len(files) + 1)

	# Save the file
	sio.savemat(temp_database_path + filename + '.mat',	\
		mdict={'template':template, 'mask':mask,\
		'name':name, 'exinfo':exinfo}) 
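A small variant of the save call using os.path.join, which does not depend on temp_database_path ending with a separator (a sketch, assuming the same variables as above):

import os
import scipy.io as sio

sio.savemat(os.path.join(temp_database_path, filename + '.mat'),
            mdict={'template': template, 'mask': mask,
                   'name': name, 'exinfo': exinfo})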
Example #6
Source File: load_and_predict.py    From DeepHash with MIT License    6 votes
def precision_recall(params):
    database_code = np.array(params['database_code'])
    validation_code = np.array(params['validation_code'])
    database_labels = np.array(params['database_labels'])
    validation_labels = np.array(params['validation_labels'])
    database_code = np.sign(database_code)
    validation_code = np.sign(validation_code)
    # astype() returns a new array, so assign the result back (np.int is deprecated; use int)
    database_labels = database_labels.astype(int)
    validation_labels = validation_labels.astype(int)

    sim = np.dot(database_code, validation_code.T)
    ids = np.argsort(-sim, axis=0)
    ones = np.ones((ids.shape[0], ids.shape[1]), dtype=int)
    print(np.min(ids))
    ids = ids + ones  # shift argsort indices to MATLAB's 1-based indexing
    print(np.min(ids))
    mat_ids = dict(
        ids=ids,
        LBase=database_labels,
        LTest=validation_labels
    )
    scio.savemat('./data/data.mat', mat_ids) 
Example #7
Source File: stitch.py    From SegMitos_mitosis_detection with MIT License    6 votes
def stitchPatch(root_folder, dir1, imgname, featfolder, savefolder):
    # stitch the features of the patches into the feature map of the full image
    name = os.path.join(dir1, imgname)
    print('name:%s\n' % name)
    Im = os.path.join(featfolder, name[0:-4])
    I = [None]*16
    for i in range(9):
        dict1 = sio.loadmat(Im + '_0' + str(i+1) + '.mat')
        I[i] = dict1['feat']
    for i in range(9, 16):
        dict2 = sio.loadmat(Im + '_' + str(i+1) + '.mat')
        I[i] = dict2['feat']
    A = np.zeros((4*500, 4*500))
    for row in range(4):
        for col in range(4):
            A[row*500:(row+1)*500, col*500:(col+1)*500] = I[row*4+col]
    sio.savemat(savefolder + name[0:-4], {'A': np.mat(A)})
Example #8
Source File: meshView.py    From laplacian-meshes with GNU General Public License v3.0    6 votes
def doLaplacianSolveWithConstraints(self, evt):
        anchorWeights = 1e8
        anchors = np.zeros((len(self.laplacianConstraints), 3))
        anchorsIdx = []
        for i, anchor in enumerate(self.laplacianConstraints):
            anchorsIdx.append(anchor)
            anchors[i, :] = self.laplacianConstraints[anchor]
        
        #IGL Cotangent weights
        (L, M_inv, solver, deltaCoords) = makeLaplacianMatrixSolverIGLSoft(self.mesh.VPos, self.mesh.ITris, anchorsIdx, anchorWeights)
        self.mesh.VPos = solveLaplacianMatrixIGLSoft(solver, L, M_inv, deltaCoords, anchorsIdx, anchors, anchorWeights)
        
#        #My umbrella weights
#        L = makeLaplacianMatrixUmbrellaWeights(self.mesh.VPos, self.mesh.ITris, anchorsIdx, anchorWeights)
#        deltaCoords = L.dot(self.mesh.VPos)[0:self.mesh.VPos.shape[0], :]
#        self.mesh.VPos = np.array(solveLaplacianMatrix(L, deltaCoords, anchors, anchorWeights), dtype=np.float32)
        
        sio.savemat("anchors.mat", {'deltaCoords':deltaCoords, 'anchors':anchors, 'anchorsIdx':np.array(anchorsIdx)})
        self.mesh.needsDisplayUpdate = True
        self.mesh.updateIndexDisplayList()
        self.Refresh() 
Example #9
Source File: cnn_use.py    From bonnet with GNU General Public License v3.0    6 votes
def predict_probs(img, net, FLAGS, DATA):
  # open image
  cvim = cv2.imread(img, cv2.IMREAD_UNCHANGED)
  if cvim is None:
    print("No image to open for ", img)
    return
  # predict mask from image
  start = time.time()
  probs = net.predict(cvim, path=FLAGS.path + '/' +
                      FLAGS.model, verbose=FLAGS.verbose, as_probs=True)
  print("Prediction for img ", img, ". Elapsed: ", time.time() - start, "s")

  # save to matlab matrix
  matname = FLAGS.log + "/" + \
      os.path.splitext(os.path.basename(img))[0] + ".mat"
  sio.savemat(matname, {'p': probs})

  return 
Example #10
Source File: multiclass_ResNet.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    weight_name = model_name[0]
    # model_json = model_name[1] + '.json'
    # model_all = model_name[0] + '.hdf5'
    _, sPath = os.path.splitdrive(sOutPath)
    sPath, sFilename = os.path.split(sOutPath)
    # sFilename, sExt = os.path.splitext(sFilename)

    # f = h5py.File(weight_name, 'r+')
    # del f['optimizer_weights']
    # f.close()
    model = load_model(weight_name)
    # NOTE: opti and the callbacks list below are built but never used in this prediction path
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [
        ModelCheckpoint(sOutPath + os.sep + 'checkpoints' + os.sep + 'checker.hdf5', monitor='val_acc', verbose=0,
                        period=1, save_best_only=True), EarlyStopping(monitor='val_loss', patience=10, verbose=1)]

    # model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    # model.load_weights(weight_name)
    model.summary()

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    prob_pre = model.predict(X_test, batchSize, 0)

    y_pred = np.argmax(prob_pre, axis=1)
    y_test = np.argmax(y_test, axis=1)
    confusion_mat = confusion_matrix(y_test, y_pred)
    # modelSave = model_name[:-5] + '_pred.mat'
    modelSave = sOutPath + '/' + sFilename + '_result.mat'
    sio.savemat(modelSave,
                {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test, 'confusion_mat': confusion_mat}) 
Example #11
Source File: multiclass_ResNet-50.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and accuracy over the samples X with their labels y
        Input:
            X: Samples to predict on. The shape of X should match the input shape of the model
            y: Labels for the samples. The number of labels should equal the number of samples in X
            sModelPath: (String) full path to a trained Keras model. It should be a *_json.txt file; a corresponding *_weights.h5 file has to exist in the same directory!
            sOutPath: (String) full path for the output. It is a *.mat file with the computed loss and accuracy stored.
                        The output file path is 'sOutPath' + the filename of sModelPath with '_json.txt' replaced by the suffix '_pred.mat'
            batchSize: Batch size, the number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example #12
Source File: multiclass_SE-ResNet-44_dense.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and accuracy over the samples X with their labels y
        Input:
            X: Samples to predict on. The shape of X should match the input shape of the model
            y: Labels for the samples. The number of labels should equal the number of samples in X
            sModelPath: (String) full path to a trained Keras model. It should be a *_json.txt file; a corresponding *_weights.h5 file has to exist in the same directory!
            sOutPath: (String) full path for the output. It is a *.mat file with the computed loss and accuracy stored.
                        The output file path is 'sOutPath' + the filename of sModelPath with '_json.txt' replaced by the suffix '_pred.mat'
            batchSize: Batch size, the number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example #13
Source File: multiclass_DenseNet-BC-100.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and accuracy over the samples X with their labels y
        Input:
            X: Samples to predict on. The shape of X should match the input shape of the model
            y: Labels for the samples. The number of labels should equal the number of samples in X
            sModelPath: (String) full path to a trained Keras model. It should be a *_json.txt file; a corresponding *_weights.h5 file has to exist in the same directory!
            sOutPath: (String) full path for the output. It is a *.mat file with the computed loss and accuracy stored.
                        The output file path is 'sOutPath' + the filename of sModelPath with '_json.txt' replaced by the suffix '_pred.mat'
            batchSize: Batch size, the number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})



###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example #14
Source File: multiclass_DenseNet-34.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and accuracy over the samples X with their labels y
        Input:
            X: Samples to predict on. The shape of X should match the input shape of the model
            y: Labels for the samples. The number of labels should equal the number of samples in X
            sModelPath: (String) full path to a trained Keras model. It should be a *_json.txt file; a corresponding *_weights.h5 file has to exist in the same directory!
            sOutPath: (String) full path for the output. It is a *.mat file with the computed loss and accuracy stored.
                        The output file path is 'sOutPath' + the filename of sModelPath with '_json.txt' replaced by the suffix '_pred.mat'
            batchSize: Batch size, the number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example #15
Source File: multiclass_SE-ResNet-32.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example #16
Source File: multiclass_SE-DenseNet-34.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
        Input:
            X: Samples to predict on. The shape of X should fit to the input shape of the model
            y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
            sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
            sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                        The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
            batchSize: Batchsize, number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example #17
Source File: VNetArt.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example #18
Source File: MNetArt.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input:
        X: Samples to predict on. The shape of X should fit to the input shape of the model
        y: Labels for the Samples. Number of Samples should be equal to the number of samples in X
        sModelPath: (String) full path to a trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same directory!
        sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy stored.
                    The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the suffix '_pred.mat'
        batchSize: Batchsize, number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example #19
Source File: multiclass_ResNet-56.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and accuracy over the samples X with their labels y
        Input:
            X: Samples to predict on. The shape of X should match the input shape of the model
            y: Labels for the samples. The number of labels should equal the number of samples in X
            sModelPath: (String) full path to a trained Keras model. It should be a *_json.txt file; a corresponding *_weights.h5 file has to exist in the same directory!
            sOutPath: (String) full path for the output. It is a *.mat file with the computed loss and accuracy stored.
                        The output file path is 'sOutPath' + the filename of sModelPath with '_json.txt' replaced by the suffix '_pred.mat'
            batchSize: Batch size, the number of samples that are processed at once"""
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example #20
Source File: CNN3D.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example #21
Source File: CNN3DmoreLayers.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example #22
Source File: MNetArt.py    From CNNArt with Apache License 2.0    5 votes
def fPredict(X, y, sModelPath, sOutPath, batchSize=64):
    """Takes an already trained model and computes the loss and Accuracy over the samples X with their Labels y
    Input: X: Samples to predict on. The shape of X should fit to the input shape of the model y: Labels for the
    Samples. Number of Samples should be equal to the number of samples in X sModelPath: (String) full path to a
    trained keras model. It should be *_json.txt file. there has to be a corresponding *_weights.h5 file in the same
    directory! sOutPath: (String) full path for the Output. It is a *.mat file with the computed loss and accuracy
    stored. The Output file has the Path 'sOutPath'+ the filename of sModelPath without the '_json.txt' added the
    suffix '_pred.mat' batchSize: Batchsize, number of samples that are processed at once """
    sModelPath = sModelPath.replace("_json.txt", "")
    weight_name = sModelPath + '_weights.h5'
    model_json = sModelPath + '_json.txt'
    model_all = sModelPath + '_model.h5'

    # load weights and model (new way)
    model_json = open(model_json, 'r')
    model_string = model_json.read()
    model_json.close()
    model = model_from_json(model_string)

    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
    model.load_weights(weight_name)

    score_test, acc_test = model.evaluate(X, y, batch_size=batchSize)
    print('loss: ' + str(score_test) + '   acc: ' + str(acc_test))
    prob_pre = model.predict(X, batch_size=batchSize, verbose=1)
    print(prob_pre[0:14, :])
    _, sModelFileSave = os.path.split(sModelPath)

    modelSave = sOutPath + sModelFileSave + '_pred.mat'
    print('saving Model:{}'.format(modelSave))
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example #23
Source File: copularnd.py    From copula-py with GNU General Public License v3.0    5 votes
def _frank(M, N, alpha):
    if(N<2):
        raise ValueError('Dimensionality Argument [N] must be an integer >= 2')
    elif(N==2):        
        u1 = uniform.rvs(size=M)
        p = uniform.rvs(size=M)
        if abs(alpha) > math.log(sys.float_info.max):
            u2 = (alpha < 0) + np.sign(alpha)*u1  # u1 if alpha > 0, else 1-u1
        elif abs(alpha) > math.sqrt(np.spacing(1)):
            u2 = -1*np.log((np.exp(-alpha*u1)*(1-p)/p + np.exp(-alpha))/(1 + np.exp(-alpha*u1)*(1-p)/p))/alpha
        else:
            u2 = p
        
        U = np.column_stack((u1,u2))
    else:
        # Algorithm 1 described in both the SAS Copula Procedure, as well as the
        # paper: "High Dimensional Archimedean Copula Generation Algorithm"
        if(alpha<=0):
            raise ValueError('For N>=3, alpha must be > 0 in the Frank copula')
            
        U = np.empty((M,N))
        #v_vec = np.empty(M)
        for ii in range(0,M):
            p = -1.0*np.expm1(-1*alpha)
            if(p==1):
                # boundary case protection
                p = 1 - np.spacing(1)
            v = logser.rvs(p, size=1)
            #v_vec[ii] = v
            # sample N independent uniform random variables
            x_i = uniform.rvs(size=N)
            t = -1*np.log(x_i)/v
            U[ii,:] = -1.0*np.log1p( np.exp(-t)*np.expm1(-1.0*alpha))/alpha
            
        #sio.savemat('logser_v.mat', {'v':v_vec})
            
    return U 
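A hypothetical call for reference, sampling 1000 pairs from a bivariate Frank copula with alpha = 5:

U = _frank(M=1000, N=2, alpha=5.0)   # shape (1000, 2), entries in (0, 1)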
Example #24
Source File: main.py    From Beyond-Binary-Supervision-CVPR19 with MIT License    5 votes
def evaluation(eval_loader, model, epoch):
    print('== Embedding validation data')
    # switch to test mode
    model.eval()
    embed_all  = torch.FloatTensor(num_data_val, args.embedding_size).fill_(0)
    pbar = tqdm(enumerate(eval_loader))
    for batch_idx, input_pairs in pbar:
        idx_val_stt = batch_idx *  args.test_batch_size
        idx_val_end = min((batch_idx + 1) * args.test_batch_size, num_data_val)    
        input_pairs  = torch.squeeze(input_pairs).to(device)
        with torch.no_grad():
            conv_features, embed = model.forward(input_pairs)
        embed_eval = embed.detach()[:(idx_val_end-idx_val_stt)]
        embed_all[idx_val_stt:idx_val_end] = embed_eval.double()
    
    mean_distance, nDCG = eval_retrieval(args.dataroot, embed_all, num_data_val) 
   
    writer.add_scalar('mean_distance', mean_distance[-1], epoch)
    writer.add_scalar('nDCG', nDCG[-1], epoch)

    print('mean_distance : {}, nDCG : {}'.format(mean_distance[-1],  nDCG[-1]))
    
    sio.savemat('{}/pose_emb_{}.mat'.format(LOG_DIR, epoch), {'x': embed_all.numpy()})
    if not os.path.exists('{}/retrieval'.format(LOG_DIR)):
        os.makedirs('{}/retrieval'.format(LOG_DIR))
    sio.savemat('{}/retrieval/retrieval_scores_embed_{}.mat'.format(LOG_DIR, epoch),
                {'mean_distance': mean_distance, 'nDCG': nDCG})
    
    # Embedding Projector save file
    if not os.path.exists('{}/embedding'.format(LOG_DIR)):
        os.makedirs('{}/embedding'.format(LOG_DIR))   
    np.savetxt('{}/embedding/embedding_val_{}.tsv'.format(LOG_DIR,epoch), X=embed_all, delimiter = "\t") 
Example #25
Source File: io_.py    From PAN-PSEnet with Apache License 2.0    5 votes
def dump_mat(path, dict_obj, append=True):
    import scipy.io as sio
    path = get_absolute_path(path)
    make_parent_dir(path)
    sio.savemat(file_name=path, mdict=dict_obj, appendmat=append)
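Note that savemat's appendmat flag (exposed here as append) controls whether a '.mat' extension is appended to the filename when it is missing; it does not append variables to an existing file. For example:

dump_mat('results', {'x': [1, 2, 3]})                    # writes results.mat
dump_mat('results.mat', {'x': [1, 2, 3]}, append=False)  # path used exactly as given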
Example #26
Source File: util.py    From ScanComplete with Apache License 2.0    5 votes
def save_mat_df(df, error, filename):
  """Saves df as matlab .mat file."""
  output = {'x': df}
  if error is not None:
    output['errors'] = error
  sio.savemat(filename, output) 
Example #27
Source File: calc_mAP.py    From pytorch-ppn with MIT License    5 votes
def calc_mAP(gt_path = './dataset/mpi/val_gt/mpi_val_groundtruth.mat', pred_path = './exps/preds/mat_results/pred_keypoints_mpii_multi.mat'):

	thresh = 0.5

	gtDir, partNames, name, predFilename, colorName = getExpParams(0)

	data = loadmat(gt_path)
	annolist_test_multi = data['annolist_test_multi']

	data = loadmat(pred_path)
	pred = data['pred']
	
	assert len(annolist_test_multi) == len(pred), 'incompatible length: annolist_test_multi & pred'
	scoresAll, labelsAll, nGTall = assignGTmulti(pred, annolist_test_multi, thresh)

	ap = np.zeros(nGTall.shape[0]+1)
	for j in range(nGTall.shape[0]):
		scores = np.array([])
		labels = np.array([])

		for imgidx in range(len(annolist_test_multi)):
			scores = np.append(scores,scoresAll[j][imgidx])
			labels = np.append(labels,labelsAll[j][imgidx])

		precision, recall, sorted_scores, sortidx, sorted_labels = getRPC(scores,labels,np.sum(nGTall[j,:]))

		ap[j] = VOCap(recall,precision) * 100

	ap[-1] = np.mean(ap[0:-1])

	columnNames = partNames
	genTableAP(ap,name)

	sio.savemat(predFilename, {'ap':ap, 'columnNames':columnNames})

	return ap 
Example #28
Source File: eval_util.py    From pytorch-ppn with MIT License    5 votes
def save_mppe_results_to_mpi_format(mp_pose_list, save_path='./exps/preds/mat_results/pred_keypoints_mpii_multi.mat'):
    pred = fromarrays([mp_pose_list], names=['annorect'])
    sio.savemat(save_path, {'pred': pred}) 
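fromarrays here is numpy's record-array constructor (numpy.core.records.fromarrays); savemat writes a record array as a MATLAB struct array whose fields are the record names. A minimal sketch of the same pattern with hypothetical data:

import numpy as np
from numpy.core.records import fromarrays
import scipy.io as sio

names = np.array(['img1', 'img2'], dtype=object)
scores = np.array([0.9, 0.7])
rec = fromarrays([names, scores], names=['name', 'score'])
sio.savemat('pred.mat', {'pred': rec})  # 1x2 struct array with fields name, score in MATLAB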
Example #29
Source File: manager.py    From sem with GNU General Public License v2.0    5 votes
def save_to_mat_file(self, parameter_space,
                         result_parsing_function,
                         filename, runs):
        """
        Save the results relative to the desired parameter space to a
        .mat file.

        Args:
            parameter_space (dict): dictionary containing
                parameter/list-of-values pairs.
            result_parsing_function (function): user-defined function, taking a
                result dictionary as argument, that can be used to parse the
                result files and return a list of values.
            filename (path): name of output .mat file.
            runs (int): number of runs to gather for each parameter
                combination.
        """

        # Make sure all values are lists
        for key in parameter_space:
            if not isinstance(parameter_space[key], list):
                parameter_space[key] = [parameter_space[key]]

        # Add a dimension label for each non-singular dimension
        dimension_labels = [{key: str(parameter_space[key])} for key in
                            parameter_space.keys() if len(parameter_space[key])
                            > 1] + [{'runs': range(runs)}]

        # Create a list of the parameter names

        return savemat(
            filename,
            {'results':
             self.get_results_as_numpy_array(parameter_space,
                                             result_parsing_function,
                                             runs=runs),
             'dimension_labels': dimension_labels}) 
Example #30
Source File: getDETCentroid.py    From SegMitos_mitosis_detection with MIT License    5 votes
def compCentroid_detect1(fcn, savefolder):
	data_dict = sio.loadmat(fcn)
	f = matlab_style_gauss2D((10,10),0.25)
	A = cv2.filter2D(data_dict['A'], -1, f)

	level = threshold_otsu(A) #otsu threshold of image
	bw = A > level #binary image
	L,num = label(bw,8,return_num=True) #label  the segmented blobs
	#import pdb;pdb.set_trace()
	plot_x = np.zeros((num, 1)) # location of centroid
	plot_y = np.zeros((num, 1))

	sum_x = np.zeros((num, 1))
	sum_y = np.zeros((num, 1))
	area = np.zeros((num, 1))
	score = np.zeros((num, 1))

	height,width = bw.shape[0], bw.shape[1]
	for i in range(height):
		for j in range(width):
			if L[i,j] != 0:
				N = L[i,j]
				sum_x[N-1] = sum_x[N-1]+i*A[i,j]
				sum_y[N-1] = sum_y[N-1]+j*A[i,j]
				area[N-1] = area[N-1] + 1
				score[N-1] = score[N-1] + A[i,j]

	plot_x = np.around(sum_x*1.0/score)
	plot_y = np.around(sum_y*1.0/score)
	score = score*1.0/area
	centroid = np.zeros((num,2))
	for row in range(num):
		centroid[row,0] = plot_x[row,0]
		centroid[row,1] = plot_y[row,0]
	#centroid = np.mat(centroid)
	savefile = savefolder + fcn[-9:]
	sio.savemat(savefile,{'centroid':centroid, 'area':area, 'score':score})
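Two remarks on this example. First, recent scikit-image versions removed the positional neighbors argument used in label(bw, 8, return_num=True); connectivity=2 gives the same 8-connectivity on current releases. Second, the per-pixel accumulation loop can also be expressed with scipy.ndimage; a sketch computing the same intensity-weighted centroids, blob areas, and mean scores (assuming A, L, and num from above):

import numpy as np
from scipy import ndimage

idx = np.arange(1, num + 1)
centroid = np.around(ndimage.center_of_mass(A, labels=L, index=idx))  # weighted (row, col) per blob
area = ndimage.sum(np.ones_like(A), labels=L, index=idx)              # pixels per blob
score = ndimage.sum(A, labels=L, index=idx) / area                    # mean intensity per blob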