Python scipy.io.loadmat() Examples

The following are 30 code examples showing how to use scipy.io.loadmat(). They are extracted from open-source projects; the project, author, file, and license are listed above each example. You may also want to check out the other functions and classes available in the scipy.io module.
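
Before the project examples, here is a minimal, self-contained sketch of the basic loadmat() workflow. The file name demo.mat and the variable names A and label are made up for illustration; the sketch writes the file with savemat() first so it can be run as-is.

import numpy as np
from scipy.io import loadmat, savemat

# Write a small .mat file first so the example is self-contained
# (demo.mat and the variable names are illustrative only).
savemat('demo.mat', {'A': np.eye(3), 'label': 7})

data = loadmat('demo.mat')   # dict mapping variable names to arrays
print(sorted(data.keys()))   # ['A', '__globals__', '__header__', '__version__', 'label']
print(data['A'].shape)       # (3, 3)
print(data['label'])         # scalars come back as 2-D arrays: [[7]]

# squeeze_me=True drops the singleton dimensions MATLAB adds
data = loadmat('demo.mat', squeeze_me=True)
print(data['label'])         # 7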

Example 1
Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: pascal_voc.py    License: MIT License
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
            'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Example 2
Project: fast-MPN-COV   Author: jiangtaoxie   File: functions.py    License: MIT License
def __init__(self, path, start_epoch):
        if start_epoch != 0:
           stats_ = sio.loadmat(os.path.join(path,'stats.mat'))
           data = stats_['data']
           content = data[0,0]
           self.trainObj = content['trainObj'][:,:start_epoch].squeeze().tolist()
           self.trainTop1 = content['trainTop1'][:,:start_epoch].squeeze().tolist()
           self.trainTop5 = content['trainTop5'][:,:start_epoch].squeeze().tolist()
           self.valObj = content['valObj'][:,:start_epoch].squeeze().tolist()
           self.valTop1 = content['valTop1'][:,:start_epoch].squeeze().tolist()
           self.valTop5 = content['valTop5'][:,:start_epoch].squeeze().tolist()
           if start_epoch == 1:
               self.trainObj = [self.trainObj]
               self.trainTop1 = [self.trainTop1]
               self.trainTop5 = [self.trainTop5]
               self.valObj = [self.valObj]
               self.valTop1 = [self.valTop1]
               self.valTop5 = [self.valTop5]
        else:
           self.trainObj = []
           self.trainTop1 = []
           self.trainTop5 = []
           self.valObj = []
           self.valTop1 = []
           self.valTop5 = [] 
Example 3
Project: spectrum   Author: synergetics   File: cumest.py    License: MIT License
def test():
  y = sio.loadmat(here(__file__) + '/demo/ma1.mat')['y']

  # The right results are:
  #           "biased": [-0.12250513  0.35963613  1.00586945  0.35963613 -0.12250513]
  #           "unbiaed": [-0.12444965  0.36246791  1.00586945  0.36246791 -0.12444965]
  print cum2est(y, 2, 128, 0, 'unbiased')
  print cum2est(y, 2, 128, 0, 'biased')

  # For the 3rd cumulant:
  #           "biased": [-0.18203039  0.07751503  0.67113035  0.729953    0.07751503]
  #           "unbiased": [-0.18639911  0.07874543  0.67641484  0.74153955  0.07937539]
  print cum3est(y, 2, 128, 0, 'biased', 1)
  print cum3est(y, 2, 128, 0, 'unbiased', 1)

  # For testing the 4th-order cumulant
  # "biased": [-0.03642083  0.4755026   0.6352588   1.38975232  0.83791117  0.41641134 -0.97386322]
  # "unbiased": [-0.04011388  0.48736793  0.64948927  1.40734633  0.8445089   0.42303979 -0.99724968]
  print cum4est(y, 3, 128, 0, 'biased', 1, 1)
  print cum4est(y, 3, 128, 0, 'unbiased', 1, 1) 
Example 4
def get_predict_labels():
    inputs = tf.placeholder("float", [None, 64, 64, 1])
    is_training = tf.placeholder("bool")
    prediction, _ = googlenet(inputs, is_training)
    predict_labels = tf.argmax(prediction, 1)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    data = sio.loadmat("../data/dataset.mat")
    testdata = data["test"] / 127.5 - 1.0
    testlabel = data["testlabels"]
    saver.restore(sess, "../save_para/.\\model.ckpt")
    nums_test = testlabel.shape[1]
    PREDICT_LABELS = np.zeros([nums_test])
    for i in range(nums_test // BATCH_SIZE):
        PREDICT_LABELS[i * BATCH_SIZE:i * BATCH_SIZE + BATCH_SIZE] = sess.run(predict_labels, feed_dict={inputs: testdata[i * BATCH_SIZE:i * BATCH_SIZE + BATCH_SIZE], is_training: False})
    PREDICT_LABELS[(nums_test // BATCH_SIZE - 1) * BATCH_SIZE + BATCH_SIZE:] = sess.run(predict_labels, feed_dict={inputs: testdata[(nums_test // BATCH_SIZE - 1) * BATCH_SIZE + BATCH_SIZE:], is_training: False})
    np.savetxt("../data/predict_labels.txt", PREDICT_LABELS) 
Example 5
Project: H3DNet   Author: zaiweizhang   File: show_results_sunrgbd.py    License: MIT License
def export_one_scan(scan_name):
    pt = np.load(os.path.join(DATA_DIR, scan_name+'_pc.npz'))['pc']
    np.savetxt(mode+'tmp.xyz', pt)
    os.system("mv {}tmp.xyz {}tmp.xyzrgb".format(mode, mode))
    point_cloud = o3d.io.read_point_cloud(mode+'tmp.xyzrgb')

    pred_proposals = np.load(os.path.join(PRED_PATH, 'center'+scan_name+'_nms.npy'))
    gt_bbox = sio.loadmat(os.path.join(PRED_PATH, 'center'+scan_name+'_gt.mat'))['gt']
    bb =[]
    if mode=='gt':
        boundingboxes = gt_bbox
    elif mode =='pred':
        boundingboxes = pred_proposals
    else:
        print("model must be gt or pred")
        return
    for i in range(boundingboxes.shape[0]):
        c = np.array(color_mapping[int(boundingboxes[i,-1])])/255.0
        for _ in range(2):
            bb.append(create_lineset(boundingboxes[i]+0.005*(np.random.rand()-0.5)*2, colors=c))
    load_view_point([point_cloud] + bb, './viewpoint.json', window_name=scan_name+'_'+mode) 
Example 6
Project: deep-smoke-machine   Author: CMU-CREATE-Lab   File: utils.py    License: BSD 3-Clause "New" or "Revised" License
def mat_load(path, m_dict=None):
    """
    Load mat files.
    :param path: path to the .mat file.
    :param m_dict: optional dict that the loaded variables are inserted into
        (forwarded to loadmat as mdict).
    :return: dict mapping variable names to the loaded values.
    """
    if m_dict is None:
        data = sio.loadmat(path)
    else:
        data = sio.loadmat(path, m_dict)

    return data

# endregion

# region File/Folder Names/Paths 
Example 7
Project: pytorch_geometric   Author: rusty1s   File: suite_sparse.py    License: MIT License
def process(self):
        mat = loadmat(self.raw_paths[0])['Problem'][0][0][2].tocsr().tocoo()

        row = torch.from_numpy(mat.row).to(torch.long)
        col = torch.from_numpy(mat.col).to(torch.long)
        edge_index = torch.stack([row, col], dim=0)

        edge_attr = torch.from_numpy(mat.data).to(torch.float)
        if torch.all(edge_attr == 1.):
            edge_attr = None

        size = torch.Size(mat.shape)
        if mat.shape[0] == mat.shape[1]:
            size = None

        num_nodes = mat.shape[0]

        data = Data(edge_index=edge_index, edge_attr=edge_attr, size=size,
                    num_nodes=num_nodes)

        if self.pre_transform is not None:
            data = self.pre_transform(data)

        torch.save(self.collate([data]), self.processed_paths[0]) 
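
A note on the loadmat() call in Example 7: MATLAB sparse matrices are returned by loadmat() as scipy.sparse matrices (CSC format) rather than dense ndarrays, which is why the code above converts with .tocsr().tocoo() before reading .row, .col and .data. Below is a minimal sketch of the same behavior; sparse.mat is a hypothetical file written here only so the sketch runs.

import scipy.sparse as sp
from scipy.io import loadmat, savemat

# Hypothetical file, written only to make the sketch self-contained.
savemat('sparse.mat', {'S': sp.eye(4, format='csr')})

S = loadmat('sparse.mat')['S']   # sparse on disk stays sparse in memory (CSC)
coo = S.tocoo()                  # COO exposes .row, .col, .data as used above
print(type(S).__name__, coo.row, coo.col, coo.data)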
Example 8
Project: timeception   Author: noureldien   File: utils.py    License: GNU General Public License v3.0
def mat_load(path, m_dict=None):
    """
    Load mat files.
    :param path: path to the .mat file.
    :param m_dict: optional dict that the loaded variables are inserted into
        (forwarded to loadmat as mdict).
    :return: dict mapping variable names to the loaded values.
    """
    if m_dict is None:
        data = sio.loadmat(path)
    else:
        data = sio.loadmat(path, m_dict)

    return data

# endregion

# region File/Folder Names/Paths 
Example 9
Project: TFFRCNN   Author: CharlesShang   File: pascal3d.py    License: MIT License
def _load_selective_search_IJCV_roidb(self, gt_roidb):
        IJCV_path = os.path.abspath(os.path.join(self.cache_path, '..',
                                                 'selective_search_IJCV_data',
                                                 'voc_' + self._year))
        assert os.path.exists(IJCV_path), \
               'Selective search IJCV data not found at: {}'.format(IJCV_path)

        top_k = self.config['top_k']
        box_list = []
        for i in xrange(self.num_images):
            filename = os.path.join(IJCV_path, self.image_index[i] + '.mat')
            raw_data = sio.loadmat(filename)
            box_list.append((raw_data['boxes'][:top_k, :]-1).astype(np.uint16))

        return self.create_roidb_from_box_list(box_list, gt_roidb)

    # evaluate detection results 
Example 10
Project: TFFRCNN   Author: CharlesShang   File: pascal_voc.py    License: MIT License
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Example 11
Project: TFFRCNN   Author: CharlesShang   File: kittivoc.py    License: MIT License
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(self._data_path,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
               'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Example 12
Project: KAIR   Author: cszn   File: dataset_usrnet.py    License: MIT License
def __init__(self, opt):
        super(DataSetUSRNet, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
        self.sigma_max = self.opt['sigma_max'] if self.opt['sigma_max'] is not None else 25
        self.scales = opt['scales'] if opt['scales'] is not None else [1,2,3,4]
        self.sf_validation = opt['sf_validation'] if opt['sf_validation'] is not None else 3
        #self.kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']
        self.kernels = loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']  # for validation

        # -------------------
        # get the path of H
        # -------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])  # return None if input is None
        self.count = 0 
Example 13
Project: grass_pytorch   Author: kevin-kaixu   File: grassdata.py    License: Apache License 2.0
def __init__(self, dir, transform=None):
        self.dir = dir
        box_data = torch.from_numpy(loadmat(self.dir+'/box_data.mat')['boxes']).float()
        op_data = torch.from_numpy(loadmat(self.dir+'/op_data.mat')['ops']).int()
        sym_data = torch.from_numpy(loadmat(self.dir+'/sym_data.mat')['syms']).float()
        #weight_list = torch.from_numpy(loadmat(self.dir+'/weights.mat')['weights']).float()
        num_examples = op_data.size()[1]
        box_data = torch.chunk(box_data, num_examples, 1)
        op_data = torch.chunk(op_data, num_examples, 1)
        sym_data = torch.chunk(sym_data, num_examples, 1)
        #weight_list = torch.chunk(weight_list, num_examples, 1)
        self.transform = transform
        self.trees = []
        for i in range(len(op_data)) :
            boxes = torch.t(box_data[i])
            ops = torch.t(op_data[i])
            syms = torch.t(sym_data[i])
            tree = Tree(boxes, ops, syms)
            self.trees.append(tree) 
Example 14
Project: grass_pytorch   Author: kevin-kaixu   File: grassdata.py    License: Apache License 2.0
def __init__(self, dir, transform=None):
        self.dir = dir
        box_data = torch.from_numpy(loadmat(self.dir+u'/box_data.mat')[u'boxes']).float()
        op_data = torch.from_numpy(loadmat(self.dir+u'/op_data.mat')[u'ops']).int()
        sym_data = torch.from_numpy(loadmat(self.dir+u'/sym_data.mat')[u'syms']).float()
        #weight_list = torch.from_numpy(loadmat(self.dir+'/weights.mat')['weights']).float()
        num_examples = op_data.size()[1]
        box_data = torch.chunk(box_data, num_examples, 1)
        op_data = torch.chunk(op_data, num_examples, 1)
        sym_data = torch.chunk(sym_data, num_examples, 1)
        #weight_list = torch.chunk(weight_list, num_examples, 1)
        self.transform = transform
        self.trees = []
        for i in xrange(len(op_data)) :
            boxes = torch.t(box_data[i])
            ops = torch.t(op_data[i])
            syms = torch.t(sym_data[i])
            tree = Tree(boxes, ops, syms)
            self.trees.append(tree) 
Example 15
Project: LRP   Author: cancam   File: pascal_voc.py    License: MIT License
def _load_selective_search_roidb(self, gt_roidb):
        filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
                                                'selective_search_data',
                                                self.name + '.mat'))
        assert os.path.exists(filename), \
            'Selective search data not found at: {}'.format(filename)
        raw_data = sio.loadmat(filename)['boxes'].ravel()

        box_list = []
        for i in xrange(raw_data.shape[0]):
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
            keep = ds_utils.unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb) 
Example 16
Project: mxnet-E2FAR   Author: ShownX   File: E2FAR.py    License: Apache License 2.0
def __getitem__(self, idx):
        img_path = self.data_frame.iloc[idx, 0]
        img = cv2.imread(img_path, 1)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        x, y, w, h = self.data_frame.iloc[idx, 1:5]
        l, t, ww, hh = enlarge_bbox(x, y, w, h, self.enlarge_factor)
        r, b = l + ww, t + hh

        img = img[t: b, l:r, :]
        img = cv2.resize(img, (self.img_size, self.img_size))
        img = img.astype(np.float32) - 127.5

        img = nd.transpose(nd.array(img), (2, 0, 1))

        label_path = img_path.replace('.jpg', '.mat')

        label = sio.loadmat(label_path)

        params_shape = label['Shape_Para'].astype(np.float32).ravel()
        params_exp = label['Exp_Para'].astype(np.float32).ravel()

        return img, params_shape, params_exp 
Example 17
Project: face_classification   Author: oarriaga   File: datasets.py    License: MIT License
def _load_imdb(self):
        face_score_treshold = 3
        dataset = loadmat(self.dataset_path)
        image_names_array = dataset['imdb']['full_path'][0, 0][0]
        gender_classes = dataset['imdb']['gender'][0, 0][0]
        face_score = dataset['imdb']['face_score'][0, 0][0]
        second_face_score = dataset['imdb']['second_face_score'][0, 0][0]
        face_score_mask = face_score > face_score_treshold
        second_face_score_mask = np.isnan(second_face_score)
        unknown_gender_mask = np.logical_not(np.isnan(gender_classes))
        mask = np.logical_and(face_score_mask, second_face_score_mask)
        mask = np.logical_and(mask, unknown_gender_mask)
        image_names_array = image_names_array[mask]
        gender_classes = gender_classes[mask].tolist()
        image_names = []
        for image_name_arg in range(image_names_array.shape[0]):
            image_name = image_names_array[image_name_arg][0]
            image_names.append(image_name)
        return dict(zip(image_names, gender_classes)) 
Example 18
Project: pase   Author: santi-pdp   File: transforms.py    License: MIT License
def load_IR(self, ir_file, ir_fmt):
        ir_file = os.path.join(self.data_root, ir_file)
        # print('loading ir_file: ', ir_file)
        if hasattr(self, 'cache') and ir_file in self.cache:
            return self.cache[ir_file]
        else:
            if ir_fmt == 'mat':
                IR = loadmat(ir_file, squeeze_me=True, struct_as_record=False)
                IR = IR['risp_imp']
            elif ir_fmt == 'imp' or ir_fmt == 'txt':
                IR = np.loadtxt(ir_file)
            elif ir_fmt == 'npy':
                IR = np.load(ir_file)
            elif ir_fmt == 'wav':
                IR, _ = sf.read(ir_file)
            else:
                raise TypeError('Unrecognized IR format: ', ir_fmt)
            IR = IR[:self.max_reverb_len]
            if np.max(IR)>0:
                IR = IR / np.abs(np.max(IR))
            p_max = np.argmax(np.abs(IR))
            if hasattr(self, 'cache'):
                self.cache[ir_file] = (IR, p_max)
            return IR, p_max 
Example 19
def _load_selective_search_roidb(self, gt_roidb):
    filename = os.path.abspath(os.path.join(self.cache_path, '..',
                                            'selective_search_data',
                                            self.name + '.mat'))
    assert os.path.exists(filename), \
           'Selective search data not found at: {}'.format(filename)
    data = sio.loadmat(filename)
    raw_data = data['boxes'].ravel()

    box_list = []
    for i in range(raw_data.shape[0]):
        box_list.append(raw_data[i][:, (1, 0, 3, 2)] - 1)

    return self.create_roidb_from_box_list(box_list, gt_roidb) 
Example 20
Project: deep-learning-note   Author: wdxtub   File: 6_bias_variance.py    License: MIT License
def load_data():
    """for ex5
    d['X'] shape = (12, 1)
    pandas has trouble taking this 2d ndarray to construct a dataframe, so I ravel
    the results
    """
    d = sio.loadmat('data/ex5data1.mat')
    return map(np.ravel, [d['X'], d['y'], d['Xval'], d['yval'], d['Xtest'], d['ytest']]) 
Example 21
Project: DOTA_models   Author: ringringyi   File: input.py    License: Apache License 2.0
def extract_svhn(local_url):
  """
  Extract a MATLAB matrix into two numpy arrays with data and labels
  :param local_url:
  :return:
  """

  with tf.gfile.Open(local_url, mode='r') as file_obj:
    # Load MATLAB matrix using scipy IO
    dict = loadmat(file_obj)

    # Extract each dictionary (one for data, one for labels)
    data, labels = dict["X"], dict["y"]

    # Set np type
    data = np.asarray(data, dtype=np.float32)
    labels = np.asarray(labels, dtype=np.int32)

    # Transpose data to match TF model input format
    data = data.transpose(3, 0, 1, 2)

    # Fix the SVHN labels which label 0s as 10s
    labels[labels == 10] = 0

    # Fix label dimensions
    labels = labels.reshape(len(labels))

    return data, labels 
Example 22
Project: Pytorch-Project-Template   Author: moemen95   File: generate_class_weights.py    License: MIT License
def __getitem__(self, index):
        if self.mode == 'test':
            img_path, img_name = self.imgs[index]
            img = Image.open(os.path.join(img_path, img_name + '.jpg')).convert('RGB')
            if self.transform is not None:
                img = self.transform(img)
            return img_name, img

        img_path, mask_path = self.imgs[index]
        img = Image.open(img_path).convert('RGB')
        if self.mode == 'train':
            mask = sio.loadmat(mask_path)['GTcls']['Segmentation'][0][0]
            mask = Image.fromarray(mask.astype(np.uint8))
        else:
            mask = Image.open(mask_path)

        if self.joint_transform is not None:
            img, mask = self.joint_transform(img, mask)

        if self.sliding_crop is not None:
            img_slices, mask_slices, slices_info = self.sliding_crop(img, mask)
            if self.transform is not None:
                img_slices = [self.transform(e) for e in img_slices]
            if self.target_transform is not None:
                mask_slices = [self.target_transform(e) for e in mask_slices]
            img, mask = torch.stack(img_slices, 0), torch.stack(mask_slices, 0)
            return img, mask, torch.LongTensor(slices_info)
        else:
            if self.transform is not None:
                img = self.transform(img)
            if self.target_transform is not None:
                mask = self.target_transform(mask)
            return img, mask 
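
A note on the mask loading in Example 22: the ['GTcls']['Segmentation'][0][0] chain is how MATLAB structs come back from loadmat() with default options: each struct is a 1x1 structured ndarray, so the field has to be unwrapped with [0][0]. Passing squeeze_me=True and struct_as_record=False instead returns an object with plain attribute access. Below is a small sketch, using a hypothetical mask.mat created on the spot.

import numpy as np
import scipy.io as sio

# Hypothetical file, created only so the sketch runs end to end.
sio.savemat('mask.mat', {'GTcls': {'Segmentation': np.zeros((4, 4), dtype=np.uint8)}})

# Default options: struct fields are wrapped in 1x1 object arrays.
seg = sio.loadmat('mask.mat')['GTcls']['Segmentation'][0][0]

# squeeze_me / struct_as_record give the same array via attribute access.
seg2 = sio.loadmat('mask.mat', squeeze_me=True, struct_as_record=False)['GTcls'].Segmentation

print(seg.shape, seg2.shape)   # (4, 4) (4, 4)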
Example 23
Project: Pytorch-Project-Template   Author: moemen95   File: voc2012.py    License: MIT License
def __getitem__(self, index):
        if self.mode == 'test':
            img_path, img_name = self.imgs[index]
            img = Image.open(os.path.join(img_path, img_name + '.jpg')).convert('RGB')
            if self.transform is not None:
                img = self.transform(img)
            return img_name, img

        img_path, mask_path = self.imgs[index]
        img = Image.open(img_path).convert('RGB')
        if self.mode == 'train':
            mask = sio.loadmat(mask_path)['GTcls']['Segmentation'][0][0]
            mask = Image.fromarray(mask.astype(np.uint8))
        else:
            mask = Image.open(mask_path)

        if self.joint_transform is not None:
            img, mask = self.joint_transform(img, mask)

        if self.sliding_crop is not None:
            img_slices, mask_slices, slices_info = self.sliding_crop(img, mask)
            if self.transform is not None:
                img_slices = [self.transform(e) for e in img_slices]
            if self.target_transform is not None:
                mask_slices = [self.target_transform(e) for e in mask_slices]
            img, mask = torch.stack(img_slices, 0), torch.stack(mask_slices, 0)
            return img, mask, torch.LongTensor(slices_info)
        else:
            if self.transform is not None:
                img = self.transform(img)
            if self.target_transform is not None:
                mask = self.target_transform(mask)
            return img, mask 
Example 24
Project: SSGAN-Tensorflow   Author: clvrai   File: download.py    License: MIT License
def download_svhn(download_path):
    data_dir = os.path.join(download_path, 'svhn')

    import scipy.io as sio
    # svhn file loader
    def svhn_loader(url, path):
        cmd = ['curl', url, '-o', path]
        subprocess.call(cmd)
        m = sio.loadmat(path)
        return m['X'], m['y']

    if check_file(data_dir):
        print('SVHN was downloaded.')
        return

    data_url = 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat'
    train_image, train_label = svhn_loader(data_url, os.path.join(data_dir, 'train_32x32.mat'))

    data_url = 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat'
    test_image, test_label = svhn_loader(data_url, os.path.join(data_dir, 'test_32x32.mat'))

    prepare_h5py(np.transpose(train_image, (3, 0, 1, 2)), train_label,
                 np.transpose(test_image, (3, 0, 1, 2)), test_label, data_dir)

    cmd = ['rm', '-f', os.path.join(data_dir, '*.mat')]
    subprocess.call(cmd) 
Example 25
Project: ArtGAN   Author: cs-chan   File: ingest_flower102.py    License: BSD 3-Clause "New" or "Revised" License
def collectdata(self,):
        print 'Start Collect Data...'
        img_labels = sio.loadmat(self.input_dir + '/imagelabels.mat')['labels'][0]
        img_split = sio.loadmat(self.input_dir + '/setid.mat')
        img_train = img_split['trnid']
        img_val = img_split['valid']
        img_test = img_split['tstid']

        for idx in img_train[0]:
            img_name = 'image_%05d.jpg' % idx
            imgpath = os.path.join(self.input_img_dir, img_name)
            outpath = os.path.join(self.outimgdir, img_name)
            transform_and_save(img_path=imgpath, output_filename=outpath, target_size=self.target_size, skip=self.skipimg)
            self.trainpairlist[os.path.join('images', img_name)] = os.path.join('labels', str(img_labels[idx-1] - 1) + '.txt')

        for idx in img_val[0]:
            img_name = 'image_%05d.jpg' % idx
            imgpath = os.path.join(self.input_img_dir, img_name)
            outpath = os.path.join(self.outimgdir, img_name)
            transform_and_save(img_path=imgpath, output_filename=outpath, target_size=self.target_size,
                               skip=self.skipimg)
            self.valpairlist[os.path.join('images', img_name)] = os.path.join('labels', str(img_labels[idx-1] - 1) + '.txt')

        for idx in img_test[0]:
            img_name = 'image_%05d.jpg' % idx
            imgpath = os.path.join(self.input_img_dir, img_name)
            outpath = os.path.join(self.outimgdir, img_name)
            transform_and_save(img_path=imgpath, output_filename=outpath, target_size=self.target_size,
                               skip=self.skipimg)
            self.testpairlist[os.path.join('images', img_name)] = os.path.join('labels', str(img_labels[idx-1] - 1) + '.txt')

        print 'Finished Collect Data...' 
Example 26
Project: transferlearning   Author: jindongwang   File: digit_data_loader.py    License: MIT License
def load_mnist(path, scale=True, usps=False, all_use=True):
    mnist_data = loadmat(path)
    if scale:
        mnist_train = np.reshape(mnist_data['train_32'], (55000, 32, 32, 1))
        mnist_test = np.reshape(mnist_data['test_32'], (10000, 32, 32, 1))
        mnist_train = np.concatenate([mnist_train, mnist_train, mnist_train], 3)
        mnist_test = np.concatenate([mnist_test, mnist_test, mnist_test], 3)
        mnist_train = mnist_train.transpose(0, 3, 1, 2).astype(np.float32)
        mnist_test = mnist_test.transpose(0, 3, 1, 2).astype(np.float32)
        mnist_labels_train = mnist_data['label_train']
        mnist_labels_test = mnist_data['label_test']
    else:
        mnist_train = mnist_data['train_28']
        mnist_test = mnist_data['test_28']
        mnist_labels_train = mnist_data['label_train']
        mnist_labels_test = mnist_data['label_test']
        mnist_train = mnist_train.astype(np.float32)
        mnist_test = mnist_test.astype(np.float32)
        mnist_train = mnist_train.transpose((0, 3, 1, 2))
        mnist_test = mnist_test.transpose((0, 3, 1, 2))
    train_label = np.argmax(mnist_labels_train, axis=1)
    inds = np.random.permutation(mnist_train.shape[0])
    mnist_train = mnist_train[inds]
    train_label = train_label[inds]
    test_label = np.argmax(mnist_labels_test, axis=1)
    if usps and all_use != 'yes':
        mnist_train = mnist_train[:2000]
        train_label = train_label[:2000]

    return mnist_train, train_label, mnist_test, test_label 
Example 27
Project: transferlearning   Author: jindongwang   File: digit_data_loader.py    License: MIT License
def load_svhn(path_train, path_test):
    svhn_train = loadmat(path_train)
    svhn_test = loadmat(path_test)
    svhn_train_im = svhn_train['X']
    svhn_train_im = svhn_train_im.transpose(3, 2, 0, 1).astype(np.float32)
    svhn_label = dense_to_one_hot(svhn_train['y'])
    svhn_test_im = svhn_test['X']
    svhn_test_im = svhn_test_im.transpose(3, 2, 0, 1).astype(np.float32)
    svhn_label_test = dense_to_one_hot(svhn_test['y'])

    return svhn_train_im, svhn_label, svhn_test_im, svhn_label_test 
Example 28
Project: PHATE   Author: KrishnaswamyLab   File: tree.py    License: GNU General Public License v2.0
def artificial_tree():
    tree = loadmat("../../data/TreeData.mat")
    return tree["M"], tree["C"] 
Example 29
Project: Generative-Latent-Optimization-Tensorflow   Author: clvrai   File: download.py    License: MIT License
def download_svhn(download_path):
    data_dir = osp.join(download_path, 'svhn')

    import scipy.io as sio

    # svhn file loader
    def svhn_loader(url, path):
        cmd = ['curl', url, '-o', path]
        subprocess.call(cmd)
        m = sio.loadmat(path)
        return m['X'], m['y']

    if check_file(data_dir):
        print('SVHN was downloaded.')
        return

    data_url = 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat'
    train_image, train_label = svhn_loader(data_url, osp.join(data_dir, 'train_32x32.mat'))

    data_url = 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat'
    test_image, test_label = svhn_loader(data_url, osp.join(data_dir, 'test_32x32.mat'))

    prepare_h5py(np.transpose(train_image, (3, 0, 1, 2)),
                 np.transpose(test_image, (3, 0, 1, 2)), data_dir)

    cmd = ['rm', '-f', osp.join(data_dir, '*.mat')]
    subprocess.call(cmd) 
Example 30
Project: spectrum   Author: synergetics   File: bispectrumdx.py    License: MIT License
def test():
  nl1 = sio.loadmat(here(__file__) + '/demo/nl1.mat')
  dbic = bispectrumdx(nl1['x'], nl1['x'], nl1['y'], 128,5)