Python numpy.intersect1d() Examples

The following are 30 code examples of numpy.intersect1d(), drawn from open-source projects. The source file, project, and license are noted above each example.
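As a quick refresher before the examples: np.intersect1d returns the sorted, unique values common to both inputs, and assume_unique=True skips the internal deduplication when you can guarantee each input is already duplicate-free.

import numpy as np

print(np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]))  # [1 3]
# assume_unique=True is faster, but only valid for duplicate-free inputs:
print(np.intersect1d([1, 3, 4], [3, 1, 2], assume_unique=True))  # [1 3]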
Example #1
Source File: mf.py    From pyscf with Apache License 2.0
def nonin_osc_strength(self, comega, eemax):
    """ Computes the non-interacting oscillator strengths and energies """
    # NOTE: comega (complex frequency grid) and eemax (transition-energy
    # cutoff) were free, undefined names in the original snippet; they are
    # taken as explicit arguments here so the function can actually run.
    from scipy.sparse import spmatrix
    import numpy as np
    from numpy import array, zeros, where, unravel_index, einsum

    x, y, z = map(spmatrix.toarray, self.dipole_coo())
    i2d = array((x, y, z))
    n = self.mo_occ.shape[-1]

    p = zeros(len(comega), dtype=np.complex128)  # result to accumulate

    for s in range(self.nspin):
        o, e, cc = self.mo_occ[0, s], self.mo_energy[0, s], self.mo_coeff[0, s, :, :, 0]
        oo1, ee1 = np.subtract.outer(o, o).reshape(n*n), np.subtract.outer(e, e).reshape(n*n)
        # keep occupied->virtual pairs (occupation difference < 0) whose
        # transition energy lies below the cutoff
        idx = unravel_index(np.intersect1d(where(oo1 < 0.0)[0], where(ee1 < eemax)[0]), (n, n))
        ivrt, iocc = array(list(set(idx[0]))), array(list(set(idx[1])))
        voi2d = einsum('nia,ma->nmi', einsum('iab,nb->nia', i2d, cc[ivrt]), cc[iocc])
        t2osc = 2.0/3.0*einsum('voi,voi->vo', voi2d, voi2d)
        t2w = np.subtract.outer(e[ivrt], e[iocc])
        t2o = -np.subtract.outer(o[ivrt], o[iocc])

        for iw, w in enumerate(comega):
            p[iw] += 0.5*(t2osc*((t2o/(w - t2w)) - (t2o/(w + t2w)))).sum()
    return p
Example #2
Source File: pull_data.py    From TheCannon with MIT License
def find_colors(ref_id, ref_flux, ref_ivar):
    # Find colors
    print("Finding colors")
    a = pyfits.open(DATA_DIR + "/lamost_catalog_colors.fits")
    data = a[1].data
    a.close()
    all_ids = data['LAMOST_ID_1']
    all_ids = np.array([val.strip() for val in all_ids])
    ref_id_col = np.intersect1d(all_ids, ref_id)
    inds = np.array([np.where(all_ids==val)[0][0] for val in ref_id_col])
    all_col, all_col_ivar = get_colors(
            DATA_DIR + "/lamost_catalog_colors.fits")
    col = all_col[:,inds]
    col_ivar = all_col_ivar[:,inds]
    bad_ivar = np.logical_or(np.isnan(col_ivar), col_ivar==np.inf)
    col_ivar[bad_ivar] = 0.0
    bad_flux = np.logical_or(np.isnan(col), col==np.inf)
    col[bad_flux] = 1.0
    col_ivar[bad_flux] = 0.0
    # add them to the wl, flux and ivar arrays
    inds = np.array([np.where(ref_id==val)[0][0] for val in ref_id_col])
    ref_flux_col = np.hstack((ref_flux[inds], col.T))
    ref_ivar_col = np.hstack((ref_ivar[inds], col_ivar.T))
    return ref_id_col, ref_flux_col, ref_ivar_col 
Example #3
Source File: pull_data.py    From TheCannon with MIT License
def find_colors(ref_id, ref_flux, ref_ivar):
    # Find colors
    DATA_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age"
    print("Finding colors")
    a = pyfits.open(DATA_DIR + "/lamost_catalog_colors.fits")
    data = a[1].data
    a.close()
    all_ids = data['LAMOST_ID_1']
    all_ids = np.array([val.strip() for val in all_ids])
    ref_id_col = np.intersect1d(all_ids, ref_id)
    inds = np.array([np.where(all_ids==val)[0][0] for val in ref_id_col])
    all_id, all_col, all_col_ivar = get_colors(
            DATA_DIR + "/lamost_catalog_colors.fits")
    col = all_col[:,inds]
    col_ivar = all_col_ivar[:,inds]
    bad_ivar = np.logical_or(np.isnan(col_ivar), col_ivar==np.inf)
    col_ivar[bad_ivar] = 0.0
    bad_flux = np.logical_or(np.isnan(col), col==np.inf)
    col[bad_flux] = 1.0
    col_ivar[bad_flux] = 0.0
    # add them to the wl, flux and ivar arrays
    inds = np.array([np.where(ref_id==val)[0][0] for val in ref_id_col])
    ref_flux_col = np.hstack((ref_flux[inds], col.T))
    ref_ivar_col = np.hstack((ref_ivar[inds], col_ivar.T))
    return ref_id_col, ref_flux_col, ref_ivar_col 
Example #4
Source File: ecg_rsa.py    From NeuroKit with MIT License
def _ecg_rsa_cycles(signals):
    """Extract respiratory cycles."""
    inspiration_onsets = np.intersect1d(
        np.where(signals["RSP_Phase"] == 1)[0], np.where(signals["RSP_Phase_Completion"] == 0)[0], assume_unique=True
    )

    expiration_onsets = np.intersect1d(
        np.where(signals["RSP_Phase"] == 0)[0], np.where(signals["RSP_Phase_Completion"] == 0)[0], assume_unique=True
    )

    cycles_length = np.diff(inspiration_onsets)

    return {
        "RSP_Inspiration_Onsets": inspiration_onsets,
        "RSP_Expiration_Onsets": expiration_onsets,
        "RSP_Cycles_Length": cycles_length,
    } 
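np.where returns its indices sorted and duplicate-free, which is what makes assume_unique=True safe in the snippet above. A minimal sketch with made-up phase data (the keys mirror the NeuroKit column names; the values are purely illustrative):

import numpy as np

signals = {
    "RSP_Phase":            np.array([0, 0, 1, 1, 1, 0, 0, 1, 1]),
    "RSP_Phase_Completion": np.array([0.5, 1.0, 0.0, 0.5, 1.0, 0.0, 0.5, 0.0, 0.5]),
}
inspiration_onsets = np.intersect1d(
    np.where(signals["RSP_Phase"] == 1)[0],             # samples in inspiration
    np.where(signals["RSP_Phase_Completion"] == 0)[0],  # samples where a phase starts
    assume_unique=True,
)
print(inspiration_onsets)  # [2 7]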
Example #5
Source File: coco_loader.py    From sunets with MIT License
def filtertraindata(self):
        datapath = get_data_path('coco')
        train_list = tuple(open(datapath + 'annotations/train2014.txt', 'r'))
        val_list = tuple(open(datapath + 'annotations/val2014.txt', 'r'))
        total_list = ['/train2014/'+id_.rstrip() for id_ in train_list] + ['/val2014/'+id_.rstrip() for id_ in val_list]

        annotation_path = os.path.join(datapath, 'seg_mask')
        aug_list = []
        for filename in total_list:
            lbl_path = annotation_path + filename + '.png'
            lbl = Image.open(lbl_path).convert('P')
            lbl = np.array(lbl, dtype=np.int32)
            if np.sum(pascal_map[lbl] != 0) > 1000 and np.intersect1d(np.unique(lbl),pascal_classes).any():
                aug_list.append(filename)

        val_aug_list = random.sample(aug_list, 1500)
        train_aug_list = list(set(aug_list) - set(val_aug_list))
        with open(os.path.join(datapath, 'annotations', 'train_aug.txt'), 'w') as txtfile:
            [txtfile.write(file + '\n') for file in train_aug_list]
        with open(os.path.join(datapath, 'annotations', 'val.txt'), 'w') as txtfile:
            [txtfile.write(file + '\n') for file in val_aug_list] 
Example #6
Source File: test_split.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_shuffle_kfold():
    # Check the indices are shuffled properly
    kf = KFold(3)
    kf2 = KFold(3, shuffle=True, random_state=0)
    kf3 = KFold(3, shuffle=True, random_state=1)

    X = np.ones(300)

    all_folds = np.zeros(300)
    for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
            kf.split(X), kf2.split(X), kf3.split(X)):
        for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
            # Assert that there is no complete overlap
            assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))

        # Set all test indices in successive iterations of kf2 to 1
        all_folds[te2] = 1

    # Check that all indices are returned in the different test folds
    assert_equal(sum(all_folds), 300) 
Example #7
Source File: test_split.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_stratified_shuffle_split_overlap_train_test_bug():
    # See https://github.com/scikit-learn/scikit-learn/issues/6121 for
    # the original bug report
    y = [0, 1, 2, 3] * 3 + [4, 5] * 5
    X = np.ones_like(y)

    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=0.5, random_state=0)

    train, test = next(sss.split(X=X, y=y))

    # no overlap
    assert_array_equal(np.intersect1d(train, test), [])

    # complete partition
    assert_array_equal(np.union1d(train, test), np.arange(len(y))) 
Example #8
Source File: preprocess.py    From DeepLab_v3 with MIT License
def voc2012_split(dataset_dir='data/datasets/VOCdevkit/VOC2012/', split_ratios=[0.7, 0.2, 0.1]):

    images_dir = os.path.join(dataset_dir, 'JPEGImages/')
    labels_dir = os.path.join(dataset_dir, 'SegmentationClass/')

    image_filenames = [filename.split('.')[0] for filename in os.listdir(images_dir) if os.path.isfile(os.path.join(images_dir, filename)) and filename.endswith('.jpg')]
    label_filenames = [filename.split('.')[0] for filename in os.listdir(labels_dir) if os.path.isfile(os.path.join(labels_dir, filename)) and filename.endswith('.png')]

    dataset_filenames = np.intersect1d(image_filenames, label_filenames)

    train_dataset_filename = os.path.join(dataset_dir, 'train.txt')
    valid_dataset_filename = os.path.join(dataset_dir, 'val.txt')
    test_dataset_filename = os.path.join(dataset_dir, 'test.txt')

    try:
        train_val_test_split(
            dataset_filenames=dataset_filenames,
            split_ratios=split_ratios,
            train_dataset_filename=train_dataset_filename,
            valid_dataset_filename=valid_dataset_filename,
            test_dataset_filename=test_dataset_filename)
    except BaseException:
        raise Exception('Dataset split failed.')

    return train_dataset_filename, valid_dataset_filename, test_dataset_filename 
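Note that np.intersect1d works on string arrays as well as numeric ones; here it keeps only the filename stems that have both a .jpg image and a .png label. An illustrative run (hypothetical stems):

import numpy as np

image_filenames = ['2007_000032', '2007_000039', '2007_000063']
label_filenames = ['2007_000039', '2007_000063', '2007_000121']
print(np.intersect1d(image_filenames, label_filenames))
# ['2007_000039' '2007_000063']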
Example #9
Source File: TargetList.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def main_sequence_filter(self):
        """Removes stars from Target List which are not main sequence
        
        """
        
        # indices from Target List to keep
        i1 = np.where((self.BV < 0.74) & (self.MV < 6*self.BV + 1.8))[0]
        i2 = np.where((self.BV >= 0.74) & (self.BV < 1.37) & \
                (self.MV < 4.3*self.BV + 3.05))[0]
        i3 = np.where((self.BV >= 1.37) & (self.MV < 18*self.BV - 15.7))[0]
        i4 = np.where((self.BV < 0.87) & (self.MV > -8*(self.BV - 1.35)**2 + 7.01))[0]
        i5 = np.where((self.BV >= 0.87) & (self.BV < 1.45) & \
                (self.MV < 5*self.BV + 0.81))[0]
        i6 = np.where((self.BV >= 1.45) & (self.MV > 18*self.BV - 18.04))[0]
        ia = np.append(np.append(i1, i2), i3)
        ib = np.append(np.append(i4, i5), i6)
        i = np.intersect1d(np.unique(ia), np.unique(ib))
        self.revise_lists(i) 
Example #10
Source File: test_split.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_stratified_shuffle_split_multilabel():
    # fix for issue 9037
    for y in [np.array([[0, 1], [1, 0], [1, 0], [0, 1]]),
              np.array([[0, 1], [1, 1], [1, 1], [0, 1]])]:
        X = np.ones_like(y)
        sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
        train, test = next(sss.split(X=X, y=y))
        y_train = y[train]
        y_test = y[test]

        # no overlap
        assert_array_equal(np.intersect1d(train, test), [])

        # complete partition
        assert_array_equal(np.union1d(train, test), np.arange(len(y)))

        # correct stratification of entire rows
        # (by design, here y[:, 0] uniquely determines the entire row of y)
        expected_ratio = np.mean(y[:, 0])
        assert_equal(expected_ratio, np.mean(y_train[:, 0]))
        assert_equal(expected_ratio, np.mean(y_test[:, 0])) 
Example #11
Source File: eval.py    From Pointnet2.ScanNet with MIT License
def compute_miou(coords, preds, targets, weights):
    coords, preds, targets, weights = filter_points(coords, preds, targets, weights)
    seen_classes = np.unique(targets)
    mask = np.zeros(CONF.NUM_CLASSES)
    mask[seen_classes] = 1

    pointmiou = np.zeros(CONF.NUM_CLASSES)
    voxmiou = np.zeros(CONF.NUM_CLASSES)

    uvidx, uvlabel, _ = point_cloud_label_to_surface_voxel_label_fast(coords, np.concatenate((np.expand_dims(targets,1),np.expand_dims(preds,1)),axis=1), res=0.02)
    for l in seen_classes:
        target_label = np.arange(targets.shape[0])[targets==l]
        pred_label = np.arange(preds.shape[0])[preds==l]
        num_intersection_label = np.intersect1d(pred_label, target_label).shape[0]
        num_union_label = np.union1d(pred_label, target_label).shape[0]
        pointmiou[l] = num_intersection_label / (num_union_label + 1e-8)

        target_label_vox = uvidx[(uvlabel[:, 0] == l)]
        pred_label_vox = uvidx[(uvlabel[:, 1] == l)]
        num_intersection_label_vox = np.intersect1d(pred_label_vox, target_label_vox).shape[0]
        num_union_label_vox = np.union1d(pred_label_vox, target_label_vox).shape[0]
        voxmiou[l] = num_intersection_label_vox / (num_union_label_vox + 1e-8)

    return pointmiou, voxmiou, mask 
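The per-class IoU above is plain set arithmetic on index arrays: |pred ∩ target| / |pred ∪ target|, with a small epsilon guarding against an empty union. A standalone sketch with toy indices:

import numpy as np

target_label = np.array([0, 1, 2, 5])  # point indices labeled l in the ground truth
pred_label = np.array([1, 2, 3])       # point indices predicted as l
num_intersection = np.intersect1d(pred_label, target_label).shape[0]  # 2
num_union = np.union1d(pred_label, target_label).shape[0]             # 5
print(num_intersection / (num_union + 1e-8))  # ~0.4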
Example #12
Source File: task_generator.py    From cactus-maml with MIT License
def get_partition_from_splits(self, splits):
        num_splits = len(splits)
        splits_per_partition = int(np.ceil(np.log2(self.num_classes)))  # np.int was removed in NumPy 1.24; use the builtin

        num_failed = 0
        while True:
            which_splits = np.random.choice(num_splits, splits_per_partition, replace=False)
            splits_for_this_partition = [splits[i] for i in which_splits]
            partition = defaultdict(list)
            num_big_enough_classes = 0
            for i_class, above_or_belows in enumerate(product([0, 1], repeat=splits_per_partition)):
                zones = [splits_for_this_partition[i][above_or_belows[i]] for i in range(splits_per_partition)]
                indices = reduce(np.intersect1d, zones)
                if len(indices) >= self.num_samples_per_class:
                    num_big_enough_classes += 1
                    partition[i_class].extend(indices.tolist())
            if num_big_enough_classes >= self.num_classes:
                break
            else:
                num_failed += 1
        return partition, num_failed 
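The reduce(np.intersect1d, zones) call is the idiomatic way to intersect more than two arrays at once: intersect1d is binary, so reduce folds it across the list. A minimal sketch:

from functools import reduce
import numpy as np

zones = [np.array([0, 1, 2, 5, 8]),
         np.array([1, 2, 3, 5, 9]),
         np.array([2, 4, 5, 8, 9])]
print(reduce(np.intersect1d, zones))  # [2 5]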
Example #13
Source File: test_numeric.py    From vnpy_crypto with MIT License
def test_intersection(self):
        other = Index([1, 2, 3, 4, 5])
        result = self.index.intersection(other)
        expected = Index(np.sort(np.intersect1d(self.index.values,
                                                other.values)))
        tm.assert_index_equal(result, expected)

        result = other.intersection(self.index)
        expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values,
                                                           other.values))))
        tm.assert_index_equal(result, expected) 
Example #14
Source File: utils.py    From SetSimilaritySearch with Apache License 2.0
def _cosine(s1, s2):
    i = len(np.intersect1d(s1, s2, assume_unique=True))
    return float(i) / np.sqrt(float(len(s1)*len(s2))) 
Example #15
Source File: utils.py    From SetSimilaritySearch with Apache License 2.0
def _containment(s1, s2):
    i = len(np.intersect1d(s1, s2, assume_unique=True))
    return float(i) / float(len(s1)) 
Example #16
Source File: utils.py    From SetSimilaritySearch with Apache License 2.0
def _jaccard(s1, s2):
    i = len(np.intersect1d(s1, s2, assume_unique=True))
    return float(i) / float(len(s1) + len(s2) - i) 
Example #17
Source File: utils.py    From SetSimilaritySearch with Apache License 2.0
def _containment_min(s1, s2):
    i = len(np.intersect1d(s1, s2, assume_unique=True))
    return (float(i)) / (float(max(len(s1), len(s2)))) 
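All four measures in Examples #14 through #17 compute the overlap the same way and differ only in the normalization. Assuming the four helpers above are in scope and the inputs are duplicate-free (a requirement of assume_unique=True), toy values behave as follows:

import numpy as np

s1 = np.array([1, 2, 3, 4])
s2 = np.array([3, 4, 5])
# overlap i = 2
print(_cosine(s1, s2))           # 2 / sqrt(4 * 3) ~= 0.577
print(_containment(s1, s2))      # 2 / 4 = 0.5
print(_jaccard(s1, s2))          # 2 / (4 + 3 - 2) = 0.4
print(_containment_min(s1, s2))  # 2 / max(4, 3) = 0.5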
Example #18
Source File: neighbor_index.py    From ibeis with Apache License 2.0
def get_removed_idxs(nnindexer):
        r"""
        __removed_ids = nnindexer.flann._FLANN__removed_ids
        invalid_idxs = nnindexer.get_removed_idxs()
        assert len(np.intersect1d(invalid_idxs, __removed_ids)) == len(__removed_ids)
        """
        invalid_idxs = np.nonzero(nnindexer.ax2_aid[nnindexer.idx2_ax] == -1)[0]
        return invalid_idxs 
Example #19
Source File: arraysetops.py    From Computable with MIT License
def intersect1d(ar1, ar2, assume_unique=False):
    """
    Find the intersection of two arrays.

    Return the sorted, unique values that are in both of the input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.

    Returns
    -------
    intersect1d : ndarray
        Sorted 1D array of common and unique elements.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
    array([1, 3])

    """
    if not assume_unique:
        # Might be faster than unique( intersect1d( ar1, ar2 ) )?
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    aux = np.concatenate( (ar1, ar2) )
    aux.sort()
    return aux[:-1][aux[1:] == aux[:-1]] 
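The closing trick relies on both inputs being duplicate-free: after sorting the concatenation, a value occupies two adjacent slots exactly when it occurs in both arrays. Step by step:

import numpy as np

ar1, ar2 = np.array([1, 3, 4]), np.array([1, 2, 3])
aux = np.concatenate((ar1, ar2))
aux.sort()
print(aux)                            # [1 1 2 3 3 4]
print(aux[:-1][aux[1:] == aux[:-1]])  # [1 3] -- adjacent duplicates = intersection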
Example #20
Source File: test_split.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_group_shuffle_split():
    for groups_i in test_groups:
        X = y = np.ones(len(groups_i))
        n_splits = 6
        test_size = 1. / 3
        slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)

        # Make sure the repr works
        repr(slo)

        # Test that the length is correct
        assert_equal(slo.get_n_splits(X, y, groups=groups_i), n_splits)

        l_unique = np.unique(groups_i)
        l = np.asarray(groups_i)

        for train, test in slo.split(X, y, groups=groups_i):
            # First test: no train group is in the test set and vice versa
            l_train_unique = np.unique(l[train])
            l_test_unique = np.unique(l[test])
            assert not np.any(np.in1d(l[train], l_test_unique))
            assert not np.any(np.in1d(l[test], l_train_unique))

            # Second test: train and test add up to all the data
            assert_equal(l[train].size + l[test].size, l.size)

            # Third test: train and test are disjoint
            assert_array_equal(np.intersect1d(train, test), [])

            # Fourth test:
            # unique train and test groups are correct, +- 1 for rounding error
            assert abs(len(l_test_unique) -
                       round(test_size * len(l_unique))) <= 1
            assert abs(len(l_train_unique) -
                       round((1.0 - test_size) * len(l_unique))) <= 1 
Example #21
Source File: test_numeric.py    From vnpy_crypto with MIT License
def test_intersection(self):
        other = Index([2**63, 2**63 + 5, 2**63 + 10, 2**63 + 15, 2**63 + 20])
        result = self.index.intersection(other)
        expected = Index(np.sort(np.intersect1d(self.index.values,
                                                other.values)))
        tm.assert_index_equal(result, expected)

        result = other.intersection(self.index)
        expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values,
                                                           other.values))))
        tm.assert_index_equal(result, expected) 
Example #22
Source File: dukemtmcreid.py    From person-reid-lib with MIT License
def _get_dict(self):
        img_list = []
        train_images, train_img_dir = self._process_dir(self.train_dir, 0)
        img_list = img_list + train_img_dir
        probe_images, probe_img_dir = self._process_dir(self.query_dir, len(img_list))
        img_list = img_list + probe_img_dir
        gallery_images, gallery_img_dir = self._process_dir(self.gallery_dir, len(img_list))
        img_list = img_list + gallery_img_dir

        train_id = np.unique(train_images[:, 0])
        probe_id = np.unique(probe_images[:, 0])
        gallery_id = np.unique(gallery_images[:, 0])
        assert np.intersect1d(train_id, probe_id).size == 0
        assert np.intersect1d(probe_id, gallery_id).size == probe_id.size
        assert gallery_images[-1, 3] == len(img_list)

        data_dict = {}
        data_dict['dir'] = img_list

        data_split = {}
        data_split['train'] = train_images
        data_split['probe'] = probe_images
        data_split['gallery'] = gallery_images
        data_split['info'] = 'DukeMTMC-reID dataset. Split ID {:2d}.'.format(0)

        data_dict['split'] = [data_split]
        data_dict['info'] = 'DukeMTMCreID Dataset. One Split'

        return data_dict 
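The two intersect1d asserts encode the standard re-identification protocol: training identities must be disjoint from test identities, and every probe identity must also appear in the gallery. With toy id arrays:

import numpy as np

train_id = np.array([1, 2, 3])
probe_id = np.array([4, 5])
gallery_id = np.array([4, 5, 6])
assert np.intersect1d(train_id, probe_id).size == 0                # disjoint id sets
assert np.intersect1d(probe_id, gallery_id).size == probe_id.size  # probe is a subset of gallery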
Example #23
Source File: cuhk01.py    From person-reid-lib with MIT License
def _get_dict(self):
        img_list = sorted(glob.glob(osp.join(self.raw_data_folder, '*.png')))
        images_info = []

        for idx, img_path in enumerate(img_list):
            img_name = osp.basename(img_path)
            pid = int(img_name[:4]) - 1
            camid = (int(img_name[4:7]) - 1) // 2
            images_info.append([pid, camid, idx, idx+1, 1])

        images_info = np.asarray(images_info, dtype=np.int64)
        splits = self._create_split(images_info)

        data_dict = {}
        data_dict['dir'] = img_list
        data_splits = []

        for split_id, split_i in enumerate(splits):
            data_split = {}
            train_idx = split_i['train']
            test_idx = split_i['test']
            data_split['train'] = np_filter(images_info, train_idx)
            data_split['probe'] = np_filter(images_info, test_idx)
            data_split['gallery'] = np_filter(images_info, test_idx)
            data_split['info'] = 'CUHK01 dataset. Split ID {:2d}'.format(split_id)

            probe_id = np.unique(data_split['probe'][:, 0])
            gallery_id = np.unique(data_split['gallery'][:, 0])

            assert np.intersect1d(probe_id, gallery_id).size == probe_id.size
            assert probe_id.size == gallery_id.size
            assert data_split['probe'].shape == data_split['gallery'].shape

            data_splits.append(data_split)

        data_dict['split'] = data_splits
        data_dict['info'] = 'CUHK01 Dataset. 10 Splits.'

        return data_dict 
Example #24
Source File: market1501.py    From person-reid-lib with MIT License
def _get_dict(self):

        img_list = []
        train_images, train_img_dir = self._process_dir(self.train_dir, 0)
        img_list = img_list + train_img_dir
        probe_images, probe_img_dir = self._process_dir(self.query_dir, len(img_list))
        img_list = img_list + probe_img_dir
        gallery_images, gallery_img_dir = self._process_dir(self.gallery_dir, len(img_list))
        img_list = img_list + gallery_img_dir

        train_id = np.unique(train_images[:, 0])
        probe_id = np.unique(probe_images[:, 0])
        gallery_id = np.unique(gallery_images[:, 0])
        assert np.intersect1d(train_id, probe_id).size == 0
        assert np.intersect1d(probe_id, gallery_id).size == probe_id.size
        assert gallery_images[-1, 3] == len(img_list)

        data_dict = {}
        data_dict['dir'] = img_list

        data_split = {}
        data_split['train'] = train_images
        data_split['probe'] = probe_images
        data_split['gallery'] = gallery_images
        data_split['info'] = 'Market1501 dataset. Split ID {:2d}. Remove Junk Images'.format(0)

        data_dict['split'] = [data_split]
        data_dict['info'] = 'Market1501 Dataset. Remove Junk Images'

        return data_dict 
Example #25
Source File: dukemtmcvidreid.py    From person-reid-lib with MIT License
def _get_dict(self):

        train_images = self._process_dir(self.train_dir)
        probe_images = self._process_dir(self.query_dir)
        gallery_images = self._process_dir(self.gallery_dir)

        img_list, train_info, probe_info, gallery_info = self._process_split_info(train_images, probe_images, gallery_images)

        train_id = np.unique(train_info[:, 0])
        probe_id = np.unique(probe_info[:, 0])
        gallery_id = np.unique(gallery_info[:, 0])
        assert np.intersect1d(train_id, probe_id).size == 0
        assert np.intersect1d(probe_id, gallery_id).size == probe_id.size
        assert gallery_info[-1, 3] == len(img_list)

        data_dict = {}
        data_dict['dir'] = img_list

        data_split = {}
        data_split['train'] = train_info
        data_split['probe'] = probe_info
        data_split['gallery'] = gallery_info
        data_split['info'] = 'DukeMTMC-VideoReID. Split ID {:2d}.'.format(0)

        data_dict['split'] = [data_split]
        data_dict['info'] = 'DukeMTMC-VideoReID Dataset. One split.'

        return data_dict 
Example #26
Source File: response_matrix.py    From ocelot with GNU General Public License v3.0
def compare(self, rmatrix, absolut=0.001, relative=0.1):
        cors1 = np.array(self.cor_names)
        cors2 = np.array(rmatrix.cor_names)
        bpms1 = np.array(self.bpm_names)
        bpms2 = np.array(rmatrix.bpm_names)
        nb1 = len(bpms1)
        nb2 = len(bpms2)
        #c_names = np.intersect1d(cors1, cors2)
        c_names = cors1[np.in1d(cors1, cors2)]
        c_i1 = np.where(np.in1d(cors1, cors2))[0]
        c_i2 = np.where(np.in1d(cors2, cors1))[0]
        #b_names = np.intersect1d(bpms1, bpms2)
        b_names = bpms1[np.in1d(bpms1, bpms2)]
        b_i1 = np.where(np.in1d(bpms1, bpms2))[0]
        b_i2 = np.where(np.in1d(bpms2, bpms1))[0]
        plane = ["X", "Y"]
        for n in range(2):
            print ("****************   ", plane[n], "   ****************")
            counter = 0
            for i, c in enumerate(c_names):
                for j, b in enumerate(b_names):
                    #print b_i1[j],  nb1*n, c_i1[i]
                    x1 = self.matrix[b_i1[j] + nb1*n, c_i1[i]]
                    x2 = rmatrix.matrix[b_i2[j] + nb2*n, c_i2[i]]
                    if abs(x1 - x2) < absolut:
                        continue
                    if abs(x1 - x2)/max(np.abs([x1, x2])) < relative:
                        continue
                    l_x1 = len(str(x1))
                    print (plane[n], c, " "*(10 - len(c)), b, " "*(10 - len(b)), "r1: ", x1," "*(18 - l_x1),"r2: ", x2)
                    counter += 1
            print("shown", counter, "elements of", len(c_names)*len(b_names)) 
Example #27
Source File: response_matrix.py    From ocelot with GNU General Public License v3.0
def compare(self, rmatrix, absolut=0.001, relative=0.1):
        cors1 = np.array(self.cor_names)
        cors2 = np.array(rmatrix.cor_names)
        bpms1 = np.array(self.bpm_names)
        bpms2 = np.array(rmatrix.bpm_names)
        nb1 = len(bpms1)
        nb2 = len(bpms2)
        #c_names = np.intersect1d(cors1, cors2)
        c_names = cors1[np.in1d(cors1, cors2)]
        c_i1 = np.where(np.in1d(cors1, cors2))[0]
        c_i2 = np.where(np.in1d(cors2, cors1))[0]
        #b_names = np.intersect1d(bpms1, bpms2)
        b_names = bpms1[np.in1d(bpms1, bpms2)]
        b_i1 = np.where(np.in1d(bpms1, bpms2))[0]
        b_i2 = np.where(np.in1d(bpms2, bpms1))[0]
        plane = ["X", "Y"]
        for n in range(2):
            print ("****************   ", plane[n], "   ****************")
            counter = 0
            for i, c in enumerate(c_names):
                for j, b in enumerate(b_names):
                    #print b_i1[j],  nb1*n, c_i1[i]
                    x1 = self.matrix[b_i1[j] + nb1*n, c_i1[i]]
                    x2 = rmatrix.matrix[b_i2[j] + nb2*n, c_i2[i]]
                    if abs(x1 - x2) < absolut:
                        continue
                    if abs(x1 - x2)/max(np.abs([x1, x2])) < relative:
                        continue
                    l_x1 = len(str(x1))
                    print (plane[n], c, " "*(10 - len(c)), b, " "*(10 - len(b)), "r1: ", x1," "*(18 - l_x1),"r2: ", x2)
                    counter += 1
            print("shown", counter, "elements of", len(c_names)*len(b_names)) 
Example #28
Source File: eval_metrics.py    From TKP with Apache License 2.0
def evaluate(distmat, q_pids, g_pids, q_camids, g_camids):
    num_q, num_g = distmat.shape
    index = np.argsort(distmat, axis=1) # from small to large

    num_no_gt = 0 # num of query imgs without groundtruth
    num_r1 = 0
    CMC = np.zeros(len(g_pids))
    AP = 0

    for i in range(num_q):
        # groundtruth index
        query_index = np.argwhere(g_pids==q_pids[i])
        camera_index = np.argwhere(g_camids==q_camids[i])
        good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
        if good_index.size == 0:
            num_no_gt += 1
            continue
        # remove gallery samples that have the same pid and camid as the query
        junk_index = np.intersect1d(query_index, camera_index)

        ap_tmp, CMC_tmp = compute_ap_cmc(index[i], good_index, junk_index)
        if CMC_tmp[0]==1:
            num_r1 += 1
        CMC = CMC + CMC_tmp
        AP += ap_tmp

    if num_no_gt > 0:
        print("{} query imgs do not have groundtruth.".format(num_no_gt))

    # print("R1:{}".format(num_r1))

    CMC = CMC / (num_q - num_no_gt)
    mAP = AP / (num_q - num_no_gt)

    return CMC, mAP 
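good_index and junk_index are complementary set operations over the same two index arrays: a gallery entry sharing the query's pid but not its camid is a valid match, while one sharing both pid and camid is excluded as junk. With hypothetical indices:

import numpy as np

query_index = np.array([0, 2, 5, 7])  # gallery entries sharing the query's pid
camera_index = np.array([2, 3, 7])    # gallery entries sharing the query's camid
print(np.setdiff1d(query_index, camera_index, assume_unique=True))  # [0 5] -- good
print(np.intersect1d(query_index, camera_index))                    # [2 7] -- junk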
Example #29
Source File: util.py    From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def intersect_sim(array_1, array_2):
    """Calculate the simiarity of two arrays
    by using intersection / union
    """
    sim = float(np.intersect1d(array_1, array_2).size) / \
        float(np.union1d(array_1, array_2).size)
    return sim 
Example #30
Source File: index_operations.py    From paramz with BSD 3-Clause "New" or "Revised" License
def remove(self, prop, indices):
        if prop in self._properties:
            diff = remove_indices(self[prop], indices)
            removed = numpy.intersect1d(self[prop], indices, True)
            if not index_empty(diff):
                self._properties[prop] = diff
            else:
                del self._properties[prop]
            return removed.astype(int)
        return numpy.array([]).astype(int)