Python numpy.intersect1d() Examples

The following are 30 code examples showing how to use numpy.intersect1d(), extracted from open source projects. The project, file, and license are listed above each example.

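For a quick orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic behaviour: numpy.intersect1d() returns the sorted, unique values that appear in both inputs.

import numpy as np

a = np.array([3, 1, 4, 1, 5, 9, 2, 6])
b = np.array([9, 7, 1, 3, 8])

# Duplicates are collapsed and the result is sorted.
print(np.intersect1d(a, b))   # [1 3 9]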

Example 1
Project: EXOSIMS   Author: dsavransky   File: TargetList.py    License: BSD 3-Clause "New" or "Revised" License
def main_sequence_filter(self):
        """Removes stars from Target List which are not main sequence
        
        """
        
        # indices from Target List to keep
        i1 = np.where((self.BV < 0.74) & (self.MV < 6*self.BV + 1.8))[0]
        i2 = np.where((self.BV >= 0.74) & (self.BV < 1.37) & \
                (self.MV < 4.3*self.BV + 3.05))[0]
        i3 = np.where((self.BV >= 1.37) & (self.MV < 18*self.BV - 15.7))[0]
        i4 = np.where((self.BV < 0.87) & (self.MV > -8*(self.BV - 1.35)**2 + 7.01))[0]
        i5 = np.where((self.BV >= 0.87) & (self.BV < 1.45) & \
                (self.MV < 5*self.BV + 0.81))[0]
        i6 = np.where((self.BV >= 1.45) & (self.MV > 18*self.BV - 18.04))[0]
        ia = np.append(np.append(i1, i2), i3)
        ib = np.append(np.append(i4, i5), i6)
        i = np.intersect1d(np.unique(ia), np.unique(ib))
        self.revise_lists(i) 
Example 2
Project: DeepLab_v3   Author: leimao   File: preprocess.py    License: MIT License
def voc2012_split(dataset_dir='data/datasets/VOCdevkit/VOC2012/', split_ratios=[0.7, 0.2, 0.1]):

    images_dir = os.path.join(dataset_dir, 'JPEGImages/')
    labels_dir = os.path.join(dataset_dir, 'SegmentationClass/')

    image_filenames = [filename.split('.')[0] for filename in os.listdir(images_dir) if os.path.isfile(os.path.join(images_dir, filename)) and filename.endswith('.jpg')]
    label_filenames = [filename.split('.')[0] for filename in os.listdir(labels_dir) if os.path.isfile(os.path.join(labels_dir, filename)) and filename.endswith('.png')]

    dataset_filenames = np.intersect1d(image_filenames, label_filenames)

    train_dataset_filename = os.path.join(dataset_dir, 'train.txt')
    valid_dataset_filename = os.path.join(dataset_dir, 'val.txt')
    test_dataset_filename = os.path.join(dataset_dir, 'test.txt')

    try:
        train_val_test_split(
            dataset_filenames=dataset_filenames,
            split_ratios=split_ratios,
            train_dataset_filename=train_dataset_filename,
            valid_dataset_filename=valid_dataset_filename,
            test_dataset_filename=test_dataset_filename)
    except BaseException:
        raise Exception('Dataset split failed.')

    return train_dataset_filename, valid_dataset_filename, test_dataset_filename 
Example 3
Project: pyscf   Author: pyscf   File: mf.py    License: Apache License 2.0
def nonin_osc_strength(self, comega):
    """ Computes the non-interacting oscillator strengths and energies """
    # Assumptions: the published snippet referenced `comega` and `eemax`
    # without defining them. Here `comega` (complex frequency grid) is taken
    # as a parameter and `eemax` uses the same cutoff as
    # polariz_nonin_ave_matelem in Example 17. Bare names (array, zeros,
    # where, einsum, unravel_index) come from module-level numpy imports.
    from scipy.sparse import spmatrix

    x,y,z = map(spmatrix.toarray, self.dipole_coo())
    i2d = array((x,y,z))
    n = self.mo_occ.shape[-1]
    eemax = max(comega.real)+20.0*max(comega.imag)

    p = zeros((len(comega)), dtype=np.complex128) # result to accumulate

    for s in range(self.nspin):
      o,e,cc = self.mo_occ[0,s],self.mo_energy[0,s],self.mo_coeff[0,s,:,:,0]
      oo1,ee1 = np.subtract.outer(o,o).reshape(n*n), np.subtract.outer(e,e).reshape(n*n)
      idx = unravel_index(np.intersect1d(where(oo1<0.0), where(ee1<eemax)), (n,n))
      ivrt,iocc = array(list(set(idx[0]))), array(list(set(idx[1])))
      voi2d = einsum('nia,ma->nmi', einsum('iab,nb->nia', i2d, cc[ivrt]), cc[iocc])
      t2osc = 2.0/3.0*einsum('voi,voi->vo', voi2d, voi2d)
      t2w = np.subtract.outer(e[ivrt],e[iocc])
      t2o = -np.subtract.outer(o[ivrt],o[iocc])

      for iw,w in enumerate(comega):
        p[iw] += 0.5*(t2osc*((t2o/(w-t2w))-(t2o/(w+t2w)))).sum()
    return p
Example 4
Project: NeuroKit   Author: neuropsychology   File: ecg_rsa.py    License: MIT License
def _ecg_rsa_cycles(signals):
    """Extract respiratory cycles."""
    inspiration_onsets = np.intersect1d(
        np.where(signals["RSP_Phase"] == 1)[0], np.where(signals["RSP_Phase_Completion"] == 0)[0], assume_unique=True
    )

    expiration_onsets = np.intersect1d(
        np.where(signals["RSP_Phase"] == 0)[0], np.where(signals["RSP_Phase_Completion"] == 0)[0], assume_unique=True
    )

    cycles_length = np.diff(inspiration_onsets)

    return {
        "RSP_Inspiration_Onsets": inspiration_onsets,
        "RSP_Expiration_Onsets": expiration_onsets,
        "RSP_Cycles_Length": cycles_length,
    } 
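
Example 4 passes assume_unique=True because np.where() already yields unique, sorted indices. A short sketch of the flag on toy data (not from NeuroKit): it skips the internal deduplication step, which is faster, but the result is only guaranteed correct when both inputs really are unique.

import numpy as np

a = np.array([0, 2, 5, 7])   # unique indices, e.g. from np.where()
b = np.array([2, 3, 5, 8])

# Skips the internal np.unique() calls; correct only for unique inputs.
print(np.intersect1d(a, b, assume_unique=True))   # [2 5]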
Example 5
Project: cactus-maml   Author: kylehkhsu   File: task_generator.py    License: MIT License
def get_partition_from_splits(self, splits):
        num_splits = len(splits)
        splits_per_partition = int(np.ceil(np.log2(self.num_classes)))  # builtin int; np.int was removed in NumPy 1.24

        num_failed = 0
        while True:
            which_splits = np.random.choice(num_splits, splits_per_partition, replace=False)
            splits_for_this_partition = [splits[i] for i in which_splits]
            partition = defaultdict(list)
            num_big_enough_classes = 0
            for i_class, above_or_belows in enumerate(product([0, 1], repeat=splits_per_partition)):
                zones = [splits_for_this_partition[i][above_or_belows[i]] for i in range(splits_per_partition)]
                indices = reduce(np.intersect1d, zones)
                if len(indices) >= self.num_samples_per_class:
                    num_big_enough_classes += 1
                    partition[i_class].extend(indices.tolist())
            if num_big_enough_classes >= self.num_classes:
                break
            else:
                num_failed += 1
        return partition, num_failed 
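
Example 5 intersects more than two index arrays by folding np.intersect1d over a list with functools.reduce. The same pattern in isolation (made-up data):

import numpy as np
from functools import reduce

zones = [np.array([1, 2, 3, 4]),
         np.array([2, 3, 4, 5]),
         np.array([0, 2, 4, 6])]

# intersect1d is binary, so fold it across the list of arrays
print(reduce(np.intersect1d, zones))   # [2 4]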
Example 6
Project: Pointnet2.ScanNet   Author: daveredrum   File: eval.py    License: MIT License
def compute_miou(coords, preds, targets, weights):
    coords, preds, targets, weights = filter_points(coords, preds, targets, weights)
    seen_classes = np.unique(targets)
    mask = np.zeros(CONF.NUM_CLASSES)
    mask[seen_classes] = 1

    pointmiou = np.zeros(CONF.NUM_CLASSES)
    voxmiou = np.zeros(CONF.NUM_CLASSES)

    uvidx, uvlabel, _ = point_cloud_label_to_surface_voxel_label_fast(coords, np.concatenate((np.expand_dims(targets,1),np.expand_dims(preds,1)),axis=1), res=0.02)
    for l in seen_classes:
        target_label = np.arange(targets.shape[0])[targets==l]
        pred_label = np.arange(preds.shape[0])[preds==l]
        num_intersection_label = np.intersect1d(pred_label, target_label).shape[0]
        num_union_label = np.union1d(pred_label, target_label).shape[0]
        pointmiou[l] = num_intersection_label / (num_union_label + 1e-8)

        target_label_vox = uvidx[(uvlabel[:, 0] == l)]
        pred_label_vox = uvidx[(uvlabel[:, 1] == l)]
        num_intersection_label_vox = np.intersect1d(pred_label_vox, target_label_vox).shape[0]
        num_union_label_vox = np.union1d(pred_label_vox, target_label_vox).shape[0]
        voxmiou[l] = num_intersection_label_vox / (num_union_label_vox + 1e-8)

    return pointmiou, voxmiou, mask 
Example 7
Project: TheCannon   Author: annayqho   File: pull_data.py    License: MIT License
def find_colors(ref_id, ref_flux, ref_ivar):
    # Find colors
    print("Finding colors")
    a = pyfits.open(DATA_DIR + "/lamost_catalog_colors.fits")
    data = a[1].data
    a.close()
    all_ids = data['LAMOST_ID_1']
    all_ids = np.array([val.strip() for val in all_ids])
    ref_id_col = np.intersect1d(all_ids, ref_id)
    inds = np.array([np.where(all_ids==val)[0][0] for val in ref_id_col])
    all_col, all_col_ivar = get_colors(
            DATA_DIR + "/lamost_catalog_colors.fits")
    col = all_col[:,inds]
    col_ivar = all_col_ivar[:,inds]
    bad_ivar = np.logical_or(np.isnan(col_ivar), col_ivar==np.inf)
    col_ivar[bad_ivar] = 0.0
    bad_flux = np.logical_or(np.isnan(col), col==np.inf)
    col[bad_flux] = 1.0
    col_ivar[bad_flux] = 0.0
    # add them to the wl, flux and ivar arrays
    inds = np.array([np.where(ref_id==val)[0][0] for val in ref_id_col])
    ref_flux_col = np.hstack((ref_flux[inds], col.T))
    ref_ivar_col = np.hstack((ref_ivar[inds], col_ivar.T))
    return ref_id_col, ref_flux_col, ref_ivar_col 
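
Examples 7 and 8 intersect arrays of LAMOST ID strings; np.intersect1d() is not limited to numeric dtypes. A sketch with invented IDs:

import numpy as np

all_ids = np.array(['obj_03', 'obj_01', 'obj_07'])
ref_id = np.array(['obj_07', 'obj_02', 'obj_01'])

# Works on string arrays too; the result is sorted lexicographically.
print(np.intersect1d(all_ids, ref_id))   # ['obj_01' 'obj_07']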
Example 8
Project: TheCannon   Author: annayqho   File: pull_data.py    License: MIT License
def find_colors(ref_id, ref_flux, ref_ivar):
    # Find colors
    DATA_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age"
    print("Finding colors")
    a = pyfits.open(DATA_DIR + "/lamost_catalog_colors.fits")
    data = a[1].data
    a.close()
    all_ids = data['LAMOST_ID_1']
    all_ids = np.array([val.strip() for val in all_ids])
    ref_id_col = np.intersect1d(all_ids, ref_id)
    inds = np.array([np.where(all_ids==val)[0][0] for val in ref_id_col])
    all_id, all_col, all_col_ivar = get_colors(
            DATA_DIR + "/lamost_catalog_colors.fits")
    col = all_col[:,inds]
    col_ivar = all_col_ivar[:,inds]
    bad_ivar = np.logical_or(np.isnan(col_ivar), col_ivar==np.inf)
    col_ivar[bad_ivar] = 0.0
    bad_flux = np.logical_or(np.isnan(col), col==np.inf)
    col[bad_flux] = 1.0
    col_ivar[bad_flux] = 0.0
    # add them to the wl, flux and ivar arrays
    inds = np.array([np.where(ref_id==val)[0][0] for val in ref_id_col])
    ref_flux_col = np.hstack((ref_flux[inds], col.T))
    ref_ivar_col = np.hstack((ref_ivar[inds], col_ivar.T))
    return ref_id_col, ref_flux_col, ref_ivar_col 
Example 9
Project: sunets   Author: shahsohil   File: coco_loader.py    License: MIT License
def filtertraindata(self):
        datapath = get_data_path('coco')
        train_list = tuple(open(datapath + 'annotations/train2014.txt', 'r'))
        val_list = tuple(open(datapath + 'annotations/val2014.txt', 'r'))
        total_list = ['/train2014/'+id_.rstrip() for id_ in train_list] + ['/val2014/'+id_.rstrip() for id_ in val_list]

        annotation_path = os.path.join(datapath, 'seg_mask')
        aug_list = []
        for filename in total_list:
            lbl_path = annotation_path + filename + '.png'
            lbl = Image.open(lbl_path).convert('P')
            lbl = np.array(lbl, dtype=np.int32)
            if np.sum(pascal_map[lbl] != 0) > 1000 and np.intersect1d(np.unique(lbl),pascal_classes).any():
                aug_list.append(filename)

        val_aug_list = random.sample(aug_list, 1500)
        train_aug_list = list(set(aug_list) - set(val_aug_list))
        with open(os.path.join(datapath, 'annotations', 'train_aug.txt'), 'w') as txtfile:
            [txtfile.write(file + '\n') for file in train_aug_list]
        with open(os.path.join(datapath, 'annotations', 'val.txt'), 'w') as txtfile:
            [txtfile.write(file + '\n') for file in val_aug_list] 
Example 10
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_split.py    License: MIT License
def test_shuffle_kfold():
    # Check the indices are shuffled properly
    kf = KFold(3)
    kf2 = KFold(3, shuffle=True, random_state=0)
    kf3 = KFold(3, shuffle=True, random_state=1)

    X = np.ones(300)

    all_folds = np.zeros(300)
    for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
            kf.split(X), kf2.split(X), kf3.split(X)):
        for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
            # Assert that there is no complete overlap
            assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))

        # Set all test indices in successive iterations of kf2 to 1
        all_folds[te2] = 1

    # Check that all indices are returned in the different test folds
    assert_equal(sum(all_folds), 300) 
Example 11
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_split.py    License: MIT License
def test_stratified_shuffle_split_overlap_train_test_bug():
    # See https://github.com/scikit-learn/scikit-learn/issues/6121 for
    # the original bug report
    y = [0, 1, 2, 3] * 3 + [4, 5] * 5
    X = np.ones_like(y)

    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=0.5, random_state=0)

    train, test = next(sss.split(X=X, y=y))

    # no overlap
    assert_array_equal(np.intersect1d(train, test), [])

    # complete partition
    assert_array_equal(np.union1d(train, test), np.arange(len(y))) 
Example 12
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_split.py    License: MIT License
def test_stratified_shuffle_split_multilabel():
    # fix for issue 9037
    for y in [np.array([[0, 1], [1, 0], [1, 0], [0, 1]]),
              np.array([[0, 1], [1, 1], [1, 1], [0, 1]])]:
        X = np.ones_like(y)
        sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
        train, test = next(sss.split(X=X, y=y))
        y_train = y[train]
        y_test = y[test]

        # no overlap
        assert_array_equal(np.intersect1d(train, test), [])

        # complete partition
        assert_array_equal(np.union1d(train, test), np.arange(len(y)))

        # correct stratification of entire rows
        # (by design, here y[:, 0] uniquely determines the entire row of y)
        expected_ratio = np.mean(y[:, 0])
        assert_equal(expected_ratio, np.mean(y_train[:, 0]))
        assert_equal(expected_ratio, np.mean(y_test[:, 0])) 
Example 13
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: util.py    License: BSD 2-Clause "Simplified" License
def intersect_sim(array_1, array_2):
    """Calculate the simiarity of two arrays
    by using intersection / union
    """
    sim = float(np.intersect1d(array_1, array_2).size) / \
        float(np.union1d(array_1, array_2).size)
    return sim 
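
To see this intersection-over-union similarity in action, a quick check of the same computation on toy arrays (values invented):

import numpy as np

a = np.array([1, 2, 3, 4])
b = np.array([3, 4, 5, 6])

# |{3, 4}| / |{1, 2, 3, 4, 5, 6}| = 2/6
sim = float(np.intersect1d(a, b).size) / float(np.union1d(a, b).size)
print(sim)   # 0.333...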
Example 14
Project: neuropythy   Author: noahbenson   File: retinotopy.py    License: GNU Affero General Public License v3.0
def fit_pRF_radius(ctx, retinotopy=Ellipsis, mask=None, weight=Ellipsis, slope_only=False):
    '''
    fit_pRF_radius(ctx) fits a line, m*eccen + b, to the pRF radius and yields the tuple (m, b).

    The following options may be given:
      * retinotopy (default: Ellipsis) specifies the prefix for the retinotopy (passed to
        retinotopy_data() to find the retinotopic dataset).
      * mask (default: None) specifies the mask over which to perform the calculation. This is
        passed to the to_mask() function. In the case that mask is a set or frozenset, then it is
        treated as a conjunction (intersection) of masks.
      * weight (default: Ellipsis) specifies that a weight should be used; if this is True or Ellipsis,
        will use the variance_explained if it is part of the retinotopy dataset; if this is False or
        None, uses no weight; otherwise, this must be a weight property or property name.
      * slope_only (default: False) may be set to True to instead fit radius = m*eccen and return
        only m.
    '''
    rdat = retinotopy_data(ctx, retinotopy)
    if 'radius' not in rdat: raise ValueError('No pRF radius found in dataset %s' % retinotopy)
    rad = rdat['radius']
    (ang,ecc) = as_retinotopy(rdat, 'visual')
    if isinstance(mask, (set, frozenset)):
        mask = reduce(np.intersect1d, [ctx.mask(m, indices=True) for m in mask])
    else: mask = ctx.mask(mask, indices=True)
    # get a weight if provided:
    if weight in [False, None]: wgt = np.ones(rad.shape)
    elif weight in [True, Ellipsis]:
        if 'variance_explained' in rdat: wgt = rdat['variance_explained']
        else: wgt = np.ones(rad.shape)
    else: wgt = ctx.property(weight)
    # get the relevant eccen and radius values
    (ecc,rad,wgt) = [x[mask] for x in (ecc,rad,wgt)]
    # fit a line...
    if slope_only:
        ecc = np.reshape(ecc * wgt, (len(ecc), 1))
        rad = np.reshape(rad * wgt, (len(rad), 1))
        return np.linalg.lstsq(ecc, rad)[0]
    else:
        return tuple(np.polyfit(ecc, rad, 1, w=wgt)) 
Example 15
Project: EXOSIMS   Author: dsavransky   File: randomWalkScheduler2.py    License: BSD 3-Clause "New" or "Revised" License
def choose_next_target(self, old_sInd, sInds, slewTimes, intTimes):
        """Choose next target at random
        
        Args:
            old_sInd (integer):
                Index of the previous target star
            sInds (integer array):
                Indices of available targets
            slewTimes (astropy quantity array):
                slew times to all stars (must be indexed by sInds)
            intTimes (astropy Quantity array):
                Integration times for detection in units of day
        
        Returns:
            sInd (integer):
                Index of next target star
            slewTime (astropy Quantity):
                Slew time to the chosen target
        
        """
        
        TL = self.TargetList

        # cast sInds to array
        sInds = np.array(sInds, ndmin=1, copy=False)
        occ_sInds = np.where(np.in1d(TL.Name, self.occHIPs))[0]
        n_sInds = np.intersect1d(sInds, occ_sInds)
        
        # pick one
        if len(n_sInds) == 0:
            sInd = np.random.choice(sInds)
        else:
            sInd = np.random.choice(n_sInds)
        
        return sInd, slewTimes[sInd] 
Example 16
Project: EXOSIMS   Author: dsavransky   File: SS_char_only2.py    License: BSD 3-Clause "New" or "Revised" License
def choose_next_target(self, old_sInd, sInds, slewTimes, intTimes):
        """Choose next target at random
        
        Args:
            old_sInd (integer):
                Index of the previous target star
            sInds (integer array):
                Indices of available targets
            slewTimes (astropy quantity array):
                slew times to all stars (must be indexed by sInds)
            intTimes (astropy Quantity array):
                Integration times for detection in units of day
        
        Returns:
            sInd (integer):
                Index of next target star
        
        """
        
        TL = self.TargetList

        # cast sInds to array
        sInds = np.array(sInds, ndmin=1, copy=False)
        occ_sInds = np.where(np.in1d(TL.Name, self.occHIPs))[0]
        n_sInds = np.intersect1d(sInds, occ_sInds)
        
        # pick one
        if len(n_sInds) == 0:
            sInd = np.random.choice(sInds)
        else:
            sInd = np.random.choice(n_sInds)
        
        return sInd 
Example 17
Project: pyscf   Author: pyscf   File: mf.py    License: Apache License 2.0
def polariz_nonin_ave_matelem(self, comega):
    """ Computes the non-interacting optical polarizability via the dipole matrix elements."""
    from scipy.sparse import spmatrix

    x,y,z = map(spmatrix.toarray, self.dipole_coo())
    i2d = array((x,y,z))
    n = self.mo_occ.shape[-1]
    eemax = max(comega.real)+20.0*max(comega.imag)
    
    p = zeros((len(comega)), dtype=np.complex128) # result to accumulate

    #print(__name__, 'Fermi energy', self.fermi_energy)
    #np.set_printoptions(linewidth=1000)
    for s in range(self.nspin):
      o,e,cc = self.mo_occ[0,s],self.mo_energy[0,s],self.mo_coeff[0,s,:,:,0]
      #print(o[:10])
      #print(e[:10])

      oo1,ee1 = np.subtract.outer(o,o).reshape(n*n), np.subtract.outer(e,e).reshape(n*n)
      idx = unravel_index( np.intersect1d(where(oo1<0.0), where(ee1<eemax)), (n,n))
      ivrt,iocc = array(list(set(idx[0]))), array(list(set(idx[1])))
      voi2d = einsum('nia,ma->nmi', einsum('iab,nb->nia', i2d, cc[ivrt]), cc[iocc])
      t2osc = 2.0/3.0*einsum('voi,voi->vo', voi2d, voi2d)
      t2w =  np.subtract.outer(e[ivrt],e[iocc])
      t2o = -np.subtract.outer(o[ivrt],o[iocc])

      for iw,w in enumerate(comega):
        p[iw] += 0.5*(t2osc*((t2o/(w-t2w))-(t2o/(w+t2w)))).sum()
      
    return p 
Example 18
Project: ciftify   Author: edickie   File: report.py    License: MIT License
def get_overlaping_idx(clust_id1, clust_atlas1, clust_id2, clust_atlas2):
    '''
    find the indices that overlap for two labels across two maps
    '''
    label1_idx = get_cluster_indices(int(clust_id1), clust_atlas1)
    label2_idx = get_cluster_indices(int(clust_id2), clust_atlas2)
    overlap_idx = np.intersect1d(label1_idx,label2_idx)
    return(overlap_idx) 
Example 19
Project: ciftify   Author: edickie   File: ciftify_peaktable.py    License: MIT License
def calc_atlas_overlap(df, wb_structure, clust_label_array, surf_va, atlas_settings):
    '''
    calculates the surface area column of the peaks table
    needs hemisphere specific inputs
    '''

    ## load atlas
    atlas_label_array, atlas_df = load_hemisphere_labels(atlas_settings['path'],
                                                       wb_structure,
                                                       map_number = atlas_settings['map_number'])
    atlas_prefix = atlas_settings['name']

    ## create new cols to hold the data
    df[atlas_prefix] = pd.Series('not_calculated', index = df.index)
    overlap_col = '{}_overlap'.format(atlas_prefix)
    df[overlap_col] = pd.Series(-99.0, index = df.index)

    for pd_idx in df.index.tolist():
        ## atlas integer label is the integer at the vertex
        atlas_label = atlas_label_array[df.loc[pd_idx, 'vertex']]

        ## the atlas column holds the labelname for this label
        df.loc[pd_idx, atlas_prefix] = atlas_df.iloc[atlas_label, 0]

        ## overlap indices are the intersection of the cluster and the atlas integer masks
        clust_mask = np.where(clust_label_array == df.loc[pd_idx, 'clusterID'])[0]
        atlas_mask = np.where(atlas_label_array == atlas_label)[0]
        overlap_mask = np.intersect1d(clust_mask,atlas_mask)

        ## overlap area is the area of the overlapping region over the total cluster area
        clust_area = df.loc[pd_idx, 'area']
        overlap_area = sum(surf_va[overlap_mask])
        df.loc[pd_idx, overlap_col] = overlap_area/clust_area

    return(df) 
Example 20
Project: simnibs   Author: simnibs   File: mesh_io.py    License: GNU General Public License v3.0
def find_shared_nodes(self, tags):
        ''' Finds the nodes which are shared by all given tags

        Parameters
        -----------
        tags: list of integers
            Tags where to search

        Returns
        ---------
        shared_nodes: list of integers
            List of nodes which are shared by all tags
        '''
        if len(tags) < 2:
            raise ValueError('Tags should have at least 2 elements')
        shared_nodes = None
        for t in tags:
            nt = np.unique(self.elm[self.elm.tag1 == t])
            # Remove the -1 that marks triangles
            if nt[0] == -1:
                nt = nt[1:]
            # First iteration
            if shared_nodes is None:
                shared_nodes = nt
            # Other iterations
            else:
                shared_nodes = np.intersect1d(shared_nodes, nt)
        return shared_nodes 
Example 21
Project: python-control   Author: python-control   File: statesp.py    License: BSD 3-Clause "New" or "Revised" License
def _remove_useless_states(self):
        """Check for states that don't do anything, and remove them.

        Scan the A, B, and C matrices for rows or columns of zeros.  If the
        zeros are such that a particular state has no effect on the input-output
        dynamics, then remove that state from the A, B, and C matrices.

        """

        # Search for useless states and get indices of these states.
        #
        # Note: shape from np.where depends on whether we are storing state
        # space objects as np.matrix or np.array.  Code below will work
        # correctly in either case.
        ax1_A = np.where(~self.A.any(axis=1))[0]
        ax1_B = np.where(~self.B.any(axis=1))[0]
        ax0_A = np.where(~self.A.any(axis=0))[-1]
        ax0_C = np.where(~self.C.any(axis=0))[-1]
        useless_1 = np.intersect1d(ax1_A, ax1_B, assume_unique=True)
        useless_2 = np.intersect1d(ax0_A, ax0_C, assume_unique=True)
        useless = np.union1d(useless_1, useless_2)

        # Remove the useless states.
        self.A = delete(self.A, useless, 0)
        self.A = delete(self.A, useless, 1)
        self.B = delete(self.B, useless, 0)
        self.C = delete(self.C, useless, 1)

        self.states = self.A.shape[0]
        self.inputs = self.B.shape[1]
        self.outputs = self.C.shape[0] 
Example 22
Project: recruit   Author: Frank-qlu   File: test_numeric.py    License: Apache License 2.0
def test_intersection(self):
        other = Index([1, 2, 3, 4, 5])
        result = self.index.intersection(other)
        expected = Index(np.sort(np.intersect1d(self.index.values,
                                                other.values)))
        tm.assert_index_equal(result, expected)

        result = other.intersection(self.index)
        expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values,
                                                           other.values))))
        tm.assert_index_equal(result, expected) 
Example 23
Project: recruit   Author: Frank-qlu   File: test_numeric.py    License: Apache License 2.0
def test_intersection(self):
        other = Index([2**63, 2**63 + 5, 2**63 + 10, 2**63 + 15, 2**63 + 20])
        result = self.index.intersection(other)
        expected = Index(np.sort(np.intersect1d(self.index.values,
                                                other.values)))
        tm.assert_index_equal(result, expected)

        result = other.intersection(self.index)
        expected = Index(np.sort(np.asarray(np.intersect1d(self.index.values,
                                                           other.values))))
        tm.assert_index_equal(result, expected) 
Example 24
Project: ibllib   Author: int-brain-lab   File: atlas.py    License: MIT License
def get(self, ids) -> Bunch:
        """
        Get a bunch of the name/id
        """
        uid, uind = np.unique(ids, return_inverse=True)
        a, iself, _ = np.intersect1d(self.id, uid, assume_unique=False, return_indices=True)
        return Bunch(id=self.id[iself[uind]], name=self.name[iself[uind]],
                     acronym=self.acronym[iself[uind]]) 
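
Example 24 relies on the return_indices=True form (available since NumPy 1.15), which also returns the positions of the common values in each input array. In isolation (a sketch with invented IDs):

import numpy as np

ids = np.array([10, 20, 30, 40])
wanted = np.array([30, 10, 99])

common, i_ids, i_wanted = np.intersect1d(ids, wanted, return_indices=True)
print(common)     # [10 30]  sorted common values
print(i_ids)      # [0 2]    positions of 10 and 30 in ids
print(i_wanted)   # [1 0]    positions of 10 and 30 in wanted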
Example 25
Project: news-popularity-prediction   Author: MKLab-ITI   File: ranking.py    License: Apache License 2.0
def jaccard_index(x, y):
    nom = np.intersect1d(x, y).size
    denom = np.union1d(x, y).size

    return nom/denom 
Example 26
Project: astroNN   Author: henrysky   File: h5.py    License: MIT License
def filter_apogeeid_list(self, hdulist):
        vscatter = hdulist[1].data['VSCATTER']
        SNR = hdulist[1].data['SNR']
        location_id = hdulist[1].data['LOCATION_ID']
        teff = hdulist[1].data['PARAM'][:, 0]
        Fe = hdulist[1].data['X_H'][:, 17]

        total = range(len(SNR))

        if self.starflagcut is True:
            starflag = hdulist[1].data['STARFLAG']
            fitlered_starflag = np.where(starflag == 0)[0]
        else:
            fitlered_starflag = total

        if self.aspcapflagcut is True:
            aspcapflag = hdulist[1].data['ASPCAPFLAG']
            fitlered_aspcapflag = np.where(aspcapflag == 0)[0]
        else:
            fitlered_aspcapflag = total

        fitlered_temp_lower = np.where((self.teff_low <= teff))[0]
        fitlered_temp_upper = np.where((self.teff_high >= teff))[0]
        fitlered_vscatter = np.where(vscatter < self.vscattercut)[0]
        fitlered_Fe = np.where(Fe > self.ironlow)[0]
        fitlered_snrlow = np.where(SNR > self.SNR_low)[0]
        fitlered_snrhigh = np.where(SNR < self.SNR_high)[0]
        fitlered_location = np.where(location_id > 1)[0]

        filtered_index = reduce(np.intersect1d,
                                (fitlered_starflag, fitlered_aspcapflag, fitlered_temp_lower, fitlered_vscatter,
                                 fitlered_Fe, fitlered_snrlow, fitlered_snrhigh, fitlered_location,
                                 fitlered_temp_upper))

        print('Total Combined Spectra after filtering: ', filtered_index.shape[0])
        if self.continuum:
            print('Total Individual Visit Spectra there: ', np.sum(hdulist[1].data['NVISITS'][filtered_index]))

        return filtered_index 
Example 27
Project: astroNN   Author: henrysky   File: h5.py    License: MIT License
def load_allowed_index(self):
        with h5py.File(self.h5path, 'r') as F:  # read-only; the with block closes the file
            if self.exclude9999 is True:
                index_not9999 = None
                for counter, tg in enumerate(self.target):
                    if index_not9999 is None:
                        index_not9999 = np.arange(F[f'{tg}'].shape[0])
                    temp_index = np.where(np.array(F[f'{tg}']) != -9999)[0]
                    index_not9999 = reduce(np.intersect1d, (index_not9999, temp_index))

                in_flag = index_not9999
                if self.load_combined is True:
                    in_flag = np.where(np.array(F['in_flag']) == 0)[0]
                elif self.load_combined is False:
                    in_flag = np.where(np.array(F['in_flag']) == 1)[0]

                allowed_index = reduce(np.intersect1d, (index_not9999, in_flag))

            else:
                in_flag = []
                if self.load_combined is True:
                    in_flag = np.where(np.array(F['in_flag']) == 0)[0]
                elif self.load_combined is False:
                    in_flag = np.where(np.array(F['in_flag']) == 1)[0]

                allowed_index = in_flag

        return allowed_index 
Example 28
Project: paramz   Author: sods   File: index_operations.py    License: BSD 3-Clause "New" or "Revised" License
def remove(self, prop, indices):
        if prop in self._properties:
            diff = remove_indices(self[prop], indices)
            removed = numpy.intersect1d(self[prop], indices, True)
            if not index_empty(diff):
                self._properties[prop] = diff
            else:
                del self._properties[prop]
            return removed.astype(int)
        return numpy.array([]).astype(int) 
Example 29
Project: dash-recipes   Author: plotly   File: dash-scattergl-select.py    License: MIT License
def highlight(x, y):
    def callback(*selectedDatas):
        index = df.index
        for i, hover_data in enumerate(selectedDatas):
            selected_index = [
                p['customdata'] for p in selectedDatas[i]['points']
                # the first trace that includes all the data
                if p['curveNumber'] == 0
            ]
            if len(selected_index) > 0:
                index = np.intersect1d(index, selected_index)

        dff = df.iloc[index, :]


        figure = {
            'data': [
                dict({
                    'x': df[x], 'y': df[y], 'text': df.index,
                    'customdata': df.index,
                    'mode':'markers',
                    'type': 'scattergl', 'opacity': 0.1
                }),
                dict({
                    'x': dff[x], 'y': dff[y], 'text': dff.index,
                    'mode':'markers',
                    'type': 'scattergl', 'textposition': 'top',
                }),
            ],
            'layout': {
                'margin': {'l': 20, 'r': 0, 'b': 20, 't': 5},
                'dragmode': 'select',
                'hovermode': 'closest',
                'showlegend': False
            }
        }


        return figure

    return callback 
Example 30
Project: TKP   Author: guxinqian   File: eval_metrics.py    License: Apache License 2.0
def evaluate(distmat, q_pids, g_pids, q_camids, g_camids):
    num_q, num_g = distmat.shape
    index = np.argsort(distmat, axis=1) # from small to large

    num_no_gt = 0 # num of query imgs without groundtruth
    num_r1 = 0
    CMC = np.zeros(len(g_pids))
    AP = 0

    for i in range(num_q):
        # groundtruth index
        query_index = np.argwhere(g_pids==q_pids[i])
        camera_index = np.argwhere(g_camids==q_camids[i])
        good_index = np.setdiff1d(query_index, camera_index, assume_unique=True)
        if good_index.size == 0:
            num_no_gt += 1
            continue
        # remove gallery samples that have the same pid and camid with query
        junk_index = np.intersect1d(query_index, camera_index)

        ap_tmp, CMC_tmp = compute_ap_cmc(index[i], good_index, junk_index)
        if CMC_tmp[0]==1:
            num_r1 += 1
        CMC = CMC + CMC_tmp
        AP += ap_tmp

    if num_no_gt > 0:
        print("{} query imgs do not have groundtruth.".format(num_no_gt))

    # print("R1:{}".format(num_r1))

    CMC = CMC / (num_q - num_no_gt)
    mAP = AP / (num_q - num_no_gt)

    return CMC, mAP
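
Example 30 pairs np.intersect1d() with its set-operation sibling np.setdiff1d() to split gallery indices into "good" and "junk" sets. The relationship on toy data (a sketch):

import numpy as np

query_index = np.array([2, 5, 7, 9])
camera_index = np.array([5, 9, 11])

good = np.setdiff1d(query_index, camera_index, assume_unique=True)  # [2 7], in query only
junk = np.intersect1d(query_index, camera_index)                    # [5 9], in both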