Python numpy.where() Examples

The following are code examples showing how to use numpy.where(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the examples you don't like. You can also save this page to your account.

Example 1
Project: segmentation_DLMI   Author: imatge-upc   File: data_loader_test.py    (license) View Source Project 7 votes vote down vote up
def load_ROI_mask(self):
        """Build a boolean region-of-interest mask from the FLAIR volume.

        Voxels with intensity below 90 are treated as background; the
        remaining foreground is smoothed with a morphological binary
        closing so holes inside the brain region are filled.

        Returns:
            np.ndarray (bool): ROI mask with the same shape as the image.
        """
        proxy = nib.load(self.FLAIR_FILE)
        image_array = np.asarray(proxy.dataobj)

        # Threshold: everything below 90 is background.
        mask = np.ones_like(image_array)
        mask[image_array < 90] = 0

        # Pad one voxel more than the structuring element so the closing
        # cannot touch the volume border; crop the padding off afterwards.
        struct_element_size = (20, 20, 20)
        mask_augmented = np.pad(mask, [(21, 21), (21, 21), (21, 21)],
                                'constant', constant_values=(0, 0))
        # np.int was removed in NumPy 1.24 — use the builtin int instead.
        mask_augmented = binary_closing(
            mask_augmented,
            structure=np.ones(struct_element_size, dtype=bool)).astype(int)

        return mask_augmented[21:-21, 21:-21, 21:-21].astype('bool')
Example 2
Project: Homology_BG   Author: jyotikab   File: checkPDFeaturesStrRed.py    (license) View Source Project 6 votes vote down vote up
def spec_entropy(Rates, time_range=[], bin_w=5., freq_range=[]):
    '''Calculate the normalized spectral entropy of the rate signal.

    Args:
        Rates: rate signal passed to mypsd.
        time_range: time window forwarded to mypsd.
        bin_w: bin width forwarded to mypsd.
        freq_range: optional [low, high] band to restrict the spectrum.

    Returns:
        (spec_ent, dfreq, maxFreq, perMax): entropy normalized by log(k),
        frequency resolution, peak frequency (scaled), and the peak's
        percentage of total power.
    '''
    power, freq, dfreq, dummy, dummy = mypsd(Rates, time_range, bin_w=bin_w)
    if len(freq_range) > 0:
        band = (freq >= freq_range[0]) & (freq <= freq_range[1])
        power = power[band]
        freq = freq[band]
    # Peak statistics are computed unconditionally: the original only set
    # maxFreq/perMax inside the freq_range branch (and with inconsistent
    # tab indentation), raising NameError when freq_range was empty.
    maxFreq = freq[np.where(power == np.max(power))] * 1000 * 100
    perMax = (np.max(power) / np.sum(power)) * 100
    k = len(freq)
    power = power / sum(power)
    sum_power = 0
    for ii in range(k):
        sum_power += (power[ii] * np.log(power[ii]))
    spec_ent = -(sum_power / np.log(k))
    return spec_ent, dfreq, maxFreq, perMax
Example 3
Project: cellranger   Author: 10XGenomics   File: multigenome.py    (license) View Source Project 6 votes vote down vote up
def _classify_gems(counts0, counts1):
    """Label each GEM as genome-0, genome-1, or a multiplet.

    A GEM is a multiplet when its counts reach the per-genome threshold
    for both transcriptomes; otherwise it is assigned to the genome with
    the larger count.  Assumes most GEMs are single-cell, so the two
    count distributions are modeled independently.
    """
    # Start from the default threshold; refine when each genome has at
    # least one majority GEM.
    thresh0 = thresh1 = cr_constants.DEFAULT_MULTIPLET_THRESHOLD
    majority0 = counts0 > counts1
    majority1 = counts1 > counts0
    if majority0.sum() >= 1 and majority1.sum() >= 1:
        thresh0 = np.percentile(counts0[majority0],
                                cr_constants.MULTIPLET_PROB_THRESHOLD)
        thresh1 = np.percentile(counts1[majority1],
                                cr_constants.MULTIPLET_PROB_THRESHOLD)

    is_multiplet = (counts0 >= thresh0) & (counts1 >= thresh1)
    width = max(len(cls) for cls in cr_constants.GEM_CLASSES)
    result = np.where(is_multiplet,
                      cr_constants.GEM_CLASS_MULTIPLET,
                      cr_constants.GEM_CLASS_GENOME0).astype(np.dtype('|S%d' % width))
    # Non-multiplet GEMs leaning toward genome 1 get relabelled.
    result[np.logical_and(np.logical_not(result == cr_constants.GEM_CLASS_MULTIPLET),
                          majority1)] = cr_constants.GEM_CLASS_GENOME1
    return result
Example 4
Project: genomedisco   Author: kundajelab   File: plot_quasar_transform.py    (MIT License) View Source Project 6 votes vote down vote up
def load_data(infile, chroms, resolutions):
    """Load per-chromosome QuASAR matrices from an HDF5-like mapping.

    Args:
        infile: mapping providing 'starts', 'chromosomes' and, per
            chromosome/resolution, 'dist.<chrom>.<res>',
            'valid.<chrom>.<res>' and 'corr.<chrom>.<res>' datasets.
        chroms: chromosomes to keep.
        resolutions: bin sizes (bp) to load.

    Returns:
        dict: data[resolution][chrom] = [start, corr * dist, valid mask].
    """
    starts = infile['starts'][...]
    chromosomes = infile['chromosomes'][...]
    data = {}
    for res in resolutions:
        data[res] = {}
        for c_idx, chrom in enumerate(chromosomes):
            if chrom not in chroms:
                continue
            # Align the start coordinate to the resolution grid (integer
            # division preserves the original Python-2 `/` semantics).
            start = (starts[c_idx] // res) * res
            dist = infile['dist.%s.%i' % (chrom, res)][...]
            valid_rows = infile['valid.%s.%i' % (chrom, res)][...]
            corr = infile['corr.%s.%i' % (chrom, res)][...]
            N, M = corr.shape
            # The original pre-allocated a boolean array here (using the
            # removed numpy.bool alias) and immediately overwrote it with
            # this int32 array; the dead assignment is dropped.  The inner
            # loop variable is also renamed so it no longer shadows the
            # enumerate index.
            valid = numpy.zeros((N, M), dtype=numpy.int32)
            for d in range(min(N - 1, M)):
                P = N - d - 1
                valid[:P, d] = valid_rows[(d + 1):] * valid_rows[:P]
            temp = corr * dist
            # Mask out bins whose correlation*distance blew up to infinity.
            valid[numpy.where(numpy.abs(temp) == numpy.inf)] = 0
            data[res][chrom] = [start, temp, valid]
    return data
Example 5
Project: genomedisco   Author: kundajelab   File: simulations_from_real_data.py    (MIT License) View Source Project 6 votes vote down vote up
def shift_dataset(m, boundarynoise):
    """Circularly shift the nonzero block of a symmetric matrix.

    The submatrix formed by the nonzero rows/columns of *m* is rolled by
    *boundarynoise* along both axes; its upper triangle is then written
    back symmetrically into a zero matrix of the original shape.

    Args:
        m: square symmetric matrix (numpy array).
        boundarynoise: shift amount; 0 returns *m* unchanged.

    Returns:
        numpy array of the same shape as m.
    """
    if boundarynoise == 0:
        return m
    nonzero_rows = np.where(m.any(axis=1))[0]
    # Fancy indexing copies, so the input matrix is never mutated.
    small_m = m[np.ix_(nonzero_rows, nonzero_rows)]
    small_m = np.roll(small_m, boundarynoise, axis=0)
    small_m = np.roll(small_m, boundarynoise, axis=1)
    # Symmetrize from the upper triangle, matching the original
    # element-wise loop (which copied small_m[i, j] for j >= i and
    # mirrored it).  Python-2 debug print statements were removed.
    upper = np.triu(small_m)
    sym = upper + upper.T - np.diag(np.diag(upper))
    outm = np.zeros(m.shape)
    outm[np.ix_(nonzero_rows, nonzero_rows)] = sym
    return outm
Example 6
Project: HandDetection   Author: YunqiuXu   File: proposal_target_layer.py    (license) View Source Project 6 votes vote down vote up
def _get_bbox_regression_labels(bbox_target_data, num_classes):
  """Expand compact N x (class, tx, ty, tw, th) targets to N x 4K form.

  Only the 4 columns belonging to each row's class receive non-zero
  regression targets and inside-weights; all other entries stay zero.

  Returns:
      bbox_targets (ndarray): N x 4K blob of regression targets.
      bbox_inside_weights (ndarray): N x 4K blob of loss weights.
  """
  clss = bbox_target_data[:, 0]
  bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
  bbox_inside_weights = np.zeros_like(bbox_targets)
  # Rows with class 0 (background) keep all-zero targets.
  for ind in np.where(clss > 0)[0]:
    col = int(4 * clss[ind])
    bbox_targets[ind, col:col + 4] = bbox_target_data[ind, 1:]
    bbox_inside_weights[ind, col:col + 4] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
  return bbox_targets, bbox_inside_weights
Example 7
Project: HandDetection   Author: YunqiuXu   File: train_val.py    (license) View Source Project 6 votes vote down vote up
def filter_roidb(roidb):
  """Drop roidb entries that contain neither foreground nor background RoIs."""

  def is_valid(entry):
    # An entry is usable when it has at least one foreground RoI
    # (overlap >= FG_THRESH) or at least one background RoI (overlap in
    # [BG_THRESH_LO, BG_THRESH_HI)).
    overlaps = entry['max_overlaps']
    has_fg = np.any(overlaps >= cfg.TRAIN.FG_THRESH)
    has_bg = np.any((overlaps < cfg.TRAIN.BG_THRESH_HI) &
                    (overlaps >= cfg.TRAIN.BG_THRESH_LO))
    return bool(has_fg or has_bg)

  before = len(roidb)
  kept = [entry for entry in roidb if is_valid(entry)]
  print('Filtered {} roidb entries: {} -> {}'.format(before - len(kept),
                                                     before, len(kept)))
  return kept
Example 8
Project: pybot   Author: spillai   File: recognition_utils.py    (license) View Source Project 6 votes vote down vote up
def recall_from_IoU(IoU, samples=500):
    """Compute recall as a function of the IoU threshold.

    Args:
        IoU: 1-D sequence of intersection-over-union values.
        samples: number of evenly spaced thresholds in [0, 1].

    Returns:
        (recall, IoU_thresholds): recall[i] is the fraction of IoU values
        that meet or exceed IoU_thresholds[i].

    Raises:
        ValueError: if IoU is neither a list nor a 1-D array.
    """
    if not (isinstance(IoU, list) or IoU.ndim == 1):
        raise ValueError('IoU needs to be a list or 1-D')
    iou = np.float32(IoU)

    IoU_thresholds = np.linspace(0.0, 1.0, samples)
    # Fraction of detections at or above each threshold.  (The original
    # loop also kept tp/relevant counters that were never used; they are
    # removed here.)
    recall = np.array([np.mean(iou >= th) for th in IoU_thresholds])
    return recall, IoU_thresholds

# =====================================================================
# Generic utility functions for object recognition
# --------------------------------------------------------------------- 
Example 9
Project: pybot   Author: spillai   File: recognition_utils.py    (license) View Source Project 6 votes vote down vote up
def mine(self, im, gt_bboxes):
        """Generate hard-negative proposals for an image.

        Runs the proposer on *im* and keeps only the boxes whose best IoU
        with any ground-truth box is below 0.1, capped at
        self.num_proposals_.

        Returns:
            (bboxes, targets) for the retained proposals.
        """
        bboxes = self.proposer_.process(im)

        if len(gt_bboxes):
            # iou has shape [num_proposals x num_gt]; keep proposals that
            # barely overlap every ground-truth box.
            iou = brute_force_match(
                bboxes, gt_bboxes,
                match_func=lambda x, y: intersection_over_union(x, y))
            keep, = np.where(np.max(iou, axis=1) < 0.1)
            bboxes = bboxes[keep]

        bboxes = bboxes[:self.num_proposals_]
        return bboxes, self.generate_targets(len(bboxes))
Example 10
Project: sea-lion-counter   Author: rdinse   File: data_preparation.py    (license) View Source Project 6 votes vote down vote up
def compHistDistance(h1, h2):
  """Histogram distance based on smooth-stepped per-bin comparison.

  Both histograms are normalized to unit mass (all-zero histograms are
  left untouched) and the result is clamped to be non-negative.
  """
  def _normalize(h):
    total = np.sum(h)
    if total == 0:
        return h
    return h / total

  def _smoothstep(x, x_min=0., x_max=1., k=2.):
      # Linearly rescale x onto [0, 1], then ease with the regularized
      # incomplete beta function.
      slope = 1. / (x_max - x_min)
      t = slope * x - slope * x_min
      return betainc(k, k, np.clip(t, 0., 1.))

  def _fn(X, Y, k):
    s = _smoothstep(Y, 0, (1 - Y) * X + Y + .1)
    edge = np.sqrt(2 * X) * _smoothstep(X, 0., 1. / k, 2)
    return 4. * (1. - s) * edge + 2. * s * (1. - 2. * edge - 0.5)

  h1 = _normalize(h1)
  h2 = _normalize(h2)

  return max(0, np.sum(_fn(h2, h1, len(h1))))
Example 11
Project: tissue_analysis   Author: VirtualPlants   File: spatial_image_analysis.py    (license) View Source Project 6 votes vote down vote up
def cells_walls_coords(self):
        """Coordinates of the voxels that define cell walls.

        A wall voxel is any voxel in contact with a voxel of a different
        label.

        Returns:
            Tuple of coordinate lists: (x, y, z) in 3D, (x, y) in 2D.
        """
        if self.is3D():
            # Keep only the boundary voxels of every cell.
            image = hollow_out_cells(self.image, self.background, verbose=True)
        else:
            # 2D case: blank out the background label instead.
            image = copy.copy(self.image)
            image[np.where(image == self.background)] = 0

        # Non-zero voxels are exactly the wall voxels; one list per axis.
        return tuple(list(axis) for axis in np.where(image != 0))
Example 12
Project: tissue_analysis   Author: VirtualPlants   File: spatial_image_analysis.py    (license) View Source Project 6 votes vote down vote up
def fuse_labels_in_image(self, labels, verbose=True):
        """Fuse the given labels in the image to their minimum value.

        Every voxel carrying one of *labels* is rewritten to min(labels).

        Args:
            labels (list): at least two labels; must not contain the
                background label.
            verbose (bool): print progress while fusing.
        """
        assert isinstance(labels, list) and len(labels) >= 2
        assert self.background() not in labels

        min_lab = min(labels)
        # Work on a copy so the caller's list is not mutated (the original
        # removed min_lab from the argument in place).
        to_fuse = list(labels)
        to_fuse.remove(min_lab)
        N = len(to_fuse)
        percent = 0
        if verbose:
            print("Fusing the following {} labels: {} to value '{}'.".format(N, to_fuse, min_lab))
        for n, label in enumerate(to_fuse):
            if verbose and n * 100 / float(N) >= percent:
                print("{}%...".format(percent), end=' ')
                percent += 5
            if verbose and n + 1 == N:
                print("100%")
            try:
                bbox = self.boundingbox(label)
                xyz = np.where((self.image[bbox]) == label)
                self.image[tuple((xyz[0] + bbox[0].start,
                                  xyz[1] + bbox[1].start,
                                  xyz[2] + bbox[2].start))] = min_lab
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; best-effort behavior is kept.
            except Exception:
                print("No boundingbox found for cell id #{}, skipping...".format(label))
                continue
        print("Done!")
        return None
Example 13
Project: NeoAnalysis   Author: neoanalysis   File: spikedetection.py    (license) View Source Project 6 votes vote down vote up
def __detect_spike_peak(self, ang_data, Thr, peak_before, peak_after):
        """Locate spike peaks in a continuous trace.

        Samples beyond the threshold are grouped into contiguous runs; the
        extremum of each run is a candidate peak, kept only when a full
        window of peak_before/peak_after samples fits inside the trace.

        Returns:
            np.ndarray of peak sample indices.
        """
        # Crossing direction follows the sign of the threshold.
        if Thr < 0:
            crossing = np.where(ang_data < Thr)[0]
        else:
            crossing = np.where(ang_data >= Thr)[0]
        # Split the crossing indices into contiguous runs.
        gaps = np.where(np.diff(crossing, n=1) > 1)[0] + 1
        runs = np.split(crossing, gaps)

        pick = np.argmin if Thr < 0 else np.argmax
        n_samples = ang_data.shape[0]
        spike_peak = []
        for run in runs:
            if run.size:
                cand = run[pick(ang_data[run])]
                # Keep only peaks with a complete extraction window.
                if (cand + peak_after <= n_samples) and (cand - peak_before >= 0):
                    spike_peak.append(cand)
        return np.array(spike_peak)
Example 14
Project: NeoAnalysis   Author: neoanalysis   File: test_nestio.py    (license) View Source Project 6 votes vote down vote up
def test_values(self):
        """Analog signal values read via NestIO must equal the raw column."""
        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-2gex-3Vm-1261-0.dat',
                directory=self.local_test_dir, clean=False)

        id_to_test = 1
        reader = NestIO(filenames=filename)
        seg = reader.read_segment(gid_list=[id_to_test],
                                  t_stop=1000. * pq.ms,
                                  sampling_period=pq.ms, lazy=False,
                                  id_column_dat=0, time_column_dat=1,
                                  value_columns_dat=2, value_types='V_m')

        raw = np.loadtxt(filename)
        # Column 2 holds V_m; pick the rows of this neuron and compare as
        # a column vector against the first analog signal.
        expected = raw[raw[:, 0] == id_to_test, 2][:, None]
        np.testing.assert_array_equal(seg.analogsignals[0].magnitude, expected)
Example 15
Project: NeoAnalysis   Author: neoanalysis   File: test_nestio.py    (license) View Source Project 6 votes vote down vote up
def test_values(self):
        """
        Verify the spike times read for one neuron match the raw file.
        """
        id_to_test = 1
        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-1256-0.gdf',
                directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        seg = r.read_segment(gid_list=[id_to_test],
                             t_start=400. * pq.ms,
                             t_stop=500. * pq.ms, lazy=False,
                             id_column_gdf=0, time_column_gdf=1)

        dat = np.loadtxt(filename)
        # Column 1 holds spike times; keep the rows of the requested gid.
        mask = dat[:, 0] == id_to_test
        target_data = dat[mask, 1]

        np.testing.assert_array_equal(seg.spiketrains[0].magnitude, target_data)
Example 16
Project: NeoAnalysis   Author: neoanalysis   File: test_nestio.py    (license) View Source Project 6 votes vote down vote up
def test_correct_condition_selection(self):
        """
        get_columns with condition + condition_column must match a manual
        numpy row selection.
        """
        cond_col = 0
        cond = lambda x: x > 10
        result = self.testIO.get_columns(condition=cond,
                                         condition_column=0)
        mask = cond(self.testIO.data[:, cond_col])
        expected = self.testIO.data[np.where(mask)[0], :]

        np.testing.assert_array_equal(result, expected)

        # Every returned row must itself satisfy the condition.
        assert all(cond(result[:, cond_col]))
Example 17
Project: NeoAnalysis   Author: neoanalysis   File: elphyio.py    (license) View Source Project 6 votes vote down vote up
def get_event(self, ep, ch, marked_ks):
        """
        Build an :class:`ElphyEvent` descriptor for the specified event
        channel.
        """
        assert ep in range(1, self.n_episodes + 1)
        assert ch in range(1, self.n_channels + 1)

        # The event channel is the first position flagged with -1.
        evt_channel = np.where(marked_ks == -1)[0][0]
        assert evt_channel in range(1, self.n_events(ep) + 1)

        block = self.episode_block(ep)
        # Count events across all 'REVT' blocks of this episode.
        evt_blocks = [b for b in self.get_blocks_stored_in_episode(ep)
                      if b.identifier == 'REVT']
        n_events = np.sum([b.n_events[evt_channel - 1] for b in evt_blocks],
                          dtype=int)

        return ElphyEvent(self, ep, evt_channel, block.ep_block.x_unit,
                          n_events, ch_number=ch)
Example 18
Project: NeoAnalysis   Author: neoanalysis   File: spikedetection.py    (license) View Source Project 6 votes vote down vote up
def __detect_spike_peak(self, ang_data, Thr, peak_before, peak_after):
        """Find spike peak indices in *ang_data* for threshold *Thr*.

        Negative thresholds detect downward spikes (run minima);
        non-negative thresholds detect upward spikes (run maxima).  A peak
        is returned only when the extraction window
        [peak - peak_before, peak + peak_after] fits inside the trace.
        """
        negative = Thr < 0
        if negative:
            over = np.where(ang_data < Thr)[0]
        else:
            over = np.where(ang_data >= Thr)[0]
        # Break the threshold-crossing indices into consecutive segments.
        breaks = np.where(np.diff(over, n=1) > 1)[0] + 1
        segments = np.split(over, breaks)

        total = ang_data.shape[0]
        spike_peak = []
        for seg in segments:
            if not seg.size:
                continue
            if negative:
                peak = seg[ang_data[seg].argmin()]
            else:
                peak = seg[ang_data[seg].argmax()]
            if (peak + peak_after <= total) and (peak - peak_before >= 0):
                spike_peak.append(peak)
        return np.array(spike_peak)
Example 19
Project: NeoAnalysis   Author: neoanalysis   File: test_nestio.py    (license) View Source Project 6 votes vote down vote up
def test_values(self):
        """Analog signal from NestIO must equal the raw .dat V_m column."""
        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-2gex-3Vm-1261-0.dat',
                directory=self.local_test_dir, clean=False)

        id_to_test = 1
        r = NestIO(filenames=filename)
        seg = r.read_segment(gid_list=[id_to_test],
                             t_stop=1000. * pq.ms,
                             sampling_period=pq.ms, lazy=False,
                             id_column_dat=0, time_column_dat=1,
                             value_columns_dat=2, value_types='V_m')

        dat = np.loadtxt(filename)
        rows = np.where(dat[:, 0] == id_to_test)
        target_data = dat[:, 2][rows][:, None]  # V_m as a column vector
        st = seg.analogsignals[0]
        np.testing.assert_array_equal(st.magnitude, target_data)
Example 20
Project: NeoAnalysis   Author: neoanalysis   File: test_nestio.py    (license) View Source Project 6 votes vote down vote up
def test_values(self):
        """Spike train loaded from the .gdf file must match the raw column."""
        gid = 1
        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-1256-0.gdf',
                directory=self.local_test_dir, clean=False)
        reader = NestIO(filenames=filename)
        seg = reader.read_segment(gid_list=[gid],
                                  t_start=400. * pq.ms,
                                  t_stop=500. * pq.ms, lazy=False,
                                  id_column_gdf=0, time_column_gdf=1)

        raw = np.loadtxt(filename)
        # Spike times (column 1) of the rows belonging to this neuron.
        expected = raw[raw[:, 0] == gid, 1]
        np.testing.assert_array_equal(seg.spiketrains[0].magnitude, expected)
Example 21
Project: NeoAnalysis   Author: neoanalysis   File: test_nestio.py    (license) View Source Project 6 votes vote down vote up
def test_correct_condition_selection(self):
        """
        get_columns(condition=..., condition_column=...) must return
        exactly the rows a manual boolean selection would.
        """
        condition_column = 0
        bigger_than_ten = lambda x: x > 10
        result = self.testIO.get_columns(condition=bigger_than_ten,
                                         condition_column=0)
        column = self.testIO.data[:, condition_column]
        expected = self.testIO.data[np.where(bigger_than_ten(column))[0], :]

        np.testing.assert_array_equal(result, expected)

        # All surviving rows satisfy the condition themselves.
        assert all(bigger_than_ten(result[:, condition_column]))
Example 22
Project: NeoAnalysis   Author: neoanalysis   File: elphyio.py    (license) View Source Project 6 votes vote down vote up
def get_event(self, ep, ch, marked_ks):
        """
        Return an :class:`ElphyEvent` describing the requested event
        channel.
        """
        assert ep in range(1, self.n_episodes + 1)
        assert ch in range(1, self.n_channels + 1)

        # Position flagged with -1 identifies the event channel.
        evt_channel = np.where(marked_ks == -1)[0][0]
        assert evt_channel in range(1, self.n_events(ep) + 1)

        block = self.episode_block(ep)
        rev_blocks = [blk for blk in self.get_blocks_stored_in_episode(ep)
                      if blk.identifier == 'REVT']
        total_events = np.sum([blk.n_events[evt_channel - 1]
                               for blk in rev_blocks], dtype=int)

        return ElphyEvent(self, ep, evt_channel, block.ep_block.x_unit,
                          total_events, ch_number=ch)
Example 23
Project: Tensormodels   Author: asheshjain399   File: image_color_augment.py    (MIT License) View Source Project 6 votes vote down vote up
def random_hue(img, label, max_delta=10):
    """Randomly rotate the hue channel of a BGR image.

    Args:
        img: input image in float32 (BGR).
        label: passed through unchanged.
        max_delta: maximum rotation in degrees (either direction).

    Returns:
        (augmented BGR image, label)
    """
    # Uniform rotation in [-max_delta, +max_delta] degrees.
    delta = -max_delta + 2.0 * max_delta * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hue = delta + hsv[:, :, 0]

    # Wrap back into [0, 360]; delta is small, so one step suffices.
    hue = np.where(hue > 360, hue - 360, hue)
    hue = np.where(hue < 0, hue + 360, hue)

    hsv[:, :, 0] = hue
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
Example 24
Project: AutoSleepScorerDev   Author: skjerns   File: keras_utils.py    (GNU General Public License v3.0) View Source Project 6 votes vote down vote up
def reset(self):
        """Reset the generator and recompute per-class index pools.

        Also records the rarest class index in self.slabel and derives the
        per-class sample count and number of batches per epoch.
        """
        self.step = 0
        y = np.argmax(self.Y, 1)
        labels = np.unique(y)

        idx = []
        smallest = len(y)
        for i, label in enumerate(labels):
            members = np.where(y == label)[0]
            # Track the least represented class.
            if len(members) < smallest:
                self.slabel = i
                smallest = len(members)
            idx.append(members)

        self.idx = idx
        self.labels = labels
        self.n_per_class = int(self.batch_size // len(labels))
        self.n_batches = int(np.ceil((smallest // self.n_per_class))) + 1
        self.update_probabilities()
Example 25
Project: hSBM_Topicmodel   Author: martingerlach   File: hsbm_tm.py    (license) View Source Project 6 votes vote down vote up
def __init__(self, args):
        '''
        Set up an hsbm instance.

        - ensures the output folder (self.args.output) exists
        - builds the bipartite word-document graph (self.graph)
        - runs the hsbm inference and stores the state (self.state)
        '''
        self.args = args
        self.out_path = self.args.output

        # Create the results folder on first use.
        if not os.path.exists(self.out_path):
            os.makedirs(self.out_path)

        self.graph = self.make_graph()
        self.state = self.inference(self.graph)
Example 26
Project: corporadb   Author: nlesc-sherlock   File: fill_db.py    (Apache License 2.0) View Source Project 6 votes vote down vote up
def check_dataset(self):
    '''
    Look up this dataset by name in the database.

    When a row named self.datasetname exists, store its id in
    self.dataset_id and return True; otherwise return False.
    '''
    self.cursor.execute('select id as rowid, name from dataset')
    citems = self.cursor.fetchall()
    names = [citem['name'] for citem in citems]
    if not names:
      return False
    try:
      idx = numpy.where(numpy.array(names) == self.datasetname)[0][0]
    except IndexError:
      # No row matches the dataset name.
      return False
    self.dataset_id = citems[idx]['rowid']
    return True
Example 27
Project: lang-reps   Author: chaitanyamalaviya   File: lang2vec.py    (license) View Source Project 5 votes vote down vote up
def get_language_index(lang_code, feature_database):
    """Return the row index of *lang_code* in the language table."""
    hits = np.where(feature_database["langs"] == lang_code)
    return hits[0][0]
Example 28
Project: lang-reps   Author: chaitanyamalaviya   File: lang2vec.py    (license) View Source Project 5 votes vote down vote up
def get_source_index(source_name, feature_database):
    """Return the indices (array) of *source_name* in the source table."""
    hits = np.where(feature_database["sources"] == source_name)
    return hits[0]
Example 29
Project: lang-reps   Author: chaitanyamalaviya   File: lang2vec.py    (license) View Source Project 5 votes vote down vote up
def get_feature_index(feature_name, feature_database):
    """Return the row index of *feature_name* in the feature table."""
    hits = np.where(feature_database["feats"] == feature_name)
    return hits[0][0]
Example 30
Project: composability_bench   Author: IntelPython   File: collab_filt.py    (MIT License) View Source Project 5 votes vote down vote up
def run_numpy():
    """Return the best-scoring item per user using NumPy.

    Uses the module-level `topk` and `users` matrices; positions the user
    already rated (users > 0) are masked to 0 before the argmax.
    """
    scores = np.where(users > 0, 0, topk.dot(users))
    return scores.argmax(axis=0)
Example 31
Project: composability_bench   Author: IntelPython   File: collab_filt.py    (MIT License) View Source Project 5 votes vote down vote up
def run_dask():
    """Dask version of run_numpy: mask rated items, then argmax per column."""
    scores = da.where(u > 0, 0, t.dot(u))
    best = scores.argmax(axis=0)
    return best.compute()
Example 32
Project: uwb_tracker_ros   Author: eth-ait   File: uwb_tracker_node.py    (license) View Source Project 5 votes vote down vote up
def _solve_equation_least_squares(self, A, B):
        """Solve the linear system A X = B in the least-squares sense.

        Uses the Moore-Penrose pseudo-inverse, which also handles singular
        and non-square A.  (Alternatives — LU or QR factorization via
        scipy.linalg, or np.linalg.lstsq — can be faster for
        well-conditioned square systems; pinv is kept as the most robust
        choice.  The large commented-out implementations of those
        alternatives were removed.)

        Args:
             A (numpy.ndarray): Left-hand side of equation.
             B (numpy.ndarray): Right-hand side of equation.

        Returns:
             X (numpy.ndarray): Solution of equation.
        """
        return np.dot(np.linalg.pinv(A), B)
Example 33
Project: pyfds   Author: emtpb   File: fields.py    (BSD 3-Clause "New" or "Revised" License) View Source Project 5 votes vote down vote up
def get_index(self, value):
        """Returns the index of a given value.

        Args:
            value: Value the index is requested for.

        Returns:
            int: Index of the unique vector element within
            self.snap_radius of *value*.

        Raises:
            AssertionError: if zero or multiple points lie within the
            snap radius.
        """
        index, = np.where(np.abs(self.vector - value) <= self.snap_radius)
        assert len(index) < 2, "Multiple points found within snap radius of given value."
        assert len(index) > 0, "No point found within snap radius of given value."

        # index is a length-1 array; extract the scalar explicitly (int()
        # on a 1-element ndarray is deprecated since NumPy 1.25).
        return int(index[0])
Example 34
Project: YellowFin_Pytorch   Author: JianGoForIt   File: nn1_stress_test.py    (Apache License 2.0) View Source Project 5 votes vote down vote up
def gen_minibatch(tokens, features, labels, mini_batch_size, shuffle=True):
    """Yield (token, feature, label) minibatches, dropping neutral rows.

    Rows whose label equals 0.5 are filtered out; the remaining rows are
    batched via iterate_minibatches, with token sequences padded per batch.
    """
    # Compute the keep-mask once (the original re-evaluated
    # np.where(labels != 0.5)[0] up to four times).
    keep = np.where(labels != 0.5)[0]
    tokens = np.asarray(tokens)[keep]
    if type(features) is np.ndarray:
        features = np.asarray(features)[keep]
    else:
        # Sparse matrix: densify before indexing.
        features = np.asarray(features.todense())[keep]
    labels = np.asarray(labels)[keep]
    for token, feature, label in iterate_minibatches(tokens, features, labels,
                                                     mini_batch_size,
                                                     shuffle=shuffle):
        # Pad variable-length token sequences to a common length per batch.
        token = [_ for _ in pad_batch(token)]
        yield (token,
               Variable(torch.from_numpy(feature)),
               Variable(torch.FloatTensor(label), requires_grad=False))
Example 35
Project: YellowFin_Pytorch   Author: JianGoForIt   File: nn1.py    (Apache License 2.0) View Source Project 5 votes vote down vote up
def gen_minibatch(tokens, features, labels, mini_batch_size, shuffle=True):
    """Yield padded (token, feature, label) minibatches without 0.5 labels."""
    # Evaluate the row filter a single time instead of once per array
    # (the original recomputed np.where(labels != 0.5)[0] three times).
    keep = np.where(labels != 0.5)[0]
    tokens = np.asarray(tokens)[keep]
    features = np.asarray(features.todense())[keep]
    labels = np.asarray(labels)[keep]
    for token, feature, label in iterate_minibatches(tokens, features, labels,
                                                     mini_batch_size,
                                                     shuffle=shuffle):
        # Pad variable-length token sequences to a common length per batch.
        token = [_ for _ in pad_batch(token)]
        yield (token,
               Variable(torch.from_numpy(feature)),
               Variable(torch.FloatTensor(label), requires_grad=False))
Example 36
Project: spyking-circus   Author: spyking-circus   File: plot.py    (license) View Source Project 5 votes vote down vote up
def view_waveforms_clusters(data, halo, threshold, templates, amps_lim, n_curves=200, save=False):
    """Plot, per cluster, a sample of waveforms together with the template.

    Args:
        data: array of waveform snippets (one row per detected event).
        halo: cluster label per snippet; -1 means unclustered.
        threshold: detection threshold, drawn as dashed horizontal lines.
        templates: template matrix with one column per cluster.
        amps_lim: per-template (min, max) amplitude scaling limits.
        n_curves: how many individual waveforms to draw per cluster.
        save: False to display, or (directory, suffix) to save the figure.
    """
    nb_templates = templates.shape[1]
    n_panels     = numpy.ceil(numpy.sqrt(nb_templates))
    mask         = numpy.where(halo > -1)[0]
    clust_idx    = numpy.unique(halo[mask])
    fig          = pylab.figure()
    square       = True
    # Center sample of a length-L waveform is (L - 1)//2.  The original
    # wrote len(data[0] - 1)//2, applying the -1 to the sample values (a
    # no-op for the length) instead of to the length itself.
    center       = (len(data[0]) - 1)//2
    for count, i in enumerate(range(nb_templates)):
        if square:
            pylab.subplot(n_panels, n_panels, count + 1)
            # Hide tick labels on the inner panels of the grid.
            if (numpy.mod(count, n_panels) != 0):
                pylab.setp(pylab.gca(), yticks=[])
            if (count < n_panels*(n_panels - 1)):
                pylab.setp(pylab.gca(), xticks=[])

        # Draw a random sample of the waveforms of this cluster in grey.
        subcurves = numpy.where(halo == clust_idx[count])[0]
        for k in numpy.random.permutation(subcurves)[:n_curves]:
            pylab.plot(data[k], '0.5')

        # Template (red) and its amplitude limits (blue).
        pylab.plot(templates[:, count], 'r')
        pylab.plot(amps_lim[count][0]*templates[:, count], 'b', alpha=0.5)
        pylab.plot(amps_lim[count][1]*templates[:, count], 'b', alpha=0.5)

        xmin, xmax = pylab.xlim()
        pylab.plot([xmin, xmax], [-threshold, -threshold], 'k--')
        pylab.plot([xmin, xmax], [threshold, threshold], 'k--')
        ymin, ymax = pylab.ylim()
        pylab.plot([center, center], [ymin, ymax], 'k--')
        pylab.title('Cluster %d' %i)

    if nb_templates > 0:
        pylab.tight_layout()
    if save:
        pylab.savefig(os.path.join(save[0], 'waveforms_%s' %save[1]))
        pylab.close()
    else:
        pylab.show()
    del fig
Example 37
Project: spyking-circus   Author: spyking-circus   File: plot.py    (license) View Source Project 5 votes vote down vote up
def view_performance(file_name, triggers, lims=(150,150)):
    """Build and display a peri-trigger spike histogram per electrode.

    For every trigger time, counts the spikes of each electrode that fall
    within a window around the trigger, accumulates them into a 3D array,
    and shows the trigger-averaged image.

    Parameters
    ----------
    file_name : str
        Path to the recording; used to load the CircusParser configuration.
    triggers : sequence of int
        Trigger times (in samples, presumably — confirm against caller).
    lims : (int, int)
        Window extent (before, after) around each trigger, in samples.

    Returns
    -------
    curve : 3D int32 array
        Shape (n_triggers, n_electrodes, lims[0] + lims[1]) spike counts.
    """
    
    params          = CircusParser(file_name)
    N_e             = params.getint('data', 'N_e')
    N_total         = params.getint('data', 'N_total')
    sampling_rate   = params.getint('data', 'sampling_rate')
    do_temporal_whitening = params.getboolean('whitening', 'temporal')
    do_spatial_whitening  = params.getboolean('whitening', 'spatial')
    spike_thresh     = params.getfloat('detection', 'spike_thresh')
    file_out_suff    = params.get('data', 'file_out_suff')
    N_t              = params.getint('detection', 'N_t')
    nodes, edges     = get_nodes_and_edges(params)
    chunk_size       = N_t
    
    # NOTE(review): the whitening matrices, thresholds and several of the
    # parameters above are loaded but never used below — possibly left over
    # from a copy of a sibling plotting routine.
    if do_spatial_whitening:
        spatial_whitening  = load_data(params, 'spatial_whitening')
    if do_temporal_whitening:
        temporal_whitening = load_data(params, 'temporal_whitening')

    thresholds       = load_data(params, 'thresholds')    
    
    # Fall back to an empty result when no fitting results exist yet.
    try:
        result    = load_data(params, 'results')
    except Exception:
        result    = {'spiketimes' : {}, 'amplitudes' : {}}

    # One row per trigger, one slice per electrode, one bin per sample
    # offset in the window.
    curve     = numpy.zeros((len(triggers), len(result['spiketimes'].keys()), lims[1]+lims[0]), dtype=numpy.int32)
    count     = 0
    
    for count, t_spike in enumerate(triggers):
        for key in result['spiketimes'].keys():
            # Keys look like 'temp_<electrode>'; the numeric part indexes
            # the electrode axis of `curve`.
            elec  = int(key.split('_')[1])
            # NOTE(review): both window bounds use lims[0]; the upper bound
            # was probably meant to be lims[1] — confirm intended window.
            idx   = numpy.where((result['spiketimes'][key] > t_spike - lims[0]) & (result['spiketimes'][key] <  t_spike + lims[0]))
            # NOTE(review): `t_spike - spiketime` is negative for spikes
            # after the trigger, so this indexes from the end of the bin
            # axis — verify this wrap-around is intentional.
            curve[count, elec, t_spike - result['spiketimes'][key][idx]] += 1
    pylab.subplot(111)
    pylab.imshow(numpy.mean(curve, 0), aspect='auto') 
    return curve 
Example 38
Project: spyking-circus   Author: spyking-circus   File: algorithms.py    (license) View Source Project 5 votes vote down vote up
def slice_result(result, times):
    """Split a spike-sorting result into per-window sub-results.

    Parameters
    ----------
    result : dict
        Mapping with 'spiketimes' and 'amplitudes' keys, each a dict of
        per-template numpy arrays (parallel arrays per key).
    times : iterable of (start, end) pairs
        Inclusive time windows used to slice the result.

    Returns
    -------
    list of dict
        One {'spiketimes', 'amplitudes'} dict per window; spike times are
        re-referenced to the window start.
    """
    # (The original computed an unused `nb_temp = len(result['spiketimes'])`;
    # removed as dead code.)
    sub_results = []
    for t in times:
        sub_result = {'spiketimes' : {}, 'amplitudes' : {}}
        for key, spikes in result['spiketimes'].items():
            # Keep only the spikes inside the inclusive window [t[0], t[1]].
            idx = numpy.where((spikes >= t[0]) & (spikes <= t[1]))[0]
            sub_result['spiketimes'][key] = spikes[idx] - t[0]
            sub_result['amplitudes'][key] = result['amplitudes'][key][idx]
        sub_results.append(sub_result)
    return sub_results
Example 39
Project: pytorch-semseg   Author: meetshah1995   File: pascal_voc_loader.py    (MIT License) View Source Project 5 votes vote down vote up
def encode_segmap(self, mask):
        """Convert an RGB label image into a 2D map of class indices.

        Each pixel whose colour matches the i-th entry of
        ``self.get_pascal_labels()`` is assigned class index i; pixels
        matching no entry stay 0.
        """
        mask = mask.astype(int)
        height, width = mask.shape[0], mask.shape[1]
        label_mask = np.zeros((height, width), dtype=np.int16)
        for class_idx, colour in enumerate(self.get_pascal_labels()):
            # Rows/cols where every channel equals this class colour.
            hits = np.where(np.all(mask == colour, axis=-1))[:2]
            label_mask[hits] = class_idx
        return label_mask.astype(int)
Example 40
Project: squeezeDet-hand   Author: fyhtea   File: voc_eval.py    (BSD 2-Clause "Simplified" License) View Source Project 5 votes vote down vote up
def voc_ap(rec, prec, use_07_metric=False):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # 11-point interpolation: average the max precision observed at
        # each recall threshold 0.0, 0.1, ..., 1.0.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            above = rec >= t
            if np.sum(above) == 0:
                p_at_t = 0
            else:
                p_at_t = np.max(prec[above])
            ap += p_at_t / 11.
        return ap

    # Exact area under the precision/recall curve.
    # Sentinels bracket the curve so every recall step is counted.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))

    # Precision envelope: make mpre monotonically non-increasing by
    # sweeping from the right.
    for j in range(mpre.size - 1, 0, -1):
        mpre[j - 1] = np.maximum(mpre[j - 1], mpre[j])

    # Indices where recall changes; each contributes a rectangle of
    # (delta recall) * (envelope precision).
    steps = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[steps + 1] - mrec[steps]) * mpre[steps + 1])
Example 41
Project: sgcrfpy   Author: dswah   File: sgcrf.py    (MIT License) View Source Project 5 votes vote down vote up
def chol_inv(B, lower=True):
    """Invert A given its Cholesky factor B (A = B @ B.T).

    Solves A x = I column by column, reusing the already-computed
    factorization instead of forming A explicitly.

    Parameters
    ----------
    B : (n, n) array
        Cholesky factor of A.
    lower : bool
        Whether B is the lower-triangular factor (default True).
    """
    identity = np.eye(B.shape[0])
    return cho_solve((B, lower), identity)
Example 42
Project: sgcrfpy   Author: dswah   File: sgcrf.py    (MIT License) View Source Project 5 votes vote down vote up
def active_set_Lam(self, fixed, vary):
        """Return indices of Lam entries in the active set.

        An upper-triangular entry is active when its gradient magnitude
        exceeds the L1 penalty ``lamL`` or the entry is already nonzero.
        """
        grad = self.grad_wrt_Lam(fixed, vary)
        # The gradient of a symmetric objective must itself be symmetric.
        assert np.allclose(grad, grad.T, 1e-3)
        violating = np.abs(np.triu(grad)) > self.lamL
        nonzero = self.Lam != 0
        return np.where(violating | nonzero)
Example 43
Project: sgcrfpy   Author: dswah   File: sgcrf.py    (MIT License) View Source Project 5 votes vote down vote up
def active_set_Theta(self, fixed, vary):
        """Return indices of Theta entries in the active set.

        An entry is active when its gradient magnitude exceeds the L1
        penalty ``lamT`` or the entry is already nonzero.
        """
        grad = self.grad_wrt_Theta(fixed, vary)
        violating = np.abs(grad) > self.lamT
        nonzero = self.Theta != 0
        return np.where(violating | nonzero)
Example 44
Project: human-rl   Author: gsastry   File: pong_catastrophe.py    (MIT License) View Source Project 5 votes vote down vote up
def paddle_top(observation, paddle="right"):
    """Return the topmost row index of the given paddle, or None if absent.

    Parameters
    ----------
    observation : 3D array
        Screen frame (rows x columns x channels) — assumed RGB-like;
        PADDLE_COLUMN/PADDLE_COLOR/TOLERANCE are module globals defined
        elsewhere (verify against the defining module).
    paddle : str
        Which paddle to locate, "left" or "right".
    """
    # Per-row colour distance of the paddle's column from the paddle colour.
    column = observation[:, PADDLE_COLUMN[paddle], :] - PADDLE_COLOR[paddle]
    # FIX: `np.int` was deprecated and removed in NumPy 1.24; the builtin
    # `int` produces the identical default integer dtype.
    found = (np.sum(np.abs(column), axis=1) < TOLERANCE).astype(int)
    # argmax returns the first matching row, or 0 when there is no match —
    # hence the explicit found[r] check below.
    r = np.argmax(found)
    if not found[r]:
        return None
    else:
        return r


# def ball_center(observation):
#     w = np.where(np.abs(observation[:,6:36] - 0.30457518) > TOLERANCE)[:2]
#     if len(w[0]) == 0 or len(w[0]) > 4:
#         return None
#     w = np.mean(w, axis=1)
#     return w[0], w[1] + 6
#
# def ball_on_left(observation):
#     w = np.where(np.abs(observation[:,6:21] - 0.30457518) > TOLERANCE)[:2]
#     return(len(w[0]) > 0) 
Example 45
Project: human-rl   Author: gsastry   File: pong_catastrophe.py    (MIT License) View Source Project 5 votes vote down vote up
def paddle_top(observation, paddle="right"):
    """Return the topmost row index of the given paddle, or None if absent.

    Parameters
    ----------
    observation : 3D array
        Screen frame (rows x columns x channels) — assumed RGB-like;
        PADDLE_COLUMN/PADDLE_COLOR/TOLERANCE are module globals defined
        elsewhere (verify against the defining module).
    paddle : str
        Which paddle to locate, "left" or "right".
    """
    # Per-row colour distance of the paddle's column from the paddle colour.
    column = observation[:, PADDLE_COLUMN[paddle], :] - PADDLE_COLOR[paddle]
    # FIX: `np.int` was deprecated and removed in NumPy 1.24; the builtin
    # `int` produces the identical default integer dtype.
    found = (np.sum(np.abs(column), axis=1) < TOLERANCE).astype(int)
    # argmax returns the first matching row, or 0 when there is no match —
    # hence the explicit found[r] check below.
    r = np.argmax(found)
    if not found[r]:
        return None
    else:
        return r


# def ball_center(observation):
#     w = np.where(np.abs(observation[:,6:36] - 0.30457518) > TOLERANCE)[:2]
#     if len(w[0]) == 0 or len(w[0]) > 4:
#         return None
#     w = np.mean(w, axis=1)
#     return w[0], w[1] + 6
#
# def ball_on_left(observation):
#     w = np.where(np.abs(observation[:,6:21] - 0.30457518) > TOLERANCE)[:2]
#     return(len(w[0]) > 0) 
Example 46
Project: human-rl   Author: gsastry   File: pong_catastrophe.py    (MIT License) View Source Project 5 votes vote down vote up
def paddle_top(observation, paddle="right"):
    """Return the topmost row index of the given paddle, or None if absent.

    Parameters
    ----------
    observation : 3D array
        Screen frame (rows x columns x channels) — assumed RGB-like;
        PADDLE_COLUMN/PADDLE_COLOR/TOLERANCE are module globals defined
        elsewhere (verify against the defining module).
    paddle : str
        Which paddle to locate, "left" or "right".
    """
    # Per-row colour distance of the paddle's column from the paddle colour.
    column = observation[:, PADDLE_COLUMN[paddle], :] - PADDLE_COLOR[paddle]
    # FIX: `np.int` was deprecated and removed in NumPy 1.24; the builtin
    # `int` produces the identical default integer dtype.
    found = (np.sum(np.abs(column), axis=1) < TOLERANCE).astype(int)
    # argmax returns the first matching row, or 0 when there is no match —
    # hence the explicit found[r] check below.
    r = np.argmax(found)
    if not found[r]:
        return None
    else:
        return r


# def ball_center(observation):
#     w = np.where(np.abs(observation[:,6:36] - 0.30457518) > TOLERANCE)[:2]
#     if len(w[0]) == 0 or len(w[0]) > 4:
#         return None
#     w = np.mean(w, axis=1)
#     return w[0], w[1] + 6
#
# def ball_on_left(observation):
#     w = np.where(np.abs(observation[:,6:21] - 0.30457518) > TOLERANCE)[:2]
#     return(len(w[0]) > 0) 
Example 47
Project: human-rl   Author: gsastry   File: pong_catastrophe.py    (MIT License) View Source Project 5 votes vote down vote up
def paddle_top(observation, paddle="right"):
    """Return the topmost row index of the given paddle, or None if absent.

    Parameters
    ----------
    observation : 3D array
        Screen frame (rows x columns x channels) — assumed RGB-like;
        PADDLE_COLUMN/PADDLE_COLOR/TOLERANCE are module globals defined
        elsewhere (verify against the defining module).
    paddle : str
        Which paddle to locate, "left" or "right".
    """
    # Per-row colour distance of the paddle's column from the paddle colour.
    column = observation[:, PADDLE_COLUMN[paddle], :] - PADDLE_COLOR[paddle]
    # FIX: `np.int` was deprecated and removed in NumPy 1.24; the builtin
    # `int` produces the identical default integer dtype.
    found = (np.sum(np.abs(column), axis=1) < TOLERANCE).astype(int)
    # argmax returns the first matching row, or 0 when there is no match —
    # hence the explicit found[r] check below.
    r = np.argmax(found)
    if not found[r]:
        return None
    else:
        return r


# def ball_center(observation):
#     w = np.where(np.abs(observation[:,6:36] - 0.30457518) > TOLERANCE)[:2]
#     if len(w[0]) == 0 or len(w[0]) > 4:
#         return None
#     w = np.mean(w, axis=1)
#     return w[0], w[1] + 6
#
# def ball_on_left(observation):
#     w = np.where(np.abs(observation[:,6:21] - 0.30457518) > TOLERANCE)[:2]
#     return(len(w[0]) > 0) 
Example 48
Project: human-rl   Author: gsastry   File: pong_catastrophe.py    (MIT License) View Source Project 5 votes vote down vote up
def paddle_top(observation, paddle="right"):
    """Return the topmost row index of the given paddle, or None if absent.

    Parameters
    ----------
    observation : 3D array
        Screen frame (rows x columns x channels) — assumed RGB-like;
        PADDLE_COLUMN/PADDLE_COLOR/TOLERANCE are module globals defined
        elsewhere (verify against the defining module).
    paddle : str
        Which paddle to locate, "left" or "right".
    """
    # Per-row colour distance of the paddle's column from the paddle colour.
    column = observation[:, PADDLE_COLUMN[paddle], :] - PADDLE_COLOR[paddle]
    # FIX: `np.int` was deprecated and removed in NumPy 1.24; the builtin
    # `int` produces the identical default integer dtype.
    found = (np.sum(np.abs(column), axis=1) < TOLERANCE).astype(int)
    # argmax returns the first matching row, or 0 when there is no match —
    # hence the explicit found[r] check below.
    r = np.argmax(found)
    if not found[r]:
        return None
    else:
        return r


# def ball_center(observation):
#     w = np.where(np.abs(observation[:,6:36] - 0.30457518) > TOLERANCE)[:2]
#     if len(w[0]) == 0 or len(w[0]) > 4:
#         return None
#     w = np.mean(w, axis=1)
#     return w[0], w[1] + 6
#
# def ball_on_left(observation):
#     w = np.where(np.abs(observation[:,6:21] - 0.30457518) > TOLERANCE)[:2]
#     return(len(w[0]) > 0) 
Example 49
Project: retinex_for_mri   Author: ofgulban   File: utils.py    (GNU General Public License v3.0) View Source Project 5 votes vote down vote up
def truncate_and_scale(data, percMin=0.01, percMax=99.9, zeroTo=1.0):
    """Truncate and scale the data as a preprocessing step.

    Parameters
    ----------
    data : nd numpy array
        Data/image to be truncated and scaled.
    percMin : float, positive
        Minimum percentile to be truncated.
    percMax : float, positive
        Maximum percentile to be truncated.
    zeroTo : float
        Data will be returned in the range from 0 to this number.

    Returns
    -------
    data : nd numpy array
        Truncated and scaled copy of data/image. The input array is left
        unmodified.

    """
    # FIX: the original clipped the caller's array in place before also
    # returning a new array, silently corrupting the input as a side
    # effect. Work on a copy instead.
    data = np.copy(data)

    # adjust minimum: clip everything below the low percentile up to it,
    # then shift so the minimum becomes 0
    percDataMin = np.percentile(data, percMin)
    data[data < percDataMin] = percDataMin
    data = data - data.min()

    # adjust maximum: clip everything above the high percentile down to it,
    # then normalise so the maximum becomes 1
    # (NOTE: constant input would make data.max() zero and divide by zero,
    # matching the original behaviour)
    percDataMax = np.percentile(data, percMax)
    data[data > percDataMax] = percDataMax
    data = 1./data.max() * data
    return data * zeroTo
Example 50
Project: zipline-chinese   Author: zhanghan1990   File: history_container.py    (Apache License 2.0) View Source Project 5 votes vote down vote up
def update_last_known_values(self):
        """
        Store the non-NaN values from our oldest frame in each frequency.
        """
        ffillable = self.ffillable_fields
        if not len(ffillable):
            return

        for frequency in self.unique_frequencies:
            digest_panel = self.digest_panels.get(frequency, None)
            if digest_panel:
                oldest_known_values = digest_panel.oldest_frame(raw=True)
            else:
                oldest_known_values = self.buffer_panel.oldest_frame(raw=True)

            oldest_vals = oldest_known_values
            oldest_columns = self.fields
            for field in ffillable:
                f_idx = oldest_columns.get_loc(field)
                field_vals = oldest_vals[f_idx]
                # isnan would be fast, possible to use?
                non_nan_sids = np.where(pd.notnull(field_vals))
                key = (frequency.freq_str, field)
                key_loc = self.last_known_prior_values.index.get_loc(key)
                self.last_known_prior_values.values[
                    key_loc, non_nan_sids
                ] = field_vals[non_nan_sids]