Python numpy.delete() Examples

The following are 30 code examples of numpy.delete(), collected from open-source projects. The source file and project for each example are noted above it.
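
For orientation before the examples: numpy.delete(arr, obj, axis=None) returns a new array with the entries at index (or indices) obj removed along the given axis; when axis is None the input is flattened first. A minimal sketch:

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]])

np.delete(a, 1, axis=0)       # drop row 1 -> [[1, 2, 3], [7, 8, 9]]
np.delete(a, [0, 2], axis=1)  # drop columns 0 and 2 -> [[2], [5], [8]]
np.delete(a, 1)               # no axis: flattened -> [1, 3, 4, 5, 6, 7, 8, 9]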
Example #1
Source File: nms.py    From H3DNet with MIT License
def nms_crnr_dist(boxes, conf, overlap_threshold):
    I = np.argsort(conf)
    pick = []
    while I.size != 0:
        last = I.size
        i = I[-1]
        pick.append(i)

        # corner-distance overlap between the picked box and every remaining box
        scores = []
        for ind in I[:-1]:
            scores.append(bbox_corner_dist_measure(boxes[i, :], boxes[ind, :]))

        # remove the picked box (position last-1 in I) and all overlapping boxes
        I = np.delete(I, np.concatenate(([last-1], np.where(np.array(scores) > overlap_threshold)[0])))

    return pick
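
The single np.delete call above removes the picked box (position last-1 in I) together with every overlapping box in one shot; the indices returned by np.where refer to I[:-1], so they are also valid positions in I. A toy run of the same pruning pattern, with made-up values:

import numpy as np

I = np.array([4, 0, 3, 1, 2])            # candidate indices, lowest confidence first
scores = np.array([0.1, 0.9, 0.2, 0.8])  # hypothetical overlap of each I[:-1] with I[-1]
I = np.delete(I, np.concatenate(([I.size - 1], np.where(scores > 0.5)[0])))
# I is now [4, 3]: the picked index and the two overlapping ones are gone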
Example #2
Source File: test_series.py    From recruit with Apache License 2.0
def test_line_area_nan_series(self):
        values = [1, 2, np.nan, 3]
        s = Series(values)
        ts = Series(values, index=tm.makeDateIndex(k=4))

        for d in [s, ts]:
            ax = _check_plot_works(d.plot)
            masked = ax.lines[0].get_ydata()
            # remove nan for comparison purposes
            exp = np.array([1, 2, 3], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
            tm.assert_numpy_array_equal(
                masked.mask, np.array([False, False, True, False]))

            expected = np.array([1, 2, 0, 3], dtype=np.float64)
            ax = _check_plot_works(d.plot, stacked=True)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
            ax = _check_plot_works(d.plot.area)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
            ax = _check_plot_works(d.plot.area, stacked=False)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) 
Example #3
Source File: common.py    From cat-bbs with MIT License
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 or \
        (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1-alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix 
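
Here np.delete strips the alpha channel that matplotlib colormaps append: cmap(heatmap) yields an (H, W, 4) RGBA array, and deleting index 3 along axis 2 leaves plain RGB. The same trick in isolation (a sketch, assuming matplotlib is available):

import numpy as np
import matplotlib.pyplot as plt

heat = np.random.rand(4, 4)        # toy heatmap with values in [0, 1]
rgba = plt.get_cmap('jet')(heat)   # shape (4, 4, 4): RGBA floats
rgb = np.delete(rgba, 3, axis=2)   # drop the alpha channel
assert rgb.shape == (4, 4, 3)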
Example #4
Source File: starkAYO_staticSchedule.py    From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def sacrificeStarCbyT(self, sInds, t_dets, fZ, fEZ, WA, overheadTime):
        """Sacrifice the worst performing CbyT star
        Args:
            sInds[nStars] - indices of stars in the list
            t_dets[nStars] - time to observe each star (in days)
            fZ[nStars] - zodiacal light for each target
            fEZ - 0
            WA - inner working angle of the instrument
            overheadTime - overheadTime added to each observation
        Returns:
            sInds[nStars] - indices of stars in the list
            t_dets[nStars] - time to observe each star (in days)
            sacrificedStarTime - time to distribute in days
        """
        CbyT = self.Completeness.comp_per_intTime(t_dets*u.d, self.TargetList, sInds, self.valfZmin[sInds], fEZ, WA, self.mode, self.Cb[sInds], self.Csp[sInds])/t_dets  # takes 5 seconds to do 1 time for all stars

        sacrificeIndex = np.argmin(CbyT)  # index of the star to sacrifice

        # Need index of sacrificed star by this point
        sacrificedStarTime = t_dets[sacrificeIndex] + overheadTime  # time being sacrificed
        sInds = np.delete(sInds, sacrificeIndex)
        t_dets = np.delete(t_dets, sacrificeIndex)
        return sInds, t_dets, sacrificedStarTime 
Example #5
Source File: ModelingCloth.py    From Modeling-Cloth with MIT License
def create_vertex_groups(groups=['common', 'not_used'], weights=[0.0, 0.0], ob=None):
    '''Creates vertex groups and sets weights. "groups" is a list of strings
    for the names of the groups. "weights" is a list of weights corresponding 
    to the strings. Each vertex is assigned a weight for each vertex group to
    avoid calling vertex weights that are not assigned. If the groups are
    already present, the previous weights will be preserved. To reset weights
    delete the created groups'''
    if ob is None:
        ob = bpy.context.object
    vg = ob.vertex_groups
    for g in range(0, len(groups)):
        if groups[g] not in vg.keys():  # Don't create groups if they are already there
            vg.new(groups[g])
            vg[groups[g]].add(range(0,len(ob.data.vertices)), weights[g], 'REPLACE')
        else:
            vg[groups[g]].add(range(0,len(ob.data.vertices)), 0, 'ADD') # This way we avoid resetting the weights for existing groups. 
Example #6
Source File: ModelingCloth.py    From Modeling-Cloth with MIT License
def create_sew_edges():
    bpy.ops.mesh.bridge_edge_loops()
    bpy.ops.mesh.delete(type='ONLY_FACE')
    return
    # TODO:
    # highlight a sew edge
    # compare vertex counts
    # subdivide to match counts
    # distribute and smooth back into mesh
    # create sew lines


# sewing functions ---------------->>>
Example #7
Source File: prunable_nn_test.py    From prunnable-layers-pytorch with GNU General Public License v3.0
def test_pruneFeatureMap_ShouldPruneRightParams(self):
        dropped_index = 0
        output = self.module(self.input)
        torch.autograd.backward(output, self.upstream_gradient)

        old_weight_size = self.module.weight.size()
        old_bias_size = self.module.bias.size()
        old_out_channels = self.module.out_channels
        old_weight_values = self.module.weight.data.cpu().numpy()

        # ensure that the chosen index is dropped
        self.module.prune_feature_map(dropped_index)

        # check bias size
        self.assertEqual(self.module.bias.size()[0], (old_bias_size[0]-1))
        # check output channels
        self.assertEqual(self.module.out_channels, old_out_channels-1)

        _, *other_old_weight_sizes = old_weight_size
        # check weight size
        self.assertEqual(self.module.weight.size(), (old_weight_size[0]-1, *other_old_weight_sizes))
        # check weight value
        expected = np.delete(old_weight_values, dropped_index, 0)
        self.assertTrue(np.array_equal(self.module.weight.data.cpu().numpy(), expected)) 
Example #8
Source File: prunable_nn_test.py    From prunnable-layers-pytorch with GNU General Public License v3.0
def test_PLinearDropInputs_ShouldDropRightParams(self):
        dropped_index = 0

        # assume input is 2x2x2, 2 layers of 2x2
        input_shape = (2, 2, 2)
        module = pnn.PLinear(8, 10)

        old_num_features = module.in_features
        old_weight = module.weight.data.cpu().numpy()
        resized_old_weight = np.resize(old_weight, (module.out_features, *input_shape))

        module.drop_inputs(input_shape, dropped_index)
        new_shape = module.weight.size()

        # ensure that the chosen index is dropped
        expected_weight = np.resize(np.delete(resized_old_weight, dropped_index, 1), new_shape)
        output = module.weight.data.cpu().numpy()
        self.assertTrue(np.array_equal(output, expected_weight))

        # ensure num features is reduced
        self.assertEqual(module.in_features, old_num_features-1)
Example #9
Source File: prunable_nn_test.py    From prunnable-layers-pytorch with GNU General Public License v3.0
def test_PBatchNorm2dDropInputChannel_ShouldDropRightParams(self):
        dropped_index = 0
        module = pnn.PBatchNorm2d(2)

        old_num_features = module.num_features
        old_bias = module.bias.data.cpu().numpy()
        old_weight = module.weight.data.cpu().numpy()

        module.drop_input_channel(dropped_index)

        # ensure that the chosen index is dropped
        expected_weight = np.delete(old_weight, dropped_index, 0)
        self.assertTrue(np.array_equal(module.weight.data.cpu().numpy(), expected_weight))
        expected_bias = np.delete(old_bias, dropped_index, 0)
        self.assertTrue(np.array_equal(module.bias.data.cpu().numpy(), expected_bias))
        # ensure num features is reduced
        self.assertEqual(module.num_features, old_num_features-1)
Example #10
Source File: data_cnn.py    From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License
def compute_max_min(self, ske_joints):
        max_vals, min_vals = list(), list()
        for ske_joint in ske_joints:
            zero_row = []
            if self.dataset == 'NTU':
                for i in range(len(ske_joint)):
                    if (ske_joint[i, :] == np.zeros((1, 150))).all():
                        zero_row.append(i)
                ske_joint = np.delete(ske_joint, zero_row, axis=0)
                if (ske_joint[:, 0:75] == np.zeros((ske_joint.shape[0], 75))).all():
                    ske_joint = np.delete(ske_joint, range(75), axis=1)
                elif (ske_joint[:, 75:150] == np.zeros((ske_joint.shape[0], 75))).all():
                    ske_joint = np.delete(ske_joint, range(75, 150), axis=1)

            max_val = ske_joint.max()
            min_val = ske_joint.min()
            max_vals.append(float(max_val))
            min_vals.append(float(min_val))
        max_vals, min_vals = np.array(max_vals), np.array(min_vals)

        return max_vals.max(), min_vals.min() 
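
Because np.delete accepts a sequence of indices, the all-zero padding rows collected in zero_row are removed in a single call rather than one at a time. A condensed sketch of that idiom with toy shapes:

import numpy as np

frames = np.random.rand(5, 150)
frames[[1, 3]] = 0                             # pretend two frames are zero padding
zero_rows = [i for i in range(len(frames)) if (frames[i] == 0).all()]
frames = np.delete(frames, zero_rows, axis=0)  # shape (3, 150)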
Example #11
Source File: signal_fixpeaks.py    From NeuroKit with MIT License
def _correct_misaligned(misaligned_idcs, peaks):

    corrected_peaks = peaks.copy()
    misaligned_idcs = np.array(misaligned_idcs)
    # Make sure to not generate negative indices, or indices that exceed
    # the total number of peaks. prev_peaks and next_peaks must have the
    # same number of elements.
    valid_idcs = np.logical_and(
        misaligned_idcs > 1, misaligned_idcs < len(corrected_peaks) - 1  # pylint: disable=E1111
    )
    misaligned_idcs = misaligned_idcs[valid_idcs]
    prev_peaks = corrected_peaks[[i - 1 for i in misaligned_idcs]]
    next_peaks = corrected_peaks[[i + 1 for i in misaligned_idcs]]

    half_ibi = (next_peaks - prev_peaks) / 2
    peaks_interp = prev_peaks + half_ibi
    # Shift the R-peaks from the old to the new position.
    corrected_peaks = np.delete(corrected_peaks, misaligned_idcs)
    corrected_peaks = np.concatenate((corrected_peaks, peaks_interp)).astype(int)
    corrected_peaks.sort(kind="mergesort")

    return corrected_peaks 
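
The delete/concatenate/sort sequence above is effectively an in-place replacement: misaligned peaks are cut out and their interpolated substitutes are merged back in order. On a toy peak vector (values invented):

import numpy as np

peaks = np.array([10, 20, 55, 40, 50])            # index 2 is misaligned
bad = np.array([2])
interp = (peaks[bad - 1] + peaks[bad + 1]) // 2   # midpoint of the two neighbors
peaks = np.sort(np.concatenate((np.delete(peaks, bad), interp)))
# peaks is now [10, 20, 30, 40, 50]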
Example #12
Source File: rsp_findpeaks.py    From NeuroKit with MIT License
def _rsp_findpeaks_biosppy(rsp_cleaned, sampling_rate):

    extrema = _rsp_findpeaks_extrema(rsp_cleaned)
    extrema, amplitudes = _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0)

    peaks, troughs = _rsp_findpeaks_sanitize(extrema, amplitudes)

    # Apply minimum period outlier-criterion (exclude inter-breath-intervals
    # that produce a breathing rate larger than 35 breaths per minute).
    outlier_idcs = np.where((np.diff(peaks) / sampling_rate) < 1.7)[0]

    peaks = np.delete(peaks, outlier_idcs)
    troughs = np.delete(troughs, outlier_idcs)

    info = {"RSP_Peaks": peaks, "RSP_Troughs": troughs}
    return info 
Example #13
Source File: rsp_findpeaks.py    From NeuroKit with MIT License
def _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0.3):

    # Only consider those extrema that have a minimum vertical distance to
    # their direct neighbor, i.e., define outliers in absolute amplitude
    # difference between neighboring extrema.
    vertical_diff = np.abs(np.diff(rsp_cleaned[extrema]))
    median_diff = np.median(vertical_diff)
    min_diff = np.where(vertical_diff > (median_diff * amplitude_min))[0]
    extrema = extrema[min_diff]

    # Make sure that the alternation of peaks and troughs is unbroken. If
    # alternation of sign in extdiffs is broken, remove the extrema that
    # cause the breaks.
    amplitudes = rsp_cleaned[extrema]
    extdiffs = np.sign(np.diff(amplitudes))
    extdiffs = np.add(extdiffs[0:-1], extdiffs[1:])
    removeext = np.where(extdiffs != 0)[0] + 1
    extrema = np.delete(extrema, removeext)
    amplitudes = np.delete(amplitudes, removeext)

    return extrema, amplitudes 
Example #14
Source File: utils.py    From dataiku-contrib with Apache License 2.0
def non_max_suppression(boxes, scores, threshold):
    """Performs non-maximum suppression and returns indices of kept boxes.
    boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lies outside the box.
    scores: 1-D array of box scores.
    threshold: Float. IoU threshold to use for filtering.
    """
    assert boxes.shape[0] > 0
    if boxes.dtype.kind != "f":
        boxes = boxes.astype(np.float32)

    # Compute box areas
    y1 = boxes[:, 0]
    x1 = boxes[:, 1]
    y2 = boxes[:, 2]
    x2 = boxes[:, 3]
    area = (y2 - y1) * (x2 - x1)

    # Get indices of boxes sorted by scores (highest first)
    ixs = scores.argsort()[::-1]

    pick = []
    while len(ixs) > 0:
        # Pick top box and add its index to the list
        i = ixs[0]
        pick.append(i)
        # Compute IoU of the picked box with the rest
        iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
        # Identify boxes with IoU over the threshold. This
        # returns indices into ixs[1:], so add 1 to get
        # indices into ixs.
        remove_ixs = np.where(iou > threshold)[0] + 1
        # Remove indices of the picked and overlapped boxes.
        ixs = np.delete(ixs, remove_ixs)
        ixs = np.delete(ixs, 0)
    return np.array(pick, dtype=np.int32) 
Example #15
Source File: category.py    From recruit with Apache License 2.0
def delete(self, loc):
        """
        Make new Index with passed location(-s) deleted

        Returns
        -------
        new_index : Index
        """
        return self._create_from_codes(np.delete(self.codes, loc)) 
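
In user code this surfaces as Index.delete; a minimal illustration, assuming a reasonably recent pandas:

import pandas as pd

idx = pd.CategoricalIndex(["a", "b", "c", "a"])
idx.delete(0)   # CategoricalIndex(['b', 'c', 'a'], categories=['a', 'b', 'c'], ...)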
Example #16
Source File: np_utils.py    From keras-ctpn with Apache License 2.0
def non_max_suppression(boxes, scores, iou_threshold):
    """
    Non-maximum suppression
    :param boxes: [n,(y1,x1,y2,x2)]
    :param scores: [n]
    :param iou_threshold:
    :return:
    """
    assert boxes.shape[0] > 0
    if boxes.dtype.kind != "f":
        boxes = boxes.astype(np.float32)

    # Compute box areas
    y1 = boxes[:, 0]
    x1 = boxes[:, 1]
    y2 = boxes[:, 2]
    x2 = boxes[:, 3]
    area = (y2 - y1) * (x2 - x1)

    # Get indices of boxes sorted by scores (highest first)
    ixs = scores.argsort()[::-1]

    pick = []
    while len(ixs) > 0:
        # Pick top box and add its index to the list
        i = ixs[0]
        pick.append(i)
        # Compute IoU of the picked box with the rest
        iou = compute_iou_1vn(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
        # Identify boxes with IoU over the threshold. This
        # returns indices into ixs[1:], so add 1 to get
        # indices into ixs.
        remove_ixs = np.where(iou > iou_threshold)[0] + 1
        # Remove indices of the picked and overlapped boxes.
        ixs = np.delete(ixs, remove_ixs)
        ixs = np.delete(ixs, 0)
    return np.array(pick, dtype=np.int32) 
Example #17
Source File: blocks.py    From recruit with Apache License 2.0
def delete(self, loc):
        """
        Delete given loc(-s) from block in-place.
        """
        self.values = np.delete(self.values, loc, 0)
        self.mgr_locs = self.mgr_locs.delete(loc) 
Example #18
Source File: test_frame.py    From recruit with Apache License 2.0
def test_line_area_nan_df(self):
        values1 = [1, 2, np.nan, 3]
        values2 = [3, np.nan, 2, 1]
        df = DataFrame({'a': values1, 'b': values2})
        tdf = DataFrame({'a': values1,
                         'b': values2}, index=tm.makeDateIndex(k=4))

        for d in [df, tdf]:
            ax = _check_plot_works(d.plot)
            masked1 = ax.lines[0].get_ydata()
            masked2 = ax.lines[1].get_ydata()
            # remove nan for comparison purposes

            exp = np.array([1, 2, 3], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)

            exp = np.array([3, 2, 1], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)
            tm.assert_numpy_array_equal(
                masked1.mask, np.array([False, False, True, False]))
            tm.assert_numpy_array_equal(
                masked2.mask, np.array([False, True, False, False]))

            expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
            expected2 = np.array([3, 0, 2, 1], dtype=np.float64)

            ax = _check_plot_works(d.plot, stacked=True)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
                                        expected1 + expected2)

            ax = _check_plot_works(d.plot.area)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
                                        expected1 + expected2)

            ax = _check_plot_works(d.plot.area, stacked=False)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2) 
Example #19
Source File: ModelTree.py    From LearningX with MIT License
def _split_data(j_feature, threshold, X, y):
    idx_left = np.where(X[:, j_feature] <= threshold)[0]
    idx_right = np.delete(np.arange(0, len(X)), idx_left)
    assert len(idx_left) + len(idx_right) == len(X)
    return (X[idx_left], y[idx_left]), (X[idx_right], y[idx_right]) 
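
np.delete(np.arange(len(X)), idx_left) is a compact way to take the complement of an index set, which is what splits the rows into two disjoint groups here. For instance:

import numpy as np

idx_left = np.array([0, 2, 5])
idx_right = np.delete(np.arange(6), idx_left)   # array([1, 3, 4])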
Example #20
Source File: conditional_draws.py    From respy with MIT License
def update_cholcov(shocks_cholesky, n_wages):
    """Calculate Cholesky factors of conditional covariances for all possible cases.

    Parameters
    ----------
    shocks_cholesky : numpy.ndarray
        Cholesky factor of the covariance matrix before updating. Has
        dimension (n_choices, n_choices).
    n_wages : int
        Number of wage sectors.

    Returns
    -------
    updated_chols : numpy.ndarray
        Array of shape (n_wages + 1, n_choices, n_choices) with the Cholesky
        factors of the updated covariance matrices for each possible observed
        shock. The last element corresponds to not observing any shock.

    """
    n_choices = len(shocks_cholesky)
    cov = shocks_cholesky @ shocks_cholesky.T

    updated_chols = np.zeros((n_wages + 1, n_choices, n_choices))

    for i in range(n_wages):
        reduced_cov = np.delete(np.delete(cov, i, axis=1), i, axis=0)
        choice_var = cov[i, i]

        f = np.delete(cov[i], i)

        updated_reduced_cov = reduced_cov - np.outer(f, f) / choice_var
        updated_reduced_chol = robust_cholesky(updated_reduced_cov)

        updated_chols[i, :i, :i] = updated_reduced_chol[:i, :i]
        updated_chols[i, :i, i + 1 :] = updated_reduced_chol[:i, i:]
        updated_chols[i, i + 1 :, :i] = updated_reduced_chol[i:, :i]
        updated_chols[i, i + 1 :, i + 1 :] = updated_reduced_chol[i:, i:]

    updated_chols[-1] = shocks_cholesky

    return updated_chols 
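
The nested call np.delete(np.delete(cov, i, axis=1), i, axis=0) drops row and column i in a single expression, leaving the covariance of the remaining choices. A quick shape check:

import numpy as np

cov = np.arange(16.0).reshape(4, 4)
reduced = np.delete(np.delete(cov, 1, axis=1), 1, axis=0)
assert reduced.shape == (3, 3)   # rows/columns 0, 2 and 3 survive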
Example #21
Source File: ephysqc.py    From ibllib with MIT License
def isi_violations(spike_train, min_time, max_time, isi_threshold, min_isi=0):
    """Calculate ISI violations for a spike train.

    Based on metric described in Hill et al. (2011) J Neurosci 31: 8699-8705

    modified by Dan Denman from cortex-lab/sortingQuality GitHub by Nick Steinmetz

    Inputs:
    -------
    spike_train : array of spike times
    min_time : minimum time for potential spikes
    max_time : maximum time for potential spikes
    isi_threshold : threshold for isi violation
    min_isi : threshold for duplicate spikes

    Outputs:
    --------
    fpRate : rate of contaminating spikes as a fraction of overall rate
        A perfect unit has a fpRate = 0
        A unit with some contamination has a fpRate < 0.5
        A unit with lots of contamination has a fpRate > 1.0
    num_violations : total number of violations

    """

    duplicate_spikes = np.where(np.diff(spike_train) <= min_isi)[0]

    spike_train = np.delete(spike_train, duplicate_spikes + 1)
    isis = np.diff(spike_train)

    num_spikes = spike_train.size
    num_violations = np.sum(isis < isi_threshold)
    violation_time = 2 * num_spikes * (isi_threshold - min_isi)
    total_rate = spike_train.size / (max_time - min_time)
    violation_rate = num_violations / violation_time
    fpRate = violation_rate / total_rate

    return fpRate, num_violations 
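
Note the +1 shift when dropping duplicates: np.diff flags the gap preceding a near-duplicate spike, so the spike to delete is the one after each flagged gap. A toy check with invented times:

import numpy as np

t = np.array([0.0, 0.01, 0.011, 0.5, 0.9])   # 0.011 duplicates 0.01
dup = np.where(np.diff(t) <= 0.005)[0]       # flags gap index 1
t = np.delete(t, dup + 1)                    # removes t[2]
# t is now [0.0, 0.01, 0.5, 0.9]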
Example #22
Source File: data_cnn.py    From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License
def torgb(self, ske_joints):
        rgb = []
        maxmin = list()
        self.idx = 0
        for ske_joint in ske_joints:
            zero_row = []
            if self.dataset == 'NTU':
                for i in range(len(ske_joint)):
                    if (ske_joint[i, :] == np.zeros((1, 150))).all():
                        zero_row.append(i)
                ske_joint = np.delete(ske_joint, zero_row, axis=0)
                if (ske_joint[:, 0:75] == np.zeros((ske_joint.shape[0], 75))).all():
                    ske_joint = np.delete(ske_joint, range(75), axis=1)
                elif (ske_joint[:, 75:150] == np.zeros((ske_joint.shape[0], 75))).all():
                    ske_joint = np.delete(ske_joint, range(75, 150), axis=1)

            max_val = self.max
            min_val = self.min

            #### original rescale to 0-255
            ske_joint = 255 * (ske_joint - min_val) / (max_val - min_val)
            rgb_ske = np.reshape(ske_joint, (ske_joint.shape[0], ske_joint.shape[1] // 3, 3))
            rgb_ske = scipy.misc.imresize(rgb_ske, (224, 224)).astype(np.float32)
            rgb_ske = center(rgb_ske)
            rgb_ske = np.transpose(rgb_ske, [1, 0, 2])
            rgb_ske = np.transpose(rgb_ske, [2, 1, 0])
            rgb.append(rgb_ske)
            maxmin.append([max_val, min_val])
            self.idx = self.idx + 1

        return rgb, maxmin 
Example #23
Source File: ephys_fpga.py    From ibllib with MIT License
def _bpod_events_extraction(bpod_t, bpod_fronts):
    """
    From detected fronts on the bpod sync traces, outputs the synchronisation events
    related to trial start and valve opening
    :param bpod_t: numpy vector containing times of fronts
    :param bpod_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
    :return: numpy arrays of times t_trial_start, t_valve_open and t_iti_in
    """
    TRIAL_START_TTL_LEN = 2.33e-4
    VALVE_OPEN_TTL_LEN = 0.4
    # make sure that there are no 2 consecutive fall or consecutive rise events
    assert(np.all(np.abs(np.diff(bpod_fronts)) == 2))
    # make sure that the first event is a rise
    assert(bpod_fronts[0] == 1)
    # take only even time differences: ie. from rising to falling fronts
    dt = np.diff(bpod_t)[::2]
    # detect trial start events assuming a length of 0.23 ms, except the first trial
    i_trial_start = np.r_[0, np.where(dt <= TRIAL_START_TTL_LEN)[0] * 2]
    t_trial_start = bpod_t[i_trial_start]
    # # the first trial we detect the first falling edge to which we subtract 0.1ms
    # t_trial_start[0] -= 1e-4
    # the last trial is a dud and should be removed
    t_trial_start = t_trial_start[:-1]
    # valve open events are between 50ms to 300 ms
    i_valve_open = np.where(np.logical_and(dt > TRIAL_START_TTL_LEN,
                                           dt < VALVE_OPEN_TTL_LEN))[0] * 2
    i_valve_open = np.delete(i_valve_open, np.where(i_valve_open < 2))
    t_valve_open = bpod_t[i_valve_open]
    # ITI events are above 400 ms
    i_iti_in = np.where(dt > VALVE_OPEN_TTL_LEN)[0] * 2
    i_iti_in = np.delete(i_iti_in, np.where(i_iti_in < 2))  # filter i_iti_in itself, not i_valve_open
    t_iti_in = bpod_t[i_iti_in]
    # # some debug plots when needed
    # import matplotlib.pyplot as plt
    # import ibllib.plots as plots
    # plt.figure()
    # plots.squares(bpod_t, bpod_fronts)
    # plots.vertical_lines(t_valve_open, ymin=-0.2, ymax=1.2, linewidth=0.5, color='g')
    # plots.vertical_lines(t_trial_start, ymin=-0.2, ymax=1.2, linewidth=0.5, color='r')
    return t_trial_start, t_valve_open, t_iti_in
Example #24
Source File: training_trials.py    From ibllib with MIT License
def get_feedback_times_ge5(session_path, data=False):
    # get err and no go trigger times -- look for BNC2High of trial -- verify
    # only 2 onset times: go tone and noise; select 2nd/-1 OR select the one
    # that is greater than the nogo or err trial onset time
    if not data:
        data = raw.load_data(session_path)
    missed_bnc2 = 0
    rw_times, err_sound_times, merge = [np.zeros([len(data), ]) for _ in range(3)]

    for ind, tr in enumerate(data):
        st = tr['behavior_data']['Events timestamps'].get('BNC2High', None)
        if not st:
            st = np.array([np.nan, np.nan])
            missed_bnc2 += 1
        # xonar soundcard duplicates events, remove consecutive events too close together
        st = np.delete(st, np.where(np.diff(st) < 0.020)[0] + 1)
        rw_times[ind] = tr['behavior_data']['States timestamps']['reward'][0][0]
        # get the error sound only if the reward is nan
        err_sound_times[ind] = st[-1] if st.size >= 2 and np.isnan(rw_times[ind]) else np.nan
    if missed_bnc2 == len(data):
        logger_.warning('No BNC2 for feedback times, filling error trials NaNs')
    merge *= np.nan
    merge[~np.isnan(rw_times)] = rw_times[~np.isnan(rw_times)]
    merge[~np.isnan(err_sound_times)] = err_sound_times[~np.isnan(err_sound_times)]

    return merge 
Example #25
Source File: test_extractors.py    From ibllib with MIT License
def test_sync_bpod_bonsai_poor_quality_timestamps(self):
        sync_trials_robust = raw.sync_trials_robust
        drift_pol = np.array([11 * 1e-6, -20])  # bpod starts 20 secs before with 10 ppm drift
        np.random.seed(seed=784)
        t0_full = np.cumsum(np.random.rand(50)) + .001
        t1_full = np.polyval(drift_pol, t0_full) + t0_full
        t0 = t0_full.copy()
        t1 = t1_full.copy()

        t0_, t1_ = sync_trials_robust(t0, t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(t0, t1[:-1])
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(t0, t1[1:])
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(t0[1:], t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(t0[:-1], t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)

        t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_) 
Example #26
Source File: electrode_placement.py    From simnibs with GNU General Public License v3.0
def _edge_list(triangles):
    '''Gets the list of edges and adjacencies. This is a 2D version of the get_faces()
    method in mesh_io '''
    edges = triangles[:, [[0, 1], [0, 2], [1, 2]]]
    edges = edges.reshape(-1, 2)
    hash_array = _hash_rows(edges)
    unique, idx, inv, count = np.unique(hash_array, return_index=True,
                                        return_inverse=True, return_counts=True)
    edges = edges[idx]
    edge_adjacency_list = -np.ones((len(unique), 2), dtype=int)
    edge_adjacency_list[:, 0] = idx // 3

    if np.any(count > 2):
        raise ValueError('Invalid mesh: found an edge with more than 2 adjacent'
                         ' triangles!')

    # Remove the edges already seen from consideration
    # Second round in order to make adjacency list
    # create a new array with a mask in the elements already seen
    mask = unique[-1] + 1
    hash_array_masked = np.copy(hash_array)
    hash_array_masked[idx] = mask
    # make another array, where we delete the elements we have already seen
    hash_array_reduced = np.delete(hash_array, idx)
    # Finds where each element of the second array is in the first array
    # (https://stackoverflow.com/a/8251668)
    hash_array_masked_sort = hash_array_masked.argsort()
    hash_array_repeated_pos = hash_array_masked_sort[
        np.searchsorted(hash_array_masked[hash_array_masked_sort], hash_array_reduced)]
    # Now find the index of the face corresponding to each element in the
    # hash_array_reduced
    edges_repeated = np.searchsorted(unique, hash_array_reduced)
    # Finally, fill out the second column in the adjacency list
    edge_adjacency_list[edges_repeated, 1] = hash_array_repeated_pos // 3

    return edges, inv.reshape(-1, 3), edge_adjacency_list 
Example #27
Source File: train.py    From tartarus with MIT License
def batch_block_generator(params, y_path, N_train, id2gt, X_meta=None,
                          val_from_file=False):
    hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
    f = h5py.File(hdf5_file,"r")
    block_step = 50000
    batch_size = params['training']['n_minibatch']
    randomize = True
    with_meta = False
    if X_meta is not None:
        with_meta = True
    while 1:
        for i in range(0, N_train, block_step):
            x_block = f['features'][i:min(N_train, i+block_step)]
            index_block = f['index'][i:min(N_train, i+block_step)]
            #y_block = f['targets'][i:min(N_train,i+block_step)]
            x_block = np.delete(x_block, np.where(index_block == ""), axis=0)
            index_block = np.delete(index_block, np.where(index_block == ""))
            y_block = np.asarray([id2gt[id] for id in index_block])
            if params['training']['normalize_y']:
                normalize(y_block, copy=False)
            items_list = list(range(x_block.shape[0]))  # list so random.shuffle works under Python 3
            if randomize:
                random.shuffle(items_list)
            for j in range(0, len(items_list), batch_size):
                if j+batch_size <= x_block.shape[0]:
                    items_in_batch = items_list[j:j+batch_size]
                    x_batch = x_block[items_in_batch]
                    y_batch = y_block[items_in_batch]
                    if with_meta:
                        x_batch = [x_batch, X_meta[items_in_batch]]
                    yield (x_batch, y_batch) 
Example #28
Source File: utils.py    From fenics-topopt with MIT License
def deleterowcol(A, delrow, delcol):
    """Assumes that the matrix is in symmetric CSC form!"""
    m = A.shape[0]
    keep = np.delete(np.arange(0, m), delrow)
    A = A[keep, :]
    keep = np.delete(np.arange(0, m), delcol)
    A = A[:, keep]
    return A 
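
A usage sketch for deleterowcol on a small dense array; the same integer-index slicing also works on the scipy.sparse.csc_matrix the docstring assumes:

import numpy as np

A = np.arange(16).reshape(4, 4)
out = deleterowcol(A, [1], [3])   # drop row 1 and column 3
assert out.shape == (3, 3)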
Example #29
Source File: test_frame.py    From recruit with Apache License 2.0
def test_memory_leak(self):
        """ Check that every plot type gets properly collected. """
        import weakref
        import gc

        results = {}
        for kind in plotting._core._plot_klass.keys():
            if not _ok_for_gaussian_kde(kind):
                continue
            args = {}
            if kind in ['hexbin', 'scatter', 'pie']:
                df = self.hexbin_df
                args = {'x': 'A', 'y': 'B'}
            elif kind == 'area':
                df = self.tdf.abs()
            else:
                df = self.tdf

            # Use a weakref so we can see if the object gets collected without
            # also preventing it from being collected
            results[kind] = weakref.proxy(df.plot(kind=kind, **args))

        # have matplotlib delete all the figures
        tm.close()
        # force a garbage collection
        gc.collect()
        for key in results:
            # check that every plot was collected
            with pytest.raises(ReferenceError):
                # need to actually access something to get an error
                results[key].lines 
Example #30
Source File: nms.py    From H3DNet with MIT License
def nms_3d_faster_samecls(boxes, overlap_threshold, old_type=False):
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    z1 = boxes[:, 2]
    x2 = boxes[:, 3]
    y2 = boxes[:, 4]
    z2 = boxes[:, 5]
    score = boxes[:, 6]
    cls = boxes[:, 7]
    area = (x2 - x1) * (y2 - y1) * (z2 - z1)

    I = np.argsort(score)
    pick = []
    while I.size != 0:
        last = I.size
        i = I[-1]
        pick.append(i)

        # intersection of the picked box with every remaining box
        xx1 = np.maximum(x1[i], x1[I[:last-1]])
        yy1 = np.maximum(y1[i], y1[I[:last-1]])
        zz1 = np.maximum(z1[i], z1[I[:last-1]])
        xx2 = np.minimum(x2[i], x2[I[:last-1]])
        yy2 = np.minimum(y2[i], y2[I[:last-1]])
        zz2 = np.minimum(z2[i], z2[I[:last-1]])
        cls1 = cls[i]
        cls2 = cls[I[:last-1]]

        l = np.maximum(0, xx2 - xx1)
        w = np.maximum(0, yy2 - yy1)
        h = np.maximum(0, zz2 - zz1)

        if old_type:
            o = (l * w * h) / area[I[:last-1]]
        else:
            inter = l * w * h
            o = inter / (area[i] + area[I[:last-1]] - inter)
        # zero out the overlap across classes so only same-class boxes suppress each other
        o = o * (cls1 == cls2)

        # remove the picked box and all same-class boxes above the overlap threshold
        I = np.delete(I, np.concatenate(([last-1], np.where(o > overlap_threshold)[0])))

    return pick