Python numpy.delete() Examples

The following are 29 code examples showing how to use numpy.delete(). They are extracted from open source projects; the originating project, author, file, and license are noted above each example where available.

You may also want to check out all available functions and classes of the module numpy.
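
As a quick refresher before the examples: numpy.delete(arr, obj, axis=None) returns a new array with the entries at index (or indices) obj removed along the given axis; it never modifies arr in place, and with axis=None it operates on the flattened array. A minimal sketch:

import numpy as np

a = np.array([[1, 2, 3, 4],
              [5, 6, 7, 8],
              [9, 10, 11, 12]])

print(np.delete(a, 1, axis=0))       # drop the second row -> shape (2, 4)
print(np.delete(a, [0, 3], axis=1))  # drop first and last columns -> shape (3, 2)
print(np.delete(a, 2))               # no axis: flattens first -> 1-D array of length 11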

Example 1
Project: cat-bbs   Author: aleju   File: common.py   License: MIT License
def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert len(heatmap.shape) == 2 or \
        (len(heatmap.shape) == 3 and heatmap.shape[2] == 1)
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]

    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0

    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1-alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix 
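
The np.delete call above strips the alpha channel: plt.get_cmap('jet') maps a 2-D array to an (H, W, 4) RGBA array, and deleting index 3 along axis 2 leaves the (H, W, 3) RGB planes. The same idea in isolation (random data as a stand-in for the colormapped heatmap):

import numpy as np

rgba = np.random.rand(4, 4, 4)    # stand-in for cmap(heatmap): (H, W, RGBA)
rgb = np.delete(rgba, 3, axis=2)  # drop the alpha plane
assert rgb.shape == (4, 4, 3)
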
Example 2
Project: EXOSIMS   Author: dsavransky   File: starkAYO_staticSchedule.py   License: BSD 3-Clause "New" or "Revised" License
def sacrificeStarCbyT(self, sInds, t_dets, fZ, fEZ, WA, overheadTime):
        """Sacrifice the worst performing CbyT star
        Args:
            sInds[nStars] - indices of stars in the list
            t_dets[nStars] - time to observe each star (in days)
            fZ[nStars] - zodiacal light for each target
            fEZ - 0 
            WA - inner working angle of the instrument
            overheadTime - overheadTime added to each observation
        Return:
            sInds[nStars] - indices of stars in the list
            t_dets[nStars] - time to observe each star (in days)
            sacrificedStarTime - time to distribute in days       
        """
        CbyT = self.Completeness.comp_per_intTime(t_dets*u.d, self.TargetList, sInds, self.valfZmin[sInds], fEZ, WA, self.mode, self.Cb[sInds], self.Csp[sInds])/t_dets  # takes 5 seconds to run once for all stars

        sacrificeIndex = np.argmin(CbyT)  # index of the star to sacrifice

        # Need index of sacrificed star by this point
        sacrificedStarTime = t_dets[sacrificeIndex] + overheadTime  # time being sacrificed
        sInds = np.delete(sInds, sacrificeIndex)
        t_dets = np.delete(t_dets, sacrificeIndex)
        return sInds, t_dets, sacrificedStarTime 
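
Because np.delete returns a new array rather than mutating, deleting the same index from sInds and t_dets keeps the two arrays aligned element for element. The pattern in isolation, with hypothetical values:

import numpy as np

sInds = np.array([10, 11, 12, 13])       # hypothetical star indices
t_dets = np.array([1.0, 0.2, 3.5, 0.8])  # hypothetical observation times
worst = np.argmin(t_dets)                # stand-in for the CbyT ranking
sInds = np.delete(sInds, worst)          # -> [10 12 13]
t_dets = np.delete(t_dets, worst)        # -> [1.  3.5 0.8]
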
Example 3
Project: Modeling-Cloth   Author: the3dadvantage   File: ModelingCloth.py   License: MIT License
def create_vertex_groups(groups=['common', 'not_used'], weights=[0.0, 0.0], ob=None):
    '''Creates vertex groups and sets weights. "groups" is a list of strings
    for the names of the groups. "weights" is a list of weights corresponding 
    to the strings. Each vertex is assigned a weight for each vertex group to
    avoid calling vertex weights that are not assigned. If the groups are
    already present, the previous weights will be preserved. To reset weights
    delete the created groups'''
    if ob is None:
        ob = bpy.context.object
    vg = ob.vertex_groups
    for g in range(0, len(groups)):
        if groups[g] not in vg.keys(): # Don't create groups if they are already there
            vg.new(groups[g])
            vg[groups[g]].add(range(0,len(ob.data.vertices)), weights[g], 'REPLACE')
        else:
            vg[groups[g]].add(range(0,len(ob.data.vertices)), 0, 'ADD') # This way we avoid resetting the weights for existing groups. 
Example 4
Project: Modeling-Cloth   Author: the3dadvantage   File: ModelingCloth.py   License: MIT License
def create_sew_edges():

    bpy.ops.mesh.bridge_edge_loops()
    bpy.ops.mesh.delete(type='ONLY_FACE')
    return
    # highlight a sew edge
    # compare vertex counts
    # subdivide to match counts
    # distribute and smooth back into mesh
    # create sew lines

# sewing functions ---------------->>> 
Example 5
def test_pruneFeatureMap_ShouldPruneRightParams(self):
        dropped_index = 0
        output = self.module(self.input)
        torch.autograd.backward(output, self.upstream_gradient)

        old_weight_size = self.module.weight.size()
        old_bias_size = self.module.bias.size()
        old_out_channels = self.module.out_channels
        old_weight_values = self.module.weight.data.cpu().numpy()

        # ensure that the chosen index is dropped
        self.module.prune_feature_map(dropped_index)

        # check bias size
        self.assertEqual(self.module.bias.size()[0], (old_bias_size[0]-1))
        # check output channels
        self.assertEqual(self.module.out_channels, old_out_channels-1)

        _, *other_old_weight_sizes = old_weight_size
        # check weight size
        self.assertEqual(self.module.weight.size(), (old_weight_size[0]-1, *other_old_weight_sizes))
        # check weight value
        expected = np.delete(old_weight_values, dropped_index, 0)
        self.assertTrue(np.array_equal(self.module.weight.data.cpu().numpy(), expected)) 
Example 6
def test_PLinearDropInputs_ShouldDropRightParams(self):
        dropped_index = 0

        # assume input is 2x2x2, 2 layers of 2x2
        input_shape = (2, 2, 2)
        module = pnn.PLinear(8, 10)

        old_num_features = module.in_features
        old_weight = module.weight.data.cpu().numpy()
        resized_old_weight = np.resize(old_weight, (module.out_features, *input_shape))

        module.drop_inputs(input_shape, dropped_index)
        new_shape = module.weight.size()

        # ensure that the chosen index is dropped
        expected_weight = np.resize(np.delete(resized_old_weight, dropped_index, 1), new_shape)
        output = module.weight.data.cpu().numpy()
        self.assertTrue(np.array_equal(output, expected_weight))

        # ensure num features is reduced
        self.assertEqual(module.in_features, old_num_features-1) 
Example 7
def test_PBatchNorm2dDropInputChannel_ShouldDropRightParams(self):
        dropped_index = 0
        module = pnn.PBatchNorm2d(2)

        old_num_features = module.num_features
        old_bias = module.bias.data.cpu().numpy()
        old_weight = module.weight.data.cpu().numpy()

        module.drop_input_channel(dropped_index)

        # ensure that the chosen index is dropped
        expected_weight = np.delete(old_weight, dropped_index, 0)
        self.assertTrue(np.array_equal(module.weight.data.cpu().numpy(), expected_weight))
        expected_bias = np.delete(old_bias, dropped_index, 0)
        self.assertTrue(np.array_equal(module.bias.data.cpu().numpy(), expected_bias))
        # ensure num features is reduced
        self.assertEqual(module.num_features, old_num_features-1) 
Example 8
Project: NeuroKit   Author: neuropsychology   File: signal_fixpeaks.py   License: MIT License
def _correct_misaligned(misaligned_idcs, peaks):

    corrected_peaks = peaks.copy()
    misaligned_idcs = np.array(misaligned_idcs)
    # Make sure to not generate negative indices, or indices that exceed
    # the total number of peaks. prev_peaks and next_peaks must have the
    # same number of elements.
    valid_idcs = np.logical_and(
        misaligned_idcs > 1, misaligned_idcs < len(corrected_peaks) - 1  # pylint: disable=E1111
    )
    misaligned_idcs = misaligned_idcs[valid_idcs]
    prev_peaks = corrected_peaks[[i - 1 for i in misaligned_idcs]]
    next_peaks = corrected_peaks[[i + 1 for i in misaligned_idcs]]

    half_ibi = (next_peaks - prev_peaks) / 2
    peaks_interp = prev_peaks + half_ibi
    # Shift the R-peaks from the old to the new position.
    corrected_peaks = np.delete(corrected_peaks, misaligned_idcs)
    corrected_peaks = np.concatenate((corrected_peaks, peaks_interp)).astype(int)
    corrected_peaks.sort(kind="mergesort")

    return corrected_peaks 
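
The delete/concatenate/sort sequence replaces each misaligned peak with the midpoint of its two neighbors. A compact illustration with made-up sample positions:

import numpy as np

peaks = np.array([100, 200, 290, 400])  # hypothetical R-peak sample positions
bad = np.array([2])                     # peak at 290 is misaligned
interp = (peaks[bad - 1] + peaks[bad + 1]) / 2  # neighbor midpoint -> 300.0
peaks = np.delete(peaks, bad)
peaks = np.sort(np.concatenate((peaks, interp)).astype(int))
# peaks -> [100 200 300 400]
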
Example 9
Project: NeuroKit   Author: neuropsychology   File: rsp_findpeaks.py   License: MIT License
def _rsp_findpeaks_biosppy(rsp_cleaned, sampling_rate):

    extrema = _rsp_findpeaks_extrema(rsp_cleaned)
    extrema, amplitudes = _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0)

    peaks, troughs = _rsp_findpeaks_sanitize(extrema, amplitudes)

    # Apply minimum-period outlier criterion (exclude inter-breath intervals
    # that produce a breathing rate larger than 35 breaths per minute).
    outlier_idcs = np.where((np.diff(peaks) / sampling_rate) < 1.7)[0]

    peaks = np.delete(peaks, outlier_idcs)
    troughs = np.delete(troughs, outlier_idcs)

    info = {"RSP_Peaks": peaks, "RSP_Troughs": troughs}
    return info 
Example 10
Project: NeuroKit   Author: neuropsychology   File: rsp_findpeaks.py   License: MIT License
def _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0.3):

    # Only consider those extrema that have a minimum vertical distance to
    # their direct neighbor, i.e., define outliers in absolute amplitude
    # difference between neighboring extrema.
    vertical_diff = np.abs(np.diff(rsp_cleaned[extrema]))
    median_diff = np.median(vertical_diff)
    min_diff = np.where(vertical_diff > (median_diff * amplitude_min))[0]
    extrema = extrema[min_diff]

    # Make sure that the alternation of peaks and troughs is unbroken. If
    # alternation of sign in extdiffs is broken, remove the extrema that
    # cause the breaks.
    amplitudes = rsp_cleaned[extrema]
    extdiffs = np.sign(np.diff(amplitudes))
    extdiffs = np.add(extdiffs[0:-1], extdiffs[1:])
    removeext = np.where(extdiffs != 0)[0] + 1
    extrema = np.delete(extrema, removeext)
    amplitudes = np.delete(amplitudes, removeext)

    return extrema, amplitudes 
Example 11
Project: H3DNet   Author: zaiweizhang   File: nms.py   License: MIT License
def nms_crnr_dist(boxes, conf, overlap_threshold):
        
    I = np.argsort(conf)
    pick = []
    while I.size != 0:
        last = I.size
        i = I[-1]
        pick.append(i)        
        
        scores = []
        for ind in I[:-1]:
            scores.append(bbox_corner_dist_measure(boxes[i,:], boxes[ind, :]))

        I = np.delete(I, np.concatenate(([last-1], np.where(np.array(scores)>overlap_threshold)[0])))

    return pick 
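
Each pass of the loop picks the highest-confidence box (the last entry of the argsorted index vector) and removes it together with every box whose corner-distance score exceeds the threshold; np.delete accepts the concatenated list of positions in one call. Schematically, with made-up scores:

import numpy as np

I = np.array([3, 0, 2, 1])          # indices sorted by ascending confidence
scores = np.array([0.1, 0.9, 0.2])  # hypothetical overlaps of I[:-1] with I[-1]
overlap_threshold = 0.5
drop = np.concatenate(([I.size - 1], np.where(np.array(scores) > overlap_threshold)[0]))
I = np.delete(I, drop)              # removes positions 3 and 1 -> [3 2]
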
Example 12
def compute_max_min(self, ske_joints):
        max_vals, min_vals = list(), list()
        for ske_joint in ske_joints:
            zero_row = []
            if self.dataset == 'NTU':
                for i in range(len(ske_joint)):
                    if (ske_joint[i, :] == np.zeros((1, 150))).all():
                        zero_row.append(i)
                ske_joint = np.delete(ske_joint, zero_row, axis=0)
                if (ske_joint[:, 0:75] == np.zeros((ske_joint.shape[0], 75))).all():
                    ske_joint = np.delete(ske_joint, range(75), axis=1)
                elif (ske_joint[:, 75:150] == np.zeros((ske_joint.shape[0], 75))).all():
                    ske_joint = np.delete(ske_joint, range(75, 150), axis=1)

            max_val = ske_joint.max()
            min_val = ske_joint.min()
            max_vals.append(float(max_val))
            min_vals.append(float(min_val))
        max_vals, min_vals = np.array(max_vals), np.array(min_vals)

        return max_vals.max(), min_vals.min() 
Example 13
Project: recruit   Author: Frank-qlu   File: test_series.py   License: Apache License 2.0
def test_line_area_nan_series(self):
        values = [1, 2, np.nan, 3]
        s = Series(values)
        ts = Series(values, index=tm.makeDateIndex(k=4))

        for d in [s, ts]:
            ax = _check_plot_works(d.plot)
            masked = ax.lines[0].get_ydata()
            # remove nan for comparison purpose
            exp = np.array([1, 2, 3], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
            tm.assert_numpy_array_equal(
                masked.mask, np.array([False, False, True, False]))

            expected = np.array([1, 2, 0, 3], dtype=np.float64)
            ax = _check_plot_works(d.plot, stacked=True)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
            ax = _check_plot_works(d.plot.area)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
            ax = _check_plot_works(d.plot.area, stacked=False)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) 
Example 14
Project: fenics-topopt   Author: zfergus   File: utils.py   License: MIT License
def deleterowcol(A, delrow, delcol):
    """Assumes that matrix is in symmetric csc form !"""
    m = A.shape[0]
    keep = np.delete(np.arange(0, m), delrow)
    A = A[keep, :]
    keep = np.delete(np.arange(0, m), delcol)
    A = A[:, keep]
    return A 
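
A dense numpy matrix could be trimmed directly with np.delete(A, delrow, axis=0) followed by np.delete(A, delcol, axis=1); the keep-index detour here is presumably because scipy.sparse matrices cannot be passed to np.delete, while row/column fancy indexing on a csc matrix is cheap. The dense equivalent, for comparison:

import numpy as np

A = np.arange(16).reshape(4, 4)
B = np.delete(np.delete(A, [1], axis=0), [1], axis=1)  # drop row 1 and column 1
assert B.shape == (3, 3)
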
Example 15
Project: animal-tracking   Author: colinlaney   File: track.py   License: Creative Commons Zero v1.0 Universal
def drawFloorCrop(event, x, y, flags, params):
    global perspectiveMatrix, name, RENEW_TETRAGON
    imgCroppingPolygon = np.zeros_like(params['imgFloorCorners'])
    if event == cv2.EVENT_RBUTTONUP:
        cv2.destroyWindow(f'Floor Corners for {name}')
    if len(params['croppingPolygons'][name]) > 4 and event == cv2.EVENT_LBUTTONUP:
        RENEW_TETRAGON = True
        h = params['imgFloorCorners'].shape[0]
        # delete 5th extra vertex of the floor cropping tetragon
        params['croppingPolygons'][name] = np.delete(params['croppingPolygons'][name], -1, 0)
        params['croppingPolygons'][name] = params['croppingPolygons'][name] - [h,0]
        
        # Sort cropping tetragon vertices counter-clockwise starting with top left
        params['croppingPolygons'][name] = counterclockwiseSort(params['croppingPolygons'][name])
        # Get the matrix of perspective transformation
        params['croppingPolygons'][name] = np.reshape(params['croppingPolygons'][name], (4,2))
        tetragonVertices = np.float32(params['croppingPolygons'][name])
        tetragonVerticesUpd = np.float32([[0,0], [0,h], [h,h], [h,0]])
        perspectiveMatrix[name] = cv2.getPerspectiveTransform(tetragonVertices, tetragonVerticesUpd)
    if event == cv2.EVENT_LBUTTONDOWN:
        if len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON:
            params['croppingPolygons'][name] = np.array([[0,0]])
            RENEW_TETRAGON = False
        if len(params['croppingPolygons'][name]) == 1:
            params['croppingPolygons'][name][0] = [x,y]
        params['croppingPolygons'][name] = np.append(params['croppingPolygons'][name], [[x,y]], axis=0)
    if event == cv2.EVENT_MOUSEMOVE and not (len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON):
        params['croppingPolygons'][name][-1] = [x,y]
        if len(params['croppingPolygons'][name]) > 1:
            cv2.fillPoly(
                imgCroppingPolygon,
                [np.reshape(
                    params['croppingPolygons'][name],
                    (len(params['croppingPolygons'][name]),2)
                )],
                BGR_COLOR['green'], cv2.LINE_AA)
            imgCroppingPolygon = cv2.addWeighted(params['imgFloorCorners'], 1.0, imgCroppingPolygon, 0.5, 0.)
            cv2.imshow(f'Floor Corners for {name}', imgCroppingPolygon) 
Example 16
Project: neuropythy   Author: noahbenson   File: core.py   License: GNU Affero General Public License v3.0
def value(self, params):
        params = flattest(params)
        n = len(params)
        ii = np.arange(n)
        res = np.zeros(n)
        for ((mn,mx), f) in self.pieces_with_default:
            if len(ii) == 0: break
            k = np.where((params >= mn) & (params <= mx))[0]
            if len(k) == 0: continue
            kk = ii[k]
            res[kk] = f.value(params[k])
            ii = np.delete(ii, k)
            params = np.delete(params, k)
        return res 
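
Deleting the handled positions k from both ii and params shrinks the working set on every iteration, so each parameter is evaluated by at most one piece even if the pieces' ranges overlap. The bookkeeping in miniature:

import numpy as np

params = np.array([0.2, 1.5, 0.7, 2.4])
ii = np.arange(len(params))    # remaining positions into the result array
k = np.where(params < 1.0)[0]  # say the first piece covers [0, 1)
ii = np.delete(ii, k)          # -> [1 3]
params = np.delete(params, k)  # -> [1.5 2.4]
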
Example 17
Project: neuropythy   Author: noahbenson   File: core.py   License: GNU Affero General Public License v3.0
def jacobian(self, params, into=None):
        params = flattest(params)
        n = len(params)
        ii = np.arange(n)
        (rs,cs,zs) = ([],[],[])
        for ((mn,mx), f) in self.pieces_with_default:
            if len(ii) == 0: break
            k = np.where((params >= mn) & (params <= mx))[0]
            if len(k) == 0: continue
            kk = ii[k]
            j = f.jacobian(params[k])
            if j.shape[0] == 1 and j.shape[1] > 1: j = repmat(j, j.shape[1], 1)
            (rj,cj,vj) = sps.find(j)
            rs.append(kk[rj])
            cs.append(kk[cj])
            zs.append(vj)
            ii = np.delete(ii, k)
            params = np.delete(params, k)
        (rs,cs,zs) = [np.concatenate(us) if len(us) > 0 else [] for us in (rs,cs,zs)]
        dz = sps.csr_matrix((zs, (rs,cs)), shape=(n,n))
        return safe_into(into, dz) 
Example 18
Project: fullrmc   Author: bachiraoun   File: Engine.py   License: GNU Affero General Public License v3.0
def _on_collector_collect_atom(self, realIndex):
        assert not self._atomsCollector.is_collected(realIndex), LOGGER.error("Trying to collect atom index %i which is already collected."%realIndex)
        relativeIndex = self._atomsCollector.get_relative_index(realIndex)
        # create dataDict and remove
        dataDict = {}
        dataDict['realCoordinates'] = self.__realCoordinates[relativeIndex,:]
        dataDict['boxCoordinates']  = self.__boxCoordinates[relativeIndex, :]
        dataDict['moleculesIndex']  = self.__moleculesIndex[relativeIndex]
        dataDict['moleculesName']   = self.__moleculesName[relativeIndex]
        dataDict['elementsIndex']   = self.__elementsIndex[relativeIndex]
        dataDict['allElements']     = self.__allElements[relativeIndex]
        dataDict['namesIndex']      = self.__namesIndex[relativeIndex]
        dataDict['allNames']        = self.__allNames[relativeIndex]
        assert self.__numberOfAtomsPerElement[dataDict['allElements']]-1>0, LOGGER.error("Collecting last atom of any element type is not allowed. It's better to restart your simulation without any '%s' rather than removing them all!"%dataDict['allElements'])
        # collect atom
        self._atomsCollector.collect(index=realIndex, dataDict=dataDict)
        # collect all constraints BEFORE removing data from engine.
        for c in self.__constraints:
            c._on_collector_collect_atom(realIndex=realIndex)
        # remove data from engine AFTER collecting constraints data.
        self.__realCoordinates = np.delete(self.__realCoordinates, relativeIndex, axis=0)
        self.__boxCoordinates  = np.delete(self.__boxCoordinates,  relativeIndex, axis=0)
        self.__moleculesIndex  = np.delete(self.__moleculesIndex,  relativeIndex, axis=0)
        self.__moleculesName.pop(relativeIndex)
        self.__elementsIndex   = np.delete(self.__elementsIndex, relativeIndex, axis=0)
        self.__allElements.pop(relativeIndex)
        self.__namesIndex      = np.delete(self.__namesIndex,    relativeIndex, axis=0)
        self.__allNames.pop(relativeIndex)
        # adjust other attributes
        self.__numberOfAtomsPerName[dataDict['allNames']]       -= 1
        self.__numberOfAtomsPerElement[dataDict['allElements']] -= 1
        #self.__elements = sorted(set(self.__allElements)) # no element should disappear
        self.__names             = sorted(set(self.__names))
        self.__numberOfMolecules = len(set(self.__moleculesIndex))
        # update number density in periodic boundary conditions only
        if self.__isPBC:
            self.__numberDensity = FLOAT_TYPE(self.numberOfAtoms) / FLOAT_TYPE(self.__volume) 
Example 19
Project: fullrmc   Author: bachiraoun   File: Engine.py   License: GNU Affero General Public License v3.0
def delete_frame(self, frame):
        """
        Delete frame data from the Engine as well as from the repository.

        :Parameters:
            #. frame (string): The frame to delete.
        """
        isNormalFrame, isMultiframe, isSubframe = self.get_frame_category(frame)
        assert frame != self.__usedFrame, LOGGER.error("It's not allowed to delete the used frame '%s'. Change used frame using Engine.set_used_frame method and try again"%frame)
        if isMultiframe:
            _multi = self.usedFrame.split(os.sep)[0]
            assert frame != _multi, LOGGER.error("It's not allowed to delete multiframe '%s' of used frame '%s'. Change used frame using Engine.set_used_frame method and try again"%(_multi,self.usedFrame))
        if isNormalFrame:
            _f = [f for f in self.__frames if self.__frames[f] is None]
            assert len(_f)>=1, LOGGER.error("No traditional frames found. This shouldn't have happened. PLEASE REPORT")
            assert len(_f)>=2, LOGGER.error("It's not allowed to delete the last traditional frame in engine '%s'"%(_f[0],))
        if isSubframe:
            _name = frame.split(os.sep)[0]
            if len(self.__frames[_name]['frames_name']) == 1:
                LOGGER.usage("Deleting last subframe '%s' of multiframe '%s' has resulted in deleting the multiframe"%(frame, _name))
                frame         = _name
                isNormalFrame = False
                isSubframe    = False
                isMultiframe  = True
        # remove frame directory
        if self.__repository is not None:
            self.__repository.remove_directory(relativePath=frame, clean=True)
        # reset frames
        if isNormalFrame or isMultiframe:
            self.__frames.pop(frame)
        else:
            _multiframe, _subframe = frame.split(os.sep)
            self.__frames[_multiframe]['frames_name'] = [frm for frm in self.__frames[_multiframe]['frames_name'] if frm !=_subframe]
        # save frames
        if self.__repository is not None:
            self.__repository.update_file(value=self.__frames, relativePath='_Engine__frames') 
Example 20
Project: fullrmc   Author: bachiraoun   File: AtomicCoordinationConstraints.py   License: GNU Affero General Public License v3.0
def _on_collector_collect_atom(self, realIndex):
        # get relative index
        relativeIndex = self._atomsCollector.get_relative_index(realIndex)
        # create data dict
        dataDict = {}
        # cores indexes
        coresIndexes = []
        for idx, ci in enumerate(self.__coresIndexes):
            coresIndexes.append( np.where(ci==relativeIndex)[0] )
            ci = np.delete(ci, coresIndexes[-1], axis=0)
            ci[np.where(ci>relativeIndex)[0]] -= 1
            self.__coresIndexes[idx] = ci
        dataDict['coresIndexes'] = coresIndexes
        # shells indexes
        shellsIndexes = []
        for idx, si in enumerate(self.__shellsIndexes):
            shellsIndexes.append( np.where(si==relativeIndex)[0] )
            si = np.delete(si, shellsIndexes[-1], axis=0)
            si[np.where(si>relativeIndex)[0]] -= 1
            self.__shellsIndexes[idx] = si
        dataDict['shellsIndexes'] = shellsIndexes
        # asCoreDefIdxs and inShellDefIdxs
        dataDict['asCoreDefIdxs']  = self.__asCoreDefIdxs.pop(relativeIndex)
        dataDict['inShellDefIdxs'] = self.__inShellDefIdxs.pop(relativeIndex)
        # correct number of cores without collecting
        for idx, ci in enumerate(coresIndexes):
            self.__numberOfCores[idx] -= len(ci)
        # collect atom
        self._atomsCollector.collect(realIndex, dataDict=dataDict) 
Example 21
Project: fullrmc   Author: bachiraoun   File: DistanceConstraints.py   License: GNU Affero General Public License v3.0
def _on_collector_collect_atom(self, realIndex):
        # get relative index
        relativeIndex = self._atomsCollector.get_relative_index(realIndex)
        # create dataDict
        dataDict = {}
        dataDict['typesIndex'] = self.typesIndex[relativeIndex]
        dataDict['allTypes']   = self.allTypes[relativeIndex]
        # reduce all indexes above relativeIndex in typesIndex
        # delete data
        self.__typesIndex = np.delete(self.__typesIndex, relativeIndex, axis=0)
        self.__allTypes   = np.delete(self.__allTypes,     relativeIndex, axis=0)
        self.__numberOfAtomsPerType[dataDict['allTypes']] -= 1
        # collect atom
        self._atomsCollector.collect(realIndex, dataDict=dataDict) 
Example 22
def _recall_prec(self, record, count):
        """ get recall and precision from internal records """
        record = np.delete(record, np.where(record[:, 1].astype(int) == 0)[0], axis=0)
        sorted_records = record[record[:,0].argsort()[::-1]]
        tp = np.cumsum(sorted_records[:, 1].astype(int) == 1)
        fp = np.cumsum(sorted_records[:, 1].astype(int) == 2)
        if count <= 0:
            recall = tp * 0.0
        else:
            recall = tp / float(count)
        prec = tp.astype(float) / (tp + fp)
        return recall, prec 
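
Passing np.where(...)[0] with axis=0 removes every row whose flag column is 0 in one call. For conditions like this, boolean masking is an equivalent alternative:

import numpy as np

record = np.array([[0.9, 1], [0.8, 0], [0.7, 2]])  # hypothetical (score, flag) rows
kept = np.delete(record, np.where(record[:, 1].astype(int) == 0)[0], axis=0)
assert np.array_equal(kept, record[record[:, 1].astype(int) != 0])
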
Example 23
Project: pywr   Author: pywr   File: test_parameters.py   License: GNU General Public License v3.0
def test_scenario_daily_profile(self, simple_linear_model):

        model = simple_linear_model
        scenario = Scenario(model, 'A', 2)
        values = np.array([np.arange(366, dtype=np.float64), np.arange(366, 0, -1, dtype=np.float64)])

        # Remove values for 29th feb as not testing leap year in this func
        expected_values = np.delete(values.T, 59, 0)

        p = ScenarioDailyProfileParameter.load(model, {"scenario": "A", "values": values})

        AssertionRecorder(model, p, expected_data=expected_values)

        model.setup()
        model.run() 
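
values.T has shape (366, 2), one row per day of a leap year, so row 59 (zero-based) is 29 February; deleting it yields the (365, 2) profile the recorder should see in non-leap years:

import numpy as np

values = np.array([np.arange(366.0), np.arange(366.0, 0, -1)])
expected = np.delete(values.T, 59, 0)  # drop the 29th-of-February row
assert expected.shape == (365, 2)
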
Example 24
def secondSmallest(d_diff_pts):
    """For a list of points, return the value and ind of the second smallest
    args:
        d_diff_pts - numy array of floats of distances between points
    returns:
        secondSmallest_value - 
        secondSmallest_ind - 
    """
    tmp_inds = np.arange(len(d_diff_pts))
    tmp_inds_min0 = np.argmin(d_diff_pts)
    tmp_inds = np.delete(tmp_inds, tmp_inds_min0)
    tmp_d_diff_pts = np.delete(d_diff_pts, tmp_inds_min0)
    secondSmallest_value = min(tmp_d_diff_pts)
    secondSmallest_ind = np.argmin(np.abs(d_diff_pts - secondSmallest_value))
    return secondSmallest_value, secondSmallest_ind 
Example 25
Project: Modeling-Cloth   Author: the3dadvantage   File: ModelingCloth.py   License: MIT License
def init_cloth(self, context):
    global data, extra_data
    data = bpy.context.scene.modeling_cloth_data_set
    extra_data = bpy.context.scene.modeling_cloth_data_set_extra
    extra_data['alert'] = False
    extra_data['drag_alert'] = False
    extra_data['last_object'] = self
    extra_data['clicked'] = False
    
    # object collisions
    colliders = [i for i in bpy.data.objects if i.modeling_cloth_object_collision]
    if len(colliders) == 0:    
        extra_data['colliders'] = None    
    
    # iterate through dict: for i, j in d.items()
    if self.modeling_cloth:
        cloth = create_instance() # generate an instance of the class
        data[cloth.name] = cloth  # store class in dictionary using the object name as a key
    
    cull = [] # can't delete dict items while iterating
    for i, value in data.items():
        if not value.ob.modeling_cloth:
            cull.append(i) # store keys to delete

    for i in cull:
        del data[i]
    
#    # could keep the handler unless there are no modeling cloth objects active
#    
#    if handler_frame in bpy.app.handlers.frame_change_post:
#        bpy.app.handlers.frame_change_post.remove(handler_frame)
#    
#    if len(data) > 0:
#        bpy.app.handlers.frame_change_post.append(handler_frame) 
Example 26
def test_dropInputChannel_ShouldDropRightValues(self):
        dropped_index = 0

        old_weight_values = self.module.weight.data.cpu().numpy()

        # ensure that the chosen index is dropped
        self.module.drop_input_channel(dropped_index)
        expected = np.delete(old_weight_values, dropped_index, 1)
        self.assertTrue(np.array_equal(self.module.weight.data.cpu().numpy(), expected)) 
Example 27
Project: Bidirectiona-LSTM-for-text-summarization-   Author: DeepsMoseli   File: word2vec.py   License: MIT License
def cutoffSequences(data,artLen,sumlen):
    data2={"article":[],"summaries":[]}
    for k in range(len(data["article"])):
        if len(data["article"][k])<artLen or len(data["summaries"][k])<sumlen:
             #data["article"]=np.delete(data["article"],k,0)
             #data["article"]=np.delete(data["summaries"],k,0)
             pass
        else:
            data2["article"].append(data["article"][k][:artLen])
            data2["summaries"].append(data["summaries"][k][:sumlen])
    return data2 
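
The commented-out np.delete calls hint at the pitfall this function avoids by rebuilding instead: deleting from an array while iterating over its indices shifts every later element, so index k stops pointing at the item that was just inspected. If deletion is preferred, a safe variant (a sketch with hypothetical data) collects the offending indices first and deletes them in a single call after the loop:

import numpy as np

articles = np.array(["a" * 5, "b" * 2, "c" * 7], dtype=object)
too_short = [k for k in range(len(articles)) if len(articles[k]) < 3]
articles = np.delete(articles, too_short)  # one call; indices stay valid
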
Example 28
Project: pymoo   Author: msu-coinlab   File: reference_direction.py   License: Apache License 2.0
def select_points_with_maximum_distance(X, n_select, selected=None):
    n_points, n_dim = X.shape

    # calculate the distance matrix
    D = cdist(X, X)

    # if no selection provided pick randomly in the beginning
    # (None instead of a mutable default: this function appends to the list)
    if selected is None or len(selected) == 0:
        selected = [np.random.randint(len(X))]

    # create variables to store what selected and what not
    not_selected = [i for i in range(n_points) if i not in selected]

    # remove unnecessary points
    dist_to_closest_selected = D[:, selected].min(axis=1)

    # now select the points until sufficient ones are found
    while len(selected) < n_select:
        # find point that has the maximum distance to all others
        index_in_not_selected = dist_to_closest_selected[not_selected].argmax()
        I = not_selected[index_in_not_selected]

        # add the closest distance to selected point
        is_closer = D[I] < dist_to_closest_selected
        dist_to_closest_selected[is_closer] = D[I][is_closer]

        # add it to the selected and remove from not selected
        selected.append(I)
        not_selected = np.delete(not_selected, index_in_not_selected)

    return selected 
Example 29
Project: interpret-text   Author: interpretml   File: utils_introspective_rationale.py   License: MIT License
def generate_data(batch, use_cuda):
    """Create a formatted and ordered data batch to use in the
    three player model.

    :param batch: A pandas dataframe containing the tokens, masks, counts, and
        labels associated with a batch of data
    :type batch: DataFrame
    :param use_cuda: whether to use CUDA
    :type use_cuda: bool
    :return: formatted and ordered tokens (x), masks (m), and
        labels (y) associated with a batch of data
    :rtype: dict
    """
    # sort for rnn happiness
    batch.sort_values("counts", inplace=True, ascending=False)

    x_mask = np.stack(batch["mask"], axis=0)
    # drop all zero columns
    zero_col_idxs = np.argwhere(np.all(x_mask[..., :] == 0, axis=0))
    x_mask = np.delete(x_mask, zero_col_idxs, axis=1)

    x_mat = np.stack(batch["tokens"], axis=0)
    # drop all zero columns
    x_mat = np.delete(x_mat, zero_col_idxs, axis=1)

    y_vec = np.stack(batch["labels"], axis=0)

    batch_x_ = Variable(torch.from_numpy(x_mat)).to(torch.int64)
    batch_m_ = Variable(torch.from_numpy(x_mask)).type(torch.FloatTensor)
    batch_y_ = Variable(torch.from_numpy(y_vec)).to(torch.int64)

    if use_cuda:
        batch_x_ = batch_x_.cuda()
        batch_m_ = batch_m_.cuda()
        batch_y_ = batch_y_.cuda()

    return {"x": batch_x_, "m": batch_m_, "y": batch_y_}