Python scipy.ndimage.sum() Examples

The following are 30 code examples of scipy.ndimage.sum(), drawn from open-source projects. The original project and source file are noted above each example. You may also want to check out all available functions and classes of the scipy.ndimage module.
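For orientation, scipy.ndimage.sum() sums the values of an input array over the regions defined by a label array, optionally restricted to a given set of label indices. The snippet below is a minimal, self-contained sketch of that call (the array contents are chosen purely for illustration):

import numpy as np
from scipy import ndimage

# A small binary image containing two separate foreground blobs.
image = np.array([[1, 1, 0, 0],
                  [1, 0, 0, 1],
                  [0, 0, 1, 1]])

# Label connected components, then sum the input over each label.
# For a binary image these sums are simply the component sizes.
labeled, num_features = ndimage.label(image)
sizes = ndimage.sum(image, labels=labeled, index=range(1, num_features + 1))
print(sizes)  # e.g. array([3., 3.])

Most of the examples below use exactly this label-then-sum pattern to measure connected-component sizes.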
Example #1
Source File: image_process.py    From PyMIC with Apache License 2.0
def get_largest_component(image):
    """
    get the largest component from 2D or 3D binary image
    image: nd array
    """
    dim = len(image.shape)
    if(image.sum() == 0 ):
        print('the largest component is null')
        return image
    if(dim == 2):
        s = ndimage.generate_binary_structure(2,1)
    elif(dim == 3):
        s = ndimage.generate_binary_structure(3,1)
    else:
        raise ValueError("the dimension number should be 2 or 3")
    labeled_array, numpatches = ndimage.label(image, s)
    sizes = ndimage.sum(image, labeled_array, range(1, numpatches + 1))
    max_label = np.argmax(sizes) + 1  # label of the largest component (robust to ties in size)
    output = np.asarray(labeled_array == max_label, np.uint8)
    return output
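A hypothetical usage sketch (assuming numpy as np and scipy's ndimage imported as in the PyMIC module above):

seg = np.zeros((5, 5), np.uint8)
seg[0:2, 0:2] = 1      # a 2x2 foreground block (4 pixels)
seg[4, 4] = 1          # an isolated single pixel
largest = get_largest_component(seg)
# largest keeps only the 2x2 block; the isolated pixel is removed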
Example #2
Source File: test_ndimage.py    From Computable with MIT License
def test_generic_filter01(self):
        filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
        footprint = numpy.array([[1, 0], [0, 1]])
        cf = numpy.array([1., 4.])

        def _filter_func(buffer, weights, total=1.0):
            weights = cf / total
            return (buffer * weights).sum()
        for type in self.types:
            a = numpy.arange(12, dtype=type)
            a.shape = (3,4)
            r1 = ndimage.correlate(a, filter_ * footprint)
            if type in self.float_types:
                r1 /= 5
            else:
                r1 //= 5
            r2 = ndimage.generic_filter(a, _filter_func,
                            footprint=footprint, extra_arguments=(cf,),
                            extra_keywords={'total': cf.sum()})
            assert_array_almost_equal(r1, r2) 
Example #3
Source File: data_process.py    From brats17 with BSD 3-Clause "New" or "Revised" License
def remove_external_core(lab_main, lab_ext):
    """
    remove the core region that is outside of whole tumor
    """
    
    # for each component of lab_ext, compute the overlap with lab_main
    s = ndimage.generate_binary_structure(3,2) # iterate structure
    labeled_array, numpatches = ndimage.label(lab_ext,s) # labeling
    sizes = ndimage.sum(lab_ext,labeled_array,range(1,numpatches+1)) 
    sizes_list = [sizes[i] for i in range(len(sizes))]
    new_lab_ext = np.zeros_like(lab_ext)
    for i in range(len(sizes)):
        sizei = sizes_list[i]
        labeli = i + 1  # label id of the i-th component (avoids ambiguity when two components share a size)
        componenti = labeled_array == labeli
        overlap = componenti * lab_main
        if((overlap.sum()+ 0.0)/sizei >= 0.5):
            new_lab_ext = np.maximum(new_lab_ext, componenti)
    return new_lab_ext 
Example #4
Source File: data_process.py    From brats17 with BSD 3-Clause "New" or "Revised" License
def binary_dice3d(s,g):
    """
    dice score of 3d binary volumes
    inputs: 
        s: segmentation volume
        g: ground truth volume
    outputs:
        dice: the dice score
    """
    assert(len(s.shape)==3)
    [Ds, Hs, Ws] = s.shape
    [Dg, Hg, Wg] = g.shape
    assert(Ds==Dg and Hs==Hg and Ws==Wg)
    prod = np.multiply(s, g)
    s0 = prod.sum()
    s1 = s.sum()
    s2 = g.sum()
    dice = (2.0*s0 + 1e-10)/(s1 + s2 + 1e-10)
    return dice 
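As a quick, hypothetical sanity check of the formula: two volumes with 3 foreground voxels each, overlapping in 2 voxels, should give Dice = 2*2 / (3 + 3) ≈ 0.667 (assuming numpy as np and binary_dice3d as defined above).

s = np.zeros((1, 2, 3), np.uint8)
s[0, 0, :] = 1                      # 3 foreground voxels
g = np.zeros((1, 2, 3), np.uint8)
g[0, 0, :2] = 1
g[0, 1, 0] = 1                      # 3 foreground voxels, 2 of them overlapping with s
print(binary_dice3d(s, g))          # ~0.6667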
Example #5
Source File: FCN_CrackAnalysis.py    From FCN_for_crack_recognition with MIT License
def Hilditch_skeleton(binary_image):
    size = binary_image.size
    skel = np.zeros(binary_image.shape, np.uint8)

    elem = np.array([[0, 1, 0],
                     [1, 1, 1],
                     [0, 1, 0]])

    image = binary_image.copy()
    for _ in range(10000):
        eroded = ndi.binary_erosion(image, elem)
        temp = ndi.binary_dilation(eroded, elem)
        temp = np.logical_and(image, np.logical_not(temp))  # pixels removed by the opening step
        skel = np.bitwise_or(skel, temp.astype(np.uint8))
        image = eroded.copy()

        zeros = size - np.sum(image > 0)
        if zeros == size:
            break

    return skel 
Example #6
Source File: data_process.py    From Brain-Tumor-Segmentation-using-Topological-Loss with BSD 3-Clause "New" or "Revised" License
def remove_external_core(lab_main, lab_ext):
    """
    remove the core region that is outside of whole tumor
    """
    
    # for each component of lab_ext, compute the overlap with lab_main
    s = ndimage.generate_binary_structure(3,2) # iterate structure
    labeled_array, numpatches = ndimage.label(lab_ext,s) # labeling
    sizes = ndimage.sum(lab_ext,labeled_array,range(1,numpatches+1)) 
    sizes_list = [sizes[i] for i in range(len(sizes))]
    new_lab_ext = np.zeros_like(lab_ext)
    for i in range(len(sizes)):
        sizei = sizes_list[i]
        labeli = i + 1  # label id of the i-th component (avoids ambiguity when two components share a size)
        componenti = labeled_array == labeli
        overlap = componenti * lab_main
        if((overlap.sum()+ 0.0)/sizei >= 0.5):
            new_lab_ext = np.maximum(new_lab_ext, componenti)
    return new_lab_ext 
Example #7
Source File: data_process.py    From Brain-Tumor-Segmentation-using-Topological-Loss with BSD 3-Clause "New" or "Revised" License
def binary_dice3d(s,g):
    """
    dice score of 3d binary volumes
    inputs: 
        s: segmentation volume
        g: ground truth volume
    outputs:
        dice: the dice score
    """
    assert(len(s.shape)==3)
    [Ds, Hs, Ws] = s.shape
    [Dg, Hg, Wg] = g.shape
    assert(Ds==Dg and Hs==Hg and Ws==Wg)
    prod = np.multiply(s, g)
    s0 = prod.sum()
    s1 = s.sum()
    s2 = g.sum()
    dice = (2.0*s0 + 1e-10)/(s1 + s2 + 1e-10)
    return dice 
Example #8
Source File: test_ndimage.py    From Computable with MIT License
def test_generic_filter1d01(self):
        weights = numpy.array([1.1, 2.2, 3.3])

        def _filter_func(input, output, fltr, total):
            fltr = fltr / total
            for ii in range(input.shape[0] - 2):
                output[ii] = input[ii] * fltr[0]
                output[ii] += input[ii + 1] * fltr[1]
                output[ii] += input[ii + 2] * fltr[2]
        for type in self.types:
            a = numpy.arange(12, dtype=type)
            a.shape = (3,4)
            r1 = ndimage.correlate1d(a, weights / weights.sum(), 0,
                                              origin=-1)
            r2 = ndimage.generic_filter1d(a, _filter_func, 3,
                      axis=0, origin=-1, extra_arguments=(weights,),
                      extra_keywords={'total': weights.sum()})
            assert_array_almost_equal(r1, r2) 
Example #9
Source File: test_ndimage.py    From Computable with MIT License
def test_fourier_uniform_real01(self):
        for shape in [(32, 16), (31, 15)]:
            for type in [numpy.float32, numpy.float64]:
                a = numpy.zeros(shape, type)
                a[0, 0] = 1.0
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_uniform(a, [5.0, 2.5],
                                                      shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a), 1.0) 
Example #10
Source File: data_augmentation.py    From Automated-Cardiac-Segmentation-and-Disease-Diagnosis with MIT License
def GetAvgbatchClassWeights(label, scale=1, label_ids=[0,1], assign_equal_wt=False):
    """
    This function calculates the class weights for a batch of data
    Args:
    label: [batch_size,H,W]
    return:
    [class1_weight, ..., class2_weight] 
    """
    batch_size = label.shape[0]
    batch_weights = np.zeros((batch_size, len(label_ids)))
    if assign_equal_wt:
        return np.ones(len(label_ids), dtype=np.uint8)
    pixel_cnt = label[0,:,:].size
    eps = 0.001
    for i in range(batch_size): 
        for _id in label_ids:
            batch_weights[i, label_ids.index(_id)] = scale*pixel_cnt/float(np.sum(label[i,:,:] == label_ids[_id])+eps)
            # print (np.uint8(np.mean(batch_weights+1, axis=0)))
    return np.float32(np.mean(batch_weights+1, axis=0)) 

#**************************************Data Preprocessing functions******************** 
Example #11
Source File: test_ndimage.py    From Computable with MIT License
def test_fourier_ellipsoid_real01(self):
        for shape in [(32, 16), (31, 15)]:
            for type in [numpy.float32, numpy.float64]:
                a = numpy.zeros(shape, type)
                a[0, 0] = 1.0
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_ellipsoid(a, [5.0, 2.5],
                                              shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a), 1.0) 
Example #12
Source File: data_augmentation.py    From Automated-Cardiac-Segmentation-and-Disease-Diagnosis with MIT License
def getEdgeEnhancedWeightMap(label, label_ids =[0,1,2,3], scale=1, edgescale=1, assign_equal_wt=False):
    shape = (0,)+label.shape[1:]
    weight_map = np.empty(shape, dtype='uint8')
    if assign_equal_wt:
        return np.ones_like(label)
    for i in range(label.shape[0]): 
        #Estimate weight maps:
        weights = np.ones(len(label_ids))
        slice_map = np.ones(label[i,:,:].shape)
        for _id in label_ids:
            class_frequency = np.sum(label[i,:,:] == label_ids[_id])
            if class_frequency:
                weights[label_ids.index(_id)] = scale*label[i,:,:].size/class_frequency
                slice_map[np.where(label[i,:,:]==label_ids.index(_id))] = weights[label_ids.index(_id)]
                edge = np.float32(morph.binary_dilation(
                    canny(np.float32(label[i,:,:]==label_ids.index(_id)),sigma=1), selem=selem))
                edge_frequency = np.sum(np.sum(edge==1.0))
                if edge_frequency:    
                    slice_map[np.where(edge==1.0)] += edgescale*label[i,:,:].size/edge_frequency
            # print (weights)
            # utils.imshow(edge, cmap='gray')
        # utils.imshow(weight_map, cmap='gray')
        weight_map = np.append(weight_map, np.expand_dims(slice_map, axis=0), axis=0)
    return np.float32(weight_map) 
Example #13
Source File: image_process.py    From PyMIC with Apache License 2.0
def get_euclidean_distance(image, dim = 3, spacing = [1.0, 1.0, 1.0]):
    """
    get the euclidean distance transform of a 3D binary image
    the output is a signed distance map (positive outside the foreground, negative inside)
    """
    img_shape = image.shape
    input_dim = len(img_shape)
    if(input_dim != 3):
        raise ValueError("Not implemented for {0:}D image".format(input_dim))
    if(dim == 2):
        raise ValueError("Not implemented for {0:}D image".format(input_dim))
        # dis_map = np.ones_like(image, np.float32)
        # for d in range(img_shape[0]):
        #     if(image[d].sum() > 0):
        #         dis_d = ndimage.morphology.distance_transform_edt(image[d])
        #         dis_map[d] = dis_d/dis_d.max()
    elif(dim == 3):
        fg_dis_map = ndimage.morphology.distance_transform_edt(image > 0.5)
        bg_dis_map = ndimage.morphology.distance_transform_edt(image <= 0.5)
        dis_map = bg_dis_map - fg_dis_map
    else:
        raise ValueError("Not implemented for {0:}D distance".format(dim))
    return dis_map 
Example #14
Source File: find_pictures.py    From oldnyc with Apache License 2.0
def LoadAndBinarizeImage(path):
  orig_im = Image.open(path)
  w, h = orig_im.size
  im = orig_im.crop((80, 80, w - 80, h - 80))
  w, h = im.size
  im = im.resize((w // 5, h // 5), Image.ANTIALIAS)  # integer sizes required by PIL
  blur_im = im.filter(ImageFilter.BLUR)

  I = np.asarray(blur_im).copy()

  brown = np.array([178.1655574, 137.2695507, 90.26289517])
  brown[0] = np.median(I[:,:,0])
  brown[1] = np.median(I[:,:,1])
  brown[2] = np.median(I[:,:,2])

  # I[100:200, 100:200] = brown
  # ShowBinaryArray(I)

  # TODO(danvk): does removing the np.sqrt have an effect on performance?
  return (np.sqrt(((I - brown) ** 2).sum(2)/3) >= 20) 
Example #15
Source File: test_ndimage.py    From GraphicDesignPatternByPython with MIT License
def test_generic_filter01(self):
        filter_ = numpy.array([[1.0, 2.0], [3.0, 4.0]])
        footprint = numpy.array([[1, 0], [0, 1]])
        cf = numpy.array([1., 4.])

        def _filter_func(buffer, weights, total=1.0):
            weights = cf / total
            return (buffer * weights).sum()
        for type_ in self.types:
            a = numpy.arange(12, dtype=type_)
            a.shape = (3, 4)
            r1 = ndimage.correlate(a, filter_ * footprint)
            if type_ in self.float_types:
                r1 /= 5
            else:
                r1 //= 5
            r2 = ndimage.generic_filter(
                a, _filter_func, footprint=footprint, extra_arguments=(cf,),
                extra_keywords={'total': cf.sum()})
            assert_array_almost_equal(r1, r2) 
Example #16
Source File: test_ndimage.py    From GraphicDesignPatternByPython with MIT License
def test_generic_filter1d01(self):
        weights = numpy.array([1.1, 2.2, 3.3])

        def _filter_func(input, output, fltr, total):
            fltr = fltr / total
            for ii in range(input.shape[0] - 2):
                output[ii] = input[ii] * fltr[0]
                output[ii] += input[ii + 1] * fltr[1]
                output[ii] += input[ii + 2] * fltr[2]
        for type_ in self.types:
            a = numpy.arange(12, dtype=type_)
            a.shape = (3, 4)
            r1 = ndimage.correlate1d(a, weights / weights.sum(), 0, origin=-1)
            r2 = ndimage.generic_filter1d(
                a, _filter_func, 3, axis=0, origin=-1,
                extra_arguments=(weights,),
                extra_keywords={'total': weights.sum()})
            assert_array_almost_equal(r1, r2) 
Example #17
Source File: birdCLEF_spec.py    From BirdCLEF2017 with MIT License
def filter_isolated_cells(array, struct):

    filtered_array = np.copy(array)
    id_regions, num_ids = ndimage.label(filtered_array, structure=struct)
    id_sizes = np.array(ndimage.sum(array, id_regions, range(num_ids + 1)))
    area_mask = (id_sizes == 1)
    filtered_array[area_mask[id_regions]] = 0
    
    return filtered_array

#Decide if given spectrum shows bird sounds or noise only 
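The idiom above recurs in several later examples: label the array, measure per-label sizes with ndimage.sum, build a boolean mask over the label ids, and index that mask with the label image to select pixels. A minimal, hypothetical walkthrough of that indexing step:

import numpy as np
from scipy import ndimage

spec = np.array([[1, 0, 0, 1],
                 [1, 0, 0, 0],
                 [0, 0, 0, 1]])
struct = np.ones((3, 3), dtype=int)                    # 8-connectivity
id_regions, num_ids = ndimage.label(spec, structure=struct)
id_sizes = np.array(ndimage.sum(spec, id_regions, range(num_ids + 1)))
area_mask = (id_sizes == 1)                            # True for single-pixel regions
# area_mask[id_regions] maps each pixel to the mask entry of its label,
# so isolated pixels become True and are zeroed out below.
cleaned = spec.copy()
cleaned[area_mask[id_regions]] = 0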
Example #18
Source File: anatomical.py    From mriqc with BSD 3-Clause "New" or "Revised" License
def _estimate_snr(in_file, seg_file):
    import numpy as np
    import nibabel as nb
    from mriqc.qc.anatomical import snr
    data = nb.load(in_file).get_data()
    mask = nb.load(seg_file).get_data() == 2  # WM label
    out_snr = snr(np.mean(data[mask]), data[mask].std(), mask.sum())
    return out_snr 
Example #19
Source File: data_augmentation.py    From Automated-Cardiac-Segmentation-and-Disease-Diagnosis with MIT License
def getLargestConnectedComponent_2D(itk_img):
    data = np.uint8(sitk.GetArrayFromImage(itk_img))
    for i in range(data.shape[0]):
        c,n = snd.label(data[i])
        sizes = snd.sum(data[i], c, range(n+1))
        mask_size = sizes < (max(sizes))
        remove_voxels = mask_size[c]
        c[remove_voxels] = 0
        c[np.where(c!=0)]=1
        data[i][np.where(c==0)] = 0
    return sitk.GetImageFromArray(data) 
Example #20
Source File: anatomical.py    From mriqc with BSD 3-Clause "New" or "Revised" License
def _run_interface(self, runtime):
        in_file = nb.load(self.inputs.in_file)
        data = in_file.get_data()
        mask = data <= 0

        # Pad one pixel to control behavior on borders of binary_opening
        mask = np.pad(mask, pad_width=(1,), mode="constant", constant_values=1)

        # Remove noise
        struc = nd.generate_binary_structure(3, 2)
        mask = nd.binary_opening(mask, structure=struc).astype(np.uint8)

        # Remove small objects
        label_im, nb_labels = nd.label(mask)
        if nb_labels > 2:
            sizes = nd.sum(mask, label_im, list(range(nb_labels + 1)))
            ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))
            for _, label in ordered[2:]:
                mask[label_im == label] = 0

        # Un-pad
        mask = mask[1:-1, 1:-1, 1:-1]

        # If mask is small, clean-up
        if mask.sum() < 500:
            mask = np.zeros_like(mask, dtype=np.uint8)

        out_img = in_file.__class__(mask, in_file.affine, in_file.header)
        out_img.header.set_data_dtype(np.uint8)

        out_file = fname_presuffix(self.inputs.in_file, suffix="_rotmask", newpath=".")
        out_img.to_filename(out_file)
        self._results["out_file"] = out_file
        return runtime 
Example #21
Source File: data_process.py    From Brain-Tumor-Segmentation-using-Topological-Loss with BSD 3-Clause "New" or "Revised" License
def fill_holes(img):
    """
    filling small holes of a binary volume with morphological operations
    """
    neg = 1 - img
    s = ndimage.generate_binary_structure(3,1) # iterate structure
    labeled_array, numpatches = ndimage.label(neg,s) # labeling
    sizes = ndimage.sum(neg,labeled_array,range(1,numpatches+1)) 
    max_label = np.argmax(sizes) + 1  # label of the largest background component (robust to ties)
    component = labeled_array == max_label
    return 1 - component
Example #22
Source File: test_ndimage.py    From GraphicDesignPatternByPython with MIT License
def test_fourier_ellipsoid_complex01(self):
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.complex64, numpy.complex128],
                                  [5, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.fft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1, 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec) 
Example #23
Source File: test_ndimage.py    From GraphicDesignPatternByPython with MIT License
def test_fourier_ellipsoid_real01(self):
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.float32, numpy.float64], [5, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_ellipsoid(a, [5.0, 2.5],
                                              shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec) 
Example #24
Source File: test_ndimage.py    From GraphicDesignPatternByPython with MIT License
def test_fourier_uniform_complex01(self):
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.complex64, numpy.complex128], [6, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.fft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.ifft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a.real), 1.0, decimal=dec) 
Example #25
Source File: test_ndimage.py    From GraphicDesignPatternByPython with MIT License
def test_fourier_uniform_real01(self):
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.float32, numpy.float64], [6, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_uniform(a, [5.0, 2.5], shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a), 1.0, decimal=dec) 
Example #26
Source File: test_ndimage.py    From GraphicDesignPatternByPython with MIT License
def test_fourier_gaussian_real01(self):
        for shape in [(32, 16), (31, 15)]:
            for type_, dec in zip([numpy.float32, numpy.float64], [6, 14]):
                a = numpy.zeros(shape, type_)
                a[0, 0] = 1.0
                a = fft.rfft(a, shape[0], 0)
                a = fft.fft(a, shape[1], 1)
                a = ndimage.fourier_gaussian(a, [5.0, 2.5], shape[0], 0)
                a = fft.ifft(a, shape[1], 1)
                a = fft.irfft(a, shape[0], 0)
                assert_almost_equal(ndimage.sum(a), 1, decimal=dec) 
Example #27
Source File: test_measurements.py    From GraphicDesignPatternByPython with MIT License
def test_sum09():
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.sum(input, labels=labels)
        assert_almost_equal(output, 4.0) 
Example #28
Source File: test_ndimage.py    From GraphicDesignPatternByPython with MIT License
def test_gauss03(self):
        # single precision data
        input = numpy.arange(100 * 100).astype(numpy.float32)
        input.shape = (100, 100)
        output = ndimage.gaussian_filter(input, [1.0, 1.0])

        assert_equal(input.dtype, output.dtype)
        assert_equal(input.shape, output.shape)

        # input.sum() is 49995000.0.  With single precision floats, we can't
        # expect more than 8 digits of accuracy, so use decimal=0 in this test.
        assert_almost_equal(output.sum(dtype='d'), input.sum(dtype='d'),
                            decimal=0)
        assert_(sumsq(input, output) > 1.0) 
Example #29
Source File: test_ndimage.py    From GraphicDesignPatternByPython with MIT License
def sumsq(a, b):
    return math.sqrt(((a - b)**2).sum()) 
Example #30
Source File: __funcs__.py    From porespy with MIT License
def trim_small_clusters(im, size=1):
    r"""
    Remove isolated voxels or clusters smaller than a given size

    Parameters
    ----------
    im : ND-array
        The binary image from which voxels are to be removed
    size : scalar
        The threshold size of clusters to trim.  Clusters with this many
        voxels or fewer will be trimmed.  The default is 1, so only single
        voxels are removed.

    Returns
    -------
    im : ND-image
        A copy of ``im`` with clusters of voxels smaller than the given
        ``size`` removed.

    """
    if im.ndim == 2:
        strel = disk(1)
    elif im.ndim == 3:
        strel = ball(1)
    else:
        raise Exception('Only 2D or 3D images are accepted')
    filtered_array = np.copy(im)
    labels, N = spim.label(filtered_array, structure=strel)
    id_sizes = np.array(spim.sum(im, labels, range(N + 1)))
    area_mask = (id_sizes <= size)
    filtered_array[area_mask[labels]] = 0
    return filtered_array
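A hypothetical usage sketch (assuming numpy as np, plus the scipy.ndimage (spim) and skimage.morphology (disk, ball) imports used elsewhere in porespy's __funcs__ module):

im = np.random.rand(64, 64) > 0.6           # random 2D binary image
trimmed = trim_small_clusters(im, size=2)   # removes clusters of 1 or 2 voxels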