Python cv2.connectedComponentsWithStats() Examples

The following are 7 code examples showing how to use cv2.connectedComponentsWithStats(). They are extracted from open source Python projects.
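For orientation, here is a minimal, self-contained sketch of the call and its four return values; the toy image below is made up for illustration:

import cv2
import numpy as np

# two white blobs on a black background (binary, uint8)
img = np.zeros((10, 10), dtype=np.uint8)
img[1:4, 1:4] = 255
img[6:9, 5:9] = 255

# returns: label count (background included), the label map,
# per-label stats (left, top, width, height, area), and centroids
n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(img)

print(n_labels)                      # 3: background + two blobs
print(stats[1:, cv2.CC_STAT_AREA])   # areas of the two blobs
print(centroids[1:])                 # centroid (x, y) of each blob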

Example 1
Project: handfontgen   Author: nixeneko   File: scanchars.py
def getmarkerboundingrect(img, mkpos, mksize):
    buffer = int(mksize * 0.15)
    x = mkpos[0] - buffer
    y = mkpos[1] - buffer
    w = mksize + buffer*2
    h = mksize + buffer*2
    roi = img[y:y+h, x:x+w]
    
    grayroi = getgrayimage(roi)
    ret, binimage = cv2.threshold(grayroi,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binimage)
    # stats[0], centroids[0] are for the background label. ignore
    # cv2.CC_STAT_LEFT, cv2.CC_STAT_TOP, cv2.CC_STAT_WIDTH, cv2.CC_STAT_HEIGHT
    lblareas = stats[1:,cv2.CC_STAT_AREA]
    # index of the largest foreground component; +1 restores the offset
    # introduced by skipping the background label
    imax = max(enumerate(lblareas), key=(lambda x: x[1]))[0] + 1
    boundingrect = Rect(stats[imax, cv2.CC_STAT_LEFT],
                        stats[imax, cv2.CC_STAT_TOP], 
                        stats[imax, cv2.CC_STAT_WIDTH], 
                        stats[imax, cv2.CC_STAT_HEIGHT])
    return boundingrect.addoffset((x,y)) 
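The enumerate/max idiom above selects the largest foreground component. The same selection is often written more compactly with np.argmax; a sketch, assuming the same stats array:

imax = np.argmax(stats[1:, cv2.CC_STAT_AREA]) + 1  # +1 skips the background label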
Example 2
Project: handfontgen   Author: nixeneko   File: slantcorrection.py
def getmarkercenter(image, pos):
    mkradius = getapproxmarkerradius(image)
    buffer = int(mkradius * 0.15)
    roisize = mkradius + buffer # half of the height or width
    x = pos[0] - roisize
    y = pos[1] - roisize
    w = 2 * roisize
    h = 2 * roisize
    roi = image[y:y+h, x:x+w]
    
    grayroi = getgrayimage(roi)
    ret, binimage = cv2.threshold(grayroi,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binimage)
    # stats[0], centroids[0] are for the background label. ignore
    lblareas = stats[1:,cv2.CC_STAT_AREA]
    
    # area-weighted average position of the foreground centroids
    ave = np.average(centroids[1:], axis=0, weights=lblareas)
    return tuple(np.array([x, y]) + ave)  # translate back to full-image coordinates
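The np.average call is what makes this a robust marker-center estimate: each component's centroid is weighted by its pixel area. A standalone illustration with made-up numbers:

import numpy as np

centroids = np.array([[2.0, 3.0], [10.0, 11.0]])     # centroids of two components
areas = np.array([1, 3])                             # their pixel areas
print(np.average(centroids, axis=0, weights=areas))  # [8. 9.], pulled toward the larger blob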
Example 3
Project: dvd   Author: ajayrfhp   File: obj_detector.py
def MoG2(vid, min_thresh=800, max_thresh=10000):
    '''
    Args    : path to a video file and min/max component-area thresholds
    Returns : None
    '''
    cap = cv2.VideoCapture(vid)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    connectivity = 4
    while(cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
            fgmask, connectivity, cv2.CV_32S)
        for i in range(1, num_labels):  # label 0 is the background
            area = stats[i, cv2.CC_STAT_AREA]
            if min_thresh <= area <= max_thresh:
                x, y, w, h = stats[i, :4]
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('detection', frame)
        if cv2.waitKey(30) & 0xFF == 27:  # imshow needs waitKey to refresh; Esc exits
            break
    cap.release()
    cv2.destroyAllWindows() 
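A usage sketch; the file name is hypothetical, and with the waitKey call above the loop can be quit with Esc:

if __name__ == '__main__':
    MoG2('traffic.mp4')  # hypothetical video file; uses the default area thresholds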
Example 4
Project: dream2016_dm   Author: lishen   File: dm_preprocess.py
def select_largest_obj(self, img_bin, lab_val=255, fill_holes=False, 
                           smooth_boundary=False, kernel_size=15):
        '''Select the largest object from a binary image and optionally
        fill holes inside it and smooth its boundary.
        Args:
            img_bin (2D array): 2D numpy array of binary image.
            lab_val ([int]): integer value used as the label of the largest
                    object. Default is 255.
            fill_holes ([boolean]): whether to fill the holes inside the
                    largest object. Default is False.
            smooth_boundary ([boolean]): whether to smooth the boundary of
                    the largest object using morphological opening. Default
                    is False.
            kernel_size ([int]): the size of the kernel used for the
                    morphological operation. Default is 15.
        Returns:
            a binary image as a mask for the largest object.
        '''
        n_labels, img_labeled, lab_stats, _ = \
            cv2.connectedComponentsWithStats(img_bin, connectivity=8, 
                                             ltype=cv2.CV_32S)
        largest_obj_lab = np.argmax(lab_stats[1:, 4]) + 1  # column 4 is CC_STAT_AREA
        largest_mask = np.zeros(img_bin.shape, dtype=np.uint8)
        largest_mask[img_labeled == largest_obj_lab] = lab_val
        if fill_holes:
            bkg_locs = np.where(img_labeled == 0)
            bkg_seed = (bkg_locs[0][0], bkg_locs[1][0])
            img_floodfill = largest_mask.copy()
            h_, w_ = largest_mask.shape
            mask_ = np.zeros((h_ + 2, w_ + 2), dtype=np.uint8)
            cv2.floodFill(img_floodfill, mask_, seedPoint=bkg_seed, 
                          newVal=lab_val)
            holes_mask = cv2.bitwise_not(img_floodfill)  # mask of the holes.
            largest_mask = largest_mask + holes_mask
        if smooth_boundary:
            kernel_ = np.ones((kernel_size, kernel_size), dtype=np.uint8)
            largest_mask = cv2.morphologyEx(largest_mask, cv2.MORPH_OPEN, 
                                            kernel_)
            
        return largest_mask 
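The fill_holes branch uses a common floodFill trick: flood the outer background from a known background seed, invert the result so only the interior holes remain, then add them back. A self-contained sketch of just that step, with a made-up ring-shaped mask:

import cv2
import numpy as np

mask = np.zeros((20, 20), dtype=np.uint8)
cv2.circle(mask, (10, 10), 7, 255, thickness=3)  # a ring: foreground with a hole

flood = mask.copy()
ff_mask = np.zeros((22, 22), dtype=np.uint8)     # floodFill needs a mask 2 px larger
cv2.floodFill(flood, ff_mask, seedPoint=(0, 0), newVal=255)  # fill the outer background
holes = cv2.bitwise_not(flood)                   # only the interior hole survives
filled = mask + holes                            # the ring with its hole filled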
Example 5
Project: qtim_ROP   Author: QTIM-Lab   File: methods.py
def binary_morph(img, thresh=50, min_size=None, mask_only=True):

    if min_size is None:  # default to 10% of largest image dimension
        min_size = float(max(img.shape)) * .1

    if len(img.shape) == 3:  # flatten if RGB image
        img = np.mean(img, 2).astype(np.uint8)

    # Apply binary threshold and erode
    ret, thresh_im = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)

    # Connected component labelling
    n, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh_im)
    mask = np.zeros(labels.shape, dtype=np.uint8)

    # Loop through components in ascending order of area, skipping the
    # largest one (assumed to be the background)
    areas = [s[4] for s in stats]  # column 4 is CC_STAT_AREA
    sorted_idx = np.argsort(areas)

    for lidx, cc in zip(sorted_idx, [areas[s] for s in sorted_idx][:-1]):

        if cc > min_size:
            mask[labels == lidx] = 1

    if mask_only:
        return mask * 255
    return np.dstack([img * mask] * 3).astype(np.uint8) 
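A usage sketch with a synthetic input; real callers would pass a grayscale or RGB photo:

import numpy as np

img = (np.random.rand(100, 100) * 255).astype(np.uint8)  # synthetic grayscale image
mask = binary_morph(img, thresh=50)                      # 0/255 mask of the kept components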
Example 6
Project: handfontgen   Author: nixeneko   File: scanchars.py
def splitimage(image):
    dpmm = min(image.shape[0:2]) / DOCSIZE[0]
    sizethresh = SIZE_THRESH_MM * dpmm
    
    uprightimg = makeupright(image)
    grayimg = getgrayimage(uprightimg)
    
    # top line: find separator marks along the first pixel row
    # (OpenCV treats a 1-D slice as a single-column image, so
    #  centroids[i][1] below is the position along the strip)
    top = grayimg[0,:]
    sepx = [0,]
    ret, binimg = cv2.threshold(top,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binimg)
    for i in range(1,nlabels):
        if stats[i,cv2.CC_STAT_AREA] >= sizethresh:
            sepx.append(centroids[i][1])
            
    # left line: same procedure along the first pixel column
    left = grayimg[:,0]
    sepy = [0,]
    ret, binimg = cv2.threshold(left,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binimg)
    for i in range(1,nlabels):
        if stats[i,cv2.CC_STAT_AREA] >= sizethresh:
            sepy.append(centroids[i][1])
    
    # divide into images
    imgs = []
    for iy in range(len(sepy)):
        for ix in range(len(sepx)):
            if iy == len(sepy) - 1:
                if ix == len(sepx) - 1:
                    #right-bottom corner
                    imgs.append(uprightimg[int(sepy[iy]):,int(sepx[ix]):])
                else:
                    #bottom end
                    imgs.append(uprightimg[int(sepy[iy]):,int(sepx[ix]):int(sepx[ix+1])])
            else:
                if ix == len(sepx) - 1:
                    #right end
                    imgs.append(uprightimg[int(sepy[iy]):int(sepy[iy+1]),int(sepx[ix]):])
                else:
                    #others
                    imgs.append(uprightimg[int(sepy[iy]):int(sepy[iy+1]),int(sepx[ix]):int(sepx[ix+1])])
                    
    return imgs 
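The nested special cases above only exist to make the last row and column open-ended. A sketch of the same grid split using None as the open slice bound, assuming the same sepx/sepy lists:

bounds_y = [int(v) for v in sepy] + [None]  # None leaves the last slice open-ended
bounds_x = [int(v) for v in sepx] + [None]
imgs = [uprightimg[y0:y1, x0:x1]
        for y0, y1 in zip(bounds_y, bounds_y[1:])
        for x0, x1 in zip(bounds_x, bounds_x[1:])]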
Example 7
Project: AMBR   Author: Algomorph   File: background_subtractor.py
def extract_tracked_object(self, image, prev_cc_center):
        contour_found = False
        dist = 0.0
        tracked_px_count = 0
        tracked_object_stats = None
        largest_centroid = None

        mask = self.extract_foreground_mask(image)

        bin_mask = mask.copy()
        bin_mask[bin_mask < MaskLabel.PERSISTENCE_LABEL.value] = 0
        bin_mask[bin_mask > 0] = 1

        labels, stats, centroids = cv2.connectedComponentsWithStats(bin_mask, ltype=cv2.CV_16U)[1:4]

        if len(stats) > 1:  # at least one component besides the background
            # initially, just grab the biggest connected component
            ix_of_tracked_component = np.argmax(stats[1:, 4]) + 1  # column 4 is the area
            largest_centroid = centroids[ix_of_tracked_component].copy()
            tracking_ok = True

            if prev_cc_center is not None:
                a = prev_cc_center
                b = largest_centroid
                dist = np.linalg.norm(a - b)
                # check to make sure we're not too far from the previously-detected blob
                if dist > 50:
                    dists = np.linalg.norm(centroids - a, axis=1)
                    ix_of_tracked_component = np.argmin(dists)
                    if dists[ix_of_tracked_component] > ConnectedComponentThreshold.TRACK_DIST_THRESH.value:
                        tracking_ok = False
                    largest_centroid = centroids[ix_of_tracked_component].copy()

            tracked_px_count = stats[ix_of_tracked_component, 4]
            tracked_object_stats = stats[ix_of_tracked_component]
            contour_found = tracked_px_count > ConnectedComponentThreshold.HIDDEN.value and tracking_ok

            if contour_found:
                bin_mask[labels != ix_of_tracked_component] = 0
                mask[bin_mask == 0] = 0
        return mask, bin_mask, contour_found, dist, tracked_px_count, tracked_object_stats, largest_centroid