Python cv2.arcLength() Examples

The following are 30 code examples of cv2.arcLength(), which computes the perimeter of a closed contour or the length of an open curve. Each example is taken from an open-source project; the source file, project, and license are listed above the code.
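
A minimal, self-contained sketch of the typical call pattern is shown below (the synthetic image, sizes, and the 0.02 tolerance are illustrative only, not taken from any of the projects): closed=True measures the full perimeter, closed=False leaves out the closing segment, and a small fraction of the arc length is commonly passed to cv2.approxPolyDP() as its tolerance, as most of the examples that follow do.

import cv2
import numpy as np

# Synthetic 200x200 binary image containing one filled rectangle.
img = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(img, (50, 75), (150, 125), 255, -1)

# findContours returns (contours, hierarchy) in OpenCV 4.x and
# (image, contours, hierarchy) in 3.x; [-2] selects the contours in both.
contours = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = contours[0]

perimeter = cv2.arcLength(cnt, True)     # closed curve: full perimeter (about 300 here)
open_length = cv2.arcLength(cnt, False)  # open curve: the closing segment is not counted
approx = cv2.approxPolyDP(cnt, 0.02 * perimeter, True)
print(perimeter, open_length, len(approx))  # the rectangle approximates to 4 points
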
Example #1
Source File: chapter2.py    From OpenCV-Computer-Vision-Projects-with-Python with MIT License
def FindHullDefects(self, segment):
        _,contours,hierarchy = cv2.findContours(segment, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        # find largest area contour
        max_area = -1
        for i in range(len(contours)):
            area = cv2.contourArea(contours[i])
            if area>max_area:
                cnt = contours[i]
                max_area = area

        cnt = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
        hull = cv2.convexHull(cnt, returnPoints=False)
        defects = cv2.convexityDefects(cnt, hull)

        return [cnt,defects] 
Example #2
Source File: squares.py    From OpenCV-Python-Tutorial with MIT License
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares 
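
find_squares() above (and the identical function in Example #11) relies on an angle_cos() helper that is not shown in the excerpt. In the OpenCV squares.py sample this helper returns the absolute cosine of the angle at vertex p1; the sketch below follows that convention and is offered as an assumption about the omitted code.

import numpy as np

def angle_cos(p0, p1, p2):
    # Absolute cosine of the angle formed at p1 by the segments p1-p0 and p1-p2;
    # values near 0 mean the corner is close to a right angle.
    d1, d2 = (p0 - p1).astype('float'), (p2 - p1).astype('float')
    return abs(np.dot(d1, d2) / np.sqrt(np.dot(d1, d1) * np.dot(d2, d2)))
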
Example #3
Source File: load_saved_model.py    From document-ocr with Apache License 2.0
def mask_to_bbox(mask, image, num_class, area_threhold=0, out_path=None, out_file_name=None):
  bbox_list = []
  im = copy.copy(image)
  mask = mask.astype(np.uint8)
  for i in range(1, num_class, 1):
    c_bbox_list = []
    c_mask = np.zeros_like(mask)
    c_mask[np.where(mask==i)] = 255
    bimg, contours, hier = cv2.findContours(c_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
      area = cv2.contourArea(cnt)
      if area < area_threhold:
        continue
      epsilon = 0.005 * cv2.arcLength(cnt,True)
      approx = cv2.approxPolyDP(cnt,epsilon,True)
      (x, y, w, h) = cv2.boundingRect(approx)
      c_bbox_list.append([x,  y, x+w, y+h])
      if out_path is not None:
        color = COLOR_LIST[i-1]
        im=cv2.rectangle(im, pt1=(x, y), pt2=(x+w, y+h),color=color, thickness=2)
    bbox_list.append(c_bbox_list)
  if out_path is not None:
    outf = os.path.join(out_path, out_file_name)
    cv2.imwrite(outf, im)
  return bbox_list 
Example #4
Source File: RegionOfInterest.py    From DoNotSnap with GNU General Public License v3.0
def findEllipses(edges):
    contours, _ = cv2.findContours(edges.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    ellipseMask = np.zeros(edges.shape, dtype=np.uint8)
    contourMask = np.zeros(edges.shape, dtype=np.uint8)

    pi_4 = np.pi * 4

    for i, contour in enumerate(contours):
        if len(contour) < 5:
            continue

        area = cv2.contourArea(contour)
        if area <= 100:  # skip ellipses smaller than 10x10
            continue

        arclen = cv2.arcLength(contour, True)
        circularity = (pi_4 * area) / (arclen * arclen)
        ellipse = cv2.fitEllipse(contour)
        poly = cv2.ellipse2Poly((int(ellipse[0][0]), int(ellipse[0][1])), (int(ellipse[1][0] / 2), int(ellipse[1][1] / 2)), int(ellipse[2]), 0, 360, 5)

        # if contour is circular enough
        if circularity > 0.6:
            cv2.fillPoly(ellipseMask, [poly], 255)
            continue

        # if contour has enough similarity to an ellipse
        similarity = cv2.matchShapes(poly.reshape((poly.shape[0], 1, poly.shape[1])), contour, cv2.cv.CV_CONTOURS_MATCH_I2, 0)
        if similarity <= 0.2:
            cv2.fillPoly(contourMask, [poly], 255)

    return ellipseMask, contourMask 
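
The circularity test above uses the classic measure 4 * pi * area / perimeter**2, which equals 1 for an ideal circle and falls toward 0 for elongated shapes. A small sketch of the same computation on a synthetic ellipse (the sizes are illustrative):

import cv2
import numpy as np

# A strongly elongated ellipse should fail the circularity > 0.6 check used above.
mask = np.zeros((200, 300), dtype=np.uint8)
cv2.ellipse(mask, (150, 100), (120, 30), 0, 0, 360, 255, -1)
cnt = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2][0]
area = cv2.contourArea(cnt)
arclen = cv2.arcLength(cnt, True)
print(4 * np.pi * area / (arclen * arclen))  # about 0.5, below the 0.6 cutoff
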
Example #5
Source File: __init__.py    From rubiks-cube-tracker with MIT License
def __init__(self, rubiks_parent, index, contour, heirarchy, debug):
        self.rubiks_parent = rubiks_parent
        self.index = index
        self.contour = contour
        self.heirarchy = heirarchy
        peri = cv2.arcLength(contour, True)
        self.approx = cv2.approxPolyDP(contour, 0.1 * peri, True)
        self.area = cv2.contourArea(contour)
        self.corners = len(self.approx)
        self.width = None
        self.debug = debug

        # compute the center of the contour
        M = cv2.moments(contour)

        if M["m00"]:
            self.cX = int(M["m10"] / M["m00"])
            self.cY = int(M["m01"] / M["m00"])

            # if self.cX == 188 and self.cY == 93:
            #    log.warning("CustomContour M %s" % pformat(M))
        else:
            self.cX = None
            self.cY = None 
Example #6
Source File: dataset.py    From Real-time-Text-Detection with Apache License 2.0
def _get_annotation(self, label_path: str) -> tuple:
        boxes = []
        text_tags = []
        with open(label_path, encoding='utf-8', mode='r') as f:
            for line in f.readlines():
                params = line.strip().strip('\ufeff').strip('\xef\xbb\xbf').split(',')
                try:
                    box = order_points_clockwise(np.array(list(map(float, params[:8]))).reshape(-1, 2))
                    if cv2.arcLength(box, True) > 0:
                        boxes.append(box)
                        label = params[8]
                        if label == '*' or label == '###':
                            text_tags.append(False)
                        else:
                            text_tags.append(True)
                except:
                    print('load label failed on {}'.format(label_path))
        return np.array(boxes, dtype=np.float32), np.array(text_tags, dtype=bool)
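
order_points_clockwise() is defined elsewhere in the Real-time-Text-Detection project and is not part of this excerpt. A commonly used implementation sorts the four points into top-left, top-right, bottom-right, bottom-left order; the sketch below is a hypothetical stand-in based on that convention, not the project's actual code.

import numpy as np

def order_points_clockwise(pts):
    # Hypothetical stand-in: order 4 points as top-left, top-right,
    # bottom-right, bottom-left using coordinate sums and differences.
    rect = np.zeros((4, 2), dtype=np.float32)
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]        # top-left: smallest x + y
    rect[2] = pts[np.argmax(s)]        # bottom-right: largest x + y
    d = np.diff(pts, axis=1).ravel()   # y - x for each point
    rect[1] = pts[np.argmin(d)]        # top-right: smallest y - x
    rect[3] = pts[np.argmax(d)]        # bottom-left: largest y - x
    return rect
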
Example #7
Source File: ocr_controller.py    From JusticeAI with MIT License
def _find_document_corners(resized_img):
    contours = _compute_all_contours(resized_img)
    resized_height, resized_width = _get_img_dimensions(resized_img)
    full_resized_image_area = resized_height * resized_width

    # Default to the smallest possible document area and save any larger document areas
    largest_document_area = full_resized_image_area * ALIGNMENT_PERCENT_AREA_DOCUMENT_MUST_COVER

    # Default to largest: no modification to the image if no document is found
    largest_document_corners = _get_corner_array(resized_height, resized_width)

    for contour in contours:
        contour_perimeter = cv2.arcLength(contour, True)
        approximate_polygonal_contour = cv2.approxPolyDP(contour, 0.03 * contour_perimeter, True)

        # All pages have 4 corners and are convex
        if (len(approximate_polygonal_contour) == 4 and
                cv2.isContourConvex(approximate_polygonal_contour) and
                cv2.contourArea(approximate_polygonal_contour) > largest_document_area):
            largest_document_area = cv2.contourArea(approximate_polygonal_contour)
            largest_document_corners = approximate_polygonal_contour

    return largest_document_corners 
Example #8
Source File: detect_tables.py    From namsel with MIT License
def find_boxes(tiff_fl, blur=False):
    im = Image.open(tiff_fl).convert('L')
    a = np.asarray(im)
    if blur:
        a = cv.GaussianBlur(a, (5, 5), 0)
    contours, hierarchy = cv.findContours(a.copy(), mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_SIMPLE)
    border_boxes = []
#     n = np.ones_like(a)
    for j,cnt in enumerate(contours):
        cnt_len = cv.arcLength(cnt, True)
        orig_cnt = cnt.copy()
        cnt = cv.approxPolyDP(cnt, 0.02*cnt_len, True)
        if len(cnt) == 4 and ((a.shape[0]-3) * (a.shape[1] -3)) > cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt):
            cnt = cnt.reshape(-1, 2)
            max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
            if max_cos < 0.1:
                b = cv.boundingRect(orig_cnt)
                x,y,w,h = b
                border_boxes.append(b)
#                 cv.rectangle(n, (x,y), (x+w, y+h), 0)
#                 cv.drawContours(n, [cnt], -1,0, thickness = 5)
#     Image.fromarray(n*255).show()
    return border_boxes 
Example #9
Source File: test.py    From object-localization with MIT License
def main():
    model = create_model()
    model.load_weights(WEIGHTS_FILE)

    for filename in glob.glob(IMAGES):
        unscaled = cv2.imread(filename)
        image = cv2.resize(unscaled, (IMAGE_SIZE, IMAGE_SIZE))
        feat_scaled = preprocess_input(np.array(image, dtype=np.float32))

        region = np.squeeze(model.predict(feat_scaled[np.newaxis,:]))

        output = np.zeros(region.shape, dtype=np.uint8)
        output[region > 0.5] = 1

        contours, _ = cv2.findContours(output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            approx = cv2.approxPolyDP(cnt, EPSILON * cv2.arcLength(cnt, True), True)
            x, y, w, h = cv2.boundingRect(approx)

            x0 = np.rint(x * unscaled.shape[1] / output.shape[1]).astype(int)
            x1 = np.rint((x + w) * unscaled.shape[1] / output.shape[1]).astype(int)
            y0 = np.rint(y * unscaled.shape[0] / output.shape[0]).astype(int)
            y1 = np.rint((y + h) * unscaled.shape[0] / output.shape[0]).astype(int)
            cv2.rectangle(unscaled, (x0, y0), (x1, y1), (0, 255, 0), 1)

        cv2.imshow("image", unscaled)
        cv2.waitKey(0)
        cv2.destroyAllWindows() 
Example #10
Source File: document.py    From web-document-scanner with MIT License
def detect_edge(self, image, enabled_transform = False):
        dst = None
        orig = image.copy()

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(blurred, 0, 20)
        _, contours, _ = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

        contours = sorted(contours, key=cv2.contourArea, reverse=True)

        for cnt in contours:
            epsilon = 0.051 * cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, epsilon, True)

            if len(approx) == 4:
                target = approx
                cv2.drawContours(image, [target], -1, (0, 255, 0), 2)

                if enabled_transform:
                    approx = rect.rectify(target)
                    # pts2 = np.float32([[0,0],[800,0],[800,800],[0,800]])
                    # M = cv2.getPerspectiveTransform(approx,pts2)
                    # dst = cv2.warpPerspective(orig,M,(800,800))
                    dst = self.four_point_transform(orig, approx)
                break

        return image, dst 
Example #11
Source File: squares.py    From PyCV-time with MIT License
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares 
Example #12
Source File: ocr.py    From smashscan with MIT License
def contour_test(img):
    _, contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    img_d = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.drawContours(img_d, contours, -1, (255, 0, 0), 2)
    cv2.imshow('test', img_d)
    cv2.waitKey(0)
    res = np.zeros(img.shape, np.uint8)

    for i, contour in enumerate(contours):
        img_d = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        cv2.drawContours(img_d, contour, -1, (255, 0, 0), 3)

        moment = cv2.moments(contour)
        if moment['m00']: # Removes single points
            cx = int(moment['m10']/moment['m00'])
            cy = int(moment['m01']/moment['m00'])
            print("Center: {}".format((cx, cy)))
            cv2.circle(img_d, (cx, cy), 3, (0, 0, 255), -1)

        print("Area: {}".format(cv2.contourArea(contour)))
        print("Permeter: {} ".format(cv2.arcLength(contour, True)))

        cv2.imshow('test', img_d)
        cv2.waitKey(0)

        # The result displayed is an accumulation of previous contours.
        mask = np.zeros(img.shape, np.uint8)
        cv2.drawContours(mask, contours, i, 255, cv2.FILLED)
        mask = cv2.bitwise_and(img, mask)
        res = cv2.bitwise_or(res, mask)
        cv2.imshow('test', res)
        cv2.waitKey(0) 
Example #13
Source File: digital_display_ocr.py    From display_ocr with GNU General Public License v2.0
def find_display_contour(edge_img_arr):
  display_contour = None
  edge_copy = edge_img_arr.copy()
  contours,hierarchy = cv2.findContours(edge_copy, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
  top_cntrs = sorted(contours, key = cv2.contourArea, reverse = True)[:10]

  for cntr in top_cntrs:
    peri = cv2.arcLength(cntr,True)
    approx = cv2.approxPolyDP(cntr, 0.02 * peri, True)

    if len(approx) == 4:
      display_contour = approx
      break

  return display_contour 
Example #14
Source File: data_utils.py    From Real-time-Text-Detection with Apache License 2.0
def unshrink_offset(poly,ratio):
    area = cv2.contourArea(poly)
    peri = cv2.arcLength(poly, True)
    a = 8
    b = peri - 4
    c = 1-0.5 * peri - area/ratio
    return quadratic(a,b,c) 
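
quadratic() is a project helper that is not shown here. Given how the coefficients are built, it presumably solves a*d**2 + b*d + c = 0 for the offset distance d; the stand-in below is a hypothetical sketch of such a helper, not the project's actual code.

import math

def quadratic(a, b, c):
    # Hypothetical helper: return the larger real root of a*x**2 + b*x + c = 0,
    # or None if there is no real root.
    disc = b * b - 4 * a * c
    if disc < 0:
        return None
    return (-b + math.sqrt(disc)) / (2 * a)
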
Example #15
Source File: geometry_utils.py    From open-mcr with GNU General Public License v3.0
def approx_poly(contour: np.ndarray) -> Polygon:
    """Approximate the simple polygon for the contour. Returns a polygon in
    clockwise order."""
    perimeter = cv2.arcLength(contour, True)
    simple = cv2.approxPolyDP(contour, 0.05 * perimeter, True)
    polygon = contour_to_polygon(simple)
    return polygon_to_clockwise(polygon) 
Example #16
Source File: helpers.py    From SnapSudoku with MIT License
def approx(self, cnt):
        peri = cv2.arcLength(cnt, True)
        app = cv2.approxPolyDP(cnt, 0.01 * peri, True)
        return app 
Example #17
Source File: omr.py    From omr with MIT License
def get_approx_contour(contour, tol=.01):
    """Gets rid of 'useless' points in the contour."""
    epsilon = tol * cv2.arcLength(contour, True)
    return cv2.approxPolyDP(contour, epsilon, True) 
Example #18
Source File: process_image.py    From RealTime-DigitRecognition with GNU General Public License v3.0
def get_output_image(path):
  
    img = cv2.imread(path,2)
    img_org =  cv2.imread(path)

    ret,thresh = cv2.threshold(img,127,255,0)
    im2,contours,hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

    for j,cnt in enumerate(contours):
        epsilon = 0.01*cv2.arcLength(cnt,True)
        approx = cv2.approxPolyDP(cnt,epsilon,True)
        
        hull = cv2.convexHull(cnt)
        k = cv2.isContourConvex(cnt)
        x,y,w,h = cv2.boundingRect(cnt)
        
        if(hierarchy[0][j][3]!=-1 and w>10 and h>10):
            #putting boundary on each digit
            cv2.rectangle(img_org,(x,y),(x+w,y+h),(0,255,0),2)
            
            #cropping each image and process
            roi = img[y:y+h, x:x+w]
            roi = cv2.bitwise_not(roi)
            roi = image_refiner(roi)
            th,fnl = cv2.threshold(roi,127,255,cv2.THRESH_BINARY)

            # getting prediction of cropped image
            pred = predict_digit(roi)
            print(pred)
            
            # placing label on each digit
            (x,y),radius = cv2.minEnclosingCircle(cnt)
            img_org = put_label(img_org,pred,x,y)

    return img_org 
Example #19
Source File: Contours.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    image = cv2.imread("../data/detect_blob.png", 1)

    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    binary_thresh = cv2.adaptiveThreshold(gray_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)

    _, contours, _ = cv2.findContours(binary_thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(image, contours, -1, (0, 255, 0), 3)

    new_image = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)

    for cnt in contours:
        cv2.drawContours(new_image, [cnt], -1, (255, 0, 255), -1)

        # get contour area using 'contourArea' method
        area_cnt = cv2.contourArea(cnt)

        # get the perimeter of any contour using 'arcLength'
        perimeter_cnt = cv2.arcLength(cnt, True)

        # get the centroid of the contour using moments
        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        cv2.circle(new_image, (cx, cy), 3, (0, 255, 0), -1)

        print("Area : {}, Perimeter : {}".format(area_cnt, perimeter_cnt))

    cv2.imshow("Contoured Image", new_image)

    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example #20
Source File: FuzzyContour.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    kernel = np.ones((5, 5), np.uint8)
    image = cv2.imread("../data/fuzzy.png", 1)

    # converting the image into gray scale
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # threshold the image to separate objects from the background
    _, thresh = cv2.threshold(gray_image, 50, 255, cv2.THRESH_BINARY_INV)

    # dilating the image to remove noise from objects
    dilated_image = cv2.dilate(thresh, kernel, iterations=2)

    # finding all contours in fuzzy image
    _, contours, _ = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # new image to draw contour objects
    sample = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)

    for cnt in contours:

        color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))

        # get contour area using 'contourArea' method
        area_cnt = cv2.contourArea(cnt)

        # get the perimeter of any contour using 'arcLength'
        perimeter_cnt = cv2.arcLength(cnt, True)

        if int(area_cnt) > 1000:
            cv2.drawContours(sample, [cnt], -1, color, -1)

        print("Area : {}, Perimeter : {}".format(area_cnt, perimeter_cnt))

    cv2.imshow("Contoured Image", sample)

    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example #21
Source File: cv_handler2.py    From Rule-based_Expert_System with GNU General Public License v2.0
def get_contours(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for i in range(1, len(contours)):
        approx = cv2.approxPolyDP(contours[i], 0.01 * cv2.arcLength(contours[i], True), True)
        contours[i] = approx
    return contours 
Example #22
Source File: utils.py    From answer-sheet-scan with MIT License
def detect_cnt_again(poly, base_img):
    """
    Check again whether the already-cropped region covers the answer sheet area
    :param poly: ndarray
    :param base_img: ndarray
    :return: ndarray
    """
    # Flag for whether this polygon region still contains the answer sheet area
    flag = False

    # Compute the four corners of the polygon, crop the image, then preprocess the cropped image
    top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
    roi_img = get_roi_img(base_img, bottom_left, bottom_right, top_left, top_right)
    img = get_init_process_img(roi_img)

    # Get the contour with the largest area
    cnt = get_max_area_cnt(img)

    # If the contour area is large enough, recompute the four corners of the polygon
    if cv2.contourArea(cnt) > roi_img.shape[0] * roi_img.shape[1] * SHEET_AREA_MIN_RATIO:
        flag = True
        poly = cv2.approxPolyDP(cnt, cv2.arcLength(cnt, True) * 0.1, True)
        top_left, bottom_left, top_right, bottom_right = get_corner_node_list(poly)
        if not poly.shape[0] == 4:
            raise PolyNodeCountError

    # Polygon corner points and image corner points, mainly used for perspective correction
    base_poly_nodes = np.float32([top_left[0], bottom_left[0], top_right[0], bottom_right[0]])
    base_nodes = np.float32([[0, 0],
                            [base_img.shape[1], 0],
                            [0, base_img.shape[0]],
                            [base_img.shape[1], base_img.shape[0]]])
    transmtx = cv2.getPerspectiveTransform(base_poly_nodes, base_nodes)

    if flag:
        img_warp = cv2.warpPerspective(roi_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    else:
        img_warp = cv2.warpPerspective(base_img, transmtx, (base_img.shape[1], base_img.shape[0]))
    return img_warp 
Example #23
Source File: squareClass.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International
def __init__(self, position, c1, c2, c3, c4, index, image, state=''):
        # ID
        self.position = position
        self.index = index
        # Corners
        self.c1 = c1
        self.c2 = c2
        self.c3 = c3
        self.c4 = c4
        # State
        self.state = state

        # Actual polygon as a numpy array of corners
        self.contours = np.array([c1, c2, c3, c4], dtype=np.int32)

        # Properties of the contour
        self.area = cv2.contourArea(self.contours)
        self.perimeter = cv2.arcLength(self.contours, True)

        M = cv2.moments(self.contours)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        # ROI is the small circle within the square on which we will do the averaging
        self.roi = (cx, cy)
        self.radius = 5

        # Empty color. The colour the square has when it's not occupied, i.e. shade of black or white. By storing these
        # at the beginning of the game, we can then make much more robust predictions on how the state of the board has
        # changed.
        self.emptyColor = self.roiColor(image) 
Example #24
Source File: calibration.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International
def detect(c):
    """
    Used by find_cross function to detect the edges and vertices of possible polygons in an image.

    :param c:
    :return:
    """
    peri = cv2.arcLength(c, True)
    area = cv2.contourArea(c)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    return len(approx), peri, area 
Example #25
Source File: contours_analysis.py    From Mastering-OpenCV-4-with-Python with MIT License
def roundness(contour, moments):
    """Calculates the roundness of a contour"""

    length = cv2.arcLength(contour, True)
    k = (length * length) / (moments['m00'] * 4 * np.pi)
    return k 
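
This roundness value is the reciprocal of the circularity used in Example #4: k = perimeter**2 / (4 * pi * area), so an ideal circle gives k close to 1 and less compact shapes give larger values. A quick sanity check, assuming the roundness() function above is in scope (the synthetic circle is illustrative):

import cv2
import numpy as np

# A filled circle should give a roundness close to 1.
mask = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(mask, (100, 100), 60, 255, -1)
cnt = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2][0]
print(roundness(cnt, cv2.moments(cnt)))  # close to 1.0; larger for irregular shapes
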
Example #26
Source File: active_weather.py    From aggregation with Apache License 2.0
def __image_clean__(self,image):
        """
        after removing grid lines and applying thresholding, we will probably still have small "ticks" - bits of the
        grid line which weren't removed but can still cause problems for Tesseract (and probably other approaches too)
        """
        _,contours, hier = cv2.findContours(image.copy(),cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)

        # contours are probably in sorted order but just to be sure
        for cnt in contours:
            x,y,w,h = cv2.boundingRect(cnt)
            perimeter = cv2.arcLength(cnt,True)
            if (h <= 7) or (w <= 7) or (perimeter <= 30):
                cv2.drawContours(image,[cnt],0,255,-1)

        return image 
Example #27
Source File: contours_ellipses.py    From Mastering-OpenCV-4-with-Python with MIT License
def roundness(contour, moments):
    """Calculates the roundness of a contour"""

    length = cv2.arcLength(contour, True)
    k = (length * length) / (moments['m00'] * 4 * np.pi)
    return k 
Example #28
Source File: geometries.py    From deeposlandia with MIT License
def extract_geometry_vertices(mask, structure_size=(10, 10), approx_eps=0.01):
    """Extract polygon vertices from a boolean mask with the help of OpenCV
    utilities, as a numpy array

    Parameters
    ----------
    mask : numpy.array
        Image mask where to find polygons
    structure_size : tuple
        Size of the cv2 structuring element, as a tuple of horizontal and
        vertical pixels
    approx_eps : double
        Approximation coefficient used to simplify the polygons (it lies between
        0 and 1; the larger the value, the stronger the simplification)

    Returns
    -------
    numpy.array
        List of polygons contained in the mask, identified by their vertices
    """
    structure = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, structure_size)
    denoised = cv2.morphologyEx(mask, cv2.MORPH_OPEN, structure)
    grown = cv2.morphologyEx(denoised, cv2.MORPH_CLOSE, structure)
    _, contours, hierarchy = cv2.findContours(
        grown, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
    )
    polygons = [
        cv2.approxPolyDP(
            c, epsilon=approx_eps * cv2.arcLength(c, closed=True), closed=True
        )
        for c in contours
    ]
    return polygons, hierarchy 
Example #29
Source File: EdgeBased.py    From ImageProcessingProjects with MIT License
def filter_contours(contours, min_area=100, max_area=300, angle_thresh=15.0):
    filtered = []
    for cnt in contours:
        if len(cnt) < 5:
            continue
        # rect = cv2.minAreaRect(cnt)
        (x, y), (major, minor), angle = cv2.fitEllipse(cnt)
        area = cv2.contourArea(cnt)
        # cv2.ellipse(image, ((x,y), (major,minor), angle), (0,255,0), 2)

        if abs(angle - 90) < angle_thresh:
            c = cv2.approxPolyDP(cnt, 0.01*cv2.arcLength(cnt, False), False)
            filtered.append(c)
    return filtered 
Example #30
Source File: process.py    From dhSegment with GNU General Public License v3.0
def line_extraction_v1(probs: np.ndarray,
                       low_threshold: float,
                       high_threshold: float,
                       sigma: float=0.0,
                       filter_width: float=0.00,
                       vertical_maxima: bool=False) -> Tuple[List[np.ndarray], np.ndarray]:
    """
    Given a probability map, returns the contour of lines and the corresponding mask

    :param probs: probability map (numpy array)
    :param low_threshold: hysteresis low threshold
    :param high_threshold: hysteresis high threshold
    :param sigma: sigma value for gaussian filtering
    :param filter_width: percentage of the image width to filter out lines that are close to borders (default 0.0)
    :param vertical_maxima: set to True to use vertical local maxima as candidates for the hysteresis thresholding
    :return:
    """
    # Smooth
    probs2 = cleaning_probs(probs, sigma=sigma)

    lines_mask = hysteresis_thresholding(probs2, low_threshold, high_threshold,
                                         candidates_mask=vertical_local_maxima(probs2) if vertical_maxima else None)
    # Remove lines touching border
    # lines_mask = remove_borders(lines_mask)

    # Extract polygons from line mask
    contours = find_lines(lines_mask)

    filtered_contours = []
    page_width = probs.shape[1]
    for cnt in contours:
        centroid_x, centroid_y = np.mean(cnt, axis=0)[0]
        if centroid_x < filter_width*page_width or centroid_x > (1-filter_width)*page_width:
            continue
        # if cv2.arcLength(cnt, False) < filter_width*page_width:
        #    continue
        filtered_contours.append(cnt)

    return filtered_contours, lines_mask