Python cv2.minAreaRect() Examples

The following are 30 code examples of cv2.minAreaRect(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
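
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what cv2.minAreaRect() returns: a rotated rectangle described as ((cx, cy), (w, h), angle), which cv2.boxPoints() expands into four corner points. Note that the angle convention has shifted between OpenCV releases (older builds report roughly [-90, 0), builds from about 4.5 onward roughly (0, 90]), so code that interprets the angle may behave differently across versions.

import cv2
import numpy as np

# Four arbitrary points forming a slanted quadrilateral (illustrative values)
points = np.array([[40, 40], [140, 85], [140, 160], [50, 100]], dtype=np.float32)

# minAreaRect returns ((cx, cy), (w, h), angle): center, size and rotation in degrees
(cx, cy), (w, h), angle = cv2.minAreaRect(points)

# boxPoints turns the rotated rect back into its 4 corner points (float32)
corners = cv2.boxPoints(((cx, cy), (w, h), angle))
corners = np.round(corners).astype(int)  # integer corners, e.g. for drawing

print((cx, cy), (w, h), angle)
print(corners)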
Example #1
Source File: ChickenVision.py    From ChickenVision with MIT License (13 votes)
def getEllipseRotation(image, cnt):
    try:
        # Gets rotated bounding ellipse of contour
        ellipse = cv2.fitEllipse(cnt)
        centerE = ellipse[0]
        # Gets rotation of ellipse; same as rotation of contour
        rotation = ellipse[2]
        # Gets width and height of rotated ellipse
        widthE = ellipse[1][0]
        heightE = ellipse[1][1]
        # Maps rotation to (-90 to 90). Makes it easier to tell direction of slant
        rotation = translateRotation(rotation, widthE, heightE)

        cv2.ellipse(image, ellipse, (23, 184, 80), 3)
        return rotation
    except:
        # Gets rotated bounding rectangle of contour
        rect = cv2.minAreaRect(cnt)
        # Creates box around that rectangle
        box = cv2.boxPoints(rect)
        # Convert the box corner points to integer coordinates
        box = np.int0(box)
        # Gets center of rotated rectangle
        center = rect[0]
        # Gets rotation of rectangle; same as rotation of contour
        rotation = rect[2]
        # Gets width and height of rotated rectangle
        width = rect[1][0]
        height = rect[1][1]
        # Maps rotation to (-90 to 90). Makes it easier to tell direction of slant
        rotation = translateRotation(rotation, width, height)
        return rotation

#################### FRC VISION PI Image Specific ############# 
Example #2
Source File: transforms_rbbox.py    From AerialDetection with Apache License 2.0 (7 votes)
def mask2poly_single(binary_mask):
    """

    :param binary_mask:
    :return:
    """
    try:
        contours, hierarchy = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        # contour_lens = np.array(list(map(len, contours)))
        # max_id = contour_lens.argmax()
        # max_contour = contours[max_id]
        max_contour = max(contours, key=len)
        rect = cv2.minAreaRect(max_contour)
        poly = cv2.boxPoints(rect)
        # poly = TuplePoly2Poly(poly)
    except:
        import pdb
        pdb.set_trace()
    return poly 
Example #3
Source File: class_PlateDetection.py    From ALPR_System with Apache License 2.0 (7 votes)
def check_plate(self, input_img, contour):
        min_rect = cv2.minAreaRect(contour)
        if self.validateRotationAndRatio(min_rect):
            x, y, w, h = cv2.boundingRect(contour)
            after_validation_img = input_img[y:y+h, x:x+w]
            after_clean_plate_img, plateFound, coordinates = self.clean_plate(after_validation_img)
            if plateFound:
                characters_on_plate = self.find_characters_on_plate(after_clean_plate_img)
                if (characters_on_plate is not None and len(characters_on_plate) > 5):
                    x1, y1, w1, h1 = coordinates
                    coordinates = x1+x, y1+y
                    after_check_plate_img = after_clean_plate_img
                    return after_check_plate_img, characters_on_plate, coordinates
        return None, None, None


#################### PLATE FEATURES #################### 
Example #4
Source File: seglink.py    From seglink with GNU General Public License v3.0 (7 votes)
def min_area_rect(xs, ys):
    """
    Args:
        xs: numpy ndarray with shape=(N,4). N is the number of oriented bboxes. 4 contains [x1, x2, x3, x4]
        ys: numpy ndarray with shape=(N,4), [y1, y2, y3, y4]
            Note that [(x1, y1), (x2, y2), (x3, y3), (x4, y4)] can represent an oriented bbox.
    Return:
        the oriented rects surrounding the box, in the format: [cx, cy, w, h, theta].
    """
    xs = np.asarray(xs, dtype = np.float32)
    ys = np.asarray(ys, dtype = np.float32)
        
    num_rects = xs.shape[0]
    box = np.empty((num_rects, 5))#cx, cy, w, h, theta
    for idx in xrange(num_rects):
        points = zip(xs[idx, :], ys[idx, :])
        cnt = util.img.points_to_contour(points)
        rect = cv2.minAreaRect(cnt)
        cx, cy = rect[0]
        w, h = rect[1]
        theta = rect[2]
        box[idx, :] = [cx, cy, w, h, theta]
    
    box = np.asarray(box, dtype = xs.dtype)
    return box 
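
The function above (and its identical copies in Examples #6 and #9) is Python 2 code: xrange and a list-returning zip. As a hedged sketch only, here is the same idea in Python 3, building the contour directly with NumPy instead of the project-specific points_to_contour helper (min_area_rect_py3 is an illustrative name, not part of seglink):

import cv2
import numpy as np

def min_area_rect_py3(xs, ys):
    # xs, ys: (N, 4) arrays holding the corner coordinates of N quadrilaterals
    xs = np.asarray(xs, dtype=np.float32)
    ys = np.asarray(ys, dtype=np.float32)
    boxes = np.empty((xs.shape[0], 5), dtype=xs.dtype)  # cx, cy, w, h, theta
    for idx in range(xs.shape[0]):
        # stack the four (x, y) pairs into the (4, 1, 2) contour layout OpenCV expects
        cnt = np.stack([xs[idx], ys[idx]], axis=1).reshape(-1, 1, 2)
        (cx, cy), (w, h), theta = cv2.minAreaRect(cnt)
        boxes[idx] = [cx, cy, w, h, theta]
    return boxes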
Example #5
Source File: plate_locate.py    From EasyPR-python with Apache License 2.0 (6 votes)
def sobelSecSearchPart(self, bound, refpoint, out):
        bound_threshold = self.sobelOperT(bound, 3, 6, 2)

        tempBoundThread = bound_threshold.copy()
        clearLiuDingOnly(tempBoundThread)

        posLeft, posRight, flag = bFindLeftRightBound(tempBoundThread)
        if flag:
            if posRight != 0 and posLeft != 0 and posLeft < posRight:
                posY = int(bound_threshold.shape[0] * 0.5)
                for i in range(posLeft + int(bound_threshold.shape[0] * 0.1), posRight - 4):
                    bound_threshold[posY, i] = 255
            for i in range(bound_threshold.shape[0]):
                bound_threshold[i, posLeft] = 0
                bound_threshold[i, posRight] = 0

        _, contours, _ = cv2.findContours(bound_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        for it in contours:
            mr = cv2.minAreaRect(it)
            if self.verifySizes(mr):
                tmp = (mr[0][0] + refpoint[0], mr[0][1] + refpoint[1])
                out.append((tmp, mr[1], mr[2])) 
Example #6
Source File: img.py    From HUAWEIOCR-2019 with MIT License (6 votes)
def min_area_rect(xs, ys):
    """
    Args:
        xs: numpy ndarray with shape=(N,4). N is the number of oriented bboxes. 4 contains [x1, x2, x3, x4]
        ys: numpy ndarray with shape=(N,4), [y1, y2, y3, y4]
            Note that [(x1, y1), (x2, y2), (x3, y3), (x4, y4)] can represent an oriented bbox.
    Return:
        the oriented rects surrounding the box, in the format: [cx, cy, w, h, theta].
    """
    xs = np.asarray(xs, dtype = np.float32)
    ys = np.asarray(ys, dtype = np.float32)
        
    num_rects = xs.shape[0]
    box = np.empty((num_rects, 5))#cx, cy, w, h, theta
    for idx in xrange(num_rects):
        points = zip(xs[idx, :], ys[idx, :])
        cnt = points_to_contour(points)
        rect = cv2.minAreaRect(cnt)
        cx, cy = rect[0]
        w, h = rect[1]
        theta = rect[2]
        box[idx, :] = [cx, cy, w, h, theta]
    
    box = np.asarray(box, dtype = xs.dtype)
    return box 
Example #7
Source File: class_PlateDetection.py    From ALPR_System with Apache License 2.0 (6 votes)
def clean_plate(self, plate):
        gray = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
        thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
        _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        if contours:
            areas = [cv2.contourArea(c) for c in contours]
            max_index = np.argmax(areas) # index of the largest contour in the area array
            
            max_cnt = contours[max_index]
            max_cntArea = areas[max_index]
            x,y,w,h = cv2.boundingRect(max_cnt)
            rect = cv2.minAreaRect(max_cnt)
            rotatedPlate = self.crop_rotated_contour(plate, rect)
            if not self.ratioCheck(max_cntArea, rotatedPlate.shape[1], rotatedPlate.shape[0]):
                return plate, False, None
            return rotatedPlate, True, [x, y, w, h]
        else:
            return plate, False, None 
Example #8
Source File: crop.py    From idmatch with MIT License (6 votes)
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.boxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)

    return np.minimum(c_im, ary) 
Example #9
Source File: img.py    From HUAWEIOCR-2019 with MIT License (6 votes)
def min_area_rect(xs, ys):
    """
    Args:
        xs: numpy ndarray with shape=(N,4). N is the number of oriented bboxes. 4 contains [x1, x2, x3, x4]
        ys: numpy ndarray with shape=(N,4), [y1, y2, y3, y4]
            Note that [(x1, y1), (x2, y2), (x3, y3), (x4, y4)] can represent an oriented bbox.
    Return:
        the oriented rects surrounding the box, in the format: [cx, cy, w, h, theta].
    """
    xs = np.asarray(xs, dtype = np.float32)
    ys = np.asarray(ys, dtype = np.float32)
        
    num_rects = xs.shape[0]
    box = np.empty((num_rects, 5))#cx, cy, w, h, theta
    for idx in xrange(num_rects):
        points = zip(xs[idx, :], ys[idx, :])
        cnt = points_to_contour(points)
        rect = cv2.minAreaRect(cnt)
        cx, cy = rect[0]
        w, h = rect[1]
        theta = rect[2]
        box[idx, :] = [cx, cy, w, h, theta]
    
    box = np.asarray(box, dtype = xs.dtype)
    return box 
Example #10
Source File: mask_rcnn_with_text.py    From open_model_zoo with Apache License 2.0 (6 votes)
def masks_to_rects(masks):
        rects = []
        for mask in masks:
            decoded_mask = mask
            # [-2] selects the contour list under both the OpenCV 3 (image, contours, hierarchy)
            # and OpenCV 4 (contours, hierarchy) return signatures
            contours = cv2.findContours(decoded_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]

            areas = []
            boxes = []
            for contour in contours:
                area = cv2.contourArea(contour)
                areas.append(area)

                rect = cv2.minAreaRect(contour)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                boxes.append(box)

            if areas:
                i = np.argmax(areas)
                rects.append(boxes[i])

        return rects 
Example #11
Source File: mlt17_to_voc.py    From tf_ctpn with MIT License (6 votes)
def test():
    # p1 = Point(5, 1)
    # p2 = Point(1, 4)
    # l1 = Line(p2, p1)
    # print(l1.k)
    #
    # p1 = Point(5, 4)
    # p2 = Point(1, 1)
    # l1 = Line(p1, p2)
    # print(l1.k)

    pnts = [(40, 40), (140, 85), (140, 160), (50, 100)]
    img = np.zeros((200, 200, 3))
    a = cv2.minAreaRect(np.asarray(pnts))

    img = cv2.line(img, pnts[0], pnts[1], (0, 255, 0), thickness=1)
    img = cv2.line(img, pnts[1], pnts[2], (0, 255, 0), thickness=1)
    img = cv2.line(img, pnts[2], pnts[3], (0, 255, 0), thickness=1)
    img = cv2.line(img, pnts[3], pnts[0], (0, 255, 0), thickness=1)

    box = cv2.boxPoints(a)

    def tt(p):
        # cv2.line expects integer point coordinates; boxPoints returns floats
        return (int(p[0]), int(p[1]))

    img = cv2.line(img, tt(box[0]), tt(box[1]), (255, 255, 0), thickness=1)
    img = cv2.line(img, tt(box[1]), tt(box[2]), (255, 255, 0), thickness=1)
    img = cv2.line(img, tt(box[2]), tt(box[3]), (255, 255, 0), thickness=1)
    img = cv2.line(img, tt(box[3]), tt(box[0]), (255, 255, 0), thickness=1)

    cv2.imshow('test', img.astype(np.uint8))
    cv2.waitKey(0) 
Example #12
Source File: TableRecognition.py    From OTR with GNU General Public License v3.0 (6 votes)
def compute_cell_hulls(self):
        """
        Run find_table_cell_polygons() and compute a rectangle enclosing each cell.
        For most (4-point) cells this is equivalent to the original path; however, it removes
        small irregularities and extra points from larger (5+ point) cells (mostly merged cells).
        """
        self.compute_cell_polygons()
        # cv2 convexHull / minAreaRect only work with integer coordinates.
        self.cell_hulls = [
            cv2.boxPoints(cv2.minAreaRect(np.rint(self.cluster_coords[path]).astype(int)))
            for path in self.cell_polygons]
        # Compute centers of cell hulls
        self.cell_centers = np.zeros((len(self.cell_hulls), 2))
        for i in range(len(self.cell_hulls)):
            hull_points = self.cell_hulls[i]
            self.cell_centers[i] = cv_algorithms.meanCenter(hull_points) 
Example #13
Source File: seg_detector_representer.py    From DBNet.pytorch with Apache License 2.0 (6 votes)
def get_mini_boxes(self, contour):
        bounding_box = cv2.minAreaRect(contour)
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])

        index_1, index_2, index_3, index_4 = 0, 1, 2, 3
        if points[1][1] > points[0][1]:
            index_1 = 0
            index_4 = 1
        else:
            index_1 = 1
            index_4 = 0
        if points[3][1] > points[2][1]:
            index_2 = 2
            index_3 = 3
        else:
            index_2 = 3
            index_3 = 2

        box = [points[index_1], points[index_2], points[index_3], points[index_4]]
        return box, min(bounding_box[1]) 
Example #14
Source File: iou.py    From DBNet.pytorch with Apache License 2.0 (6 votes)
def iou_rotate(box_a, box_b, method='union'):
    rect_a = cv2.minAreaRect(box_a)
    rect_b = cv2.minAreaRect(box_b)
    r1 = cv2.rotatedRectangleIntersection(rect_a, rect_b)
    if r1[0] == 0:
        return 0
    else:
        inter_area = cv2.contourArea(r1[1])
        area_a = cv2.contourArea(box_a)
        area_b = cv2.contourArea(box_b)
        union_area = area_a + area_b - inter_area
        if union_area == 0 or inter_area == 0:
            return 0
        if method == 'union':
            iou = inter_area / union_area
        elif method == 'intersection':
            iou = inter_area / min(area_a, area_b)
        else:
            raise NotImplementedError
        return iou 
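
A hedged usage sketch for iou_rotate above, assuming the function and its cv2 import are already in scope: each box is a 4x2 float32 array of corner points, and the coordinates here are purely illustrative (not taken from DBNet.pytorch).

import numpy as np

# Two overlapping quadrilaterals as 4x2 float32 corner arrays (illustrative values)
box_a = np.array([[0, 0], [100, 0], [100, 50], [0, 50]], dtype=np.float32)
box_b = np.array([[20, 10], [120, 10], [120, 60], [20, 60]], dtype=np.float32)

print(iou_rotate(box_a, box_b))                         # intersection over union
print(iou_rotate(box_a, box_b, method='intersection'))  # intersection over the smaller area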
Example #15
Source File: plate_locate.py    From EasyPR-python with Apache License 2.0 (6 votes)
def sobelFrtSearch(self, src):
        out_rects = []

        src_threshold = self.sobelOper(src, self.m_GaussianBlurSize, self.m_MorphSizeWidth, self.m_MorphSizeHeight)
        _, contours, _ = cv2.findContours(src_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        for it in contours:
            mr = cv2.minAreaRect(it)

            if self.verifySizes(mr):
                safeBoundRect, flag = self.calcSafeRect(mr, src)
                if not flag:
                    continue
                out_rects.append(safeBoundRect)

        return out_rects 
Example #16
Source File: plate_locate.py    From EasyPR-python with Apache License 2.0 (6 votes)
def colorSearch(self, src, color, out_rect):
        """

        :param src: input BGR image
        :param color: plate colour to match
        :param out_rect: output list; minAreaRect results that pass verifySizes are appended in place
        :return: binary image after colour match, thresholding and morphological closing
        """
        color_morph_width = 10
        color_morph_height = 2

        match_gray = colorMatch(src, color, False)

        _, src_threshold = cv2.threshold(match_gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

        element = cv2.getStructuringElement(cv2.MORPH_RECT, (color_morph_width, color_morph_height))
        src_threshold = cv2.morphologyEx(src_threshold, cv2.MORPH_CLOSE, element)

        out = src_threshold.copy()

        _, contours, _ = cv2.findContours(src_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        for cnt in contours:
            mr = cv2.minAreaRect(cnt)
            if self.verifySizes(mr):
                out_rect.append(mr)

        return out 
Example #17
Source File: crop_morphology.py    From PAN-Card-OCR with MIT License (6 votes)
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)

    return np.minimum(c_im, ary) 
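
The only difference from the remove_border in Example #8 is the cv2.cv.BoxPoints call, which is the OpenCV 2.x API; on OpenCV 3.0 and later the equivalent call is cv2.boxPoints. A one-line hedged sketch of the swap:

box = cv2.boxPoints(r)  # OpenCV 3.0+ replacement for cv2.cv.BoxPoints(r)
box = np.int0(box)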
Example #18
Source File: tools.py    From keras-ocr with MIT License (6 votes)
def combine_line(line):
    """Combine a set of boxes in a line into a single bounding
    box.

    Args:
        line: A list of (box, character) entries

    Returns:
        A (box, text) tuple
    """
    text = ''.join([character if character is not None else '' for _, character in line])
    box = np.concatenate([coords[:2] for coords, _ in line] +
                         [np.array([coords[3], coords[2]])
                          for coords, _ in reversed(line)]).astype('float32')
    first_point = box[0]
    rectangle = cv2.minAreaRect(box)
    box = cv2.boxPoints(rectangle)

    # Put the points in clockwise order
    box = np.array(np.roll(box, -np.linalg.norm(box - first_point, axis=1).argmin(), 0))
    return box, text 
Example #19
Source File: image_functions.py    From niryo_one_ros with GNU General Public License v3.0 (6 votes)
def get_contour_angle(cnt):
    """
    Return orientation of a contour
    :param cnt: contour
    :return: Angle in radians
    """
    rotrect = cv2.minAreaRect(cnt)
    angle = rotrect[-1]
    size1, size2 = rotrect[1][0], rotrect[1][1]
    ratio_size = float(size1) / float(size2)
    if 1.25 > ratio_size > 0.75:
        if angle < -45:
            angle = 90 + angle
    else:
        if size1 < size2:
            angle = angle + 180
        else:
            angle = angle + 90

        if angle > 90:
            angle = angle - 180

    return math.radians(angle) 
Example #20
Source File: crop_morphology.py    From Python-Code with MIT License (6 votes)
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)

    return np.minimum(c_im, ary) 
Example #21
Source File: crop_morphology.py    From oldnyc with Apache License 2.0 (6 votes)
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)

    return np.minimum(c_im, ary) 
Example #22
Source File: omr.py    From omr with MIT License (5 votes)
def get_bounding_rect(contour):
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    return np.int0(box) 
Example #23
Source File: extract_candidate.py    From DVCNN_Lane_Detection with Apache License 2.0 (5 votes)
def __extract_line_from_filtered_image(img):
        """
        Normalize and threshold the weighted hat-like filter result to extract line candidates
        :param img: input image
        :return: dict with a rotated rect list and a bounding rect list
        """
        image = img[:, :, 0]
        inds = np.where(image[:, :] > 300)
        norm_thresh_image = np.zeros(image.shape).astype(np.uint8)
        norm_thresh_image[inds] = 255

        # find connected component
        image, contours, hierarchy = cv2.findContours(image=norm_thresh_image, mode=cv2.RETR_CCOMP,
                                                      method=cv2.CHAIN_APPROX_TC89_KCOS)

        # find rotate rect of each contour and check if it fits the condition, if fits the condition then save the
        # bounding rectangle of the contour
        rotate_rect_list = []
        bounding_rect_list = []
        for i in range(len(contours)):
            contour = contours[i]
            rotrect = cv2.minAreaRect(contour)
            if RoiExtractor.__is_rrect_valid(rotrect):
                rotate_rect_list.append(rotrect)
                bnd_rect = cv2.boundingRect(contour)
                bounding_rect_list.append(bnd_rect)
        result = {
            'rotate_rect_list': rotate_rect_list,
            'bounding_rect_list': bounding_rect_list
        }
        return result 
Example #24
Source File: privacy.py    From deda with GNU General Public License v3.0 (5 votes)
def restoreSkewByMarkers(self):
        _,_, angle = cv2.minAreaRect(self._getMagentaMarkers())
        angle = angle%90 if angle%90<45 else angle%90-90
        self._print("Skew correction: rotating by %+f°"%angle)
        self.im = rotateImage(self.im, angle, cv2.INTER_NEAREST) 
Example #25
Source File: postures.py    From tierpsy-tracker with MIT License (5 votes)
def get_quirkiness(skeletons):
    bad = np.isnan(skeletons[:, 0, 0])
    
    dd = [cv2.minAreaRect(x) for x in skeletons.astype(np.float32)]
    dd = [(L,W) if L >W else (W,L) for _,(L,W),_ in dd]
    L, W = list(map(np.array, zip(*dd)))
    L[bad] = np.nan
    W[bad] = np.nan
    quirkiness = np.sqrt(1 - W**2 / L**2)
    
    return quirkiness, L, W 
Example #26
Source File: distance_to_camera.py    From Hand-Detection-and-Distance-Estimation with MIT License (5 votes)
def find_marker(image):
    # convert the image to grayscale, blur it, and detect edges
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)

    # find the contours in the edged image and keep the largest one;
    # we'll assume that this is our piece of paper in the image
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    c = max(cnts, key = cv2.contourArea)

    # compute the bounding box of the paper region and return it
    return cv2.minAreaRect(c) 
Example #27
Source File: getBlobTrajectories.py    From tierpsy-tracker with MIT License (5 votes)
def getBlobDimesions(worm_cnt, ROI_bbox):
    
    area = float(cv2.contourArea(worm_cnt))
    
    worm_bbox = cv2.boundingRect(worm_cnt)
    bounding_box_xmin = ROI_bbox[0] + worm_bbox[0]
    bounding_box_xmax = bounding_box_xmin + worm_bbox[2]
    bounding_box_ymin = ROI_bbox[1] + worm_bbox[1]
    bounding_box_ymax = bounding_box_ymin + worm_bbox[3]

    # save everything into the proper output format
    blob_bbox =(bounding_box_xmin, 
                bounding_box_xmax,
                bounding_box_ymin,
                bounding_box_ymax)


    (CMx, CMy), (L, W), angle = cv2.minAreaRect(worm_cnt)
    #adjust CM from the ROI reference frame to the image reference
    CMx += ROI_bbox[0]
    CMy += ROI_bbox[1]

    if W > L:
        L, W = W, L  # switch if width is larger than length
    
    blob_dims = (CMx, CMy, L, W, angle)
    return blob_dims, area, blob_bbox 
Example #28
Source File: synthgen.py    From SynthText with Apache License 2.0 (5 votes)
def filter(seg,area,label):
        """
        Apply the filter.
        The final list is ranked by area.
        """
        good = label[area > TextRegions.minArea]
        area = area[area > TextRegions.minArea]
        filt,R = [],[]
        for idx,i in enumerate(good):
            mask = seg==i
            xs,ys = np.where(mask)

            coords = np.c_[xs,ys].astype('float32')
            rect = cv2.minAreaRect(coords)          
            box = np.array(cv2.cv.BoxPoints(rect))
            h,w,rot = TextRegions.get_hw(box,return_rot=True)

            f = (h > TextRegions.minHeight 
                and w > TextRegions.minWidth
                and TextRegions.minAspect < w/h < TextRegions.maxAspect
                and area[idx]/w*h > TextRegions.pArea)
            filt.append(f)
            R.append(rot)

        # filter bad regions:
        filt = np.array(filt)
        area = area[filt]
        R = [R[i] for i in xrange(len(R)) if filt[i]]

        # sort the regions based on areas:
        aidx = np.argsort(-area)
        good = good[filt][aidx]
        R = [R[i] for i in aidx]
        filter_info = {'label':good, 'rot':R, 'area': area[aidx]}
        return filter_info 
Example #29
Source File: roomba.py    From Roomba980-Python with MIT License (5 votes)
def get_image_parameters(self, image=None, contour=None, final=False):
        '''
        updates angle of image, and centre using cv2 or PIL.
        NOTE: this assumes the floorplan is rectangular! if you live in a
        lighthouse, the angle will not be valid!
        input is cv2 contour or PIL image
        routines find the minimum area rectangle that fits the image outline
        '''
        if contour is not None and HAVE_CV2:
            # find minimum area rectangle that fits
            # returns (x,y), (width, height), theta - where (x,y) is the center
            x_y,l_w,angle = cv2.minAreaRect(contour)

        elif image is not None and HAVE_PIL:
            x_y, angle = self.PIL_get_image_parameters(image)

        else:
            return

        if angle < self.angle - 45:
            angle += 90
        if angle > 45-self.angle:
            angle -= 90

        if final:
            self.cx = x_y[0]
            self.cy = x_y[1]
            self.angle = angle
        self.log.info("MAP: image center: x:%d, y:%d, angle %.2f" %
                      (x_y[0], x_y[1], angle)) 
Example #30
Source File: rotate_aug.py    From AerialDetection with Apache License 2.0 (5 votes)
def mask2poly_single(binary_mask):
    """

    :param binary_mask:
    :return:
    """
    # try:
    contours, hierarchy = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    max_contour = max(contours, key=len)
    rect = cv2.minAreaRect(max_contour)
    poly = cv2.boxPoints(rect)
    poly = TuplePoly2Poly(poly)

    return poly