Python cv2.ellipse() Examples

The following are 30 code examples of cv2.ellipse(), drawn from open-source projects. The originating project and source file are noted above each example. You may also want to check out all available functions and classes of the cv2 module.
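As a quick reference before the examples: cv2.ellipse() accepts either an explicit center, half-axis lengths and angles, or a RotatedRect-style box such as the tuple returned by cv2.fitEllipse() or cv2.minAreaRect(). A minimal standalone sketch of both call forms (not taken from any of the projects below):

import numpy as np
import cv2

canvas = np.zeros((300, 400, 3), dtype=np.uint8)

# Form 1: center, half-axis lengths, rotation angle, start/end arc angles, color, thickness
cv2.ellipse(canvas, (200, 150), (120, 60), 30, 0, 360, (0, 255, 0), 2)

# Form 2: a RotatedRect box ((cx, cy), (full width, full height), angle),
# e.g. the value returned by cv2.fitEllipse() or cv2.minAreaRect()
cv2.ellipse(canvas, ((200, 150), (240, 120), 30), (0, 0, 255), 1)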
Example #1
Source File: ChickenVision.py    From ChickenVision with MIT License
def getEllipseRotation(image, cnt):
    try:
        # Gets rotated bounding ellipse of contour
        ellipse = cv2.fitEllipse(cnt)
        centerE = ellipse[0]
        # Gets rotation of ellipse; same as rotation of contour
        rotation = ellipse[2]
        # Gets width and height of rotated ellipse
        widthE = ellipse[1][0]
        heightE = ellipse[1][1]
        # Maps rotation to (-90 to 90). Makes it easier to tell direction of slant
        rotation = translateRotation(rotation, widthE, heightE)

        cv2.ellipse(image, ellipse, (23, 184, 80), 3)
        return rotation
    except:
        # Gets rotated bounding rectangle of contour
        rect = cv2.minAreaRect(cnt)
        # Creates box around that rectangle
        box = cv2.boxPoints(rect)
        # Converts the box corner coordinates to integers
        box = np.int0(box)
        # Gets center of rotated rectangle
        center = rect[0]
        # Gets rotation of rectangle; same as rotation of contour
        rotation = rect[2]
        # Gets width and height of rotated rectangle
        width = rect[1][0]
        height = rect[1][1]
        # Maps rotation to (-90 to 90). Makes it easier to tell direction of slant
        rotation = translateRotation(rotation, width, height)
        return rotation

#################### FRC VISION PI Image Specific ############# 
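Note that the example above calls a translateRotation() helper that is not shown on this page. A hypothetical sketch of such a mapping, assuming it only normalizes OpenCV's 0-180 degree angle into a signed slant (not necessarily the original ChickenVision implementation):

def translateRotation(rotation, width, height):
    # Hypothetical helper: pick the long axis from width/height and map the
    # 0-180 degree OpenCV angle to a signed slant in roughly (-90, 90].
    if width < height:
        rotation = -1 * (rotation - 90)
    if rotation > 90:
        rotation = -1 * (rotation - 180)
    return round(-rotation)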
Example #2
Source File: mask.py    From dreampower with GNU General Public License v3.0
def __draw_ellipse(a_max, a_min, angle, aurmax, aurmin, details, hairmax, hairmin, nipmax, nipmin, obj,
                       titmax, titmin, vagmax, vagmin, x, y):
        # Draw ellipse
        if obj.name == "tit":
            cv2.ellipse(details, (x, y), (titmax, titmin), angle, 0, 360, (0, 205, 0), -1)  # (0,0,0,50)
        elif obj.name == "aur":
            cv2.ellipse(details, (x, y), (aurmax, aurmin), angle, 0, 360, (0, 0, 255), -1)  # red
        elif obj.name == "nip":
            cv2.ellipse(details, (x, y), (nipmax, nipmin), angle, 0, 360, (255, 255, 255), -1)  # white
        elif obj.name == "belly":
            cv2.ellipse(details, (x, y), (a_max, a_min), angle, 0, 360, (255, 0, 255), -1)  # purple
        elif obj.name == "vag":
            cv2.ellipse(details, (x, y), (vagmax, vagmin), angle, 0, 360, (255, 0, 0), -1)  # blue
        elif obj.name == "hair":
            xmin = x - hairmax
            ymin = y - hairmin
            xmax = x + hairmax
            ymax = y + hairmin
            cv2.rectangle(details, (xmin, ymin), (xmax, ymax), (100, 100, 100), -1) 
Example #3
Source File: photometric_augmentation.py    From GIFT with Apache License 2.0
def additive_shade(image, nb_ellipses=20, transparency_range=(-0.5, 0.8),
                   kernel_size_range=(250, 350)):

    def _py_additive_shade(img):
        img=img.astype(np.float32)
        min_dim = min(img.shape[:2]) / 4
        mask = np.zeros(img.shape[:2], np.uint8)
        for i in range(nb_ellipses):
            ax = int(max(np.random.rand() * min_dim, min_dim / 5))
            ay = int(max(np.random.rand() * min_dim, min_dim / 5))
            max_rad = max(ax, ay)
            x = np.random.randint(max_rad, img.shape[1] - max_rad)  # center
            y = np.random.randint(max_rad, img.shape[0] - max_rad)
            angle = np.random.rand() * 90
            cv2.ellipse(mask, (x, y), (ax, ay), angle, 0, 360, 255, -1)

        transparency = np.random.uniform(*transparency_range)
        kernel_size = np.random.randint(*kernel_size_range)
        if (kernel_size % 2) == 0:  # kernel_size has to be odd
            kernel_size += 1
        mask = cv2.GaussianBlur(mask.astype(np.float32), (kernel_size, kernel_size), 0)
        shaded = img * (1 - transparency * mask[..., np.newaxis]/255.)
        return np.clip(shaded, 0, 255).astype(np.uint8)

    return _py_additive_shade(image) 
Example #4
Source File: operations.py    From Smart-Surveillance-System-using-Raspberry-Pi with GNU General Public License v3.0
def draw_face_ellipse(image, faces_coord):
    """ Draws an ellipse around the face found.
    """
    for (x, y, w, h) in faces_coord:
        center = (x + w // 2, y + h // 2)  # cv2.ellipse expects integer coordinates
        axis_major = h // 2
        axis_minor = w // 2
        cv2.ellipse(image,
                    center=center,
                    axes=(axis_major, axis_minor),
                    angle=0,
                    startAngle=0,
                    endAngle=360,
                    color=(206, 0, 209),
                    thickness=2)
    return image 
Example #5
Source File: AIMakeup.py    From AIMakeup with Apache License 2.0
def get_forehead_landmark(self,im_bgr,face_landmark,mask_organs,mask_nose):
        '''
        Compute the forehead landmark coordinates
        '''
        # Draw an ellipse over the forehead region
        radius=(np.linalg.norm(face_landmark[0]-face_landmark[16])/2).astype('int32')
        center_abs=tuple(((face_landmark[0]+face_landmark[16])/2).astype('int32'))
        
        angle=np.degrees(np.arctan((lambda l:l[1]/l[0])(face_landmark[16]-face_landmark[0]))).astype('int32')
        mask=np.zeros(mask_organs.shape[:2], dtype=np.float64)
        cv2.ellipse(mask,center_abs,(radius,radius),angle,180,360,1,-1)
        # Remove the parts that overlap the facial features
        mask[mask_organs[:,:,0]>0]=0
        # Use the nose skin tone to estimate the true forehead area
        index_bool=[]
        for ch in range(3):
            mean,std=np.mean(im_bgr[:,:,ch][mask_nose[:,:,ch]>0]),np.std(im_bgr[:,:,ch][mask_nose[:,:,ch]>0])
            up,down=mean+0.5*std,mean-0.5*std
            index_bool.append((im_bgr[:,:,ch]<down)|(im_bgr[:,:,ch]>up))
        index_zero=((mask>0)&index_bool[0]&index_bool[1]&index_bool[2])
        mask[index_zero]=0
        index_abs=np.array(np.where(mask>0)[::-1]).transpose()
        landmark=cv2.convexHull(index_abs).squeeze()
        return landmark 
Example #6
Source File: shapes.py    From segmentation-networks-benchmark with MIT License
def gen_random_image(patch_size):
    img = np.zeros((patch_size, patch_size, 3), dtype=np.uint8)
    mask = np.zeros((patch_size, patch_size), dtype=np.uint8)

    # Background
    dark_color0 = random.randint(0, 100)
    dark_color1 = random.randint(0, 100)
    dark_color2 = random.randint(0, 100)
    img[:, :, 0] = dark_color0
    img[:, :, 1] = dark_color1
    img[:, :, 2] = dark_color2

    # Object
    light_color0 = random.randint(dark_color0 + 1, 255)
    light_color1 = random.randint(dark_color1 + 1, 255)
    light_color2 = random.randint(dark_color2 + 1, 255)
    center_0 = random.randint(0, patch_size)
    center_1 = random.randint(0, patch_size)
    r1 = random.randint(10, 56)
    r2 = random.randint(10, 56)
    cv2.ellipse(img, (center_0, center_1), (r1, r2), 0, 0, 360, (light_color0, light_color1, light_color2), -1)
    cv2.ellipse(mask, (center_0, center_1), (r1, r2), 0, 0, 360, 1, -1)

    # White noise
    density = random.uniform(0, 0.1)
    for i in range(patch_size):
        for j in range(patch_size):
            if random.random() < density:
                img[i, j, 0] = random.randint(0, 255)
                img[i, j, 1] = random.randint(0, 255)
                img[i, j, 2] = random.randint(0, 255)

    return img, mask 
Example #7
Source File: vis.py    From Detectron-PYTORCH with Apache License 2.0
def draw_detection(im, bboxes, scores=None, cls_inds=None, cls_name=None, color=None, thick=None, ellipse=False):
    # draw image
    bboxes = np.round(bboxes).astype(np.int)
    if cls_inds is not None:
        cls_inds = cls_inds.astype(np.int)
    cls_num = len(cls_name) if cls_name is not None else -1

    imgcv = np.copy(im)
    h, w, _ = imgcv.shape
    for i, box in enumerate(bboxes):
        cls_indx = cls_inds[i] if cls_inds is not None else None
        color_ = get_color(cls_indx, cls_num) if color is None else color
        color_ = (0, 0, 0) if cls_indx < 0 else color_

        thick = int((h + w) / 500) if thick is None else thick
        if not ellipse:
            cv2.rectangle(imgcv,
                          (box[0], box[1]), (box[2], box[3]),
                          color_, thick)
        else:
            cv2.ellipse(imgcv,
                        ((box[0] + box[2]) // 2, (box[1] + box[3]) // 2),
                        ((box[2] - box[0]) // 2, (box[3] - box[1]) // 2),
                        0, 0, 360,
                        color=color_, thickness=thick)

        if cls_indx is not None:
            score = scores[i] if scores is not None else 1
            name = cls_name[cls_indx] if cls_name is not None else str(cls_indx)
            name = 'ign' if cls_indx < 0 else name
            mess = '%s: %.2f' % (name[:4], score)
            cv2.putText(imgcv, mess, (box[0], box[1] - 8),
                        0, 1e-3 * h, color_, thick // 3)

    return imgcv 
Example #8
Source File: ellipse_helpers.py    From Zozo-Measurer with GNU General Public License v3.0
def find_nearest_point_on_ellipse(semi_major, semi_minor, p):
    ########################################################################
    #                                                                      #
    #  This Code is based on https://github.com/0xfaded/ellipse_demo       #
    #  See Carl Chatfield's blog for an excellent explanation:             #
    #  https://wet-robots.ghost.io/simple-method-for-distance-to-ellipse/  #
    #  This function is licensed under an MIT-License:                     #
    #  https://github.com/0xfaded/ellipse_demo/blob/master/LICENSE         #
    #                                                                      #
    ########################################################################
    px = abs(p[0])
    py = abs(p[1])
    tx = 0.707
    ty = 0.707
    a = semi_major
    b = semi_minor
    for _ in range(0, 3):  # three iterations of the projection refinement
        x = a * tx
        y = b * ty
        ex = (a*a - b*b) * tx**3 / a
        ey = (b*b - a*a) * ty**3 / b
        rx = x - ex
        ry = y - ey
        qx = px - ex
        qy = py - ey
        r = np.hypot(ry, rx)
        q = np.hypot(qy, qx)
        tx = min(1, max(0, (qx * r / q + ex) / a))
        ty = min(1, max(0, (qy * r / q + ey) / b))
        t = np.hypot(ty, tx)
        tx /= t
        ty /= t
    return np.copysign(a * tx, p[0]), np.copysign(b * ty, p[1]) 
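A small usage sketch (hypothetical, not part of Zozo-Measurer): the helper above returns the closest point on an axis-aligned, origin-centred ellipse, so the point-to-ellipse distance is just the remaining Euclidean distance:

import numpy as np

p = (250.0, 40.0)                                          # query point
nearest = find_nearest_point_on_ellipse(200.0, 100.0, p)   # semi-axes a=200, b=100
distance = np.hypot(p[0] - nearest[0], p[1] - nearest[1])
print(nearest, distance)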
Example #9
Source File: utils_draw.py    From pyslam with GNU General Public License v3.0
def draw_random_ellipses(img, N=100):
    lineType = 8
    (h, w) = img.shape[:2]  
    axis_ext = w*0.1  
    for i in range(N):
        cx = np.random.randint( 0, w )
        cy = np.random.randint( 0, h )
        width, height = np.random.randint(0, axis_ext, 2)      
        angle = np.random.randint(0, 180)
        color = tuple(np.random.randint(0,255,3).tolist())        
        thickness = np.random.randint(-1, 9)   
        cv2.ellipse(img, (cx,cy), (width,height), angle, angle - 100, angle + 200, color, thickness, lineType) 
Example #10
Source File: train_infinite_generator.py    From ZF_UNET_224_Pretrained_Model with GNU General Public License v3.0
def gen_random_image():
    img = np.zeros((224, 224, 3), dtype=np.uint8)
    mask = np.zeros((224, 224), dtype=np.uint8)

    # Background
    dark_color0 = random.randint(0, 100)
    dark_color1 = random.randint(0, 100)
    dark_color2 = random.randint(0, 100)
    img[:, :, 0] = dark_color0
    img[:, :, 1] = dark_color1
    img[:, :, 2] = dark_color2

    # Object
    light_color0 = random.randint(dark_color0+1, 255)
    light_color1 = random.randint(dark_color1+1, 255)
    light_color2 = random.randint(dark_color2+1, 255)
    center_0 = random.randint(0, 224)
    center_1 = random.randint(0, 224)
    r1 = random.randint(10, 56)
    r2 = random.randint(10, 56)
    cv2.ellipse(img, (center_0, center_1), (r1, r2), 0, 0, 360, (light_color0, light_color1, light_color2), -1)
    cv2.ellipse(mask, (center_0, center_1), (r1, r2), 0, 0, 360, 255, -1)

    # White noise
    density = random.uniform(0, 0.1)
    for i in range(224):
        for j in range(224):
            if random.random() < density:
                img[i, j, 0] = random.randint(0, 255)
                img[i, j, 1] = random.randint(0, 255)
                img[i, j, 2] = random.randint(0, 255)

    return img, mask 
Example #11
Source File: EdgeBased.py    From ImageProcessingProjects with MIT License
def filter_contours(contours, min_area=100, max_area=300, angle_thresh=15.0):
    filtered = []
    for cnt in contours:
        if len(cnt) < 5:
            continue
        # rect = cv2.minAreaRect(cnt)
        (x, y), (major, minor), angle = cv2.fitEllipse(cnt)
        area = cv2.contourArea(cnt)
        # cv2.ellipse(image, ((x,y), (major,minor), angle), (0,255,0), 2)

        if abs(angle - 90) < angle_thresh:
            c = cv2.approxPolyDP(cnt, 0.01*cv2.arcLength(cnt, False), False)
            filtered.append(c)
    return filtered 
Example #12
Source File: photometric_augmentation.py    From MVSNet with MIT License
def additive_shade(image, nb_ellipses=20, transparency_range=[-0.5, 0.8],
                   kernel_size_range=[250, 350]):

    def _py_additive_shade(img):
        min_dim = min(img.shape[:2]) / 4
        mask = np.zeros(img.shape[:2], np.uint8)
        for i in range(nb_ellipses):
            ax = int(max(np.random.rand() * min_dim, min_dim / 5))
            ay = int(max(np.random.rand() * min_dim, min_dim / 5))
            max_rad = max(ax, ay)
            x = np.random.randint(max_rad, img.shape[1] - max_rad)  # center
            y = np.random.randint(max_rad, img.shape[0] - max_rad)
            angle = np.random.rand() * 90
            cv.ellipse(mask, (x, y), (ax, ay), angle, 0, 360, 255, -1)

        transparency = np.random.uniform(*transparency_range)
        kernel_size = np.random.randint(*kernel_size_range)
        if (kernel_size % 2) == 0:  # kernel_size has to be odd
            kernel_size += 1
        mask = cv.GaussianBlur(mask.astype(np.float32), (kernel_size, kernel_size), 0)
        shaded = img * (1 - transparency * mask[..., np.newaxis]/255.)
        return np.clip(shaded, 0, 255)

    shaded = tf.py_func(_py_additive_shade, [image], tf.float32)
    res = tf.reshape(shaded, tf.shape(image))
    return res 
Example #13
Source File: contours_ellipses.py    From Mastering-OpenCV-4-with-Python with MIT License
def eccentricity_from_ellipse(contour):
    """Calculates the eccentricity fitting an ellipse from a contour"""

    (x, y), (MA, ma), angle = cv2.fitEllipse(contour)

    a = ma / 2
    b = MA / 2

    ecc = np.sqrt(a ** 2 - b ** 2) / a
    return ecc 
Example #14
Source File: contours_ellipses.py    From Mastering-OpenCV-4-with-Python with MIT License
def build_image_ellipses():
    """Draws ellipses in the image"""

    img = np.zeros((500, 600, 3), dtype="uint8")
    cv2.ellipse(img, (120, 60), (100, 50), 0, 0, 360, (255, 255, 0), -1)
    cv2.ellipse(img, (300, 60), (50, 50), 0, 0, 360, (0, 0, 255), -1)
    cv2.ellipse(img, (425, 200), (50, 150), 0, 0, 360, (255, 0, 0), -1)
    cv2.ellipse(img, (550, 250), (20, 240), 0, 0, 360, (255, 0, 255), -1)
    cv2.ellipse(img, (200, 200), (150, 50), 0, 0, 360, (0, 255, 0), -1)
    cv2.ellipse(img, (250, 400), (200, 50), 0, 0, 360, (0, 255, 255), -1)
    return img 
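A hypothetical driver tying the two helpers above together (not part of the book's code; it assumes cv2 and both functions are in scope, and hedges over the OpenCV 3/4 findContours return signature):

img = build_image_ellipses()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 20, 255, cv2.THRESH_BINARY)
# [-2] picks the contour list regardless of whether findContours returns 2 or 3 values
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
for cnt in contours:
    if len(cnt) >= 5:  # cv2.fitEllipse needs at least 5 points
        print(eccentricity_from_ellipse(cnt))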
Example #15
Source File: camshift.py    From PyCV-time with MIT License
def run(self):
        while True:
            ret, self.frame = self.cam.read()
            vis = self.frame.copy()
            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

            if self.selection:
                x0, y0, x1, y1 = self.selection
                self.track_window = (x0, y0, x1-x0, y1-y0)
                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
                cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX);
                self.hist = hist.reshape(-1)
                self.show_hist()

                vis_roi = vis[y0:y1, x0:x1]
                cv2.bitwise_not(vis_roi, vis_roi)
                vis[mask == 0] = 0

            if self.tracking_state == 1:
                self.selection = None
                prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
                track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)

                if self.show_backproj:
                    vis[:] = prob[...,np.newaxis]
                try: cv2.ellipse(vis, track_box, (0, 0, 255), 2)
                except: print(track_box)

            cv2.imshow('camshift', vis)

            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows() 
Example #16
Source File: operations.py    From Smart-Surveillance-System-using-Raspberry-Pi with GNU General Public License v3.0
def cut_face_ellipse(image, face_coord):
    """ Cuts the image to just show the face in an ellipse.

    This function takes the rectangle coordinates around a detected face
    or faces and cuts the original image to the face coordinates. It also
    surrounds the face with an ellipse, blacking out all the extra
    background or other faces.
    """
    images_ellipse = []
    for (x, y, w, h) in face_coord:
        center = (x + w // 2, y + h // 2)  # cv2.ellipse expects integer coordinates
        axis_major = h // 2
        axis_minor = w // 2
        mask = np.zeros_like(image)
        # create a white filled ellipse
        mask = cv2.ellipse(mask,
                           center=center,
                           axes=(axis_major, axis_minor),
                           angle=0,
                           startAngle=0,
                           endAngle=360,
                           color=(255, 255, 255),
                           thickness=-1)
        # Bitwise AND operation to black out regions outside the mask
        image_ellipse = np.bitwise_and(image, mask)
        images_ellipse.append(image_ellipse[y: y + h, x: x + w])

    return images_ellipse 
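A hedged usage sketch for the helper above (assuming the opencv-python wheel, which ships its Haar cascades under cv2.data.haarcascades; the image path is hypothetical):

import cv2

detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
frame = cv2.imread("people.jpg")  # hypothetical input image
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)
for face in cut_face_ellipse(frame, faces):
    cv2.imshow("face", face)
    cv2.waitKey(0)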
Example #17
Source File: gazemap.py    From GazeML with MIT License
def from_gaze2d(gaze, output_size, scale=1.0):
    """Generate a normalized pictorial representation of 3D gaze direction."""
    gazemaps = []
    oh, ow = np.round(scale * np.asarray(output_size)).astype(np.int32)
    oh_2 = int(np.round(0.5 * oh))
    ow_2 = int(np.round(0.5 * ow))
    r = int(height_to_eyeball_radius_ratio * oh_2)
    theta, phi = gaze
    theta = -theta
    sin_theta = np.sin(theta)
    cos_theta = np.cos(theta)
    sin_phi = np.sin(phi)
    cos_phi = np.cos(phi)

    # Draw iris
    eyeball_radius = int(height_to_eyeball_radius_ratio * oh_2)
    iris_radius_angle = np.arcsin(0.5 * eyeball_radius_to_iris_diameter_ratio)
    iris_radius = eyeball_radius_to_iris_diameter_ratio * eyeball_radius
    iris_distance = float(eyeball_radius) * np.cos(iris_radius_angle)
    iris_offset = np.asarray([
        -iris_distance * sin_phi * cos_theta,
        iris_distance * sin_theta,
    ])
    iris_centre = np.asarray([ow_2, oh_2]) + iris_offset
    angle = np.degrees(np.arctan2(iris_offset[1], iris_offset[0]))
    ellipse_max = eyeball_radius_to_iris_diameter_ratio * iris_radius
    ellipse_min = np.abs(ellipse_max * cos_phi * cos_theta)
    gazemap = np.zeros((oh, ow), dtype=np.float32)
    gazemap = cv.ellipse(gazemap, box=(iris_centre, (ellipse_min, ellipse_max), angle),
                         color=1.0, thickness=-1, lineType=cv.LINE_AA)
    gazemaps.append(gazemap)

    # Draw eyeball
    gazemap = np.zeros((oh, ow), dtype=np.float32)
    gazemap = cv.circle(gazemap, (ow_2, oh_2), r, color=1, thickness=-1)
    gazemaps.append(gazemap)

    return np.asarray(gazemaps) 
Example #18
Source File: contours.py    From PyCV-time with MIT License
def make_image():
    img = np.zeros((500, 500), np.uint8)
    black, white = 0, 255
    for i in xrange(6):
        dx = (i%2)*250 - 30
        dy = (i/2)*150

        if i == 0:
            for j in xrange(11):
                angle = (j+5)*np.pi/21
                c, s = np.cos(angle), np.sin(angle)
                x1, y1 = np.int32([dx+100+j*10-80*c, dy+100-90*s])
                x2, y2 = np.int32([dx+100+j*10-30*c, dy+100-30*s])
                cv2.line(img, (x1, y1), (x2, y2), white)

        cv2.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+150, dy+150), (40,10), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+27, dy+100), (20,35), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 )
    return img 
Example #19
Source File: photometric_augmentation.py    From hfnet with MIT License
def additive_shade(image, nb_ellipses=20, transparency_range=[-0.5, 0.8],
                   kernel_size_range=[250, 350], prob=0.5):

    def _py_additive_shade(img):
        min_dim = min(img.shape[:2]) / 4
        mask = np.zeros(img.shape[:2], np.uint8)
        for i in range(nb_ellipses):
            ax = int(max(np.random.rand() * min_dim, min_dim / 5))
            ay = int(max(np.random.rand() * min_dim, min_dim / 5))
            max_rad = max(ax, ay)
            x = np.random.randint(max_rad, img.shape[1] - max_rad)  # center
            y = np.random.randint(max_rad, img.shape[0] - max_rad)
            angle = np.random.rand() * 90
            cv.ellipse(mask, (x, y), (ax, ay), angle, 0, 360, 255, -1)

        transparency = np.random.uniform(*transparency_range)
        kernel_size = np.random.randint(*kernel_size_range)
        if (kernel_size % 2) == 0:  # kernel_size has to be odd
            kernel_size += 1
        mask = cv.GaussianBlur(mask.astype(np.float32), (kernel_size, kernel_size), 0)
        shaded = img * (1 - transparency * mask[..., np.newaxis]/255.)
        return np.clip(shaded, 0, 255)

    f = lambda: tf.reshape(
        tf.py_func(_py_additive_shade, [image], tf.float32), tf.shape(image))
    rand = tf.random_uniform(())
    return tf.cond(rand < prob, f, lambda: tf.identity(image)) 
Example #20
Source File: gaussian_mix.py    From PyCV-time with MIT License
def draw_gaussain(img, mean, cov, color):
    x, y = np.int32(mean)
    w, u, vt = cv2.SVDecomp(cov)
    ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
    s1, s2 = np.sqrt(w)*3.0
    cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.LINE_AA) 
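A hypothetical driver (not part of the original OpenCV sample): estimate a covariance from random 2-D points and draw its 3-sigma ellipse. Note that some newer OpenCV builds require integer axis lengths, so the float values in (s1, s2) may need an explicit int() cast inside draw_gaussain:

import numpy as np
import cv2

canvas = np.zeros((400, 400, 3), np.uint8)
pts = np.random.multivariate_normal((200, 200), [[900, 400], [400, 600]], 500)
for px, py in np.int32(pts):
    cv2.circle(canvas, (int(px), int(py)), 1, (80, 80, 80), -1)  # scatter the samples
draw_gaussain(canvas, pts.mean(axis=0), np.cov(pts.T), (0, 255, 0))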
Example #21
Source File: draw_on_image.py    From geosolver with Apache License 2.0
def draw_arc(image, arc, offset=(0, 0), color=(0, 0, 255), thickness=1):
    caa = cartesian_angle(arc.circle.center, arc.a) * 180/np.pi
    cab = cartesian_angle(arc.circle.center, arc.b) * 180/np.pi
    if caa > cab:
        caa -= 360
    center = tuple(round_vector(np.array(arc.circle.center) + offset))
    radius = int(round(arc.circle.radius))
    cv2.ellipse(image, center, (radius, radius), 0, caa, cab, color, thickness) 
Example #22
Source File: gaussian_mix.py    From PyCV-time with MIT License
def draw_gaussain(img, mean, cov, color):
    x, y = np.int32(mean)
    w, u, vt = cv2.SVDecomp(cov)
    ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
    s1, s2 = np.sqrt(w)*3.0
    cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.CV_AA) 
Example #23
Source File: contours.py    From PyCV-time with MIT License
def make_image():
    img = np.zeros((500, 500), np.uint8)
    black, white = 0, 255
    for i in xrange(6):
        dx = (i%2)*250 - 30
        dy = (i/2)*150

        if i == 0:
            for j in xrange(11):
                angle = (j+5)*np.pi/21
                c, s = np.cos(angle), np.sin(angle)
                x1, y1 = np.int32([dx+100+j*10-80*c, dy+100-90*s])
                x2, y2 = np.int32([dx+100+j*10-30*c, dy+100-30*s])
                cv2.line(img, (x1, y1), (x2, y2), white)

        cv2.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+150, dy+150), (40,10), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+27, dy+100), (20,35), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 )
    return img 
Example #24
Source File: create_mask_im.py    From PConv_in_tf with Apache License 2.0
def draw_oval(imraw,num_oval):
    centers = np.random.randint(0,512,[num_oval,2])
    lens = np.random.randint(50,300,[num_oval,2])
    total_rots = np.random.randint(-181,181,[num_oval,3])
    thick = np.random.randint(10,30,num_oval)
    for i in range(num_oval):
        cv2.ellipse(imraw,tuple(centers[i]),tuple(lens[i]),total_rots[i][0],total_rots[i][1],total_rots[i][2],0,thick[i])            
    return imraw 
Example #25
Source File: tgc_visualizer.py    From TGC-Designer-Tools with Apache License 2.0
def drawBrushesOnImage(brushes, color, im, pc, image_scale, fill=True):
    for brush in brushes:
        center = pc.tgcToCV2(brush["position"]["x"], brush["position"]["z"], image_scale)
        center = (center[1], center[0]) # In point coordinates, not pixel
        width = brush["scale"]["x"] / image_scale
        height = brush["scale"]["z"] / image_scale
        rotation = - brush["rotation"]["y"] # Inverted degrees, cv2 bounding_box uses degrees

        thickness = 4
        if fill:
            thickness = -1 # Negative thickness is a filled ellipse

        brush_type_name = tgc_definitions.brushes.get(int(brush["type"]), "unknown")

        if 'square' in brush_type_name:
            box_points = cv2.boxPoints((center, (2.0*width, 2.0*height), rotation)) # Squares seem to be larger than circles
            box_points = np.int32([box_points]) # Bug with fillPoly, needs explicit cast to 32bit

            if fill:
                cv2.fillPoly(im, box_points, color, lineType=cv2.LINE_AA)
            else:
                cv2.polylines(im, box_points, True, color, thickness, lineType=cv2.LINE_AA)
        else: # Draw as ellipse for now
            '''center – The rectangle mass center.
            size – Width and height of the rectangle.
            angle – The rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.'''
            bounding_box =  (center, (1.414*width, 1.414*height), rotation) # Circles seem to scale according to radius
            cv2.ellipse(im, bounding_box, color, thickness=thickness, lineType=cv2.LINE_AA) 
Example #26
Source File: tgc_visualizer.py    From TGC-Designer-Tools with Apache License 2.0
def drawObjectsOnImage(objects, color, im, pc, image_scale):
    for ob in objects:
        for item in ob["Value"]["items"]:
            # Assuming all items are ellipses for now
            center = pc.tgcToCV2(item["position"]["x"], item["position"]["z"], image_scale)
            center = (center[1], center[0]) # In point coordinates, not pixel
            width = max(item["scale"]["x"] / image_scale, 8.0)
            height = max(item["scale"]["z"] / image_scale, 8.0)
            rotation = - item["rotation"]["y"] * math.pi / 180.0 # Inverted degrees, cv2 uses clockwise radians

            '''center – The rectangle mass center.
            size – Width and height of the rectangle.
            angle – The rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.'''

            bounding_box_of_ellipse =  (center, (width, height), rotation)

            cv2.ellipse(im, bounding_box_of_ellipse, color, thickness=-1, lineType=cv2.LINE_AA)

        for cluster in ob["Value"]["clusters"]:
            # Assuming all items are ellipses for now
            center = pc.tgcToCV2(cluster["position"]["x"], cluster["position"]["z"], image_scale)
            center = (center[1], center[0]) # In point coordinates, not pixel
            width = cluster["radius"] / image_scale
            height = cluster["radius"] / image_scale
            rotation = - cluster["rotation"]["y"] * math.pi / 180.0 # Inverted degrees, cv2 uses clockwise radians

            '''center – The rectangle mass center.
            size – Width and height of the rectangle.
            angle – The rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.'''

            bounding_box_of_ellipse =  (center, (width, height), rotation)

            cv2.ellipse(im, bounding_box_of_ellipse, color, thickness=-1, lineType=cv2.LINE_AA) 
Example #27
Source File: colordescriptor.py    From flask-image-search with MIT License
def describe(self, image):
        # convert the image to the HSV color space and initialize
        # the features used to quantify the image
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        features = []

        # grab the dimensions and compute the center of the image
        (h, w) = image.shape[:2]
        (cX, cY) = (int(w * 0.5), int(h * 0.5))

        # divide the image into four rectangles/segments (top-left,
        # top-right, bottom-right, bottom-left)
        segments = [(0, cX, 0, cY), (cX, w, 0, cY),
                    (cX, w, cY, h), (0, cX, cY, h)]

        # construct an elliptical mask representing the center of the
        # image
        (axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
        ellipMask = np.zeros(image.shape[:2], dtype="uint8")
        cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)

        # loop over the segments
        for (startX, endX, startY, endY) in segments:
            # construct a mask for each corner of the image, subtracting
            # the elliptical center from it
            cornerMask = np.zeros(image.shape[:2], dtype="uint8")
            cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
            cornerMask = cv2.subtract(cornerMask, ellipMask)

            # extract a color histogram from the image, then update the
            # feature vector
            hist = self.histogram(image, cornerMask)
            features.extend(hist)

        # extract a color histogram from the elliptical region and
        # update the feature vector
        hist = self.histogram(image, ellipMask)
        features.extend(hist)

        # return the feature vector
        return features 
Example #28
Source File: gaussian_mix.py    From OpenCV-Python-Tutorial with MIT License
def draw_gaussain(img, mean, cov, color):
    x, y = np.int32(mean)
    w, u, vt = cv2.SVDecomp(cov)
    ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
    s1, s2 = np.sqrt(w)*3.0
    cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.LINE_AA) 
Example #29
Source File: contours.py    From OpenCV-Python-Tutorial with MIT License
def make_image():
    img = np.zeros((500, 500), np.uint8)
    black, white = 0, 255
    for i in xrange(6):
        dx = int((i%2)*250 - 30)
        dy = int((i/2.)*150)

        if i == 0:
            for j in xrange(11):
                angle = (j+5)*np.pi/21
                c, s = np.cos(angle), np.sin(angle)
                x1, y1 = np.int32([dx+100+j*10-80*c, dy+100-90*s])
                x2, y2 = np.int32([dx+100+j*10-30*c, dy+100-30*s])
                cv2.line(img, (x1, y1), (x2, y2), white)

        cv2.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+150, dy+150), (40,10), 0, 0, 360, black, -1 )
        cv2.ellipse( img, (dx+27, dy+100), (20,35), 0, 0, 360, white, -1 )
        cv2.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 )
    return img 
Example #30
Source File: CutImageClass.py    From water-meter-system-complete with MIT License
def DrawROIOLDOLDOLD(self, url):
        im = cv2.imread(url)

        d = 2
        d_eclipse = 1

        x = self.reference_p0[0]
        y = self.reference_p0[1]
        h, w =  self.ref0.shape[:2]
        cv2.rectangle(im,(x-d,y-d),(x+w+2*d,y+h+2*d),(0,0,255),d)
        cv2.putText(im,'ref0',(x,y-5),0,0.4,(0,0,255))

        x = self.reference_p1[0]
        y = self.reference_p1[1]
        h, w =  self.ref1.shape[:2]
        cv2.rectangle(im,(x-d,y-d),(x+w+2*d,y+h+2*d),(0,0,255),d)
        cv2.putText(im,'ref1',(x,y-5),0,0.4,(0,0,255))
        
        x = self.reference_p2[0]
        y = self.reference_p2[1]
        h, w =  self.ref2.shape[:2]
        cv2.rectangle(im,(x-d,y-d),(x+w+2*d,y+h+2*d),(0,0,255),d)
        cv2.putText(im,'ref2',(x,y-5),0,0.4,(0,0,255))

        if self.AnalogReadOutEnabled:
            for zeiger in self.Analog_Counter:
                x, y, w, h = zeiger[1]
                cv2.rectangle(im,(x-d,y-d),(x+w+2*d,y+h+2*d),(0,255,0),d)
                xct = int(x+w/2)+1
                yct = int(y+h/2)+1
                cv2.line(im,(xct-5,yct),(xct+5,yct),(0,255,0),1)
                cv2.line(im,(xct,yct-5),(xct,yct+5),(0,255,0),1)
                cv2.ellipse(im, (xct, yct), (int(w/2)+2*d_eclipse, int(h/2)+2*d_eclipse), 0, 0, 360, (0,255,0), d_eclipse)
                cv2.putText(im,zeiger[0],(x,y-5),0,0.4,(0,255,0))
        for zeiger in self.Digital_Digit:
            x, y, w, h = zeiger[1]
            cv2.rectangle(im,(x-d,y-d),(x+w+2*d,y+h+2*d),(0,255,0),d)
            cv2.putText(im,zeiger[0],(x,y-5),0,0.4,(0,255,0))
        cv2.imwrite('./image_tmp/roi.jpg', im)