Python cv2.CV_AA Examples

The following are 30 code examples of cv2.CV_AA, the anti-aliased line-type flag from the OpenCV 2.x Python bindings (OpenCV 3 and later expose the same value as cv2.LINE_AA). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
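Because the constant only exists in the 2.x bindings, many of the examples below guard it with a version check. As a minimal compatibility sketch (not taken from any of the projects listed here), the flag can be resolved on any OpenCV version like this:

import cv2
import numpy as np

# cv2.CV_AA exists only in the OpenCV 2.x Python bindings; OpenCV 3+ exposes the
# same value (16) as cv2.LINE_AA. getattr keeps the lookup safe on either version.
LINE_AA = getattr(cv2, 'CV_AA', getattr(cv2, 'LINE_AA', 16))

img = np.zeros((200, 200, 3), np.uint8)
cv2.line(img, (10, 10), (190, 190), (0, 255, 0), 2, LINE_AA)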
Example #1
Source File: sunrgbd_utils.py    From H3DNet with MIT License
def draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):
    ''' Draw 3d bounding box in image
        qs: (8,2) array of vertices for the 3d box in following order:
            1 -------- 0
           /|         /|
          2 -------- 3 .
          | |        | |
          . 5 -------- 4
          |/         |/
          6 -------- 7
    '''
    qs = qs.astype(np.int32)
    for k in range(0,4):
       #http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
       i,j=k,(k+1)%4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA) # use LINE_AA for opencv3

       i,j=k+4,(k+1)%4 + 4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)

       i,j=k,k+4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
    return image 
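For reference, a minimal usage sketch for the helper above (the corner values are made up for illustration, and it assumes an OpenCV 2.x build where cv2.CV_AA still exists):

import cv2
import numpy as np

image = np.zeros((480, 640, 3), np.uint8)
# Eight projected corners in the order documented in the docstring (illustrative values).
qs = np.array([[400, 100], [200, 100], [200, 300], [400, 300],
               [440, 140], [240, 140], [240, 340], [440, 340]], dtype=np.float32)
image = draw_projected_box3d(image, qs, color=(0, 255, 0), thickness=1)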
Example #2
Source File: utils.py    From reading-frustum-pointnets-code with Apache License 2.0
def draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):
    ''' Draw 3d bounding box in image
        qs: (8,2) array of vertices for the 3d box in following order:
            1 -------- 0
           /|         /|
          2 -------- 3 .
          | |        | |
          . 5 -------- 4
          |/         |/
          6 -------- 7
    '''
    qs = qs.astype(np.int32)
    for k in range(0,4):
       #http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
       i,j=k,(k+1)%4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA) # use LINE_AA for opencv3

       i,j=k+4,(k+1)%4 + 4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)

       i,j=k,k+4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
    return image 
Example #3
Source File: kitti_util.py    From reading-frustum-pointnets-code with Apache License 2.0
def draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):
    ''' Draw 3d bounding box in image
        qs: (8,3) array of vertices for the 3d box in following order:
            1 -------- 0
           /|         /|
          2 -------- 3 .
          | |        | |
          . 5 -------- 4
          |/         |/
          6 -------- 7
    '''
    qs = qs.astype(np.int32)
    for k in range(0,4):
       # Ref: http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
       i,j=k,(k+1)%4
       # use LINE_AA for opencv3
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)

       i,j=k+4,(k+1)%4 + 4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)

       i,j=k,k+4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
    return image 
Example #4
Source File: kitti_util.py    From frustum-pointnets with Apache License 2.0
def draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):
    ''' Draw 3d bounding box in image
        qs: (8,3) array of vertices for the 3d box in following order:
            1 -------- 0
           /|         /|
          2 -------- 3 .
          | |        | |
          . 5 -------- 4
          |/         |/
          6 -------- 7
    '''
    qs = qs.astype(np.int32)
    for k in range(0,4):
       # Ref: http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
       i,j=k,(k+1)%4
       # use LINE_AA for opencv3
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)

       i,j=k+4,(k+1)%4 + 4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)

       i,j=k,k+4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
    return image 
Example #5
Source File: test.py    From yolo_v1_tensorflow_guiyu with MIT License
def draw_result(self, img, result):   # output the detection results
        print("hell")
        print(len(result))
        for i in range(len(result)):
            x = int(result[i][1])
            y = int(result[i][2])
            w = int(result[i][3] / 2)
            h = int(result[i][4] / 2)
            cv2.rectangle(img, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
            cv2.rectangle(img, (x - w, y - h - 20),
                          (x + w, y - h), (125, 125, 125), -1)
            lineType = cv2.LINE_AA if cv2.__version__ > '3' else cv2.CV_AA
            cv2.putText(
                img, result[i][0] + ' : %.2f' % result[i][5],
                (x - w + 5, y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 0), 1, lineType) 
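Note that the lineType selection above compares version strings lexicographically, which happens to work for '2.x' and '4.x' but is fragile. A hedged alternative (an assumption on my part, not from the project) parses the major version instead:

major = int(cv2.__version__.split('.')[0])
lineType = cv2.LINE_AA if major >= 3 else cv2.CV_AA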
Example #6
Source File: utils.py    From frustum-pointnets with Apache License 2.0
def draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):
    ''' Draw 3d bounding box in image
        qs: (8,2) array of vertices for the 3d box in following order:
            1 -------- 0
           /|         /|
          2 -------- 3 .
          | |        | |
          . 5 -------- 4
          |/         |/
          6 -------- 7
    '''
    qs = qs.astype(np.int32)
    for k in range(0,4):
       #http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
       i,j=k,(k+1)%4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA) # use LINE_AA for opencv3

       i,j=k+4,(k+1)%4 + 4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)

       i,j=k,k+4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
    return image 
Example #7
Source File: vis.py    From DetectAndTrack with Apache License 2.0
def vis_mask(img, mask, col, alpha=0.4, show_border=True, border_thick=1):
    """Visualizes a single binary mask."""

    img = img.astype(np.float32)
    idx = np.nonzero(mask)

    img[idx[0], idx[1], :] *= 1.0 - alpha
    img[idx[0], idx[1], :] += alpha * col

    if show_border:
        contours, _ = cv2.findContours(
            mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(img, contours, -1, _WHITE, border_thick,
                         cv2.CV_AA if cv2.__version__.startswith('2') else
                         cv2.LINE_AA)

    return img.astype(np.uint8) 
Example #8
Source File: vis.py    From DetectAndTrack with Apache License 2.0
def vis_class(img, pos, class_str, font_scale=0.35):
    """Visualizes the class."""
    x0, y0 = int(pos[0]), int(pos[1])
    # Compute text size.
    txt = class_str
    font = cv2.FONT_HERSHEY_SIMPLEX
    ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
    # Place text background.
    back_tl = x0, y0 - int(1.3 * txt_h)
    back_br = x0 + txt_w, y0
    cv2.rectangle(img, back_tl, back_br, _GREEN, -1)
    # Show text.
    txt_tl = x0, y0 - int(0.3 * txt_h)
    cv2.putText(img, txt, txt_tl, font, font_scale, _GRAY,
                lineType=cv2.CV_AA if cv2.__version__.startswith('2') else
                cv2.LINE_AA)
    return img 
Example #9
Source File: sunrgbd_utils.py    From votenet with MIT License
def draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):
    ''' Draw 3d bounding box in image
        qs: (8,2) array of vertices for the 3d box in following order:
            1 -------- 0
           /|         /|
          2 -------- 3 .
          | |        | |
          . 5 -------- 4
          |/         |/
          6 -------- 7
    '''
    qs = qs.astype(np.int32)
    for k in range(0,4):
       #http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
       i,j=k,(k+1)%4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA) # use LINE_AA for opencv3

       i,j=k+4,(k+1)%4 + 4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)

       i,j=k,k+4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
    return image 
Example #10
Source File: viz.py    From Object_Detection_Tracking with Apache License 2.0
def draw_mask(im, mask, alpha=0.5, color=None, show_border=True,border_thick=1):
	"""
	Overlay a mask on top of the image.

	Args:
		im: a 3-channel uint8 image in BGR
		mask: a binary 1-channel image of the same size
		color: if None, will choose automatically
	"""
	if color is None:
		color = PALETTE_RGB[np.random.choice(len(PALETTE_RGB))][::-1]


	im = np.where(np.squeeze(np.repeat((mask > 0)[:, :, None], 3, axis=2)),
				  im * (1 - alpha) + color * alpha, im)
	if show_border:
		if cv2.__version__.startswith("2"):
			contours, _ = cv2.findContours(mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
		else: # cv 3
			_,contours, _ = cv2.findContours(mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
		# cv2.CV_AA only exists in the 2.x bindings; fall back to LINE_AA otherwise
		line_type = cv2.CV_AA if cv2.__version__.startswith("2") else cv2.LINE_AA
		cv2.drawContours(im, contours, -1, (255,255,255), border_thick, lineType=line_type)

	im = im.astype('uint8')
	return im 
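A side note on the version branch above: cv2.findContours returns (image, contours, hierarchy) in OpenCV 3 but (contours, hierarchy) in 2.x and 4.x. A small compatibility sketch (the helper name find_contours_compat is hypothetical) avoids branching on the version string:

def find_contours_compat(mask, mode=cv2.RETR_CCOMP, method=cv2.CHAIN_APPROX_NONE):
    # Taking the last two return values works for OpenCV 2.x, 3.x and 4.x alike.
    results = cv2.findContours(mask.copy(), mode, method)
    contours, hierarchy = results[-2:]
    return contours, hierarchy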
Example #11
Source File: kitti_util.py    From Geo-CNN with Apache License 2.0
def draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):
    ''' Draw 3d bounding box in image
        qs: (8,3) array of vertices for the 3d box in following order:
            1 -------- 0
           /|         /|
          2 -------- 3 .
          | |        | |
          . 5 -------- 4
          |/         |/
          6 -------- 7
    '''
    qs = qs.astype(np.int32)
    for k in range(0,4):
       # Ref: http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
       i,j=k,(k+1)%4
       # use LINE_AA for opencv3
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)

       i,j=k+4,(k+1)%4 + 4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)

       i,j=k,k+4
       cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
    return image 
Example #12
Source File: common.py    From ImageAnalysis with MIT License
def draw_str(dst, pt, s):
    x = pt[0]
    y = pt[1]
    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA) 
Example #13
Source File: deconvolution.py    From PyCV-time with MIT License
def defocus_kernel(d, sz=65):
    kern = np.zeros((sz, sz), np.uint8)
    cv2.circle(kern, (sz, sz), d, 255, -1, cv2.CV_AA, shift=1)
    kern = np.float32(kern) / 255.0
    return kern 
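Here shift=1 tells cv2.circle to read the centre coordinates and the radius as fixed-point values with one fractional bit, so the circle is actually drawn at (sz/2, sz/2) with radius d/2. A rough unshifted equivalent (a hypothetical variant that loses the half-pixel precision) would be:

def defocus_kernel_unshifted(d, sz=65):
    # Same idea without fixed-point coordinates: integer centre, integer radius.
    kern = np.zeros((sz, sz), np.uint8)
    cv2.circle(kern, (sz // 2, sz // 2), d // 2, 255, -1, cv2.CV_AA)
    return np.float32(kern) / 255.0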
Example #14
Source File: gaussian_mix.py    From PyCV-time with MIT License
def draw_gaussain(img, mean, cov, color):
    x, y = np.int32(mean)
    w, u, vt = cv2.SVDecomp(cov)
    ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
    s1, s2 = np.sqrt(w)*3.0
    cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.CV_AA) 
Example #15
Source File: video.py    From PyCV-time with MIT License
def draw_quads(self, img, quads, color = (0, 255, 0)):
        img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
        img_quads.shape = quads.shape[:2] + (2,)
        for q in img_quads:
            cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.CV_AA, shift=2) 
Example #16
Source File: contours.py    From PyCV-time with MIT License
def update(levels):
        vis = np.zeros((h, w, 3), np.uint8)
        levels = levels - 3
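        # (-1, 3)[levels <= 0] selects contour index -1 (all contours) while levels > 0,
        # otherwise only contour 3; abs(levels) below limits the nesting depth drawn.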
        cv2.drawContours( vis, contours, (-1, 3)[levels <= 0], (128,255,255),
            3, cv2.CV_AA, hierarchy, abs(levels) )
        cv2.imshow('contours', vis) 
Example #17
Source File: test.py    From yolo_tensorflow with MIT License
def draw_result(self, img, result):
        for i in range(len(result)):
            x = int(result[i][1])
            y = int(result[i][2])
            w = int(result[i][3] / 2)
            h = int(result[i][4] / 2)
            cv2.rectangle(img, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
            cv2.rectangle(img, (x - w, y - h - 20),
                          (x + w, y - h), (125, 125, 125), -1)
            lineType = cv2.LINE_AA if cv2.__version__ > '3' else cv2.CV_AA
            cv2.putText(
                img, result[i][0] + ' : %.2f' % result[i][5],
                (x - w + 5, y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 0), 1, lineType) 
Example #18
Source File: main_frame.py    From Rule-based_Expert_System with GNU General Public License v2.0
def draw_lines(image, facts, contour_num):
    for i in range(contour_num):
        for fact in facts['Contour' + str(i)]:
            for line in fact.about:
                cv2.line(image, line.point1, line.point2, (0, 255, 0), 2, cv2.CV_AA) 
Example #19
Source File: tube_automask_monitor.py    From ethoscope with GNU General Public License v3.0
def draw_rois(im, all_rois):
    for roi in all_rois:
        x,y = roi.offset
        y += roi.rectangle[3]/2
        x += roi.rectangle[2]/2
        cv2.putText(im, str(roi.idx), (x,y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,0))
        black_colour,roi_colour = (0, 0,0), (0, 255,0)
        cv2.drawContours(im,[roi.polygon],-1, black_colour, 3, cv2.CV_AA)
        cv2.drawContours(im,[roi.polygon],-1, roi_colour, 1, cv2.CV_AA) 
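A side note that also applies to the two identical helpers below: under Python 2 (this project's original target) rectangle[3]/2 is integer division, but under Python 3 it yields a float, and recent OpenCV builds reject non-integer coordinates in cv2.putText. A hedged Python 3 adaptation of the offset lines would be:

x, y = roi.offset
y += roi.rectangle[3] // 2
x += roi.rectangle[2] // 2
cv2.putText(im, str(roi.idx), (int(x), int(y)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 0))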
Example #20
Source File: automask_monitor.py    From ethoscope with GNU General Public License v3.0
def draw_rois(im, all_rois):
    for roi in all_rois:
        x,y = roi.offset
        y += roi.rectangle[3]/2
        x += roi.rectangle[2]/2
        cv2.putText(im, str(roi.idx), (x,y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,0))
        black_colour,roi_colour = (0, 0,0), (0, 255,0)
        cv2.drawContours(im,[roi.polygon],-1, black_colour, 3, cv2.CV_AA)
        cv2.drawContours(im,[roi.polygon],-1, roi_colour, 1, cv2.CV_AA) 
Example #21
Source File: target_detector.py    From ethoscope with GNU General Public License v3.0
def draw_rois(im, all_rois):
    for roi in all_rois:
        x,y = roi.offset
        y += roi.rectangle[3]/2
        x += roi.rectangle[2]/2
        cv2.putText(im, str(roi.idx), (x,y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,0))
        black_colour,roi_colour = (0, 0,0), (0, 255,0)
        cv2.drawContours(im,[roi.polygon],-1, black_colour, 3, cv2.CV_AA)
        cv2.drawContours(im,[roi.polygon],-1, roi_colour, 1, cv2.CV_AA) 
Example #22
Source File: kalman_1.py    From Python-Code with MIT License
def drawCross(img, center, r, g, b):
    '''
    Draws a cross at the specified X,Y coordinates.
    Note: OpenCV reads the colour tuple as BGR, so r here fills the blue channel.
    '''
 
    d = 5
    t = 2
 
    color = (r, g, b)
 
    ctrx = center[0]
    ctry = center[1]
 
    cv2.line(img, (ctrx - d, ctry - d), (ctrx + d, ctry + d), color, t, cv2.CV_AA)
    cv2.line(img, (ctrx + d, ctry - d), (ctrx - d, ctry + d), color, t, cv2.CV_AA) 
Example #23
Source File: main.py    From YOLO-Object-Detection-Tensorflow with MIT License
def draw_result(img, result):
    for i in range(len(result)):
        x = int(result[i][1])
        y = int(result[i][2])
        w = int(result[i][3] / 2)
        h = int(result[i][4] / 2)
        cv2.rectangle(img, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
        cv2.rectangle(img, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
        cv2.putText(img, result[i][0] + ' : %.2f' % result[i][5], (x - w + 5, y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.CV_AA) 
Example #24
Source File: aircv.py    From Airtest with Apache License 2.0
def mark_point(img, point, circle=False, color=100, radius=20):
    """ 调试用的: 标记一个点 """
    x, y = point
    # cv2.rectangle(img, (x, y), (x+10, y+10), 255, 1, lineType=cv2.CV_AA)
    if circle:
        cv2.circle(img, (x, y), radius, 255, thickness=2)
    cv2.line(img, (x - radius, y), (x + radius, y), color)  # x line
    cv2.line(img, (x, y - radius), (x, y + radius), color)  # y line
    return img 
Example #25
Source File: tc_common.py    From rpitelecine with BSD 3-Clause "New" or "Revised" License
def display_shadow_text(img,x,y,text):
    """
    Displays with a grey shadow at point x,y
    """
    text_color = (255,255,255) #color as (B,G,R)
    text_shadow = (0,0,0)
    text_pos = (x,y)
    shadow_pos = (x+1,y+1)
    cv2.putText(img, text, shadow_pos, cv2.FONT_HERSHEY_PLAIN, 1.25, text_shadow, thickness=1, lineType=cv2.CV_AA)
    cv2.putText(img, text, text_pos, cv2.FONT_HERSHEY_PLAIN, 1.25, text_color, thickness=1, lineType=cv2.CV_AA)
    return img 
Example #26
Source File: hostage.py    From MADRL with MIT License
def render(self, screen_size=800, rate=10):
        import cv2
        img = np.empty((screen_size, screen_size, 3), dtype=np.uint8)
        img[...] = 255

        # Stationary objects
        if not self.is_gate_open:
            color = (128, 128, 200)
            cv2.line(img, tuple(np.array([0 * screen_size, 0.5 * screen_size]).astype(int)),
                     tuple(np.array([1 * screen_size, 0.5 * screen_size]).astype(int)), color, 1,
                     lineType=cv2.CV_AA)

            keyx_2 = np.squeeze(self.key_loc)
            assert keyx_2.shape == (2,)
            kcolor = (0, 0, 255)
            cv2.circle(img, tuple((keyx_2 * screen_size).astype(int)),
                       int(self.radius * screen_size), kcolor, -1, lineType=cv2.CV_AA)

        for ibomb, bombx_2 in enumerate(self.bomb_loc):
            assert bombx_2.shape == (2,)
            color = (0, 0, 255)
            cv2.circle(img, tuple((bombx_2 * screen_size).astype(int)),
                       int(self.bomb_radius * screen_size), color, -1, lineType=cv2.CV_AA)

        for hostage in np.asarray(self._hostages)[~self.curr_host_saved_mask]:
            color = (255, 120, 0)
            cv2.circle(img, tuple((hostage.position * screen_size).astype(int)),
                       int(hostage._radius * screen_size), color, -1, lineType=cv2.CV_AA)

        for criminal in self._criminals:
            color = (50, 50, 200)
            cv2.circle(img, tuple((criminal.position * screen_size).astype(int)),
                       int(criminal._radius * screen_size), color, -1, lineType=cv2.CV_AA)

        for rescuer in self._rescuers:
            for k in range(rescuer._n_sensors):
                color = (0, 0, 0)
                cv2.line(img, tuple((rescuer.position * screen_size).astype(int)),
                         tuple(((rescuer.position + rescuer._sensor_range * rescuer.sensors[k]) *
                                screen_size).astype(int)), color, 1, lineType=cv2.CV_AA)
                cv2.circle(img, tuple((rescuer.position * screen_size).astype(int)),
                           int(rescuer._radius * screen_size), (255, 0, 0), -1, lineType=cv2.CV_AA)

        opacity = 0.4
        bg = np.ones((screen_size, screen_size, 3), dtype=np.uint8) * 255
        cv2.addWeighted(bg, opacity, img, 1 - opacity, 0, img)
        cv2.imshow('Hostage', img)
        cv2.waitKey(rate) 
Example #27
Source File: waterworld.py    From MADRL with MIT License
def render(self, screen_size=800, rate=10, mode='human'):
        import cv2
        img = np.empty((screen_size, screen_size, 3), dtype=np.uint8)
        img[...] = 255
        # Obstacles
        for iobs, obstaclex_2 in enumerate(self.obstaclesx_No_2):
            assert obstaclex_2.shape == (2,)
            color = (128, 128, 0)
            cv2.circle(img,
                       tuple((obstaclex_2 * screen_size).astype(int)),
                       int(self.obstacle_radius * screen_size), color, -1, lineType=cv2.CV_AA)
        # Pursuers
        for pursuer in self._pursuers:
            for k in range(pursuer._n_sensors):
                color = (0, 0, 0)
                cv2.line(img,
                         tuple((pursuer.position * screen_size).astype(int)),
                         tuple(((pursuer.position + pursuer._sensor_range * pursuer.sensors[k]) *
                                screen_size).astype(int)), color, 1, lineType=cv2.CV_AA)
                cv2.circle(img,
                           tuple((pursuer.position * screen_size).astype(int)),
                           int(pursuer._radius * screen_size), (255, 0, 0), -1, lineType=cv2.CV_AA)
        # Evaders
        for evader in self._evaders:
            color = (0, 255, 0)
            cv2.circle(img,
                       tuple((evader.position * screen_size).astype(int)),
                       int(evader._radius * screen_size), color, -1, lineType=cv2.CV_AA)

        # Poison
        for poison in self._poisons:
            color = (0, 0, 255)
            cv2.circle(img,
                       tuple((poison.position * screen_size).astype(int)),
                       int(poison._radius * screen_size), color, -1, lineType=cv2.CV_AA)

        opacity = 0.4
        bg = np.ones((screen_size, screen_size, 3), dtype=np.uint8) * 255
        cv2.addWeighted(bg, opacity, img, 1 - opacity, 0, img)
        cv2.imshow('Waterworld', img)
        cv2.waitKey(rate)
        return np.asarray(img)[..., ::-1] 
Example #28
Source File: object-tracking.py    From rpi-opencv with GNU General Public License v3.0
def run_main():
    cap = cv2.VideoCapture('crash-480.mp4')
    #cap.set(3,320)
    #cap.set(4,240)

    # Read the first frame of the video
    ret, frame = cap.read()

    # Set the ROI (Region of Interest). Actually, this is a
    # rectangle of the building that we're tracking
    c,r,w,h = 427,240,50,50
    track_window = (c,r,w,h)

    # Create mask and normalized histogram
    roi = frame[r:r+h, c:c+w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 30.,32.)), np.array((180.,255.,255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
    
    while True:
        t = cv2.getTickCount()
        ret, frame = cap.read()

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)

        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        x,y,w,h = track_window
        cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
        cv2.putText(frame, 'Tracked', (x-25,y-10), cv2.FONT_HERSHEY_SIMPLEX,
            .5, (255,255,255), 1, cv2.CV_AA)
        
        t = cv2.getTickCount() - t
        print "detection time = %gms" % (t/(cv2.getTickFrequency()*1000.))
        cv2.imshow('Tracking', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows() 
Example #29
Source File: handpose_evaluation.py    From deep-prior-pp with GNU General Public License v3.0
def plotJoints(self, ax, joint, color='nice', jcolor=None, annoscale=1):
        """
        Plot connected joints
        :param ax: axis to plot on
        :param joint: joints to connect
        :param color: line color
        """

        if joint.shape[0] >= numpy.max(self.jointConnections):
            for i in range(len(self.jointConnections)):
                if isinstance(ax, numpy.ndarray):
                    if color == 'nice':
                        lc = tuple((self.jointConnectionColors[i]*255.).astype(int))
                    elif color == 'gray':
                        lc = tuple((rgb_to_gray(self.jointConnectionColors[i])*255.).astype(int))
                    else:
                        lc = color
                    cv2.line(ax, (int(numpy.rint(joint[self.jointConnections[i][0], 0])),
                                  int(numpy.rint(joint[self.jointConnections[i][0], 1]))),
                             (int(numpy.rint(joint[self.jointConnections[i][1], 0])),
                              int(numpy.rint(joint[self.jointConnections[i][1], 1]))),
                             lc, thickness=3*annoscale, lineType=cv2.CV_AA)
                else:
                    if color == 'nice':
                        lc = self.jointConnectionColors[i]
                    elif color == 'gray':
                        lc = rgb_to_gray(self.jointConnectionColors[i])
                    else:
                        lc = color
                    ax.plot(numpy.hstack((joint[self.jointConnections[i][0], 0], joint[self.jointConnections[i][1], 0])),
                            numpy.hstack((joint[self.jointConnections[i][0], 1], joint[self.jointConnections[i][1], 1])),
                            c=lc, linewidth=3.0*annoscale)
        for i in range(joint.shape[0]):
            if isinstance(ax, numpy.ndarray):
                if jcolor == 'nice':
                    jc = tuple((self.jointColors[i]*255.).astype(int))
                elif jcolor == 'gray':
                    jc = tuple((rgb_to_gray(self.jointColors[i])*255.).astype(int))
                else:
                    jc = jcolor
                cv2.circle(ax, (int(numpy.rint(joint[i, 0])), int(numpy.rint(joint[i, 1]))), 6*annoscale,
                           jc, thickness=-1, lineType=cv2.CV_AA)
            else:
                if jcolor == 'nice':
                    jc = self.jointColors[i]
                elif jcolor == 'gray':
                    jc = rgb_to_gray(self.jointColors[i])
                else:
                    jc = jcolor

                ax.scatter(joint[i, 0], joint[i, 1], marker='o', s=100,
                           c=jc) 
Example #30
Source File: viz.py    From Object_Detection_Tracking with Apache License 2.0
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
	"""Visualizes keypoints (adapted from vis_one_image).
	kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
	"""
	dataset_keypoints, _ = get_keypoints()
	kp_lines = kp_connections(dataset_keypoints)

	# Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
	cmap = plt.get_cmap('rainbow')
	colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
	colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

	# Perform the drawing on a copy of the image, to allow for blending.
	kp_mask = np.copy(img)

	# Draw mid shoulder / mid hip first for better visualization.
	mid_shoulder = (
		kps[:2, dataset_keypoints.index('right_shoulder')] +
		kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
	sc_mid_shoulder = np.minimum(
		kps[2, dataset_keypoints.index('right_shoulder')],
		kps[2, dataset_keypoints.index('left_shoulder')])
	mid_hip = (
		kps[:2, dataset_keypoints.index('right_hip')] +
		kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
	sc_mid_hip = np.minimum(
		kps[2, dataset_keypoints.index('right_hip')],
		kps[2, dataset_keypoints.index('left_hip')])
	nose_idx = dataset_keypoints.index('nose')
	if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:

		cv2.line(
			kp_mask, int_it(tuple(mid_shoulder)), int_it(tuple(kps[:2, nose_idx])),
			color=colors[len(kp_lines)], thickness=2, lineType=cv2.CV_AA)
	if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
		cv2.line(
			kp_mask, int_it(tuple(mid_shoulder)), int_it(tuple(mid_hip)),
			color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.CV_AA)

	# Draw the keypoints.
	for l in range(len(kp_lines)):
		i1 = kp_lines[l][0]
		i2 = kp_lines[l][1]
		p1 = int(kps[0, i1]), int(kps[1, i1])
		p2 = int(kps[0, i2]), int(kps[1, i2])
		if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:

			cv2.line(
				kp_mask, p1, p2,
				color=colors[l], thickness=2, lineType=cv2.CV_AA)
		if kps[2, i1] > kp_thresh:
			cv2.circle(
				kp_mask, p1,
				radius=3, color=colors[l], thickness=-1, lineType=cv2.CV_AA)
		if kps[2, i2] > kp_thresh:
			cv2.circle(
				kp_mask, p2,
				radius=3, color=colors[l], thickness=-1, lineType=cv2.CV_AA)

	# Blend the keypoints.
	return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)