Python cv2.drawMarker() Examples

The following are 6 code examples of cv2.drawMarker(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
Example #1
Source File: Fic.py    From RENAT with Apache License 2.0    7 votes
def click_on(self,point,element=u'//canvas',mark_screen=False):
        """ Click on a screen coordinate of an element

        ``point`` is an ``(x, y)`` offset inside ``element`` (default `//canvas`).
        When ``mark_screen`` is truthy, a screenshot is captured and a green
        cross is drawn at the click position, overwriting the screenshot file.
        """
        x,y = point
        el = self._selenium.get_webelement(element)
        action = ActionChains(self._selenium.driver)
        # Move to the element origin first, then to the target offset.
        # NOTE(review): two separate perform() calls — presumably required so
        # the second offset is measured from the element, not from wherever
        # the pointer previously was; confirm against the Selenium driver used.
        action.move_to_element_with_offset(el,0,0).perform()
        action.move_to_element_with_offset(el,x,y).perform()
        if mark_screen:
            BuiltIn().log("Marked to screen on (%d,%d)" % (x,y))
            img_file = self.capture_screenshot(extra='_click')
            # Re-read the captured screenshot, draw a green cross at the click
            # point, and write the annotated image back over the same file.
            img = cv2.imread(Common.get_result_path() + '/' + img_file)
            cv2.drawMarker(img, (int(x),int(y)), color=(0,255,0), markerType=cv2.MARKER_CROSS, thickness=2)
            cv2.imwrite(Common.get_result_path() + '/' + img_file,img)
        action.click().perform()
        self.wait_until_loaded()
        BuiltIn().log("Clicked on element %s at (%d,%d)" % (element,x,y)) 
Example #2
Source File: iva.py    From ActionAI with GNU General Public License v3.0    5 votes
def annotate(self, image):
        """Draw this instance's bounding box, activity label and centroid
        marker onto *image* and return the annotated image."""
        left, top, right, bottom = self.bbox
        red = (0, 0, 255)
        image = cv2.rectangle(image, (left, top), (right, bottom), red, 3)
        label_origin = (left, top - 10)
        image = cv2.putText(image, self.activity, label_origin,
                            cv2.FONT_HERSHEY_SIMPLEX, 0.9, red, 2)
        # Blue marker (type 0 = cross) at the instance centroid.
        image = cv2.drawMarker(image, self.centroid, (255, 0, 0), 0, 30, 4)
        return image 
Example #3
Source File: utils.py    From ActionAI with GNU General Public License v3.0    5 votes
def annotate(self, tracker, image, boxes):
        '''
        Draw a tracked person instance onto *image*: the pose skeleton and,
        when *boxes* is truthy, the bounding box, a marker at the centroid,
        the tracker id and the activity label.

        :param tracker: tracked instance exposing pose_dict, bbox, centroid,
            id, activity and skeleton_color.
        :param image: BGR image (numpy array) to draw on.
        :param boxes: when truthy, also draw box/marker/labels.
        :return: the annotated image.
        '''
        # Skeleton: connect each body-part pair in the topology; pairs whose
        # parts are missing from this tracker's pose are skipped.
        for row in topology:
            try:
                a_idx, b_idx = row[2:]
                a_part, b_part = cfg.body_dict[int(a_idx.data.cpu().numpy())], cfg.body_dict[int(b_idx.data.cpu().numpy())]
                a_coord, b_coord = tracker.pose_dict[a_part], tracker.pose_dict[b_part]
                cv2.line(image, a_coord, b_coord, tracker.skeleton_color, 2)
            except KeyError:
                pass

        if boxes:
            try:
                x1, y1, x2, y2 = tracker.bbox
                image = cv2.rectangle(image, (x1 - self.offset, y1 - self.offset), 
                                             (x2 + self.offset, y2 + self.offset), 
                                             self.box_color, 2) 
                image = cv2.drawMarker(image, tracker.centroid, self.centroid_color, 0, 30, self.thickness) 
                cv2.putText(image, tracker.id, (x1 - self.offset, y1 - self.offset), \
                                   cv2.FONT_HERSHEY_SIMPLEX, self.fontScale, self.text_color, self.thickness) 
                # BUG FIX: was `self.offest` (typo) — it raised AttributeError,
                # which the bare `except:` swallowed, so the activity label was
                # never drawn.
                cv2.putText(image, str(tracker.activity), (x1 - self.offset, y1 - self.offset), \
                                   cv2.FONT_HERSHEY_SIMPLEX, self.fontScale, self.text_color, self.thickness) 
            except Exception:
                # Best-effort: trackers without a usable bbox stay unannotated.
                # (Narrowed from a bare `except:` so Ctrl-C isn't swallowed.)
                pass

        return image 
Example #4
Source File: Fic.py    From RENAT with Apache License 2.0    5 votes
def move_to(self,x=u'0',y=u'0',delay=u'1s',element=u'//canvas',mark_screen=False):
        """ Moves the pointer to a screen coordinate of the element

        Default element is `//canvas`. ``x``/``y`` may be numbers or numeric
        strings (Robot Framework passes arguments as strings).
        """
        # BUG FIX: defaults are strings (u'0'), but the %d log formats below
        # raised TypeError on string input — normalize to int once up front.
        x, y = int(x), int(y)
        action = ActionChains(self._selenium.driver)
        # Settle on the element origin before moving to the target offset.
        action.move_to_element_with_offset(self._selenium.get_webelement(element), 0, 0).perform()
        time.sleep(5)
        action.move_to_element_with_offset(self._selenium.get_webelement(element), x, y).perform()
        time.sleep(DateTime.convert_time(delay))
        if mark_screen:
            BuiltIn().log("Marked to screen on (%d,%d)" % (x,y))
            img_file = self.capture_screenshot(extra='_move')
            # Draw a green cross at the pointer position on the screenshot
            # and overwrite the screenshot file in place.
            img = cv2.imread(Common.get_result_path() + '/' + img_file)
            cv2.drawMarker(img, (x,y), color=(0,255,0), markerType=cv2.MARKER_CROSS, thickness=2)
            cv2.imwrite(Common.get_result_path() + '/' + img_file,img)
        BuiltIn().log('Moved the pointer to (%d,%d)' % (x,y)) 
Example #5
Source File: extract_pose.py    From HumanRecognition with MIT License    5 votes
def draw_marker(img, position):
    """Draw a green marker on *img* at *position*, given as (row, col)."""
    # cv2 expects points as (x, y) == (col, row), so swap the components.
    col, row = position[1], position[0]
    cv2.drawMarker(img, (col, row), (0, 255, 0)) 
Example #6
Source File: mtcnn_frame_set_select_extract_plugin.py    From deepstar with BSD 3-Clause Clear License    4 votes
def _extract_faces(self, frame_set_path, frame_id, transform_set_path,
                       transform_set_id, detector, offset_percent,
                       min_confidence, debug_):
        """
        This method extracts faces from a frame.

        Each detection at or above min_confidence has its box padded by
        offset_percent (half on each side, clamped to the image), a transform
        row inserted with the keypoints re-based onto the crop, and the
        cropped face written out as a maximum-quality JPEG.

        :param str frame_set_path: The frame set path.
        :param int frame_id: The frame ID.
        :param str transform_set_path: The transform set path.
        :param int transform_set_id: The transform set ID.
        :param MTCNN detector: The detector to use to detect faces.
        :param float offset_percent: Fraction of the box width/height added as
            padding around the detected box (half on each side).
        :param float min_confidence: The minimum confidence value required to
            accept/reject a detected face.
        :param bool debug_: True if should place markers on landmarks else
            False if should not.
        :rtype: None
        """

        frame_path = FrameFile.path(frame_set_path, frame_id, 'jpg')
        img = cv2.imread(frame_path)
        img_height, img_width = img.shape[:2]

        results = detector.detect_faces(img)
        for r in results:
            # Skip low-confidence detections.
            if r['confidence'] < min_confidence:
                continue

            x, y, width, height = r['box']

            # Pad the detected box by offset_percent, clamping to the image
            # bounds so the crop never indexes outside the frame.
            adjusted_x = int(max(0, x - (0.5 * width * offset_percent)))
            adjusted_y = int(max(0, y - (0.5 * height * offset_percent)))
            t = x + width + (0.5 * width * offset_percent)
            adjusted_right_x = int(min(img_width, t))
            t = y + height + (0.5 * height * offset_percent)
            adjusted_bottom_y = int(min(img_height, t))

            # Keypoints are stored relative to the padded crop's origin so
            # they can be drawn directly onto the cropped image.
            metadata = {'face': {k: [v[0] - adjusted_x, v[1] - adjusted_y]
                                 for k, v in r['keypoints'].items()}}

            transform_id = TransformModel().insert(transform_set_id, frame_id,
                                                   json.dumps(metadata), 0)

            face_crop = img[adjusted_y:adjusted_bottom_y,
                            adjusted_x:adjusted_right_x]
            output_path = TransformFile.path(transform_set_path, transform_id,
                                             'jpg')

            if debug_ is True:
                # Mark each landmark with a red diamond on the crop.
                for _, v in metadata['face'].items():
                    cv2.drawMarker(face_crop, tuple(v), (0, 0, 255),
                                   markerType=cv2.MARKER_DIAMOND,
                                   markerSize=15, thickness=2)

            cv2.imwrite(output_path, face_crop,
                        [cv2.IMWRITE_JPEG_QUALITY, 100])

            debug(f'Transform with ID {transform_id:08d} at {output_path} '
                  f'extracted from frame with ID {frame_id:08d} at '
                  f'{frame_path}', 4)