Python cv2.FONT_HERSHEY_SIMPLEX Examples

The following are 30 code examples showing how to use cv2.FONT_HERSHEY_SIMPLEX. These examples are extracted from open source projects; the project, author, file, and license are listed above each example.

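As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the label text and output file name are placeholders) of the pattern most of these snippets share: measure the label with cv2.getTextSize, draw a filled background rectangle, then render the text with cv2.putText using cv2.FONT_HERSHEY_SIMPLEX.

import cv2
import numpy as np

# blank BGR canvas to draw on
img = np.zeros((120, 400, 3), dtype=np.uint8)
text = "label 0.87"
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.75
thickness = 1

# getTextSize returns ((text_width, text_height), baseline)
(text_w, text_h), baseline = cv2.getTextSize(text, font, font_scale, thickness)

# org is the bottom-left corner of the text string
org = (10, 10 + text_h)
cv2.rectangle(img, (org[0], org[1] - text_h - baseline),
              (org[0] + text_w, org[1] + baseline), (50, 50, 50), cv2.FILLED)
cv2.putText(img, text, org, font, font_scale, (0, 255, 0), thickness, cv2.LINE_AA)
cv2.imwrite("font_hershey_simplex_demo.png", img)  # placeholder output path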

Example 1
Project: Tensorflow-YOLOv3   Author: kcosta42   File: utils.py    License: MIT License
def draw_boxes_frame(frame, frame_size, boxes_dicts, class_names, input_size):
  """Draws detected boxes in a video frame"""
  boxes_dict = boxes_dicts[0]
  resize_factor = (frame_size[0] / input_size[1], frame_size[1] / input_size[0])
  for cls in range(len(class_names)):
    boxes = boxes_dict[cls]
    color = (0, 0, 255)
    if np.size(boxes) != 0:
      for box in boxes:
        xy = box[:4]
        xy = [int(xy[i] * resize_factor[i % 2]) for i in range(4)]
        cv2.rectangle(frame, (xy[0], xy[1]), (xy[2], xy[3]), color[::-1], 2)
        (text_width, text_height), baseline = cv2.getTextSize(class_names[cls],
                                                              cv2.FONT_HERSHEY_SIMPLEX,
                                                              0.75, 1)
        cv2.rectangle(frame,
                      (xy[0], xy[1]),
                      (xy[0] + text_width, xy[1] - text_height - baseline),
                      color[::-1],
                      thickness=cv2.FILLED)
        cv2.putText(frame, class_names[cls], (xy[0], xy[1] - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1) 
Example 2
Project: Parsing-R-CNN   Author: soeaver   File: vis.py    License: MIT License
def vis_class(img, pos, class_str, bg_color):
    """Visualizes the class."""
    font_color = cfg.VIS.SHOW_CLASS.COLOR
    font_scale = cfg.VIS.SHOW_CLASS.FONT_SCALE

    x0, y0 = int(pos[0]), int(pos[1])
    # Compute text size.
    txt = class_str
    font = cv2.FONT_HERSHEY_SIMPLEX
    ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
    # Place text background.
    back_tl = x0, y0 - int(1.3 * txt_h)
    back_br = x0 + txt_w, y0
    cv2.rectangle(img, back_tl, back_br, bg_color, -1)
    # Show text.
    txt_tl = x0, y0 - int(0.3 * txt_h)
    cv2.putText(img, txt, txt_tl, font, font_scale, font_color, lineType=cv2.LINE_AA)

    return img 
Example 3
Project: OpenCV-Computer-Vision-Projects-with-Python   Author: PacktPublishing   File: chapter2.py    License: MIT License
def ProcessFrame(self, frame):
        # segment arm region
        segment = self.SegmentArm(frame)

        # make a copy of the segmented image to draw on
        draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)

        # draw some helpers for correctly placing hand
        cv2.circle(draw, (self.imgWidth//2, self.imgHeight//2), 3, [255,102,0], 2)
        cv2.rectangle(draw, (self.imgWidth//3, self.imgHeight//3), (self.imgWidth*2//3, self.imgHeight*2//3), [255,102,0], 2)

        # find the hull of the segmented area, and based on that find the
        # convexity defects
        [contours,defects] = self.FindHullDefects(segment)

        # detect the number of fingers depending on the contours and convexity defects
        # draw defects that belong to fingers green, others red
        [nofingers,draw] = self.DetectNumberFingers(contours, defects, draw)

        # print number of fingers on image
        cv2.putText(draw, str(nofingers), (30,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))
        return draw 
Example 4
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: main.py    License: MIT License
def draw():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("for_task3/*.txt")]
    txt_files = [s + ".txt" for s in filenames]
    for txt in txt_files:
        image = cv2.imread('test_original/'+ txt.split('/')[1].split('.')[0]+'.jpg', cv2.IMREAD_COLOR)
        with open(txt, 'r') as txt_file:
            for line in csv.reader(txt_file):
                box = [int(string, 10) for string in line[0:8]]
                if len(line) < 9:
                    print(txt)
                cv2.rectangle(image, (box[0], box[1]), (box[4], box[5]), (0,255,0), 2)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(image, line[8].upper(), (box[0],box[1]), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.imwrite('task2_result_draw/'+ txt.split('/')[1].split('.')[0]+'.jpg', image) 
Example 5
Project: exposure   Author: yuanming-hu   File: net.py    License: MIT License
def draw_value_reward_score(self, img, value, reward, score):
    img = img.copy()
    # Average with 0.5 for semi-transparent background
    img[:14] = img[:14] * 0.5 + 0.25
    img[50:] = img[50:] * 0.5 + 0.25
    if self.cfg.gan == 'ls':
      red = -np.tanh(float(score) / 1) * 0.5 + 0.5
    else:
      red = -np.tanh(float(score) / 10.0) * 0.5 + 0.5
    top = '%+.2f %+.2f' % (value, reward)
    cv2.putText(img, top, (3, 7), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
                (1.0, 1.0 - red, 1.0 - red))
    score = '%+.3f' % score
    cv2.putText(img, score, (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                (1.0, 1.0 - red, 1.0 - red))
    return img 
Example 6
Project: exposure   Author: yuanming-hu   File: util.py    License: MIT License
def vis_images_and_indexs(images, features, dir, name):
  # indexs = np.reshape(indexs, (len(indexs),))
  # print('visualizing images and indexs: ', images.shape, indexs.shape)
  id_imgs = []
  for feature in features:
    img = np.ones((64, 64, 3))
    cv2.putText(img,
                str(feature), (4, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
                (1.0, 0.0, 0.0))
    id_imgs.append(img)
  id_imgs = np.stack(id_imgs, axis=0)
  # print('id imgs: ', id_imgs.shape)

  vis_imgs = np.vstack([images, id_imgs])
  image = make_image_grid(vis_imgs, per_row=images.shape[0])
  vis_dir = dir
  try:
    os.mkdir(vis_dir)
  except:
    pass
  cv2.imwrite(os.path.join(vis_dir, name + '.png'), image[:, :, ::-1] * 255.0) 
Example 7
Project: Res2Net-maskrcnn   Author: Res2Net   File: predictor.py    License: MIT License
def overlay_class_names(self, image, predictions):
        """
        Adds detected class names and scores in the positions defined by the
        top-left corner of the predicted bounding box

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `scores` and `labels`.
        """
        scores = predictions.get_field("scores").tolist()
        labels = predictions.get_field("labels").tolist()
        labels = [self.CATEGORIES[i] for i in labels]
        boxes = predictions.bbox

        template = "{}: {:.2f}"
        for box, score, label in zip(boxes, scores, labels):
            x, y = box[:2]
            s = template.format(label, score)
            cv2.putText(
                image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1
            )

        return image 
Example 8
Project: Advanced_Lane_Lines   Author: ChengZhongShen   File: image_process.py    License: MIT License
def test_yellow_white_thresh_images(src, dst, y_low=(10,50,0), y_high=(30,255,255), w_low=(180,180,180), w_high=(255,255,255)):
	"""
	apply the threshold to images in the src folder and write the output to the dst folder
	"""
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_threshed = yellow_white_thresh(img, y_low, y_high, w_low, w_high)
		
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# convert binary to RGB and multiply by 255 for visualization; a value of 1 is not visible once written to file
		image_threshed = cv2.cvtColor(image_threshed*255, cv2.COLOR_GRAY2RGB)
		
		# HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
		# V = HSV[:,:,2]
		# brightness = np.mean(V)
		# info_str = "brightness is: {}".format(int(brightness))
		# cv2.putText(image_threshed, info_str, (50,700), cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,255),2)
		
		cv2.imwrite(out_image, image_threshed) 
Example 9
Project: R2CNN.pytorch   Author: Xiangyu-CAS   File: predictor.py    License: MIT License
def overlay_class_names(self, image, predictions):
        """
        Adds detected class names and scores in the positions defined by the
        top-left corner of the predicted bounding box

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `scores` and `labels`.
        """
        scores = predictions.get_field("scores").tolist()
        labels = predictions.get_field("labels").tolist()
        labels = [self.CATEGORIES[i] for i in labels]
        boxes = predictions.bbox

        template = "{}: {:.2f}"
        for box, score, label in zip(boxes, scores, labels):
            x, y = box[:2]
            s = template.format(label, score)
            cv2.putText(
                image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1
            )

        return image 
Example 10
Project: R2CNN.pytorch   Author: Xiangyu-CAS   File: inference_engine.py    License: MIT License
def overlay_class_names(self, image, predictions):
        """
        Adds detected class names and scores in the positions defined by the
        top-left corner of the predicted bounding box

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `scores` and `labels`.
        """
        scores = predictions.get_field("scores").tolist()
        labels = predictions.get_field("labels").tolist()
        labels = [self.CATEGORIES[i] for i in labels]
        boxes = predictions.bbox

        template = "{}: {:.2f}"
        for box, score, label in zip(boxes, scores, labels):
            x, y = box[:2]
            s = template.format(label, score)
            cv2.putText(
                image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1
            )

        return image 
Example 11
Project: TFFRCNN   Author: CharlesShang   File: kitti2pascalvoc.py    License: MIT License
def _draw_on_image(img, objs, class_sets_dict):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    for ind, obj in enumerate(objs):
        if obj['box'] is None: continue
        x1, y1, x2, y2 = obj['box'].astype(int)
        cls_id = class_sets_dict[obj['class']]
        if obj['class'] == 'dontcare':
            cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 1)
            continue
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), colors[cls_id % len(colors)], 1)
        text = '{:s}*|'.format(obj['class'][:3]) if obj['difficult'] == 1 else '{:s}|'.format(obj['class'][:3])
        text += '{:.1f}|'.format(obj['truncation'])
        text += str(obj['occlusion'])
        cv2.putText(img, text, (x1-2, y2-2), font, 0.5, (255, 0, 255), 1)
    return img 
Example 12
Project: TFFRCNN   Author: CharlesShang   File: train.py    License: MIT License
def _draw_boxes_to_image(im, res):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None: continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            cv2.putText(image, text, (int(x1), int(y1)), font, 0.6, colors[ind % len(colors)], 1)
            cnt = (cnt + 1)
    return image 
Example 13
Project: buzzard   Author: airware   File: examples.py    License: Apache License 2.0
def create_text_mask(text, font_face=cv2.FONT_HERSHEY_SIMPLEX, font_scale=2, thickness=2):
    """Build a binary image with text drawn in it"""
    color = [1]

    (w, h), _ = cv2.getTextSize(
        text, fontFace=font_face, fontScale=font_scale, thickness=thickness
    )
    border = 30
    dst = np.zeros((h + border, w + border), dtype='uint8')
    cv2.putText(
        dst, text=text, org=(border // 2, h + border // 2),
        fontFace=font_face, fontScale=font_scale,
        thickness=thickness, color=color
    )

    ymask = dst.any(1).cumsum()
    ymask = (ymask != 0) & (ymask != ymask[-1])
    xmask = dst.any(0).cumsum()
    xmask = (xmask != 0) & (xmask != xmask[-1])
    dst = dst[ymask][:, xmask]

    return dst.astype(bool) 
Example 14
Project: TripletLossFace   Author: aangfanboy   File: face_recognition_tester.py    License: MIT License
def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
		min_im, image, all_frames = self.detect_which(path, get_face)

		for (confidance, who), frame in zip(min_im, all_frames):
			color = self.colors[who]
			x1, x2, y1, y2 = frame
			cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
			cv2.putText(image, f"{who}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA) # -{round(float(confidance), 2)}

		if turn_rgb:
			image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

		if show:
			cv2.imshow("a", image)
			cv2.waitKey(0)

		return image 
Example 15
Project: TripletLossFace   Author: aangfanboy   File: main_engine.py    License: MIT License
def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
		min_im, image, all_frames = self.index_image(path, get_face)

		for (confidance, who), frame in zip(min_im, all_frames):
			try:
				color = self.colors[str(who)]
				x1, x2, y1, y2 = frame
				cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
				cv2.putText(image, f"id: {str(who)}- conf:{abs(round(float(confidance), 2))}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA) # -{round(float(confidance), 2)}
			except KeyError:
				continue

		if turn_rgb:
			image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

		if show:
			cv2.imshow("a", image)
			cv2.waitKey(1)

		return image, min_im, all_frames 
Example 16
Project: OpenCV-Python-Tutorial   Author: makelove   File: QR_Scaner1.py    License: MIT License
def main():
    fp = 'macbookPro.jpg'
    # image = Image.open(fp)
    # image.show()
    image = cv2.imread(fp)
    barcodes = decode(image)
    decoded = barcodes[0]
    print(decoded)
    #
    url: bytes = decoded.data
    url = url.decode()
    print(url)
    # rect
    rect = decoded.rect
    print(rect)  # Rect(left=19, top=19, width=292, height=292)

    # loop over the detected barcodes
    for barcode in barcodes:
        # extract the bounding box location of the barcode and draw the
        # bounding box surrounding the barcode on the image
        (x, y, w, h) = barcode.rect
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # the barcode data is a bytes object so if we want to draw it on
        # our output image we need to convert it to a string first
        barcodeData = barcode.data.decode("utf-8")
        barcodeType = barcode.type

        # draw the barcode data and barcode type on the image
        text = "{} ({})".format(barcodeData, barcodeType)
        cv2.putText(image, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (0, 0, 255), 2)

        # print the barcode type and data to the terminal
        print("[INFO] Found {} barcode: {}".format(barcodeType, barcodeData))

    # show the output image
    cv2.imshow("Image", image)
    # cv2.imwrite('macbook_qr_rect.jpg', image)
    cv2.waitKey(0)  # press any key to exit
Example 17
Project: centerpose   Author: tensorboy   File: debugger.py    License: MIT License
def add_coco_bbox(self, bbox, cat, conf=1, show_txt=True, img_id='default'): 
        bbox = np.array(bbox, dtype=np.int32)
        # cat = (int(cat) + 1) % 80
        cat = int(cat)
        # print('cat', cat, self.names[cat])
        c = self.colors[cat][0][0].tolist()
        if self.theme == 'white':
            c = (255 - np.array(c)).tolist()
        txt = '{}{:.1f}'.format(self.names[cat], conf)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
        cv2.rectangle(
          self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2)
        if show_txt:
            cv2.rectangle(self.imgs[img_id],
                        (bbox[0], bbox[1] - cat_size[1] - 2),
                        (bbox[0] + cat_size[0], bbox[1] - 2), c, -1)
            cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2), 
                      font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA) 
Example 18
Project: generative_adversary   Author: ermongroup   File: utils.py    License: GNU General Public License v3.0
def label_images(images, labels):
    font = cv.FONT_HERSHEY_SIMPLEX
    new_imgs = []
    for i, img in enumerate(images):
        new_img = ((img.copy() + 1.) * 127.5).astype(np.uint8)
        if new_img.shape[-1] == 3:
            new_img = new_img[..., ::-1]
            new_img = cv.resize(new_img, (100, 100), interpolation=cv.INTER_LINEAR)
            new_img = cv.putText(new_img, str(labels[i]), (10, 30), font, 1, (255, 255, 255), 2, cv.LINE_AA)
            new_img = cv.copyMakeBorder(new_img, top=2, bottom=2, left=2, right=2, borderType=cv.BORDER_CONSTANT,
                                        value=(255, 255, 255))
        else:
            new_img = np.squeeze(new_img)
            new_img = cv.resize(new_img, (100, 100), interpolation=cv.INTER_LINEAR)
            new_img = cv.putText(new_img, str(labels[i]), (10, 30), font, 1, (255), 2, cv.LINE_AA)
            new_img = new_img[..., None]

        new_img = (new_img / 127.5 - 1.0).astype(np.float32)
        new_imgs.append(new_img[..., ::-1])
    return np.stack(new_imgs, axis=0) 
Example 19
Project: tf_ctpn   Author: Sanster   File: demo.py    License: MIT License
def draw_rpn_boxes(img, img_name, boxes, scores, im_scale, nms, save_dir):
    """
    :param boxes: [(x1, y1, x2, y2)]
    """
    boxes = recover_scale(boxes, im_scale)

    base_name = img_name.split('/')[-1]
    color = (0, 255, 0)
    out = img.copy()

    if nms:
        boxes, scores = TextDetector.pre_process(boxes, scores)
        file_name = "%s_rpn_nms.jpg" % base_name
    else:
        file_name = "%s_rpn.jpg" % base_name

    for i, box in enumerate(boxes):
        cv2.rectangle(out, (box[0], box[1]), (box[2], box[3]), color, 2)
        cx = int((box[0] + box[2]) / 2)
        cy = int((box[1] + box[3]) / 2)
        cv2.putText(out, "%.01f" % scores[i], (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (255, 0, 0))

    cv2.imwrite(os.path.join(save_dir, file_name), out) 
Example 20
Project: RetinaNet   Author: xmyqsh   File: train.py    License: MIT License
def _draw_boxes_to_image(im, res):
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None: continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            cv2.putText(image, text, (int(x1), int(y1)), font, 0.6, colors[ind % len(colors)], 1)
            cnt = (cnt + 1)
    return image 
Example 21
Project: EdgeRealtimeVideoAnalytics   Author: RedisGears   File: server.py    License: Apache License 2.0
def get_last(self):
        ''' Gets latest from camera and model '''
        p = self.conn.pipeline()
        p.xrevrange(self.camera, count=1)  # Latest frame
        p.xrevrange(self.boxes, count=1)   # Latest boxes
        cmsg, bmsg = p.execute()
        if cmsg:
            last_id = cmsg[0][0].decode('utf-8')
            label = f'{self.camera}:{last_id}'
            data = io.BytesIO(cmsg[0][1][self.field])
            img = Image.open(data)
            if bmsg:
                boxes = np.fromstring(bmsg[0][1]['boxes'.encode('utf-8')][1:-1], sep=',')
                label += ' people: {}'.format(bmsg[0][1]['people'.encode('utf-8')].decode('utf-8'))
                for box in range(int(bmsg[0][1]['people'.encode('utf-8')])):  # Draw boxes
                    x1 = boxes[box*4]
                    y1 = boxes[box*4+1]
                    x2 = boxes[box*4+2]
                    y2 = boxes[box*4+3]
                    draw = ImageDraw.Draw(img)
                    draw.rectangle(((x1, y1), (x2, y2)), width=5, outline='red')
            arr = np.array(img)
            arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
            cv2.putText(arr, label, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 1, cv2.LINE_AA)
            ret, img = cv2.imencode('.jpg', arr)
            return img.tobytes()
        else:
            # TODO: put a "we're experiencing technical difficulties" image
            pass 
Example 22
Project: python--   Author: Leezhen2014   File: BlurDetection.py    License: GNU General Public License v3.0
def _drawImgFonts(self, img, strContent):
        """
        Draw text on the image
        :param img: image object in OpenCV format
        :param strContent: text content to write on the image
        :return:
        """
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontSize = 5
        # image, text to add, origin (bottom-left corner of the text), font, font scale, color, thickness
        cv2.putText(img, strContent, (0, 200), font, fontSize, (0, 255, 0), 6)

        return img 
Example 23
Project: RPiNDVI   Author: robintw   File: ndvi.py    License: MIT License
def label(image, text):
    """
    Labels the given image with the given text
    """
    return cv2.putText(image, text, (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 255) 
Example 24
Project: HorizonNet   Author: sunset1995   File: panostretch.py    License: MIT License
def visualize_pano_stretch(stretched_img, stretched_cor, title):
    '''
    Helper function for visualizing the effect of pano_stretch
    '''
    thickness = 2
    color = (0, 255, 0)
    for i in range(4):
        xys = pano_connect_points(stretched_cor[i*2], stretched_cor[(i*2+2) % 8], z=-50)
        xys = xys.astype(int)
        blue_split = np.where((xys[1:, 0] - xys[:-1, 0]) < 0)[0]
        if len(blue_split) == 0:
            cv2.polylines(stretched_img, [xys], False, color, 2)
        else:
            t = blue_split[0] + 1
            cv2.polylines(stretched_img, [xys[:t]], False, color, thickness)
            cv2.polylines(stretched_img, [xys[t:]], False, color, thickness)

    for i in range(4):
        xys = pano_connect_points(stretched_cor[i*2+1], stretched_cor[(i*2+3) % 8], z=50)
        xys = xys.astype(int)
        blue_split = np.where((xys[1:, 0] - xys[:-1, 0]) < 0)[0]
        if len(blue_split) == 0:
            cv2.polylines(stretched_img, [xys], False, color, 2)
        else:
            t = blue_split[0] + 1
            cv2.polylines(stretched_img, [xys[:t]], False, color, thickness)
            cv2.polylines(stretched_img, [xys[t:]], False, color, thickness)

    cv2.putText(stretched_img, title, (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                (0, 0, 0), 2, cv2.LINE_AA)

    return stretched_img.astype(np.uint8) 
Example 25
Project: exposure   Author: yuanming-hu   File: filters.py    License: MIT License
def draw_high_res_text(self, text, canvas):
    cv2.putText(
        canvas,
        text, (30, 128),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.8, (0, 0, 0),
        thickness=5)
    return canvas 
Example 26
Project: exposure   Author: yuanming-hu   File: filters.py    License: MIT License
def visualize_filter(self, debug_info, canvas):
    exposure = debug_info['filter_parameters'][0]
    if canvas.shape[0] == 64:
      cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
      cv2.putText(canvas, 'EV %+.2f' % exposure, (8, 48),
                  cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0))
    else:
      self.draw_high_res_text('Exposure %+.2f' % exposure, canvas) 
Example 27
Project: exposure   Author: yuanming-hu   File: filters.py    License: MIT License
def visualize_filter(self, debug_info, canvas):
    gamma = debug_info['filter_parameters']
    cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
    cv2.putText(canvas, 'G 1/%.2f' % (1.0 / gamma), (8, 48),
                cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0)) 
Example 28
Project: exposure   Author: yuanming-hu   File: filters.py    License: MIT License
def visualize_filter(self, debug_info, canvas):
    exposure = debug_info['filter_parameters'][0]
    cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
    cv2.putText(canvas, 'B&W%+.2f' % exposure, (8, 48),
                cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0)) 
Example 29
Project: exposure   Author: yuanming-hu   File: filters.py    License: MIT License
def visualize_filter(self, debug_info, canvas):
    level = list(map(float, debug_info['filter_parameters']))
    level[1] += 1
    cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
    cv2.putText(canvas, '%.2f %.2f' % tuple(level), (8, 48),
                cv2.FONT_HERSHEY_SIMPLEX, 0.25, (0, 0, 0)) 
Example 30
Project: exposure   Author: yuanming-hu   File: filters.py    License: MIT License
def visualize_filter(self, debug_info, canvas):
    exposure = debug_info['filter_parameters'][0]
    if canvas.shape[0] == 64:
      cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
      cv2.putText(canvas, 'S %+.2f' % exposure, (8, 48),
                  cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0))
    else:
      self.draw_high_res_text('Saturation %+.2f' % exposure, canvas)