Python cv2.rectangle() Examples

The following code examples show how to use cv2.rectangle(). They are taken from open-source Python projects. You can vote up the examples you find helpful or vote down those you don't.

Example 1
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: objectDetectorYOLO.py    MIT License 9 votes vote down vote up
def drawBoundingBox(self,imgcv,result):
        """Draw a labeled bounding box for every detection above the threshold.

        Detections whose confidence is below ``self.predictThresh`` are
        skipped. Boxes and labels are drawn in place; the image is returned.
        """
        for detection in result:
            left = detection['topleft']['x']
            top = detection['topleft']['y']
            right = detection['bottomright']['x']
            bottom = detection['bottomright']['y']
            confidence = detection['confidence']
            tag = detection['label']
            if confidence < self.predictThresh:
                continue
            # Main detection box.
            cv2.rectangle(imgcv, (left, top), (right, bottom), (0, 255, 0), 6)
            # Filled strip sized to the label text, sitting just above the box.
            text_size = cv2.getTextSize(tag, cv2.FONT_HERSHEY_COMPLEX, 0.5, 2)
            strip_right = left + text_size[0][0]
            strip_top = top - int(text_size[0][1])
            cv2.rectangle(imgcv, (left, top), (strip_right, strip_top), (0, 255, 0), cv2.FILLED)
            cv2.putText(imgcv, tag, (left, top), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)
        return imgcv
Example 2
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: misc.py    MIT License 8 votes vote down vote up
def show(im, allobj, S, w, h, cellx, celly):
    """Render grid-cell predictions as red boxes and display the image.

    Each obj's cell index (obj[5]) is decoded into grid column/row, the cell
    offsets obj[1]/obj[2] are added, and the squared size terms obj[3]/obj[4]
    are scaled back to pixels.
    """
    for obj in allobj:
        col = obj[5] % S
        row = obj[5] // S
        center_x = (col + obj[1]) * cellx
        center_y = (row + obj[2]) * celly
        box_w = obj[3] ** 2 * w
        box_h = obj[4] ** 2 * h
        top_left = (int(center_x - box_w / 2), int(center_y - box_h / 2))
        bottom_right = (int(center_x + box_w / 2), int(center_y + box_h / 2))
        cv2.rectangle(im, top_left, bottom_right, (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example 3
Project: coco-json-converter   Author: hazirbas   File: generate_coco_json.py    GNU General Public License v3.0 8 votes vote down vote up
def __get_annotation__(self, mask, image=None):
        """Build COCO-style annotation data from a binary mask.

        :param mask: single-channel binary mask array.
        :param image: optional RGB image; when given, contours and the
            bounding box are drawn on a BGR copy and shown for debugging.
        :return: (segmentation polygons, [x, y, w, h] bbox, RLE area).
        """
        # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x
        # but (contours, hierarchy) in OpenCV 2.x/4.x; indexing with [-2]
        # picks the contour list under either API.
        contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]

        segmentation = []
        for contour in contours:
            # Valid polygons have >= 6 coordinates (3 points)
            if contour.size >= 6:
                segmentation.append(contour.flatten().tolist())
        RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
        RLE = cocomask.merge(RLEs)
        # RLE = cocomask.encode(np.asfortranarray(mask))
        area = cocomask.area(RLE)
        [x, y, w, h] = cv2.boundingRect(mask)

        if image is not None:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
            cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.imshow("", image)
            cv2.waitKey(1)

        return segmentation, [x, y, w, h], area
Example 4
Project: PyBoof   Author: lessthanoptimal   File: object_tracking.py    Apache License 2.0 7 votes vote down vote up
def click_rect(event, x, y, flags, param):
    """Mouse callback implementing click-and-drag rectangle selection.

    Updates the module-level ``refPt`` (two corner points) and ``state``:
    0 = no/invalid selection, 1 = dragging, 2 = selection finished.
    """
    global refPt, state

    # Click and drag a rectangle
    if event == cv2.EVENT_LBUTTONDOWN:
        # Start the drag: both corners at the click position.
        refPt = [(x, y), (x, y)]
        state = 1
    elif event == cv2.EVENT_MOUSEMOVE:  # named constant instead of magic 0
        refPt[1] = (x, y)
    elif event == cv2.EVENT_LBUTTONUP:
        # Reject selections thinner than 5 px in either dimension.
        if abs(refPt[0][0] - refPt[1][0]) <= 5 or abs(refPt[0][1] - refPt[1][1]) <= 5:
            state = 0
        else:
            state = 2

# Creates a quadrilateral from the clicked points 
Example 5
Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: net_utils.py    MIT License 7 votes vote down vote up
def vis_det_and_mask(im, class_name, dets, masks, thresh=0.8):
    """Visual debugging of detections.

    Draws the first detection's box, label background and mask onto ``im``
    (in place) when its score exceeds ``thresh``, and returns the image.

    Args:
        im: image array (H x W x 3) to draw on.
        class_name: label text used to size and place the caption strip.
        dets: array with rows [x1, y1, x2, y2, score].
        masks: per-detection masks indexed as masks[i, :, :].
        thresh: minimum score for a detection to be drawn.
    """
    num_dets = np.minimum(10, dets.shape[0])
    colors_mask = random_colors(num_dets)
    colors_bbox = np.round(np.random.rand(num_dets, 3) * 255)
    # sort rois according to the coordinates, draw upper bbox first
    draw_mask = np.zeros(im.shape[:2], dtype=np.uint8)

    # NOTE(review): only the first detection is drawn (range(1)) even though
    # colors are computed for up to num_dets — confirm this is intentional;
    # range(num_dets) would draw all of them.
    for i in range(1):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        mask = masks[i, :, :]
        full_mask = unmold_mask(mask, bbox, im.shape)

        score = dets[i, -1]
        if score > thresh:
            # Rough caption-strip width: ~8 px per character plus padding.
            word_width = len(class_name)
            cv2.rectangle(im, bbox[0:2], bbox[2:4], colors_bbox[i], 2)
            cv2.rectangle(im, bbox[0:2], (bbox[0] + 18 + word_width*8, bbox[1]+15), colors_bbox[i], thickness=cv2.FILLED)
            apply_mask(im, full_mask, draw_mask, colors_mask[i], 0.5)
            draw_mask += full_mask
            cv2.putText(im, '%s' % (class_name), (bbox[0]+5, bbox[1] + 12), cv2.FONT_HERSHEY_PLAIN,
								1.0, (255,255,255), thickness=1)
    return im
Example 6
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: misc.py    MIT License 6 votes vote down vote up
def show2(im, allobj):
    """Draw each object's stored corner coordinates as a red box, then display."""
    for obj in allobj:
        top_left = (obj[1], obj[2])
        bottom_right = (obj[3], obj[4])
        cv2.rectangle(im, top_left, bottom_right, (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example 7
Project: Automatic-Identification-and-Counting-of-Blood-Cells   Author: MahmudulAlam   File: misc.py    GNU General Public License v3.0 6 votes vote down vote up
def show(im, allobj, S, w, h, cellx, celly):
    """Display `im` with one red rectangle per decoded grid-cell prediction."""
    for obj in allobj:
        # Decode the flat cell index into grid column/row.
        grid_col, grid_row = obj[5] % S, obj[5] // S
        cx = (grid_col + obj[1]) * cellx
        cy = (grid_row + obj[2]) * celly
        # Size terms are stored squared; scale to pixel half-extents.
        half_w = obj[3] ** 2 * w / 2
        half_h = obj[4] ** 2 * h / 2
        cv2.rectangle(im,
                      (int(cx - half_w), int(cy - half_h)),
                      (int(cx + half_w), int(cy + half_h)),
                      (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example 8
Project: Tensorflow-YOLOv3   Author: kcosta42   File: utils.py    MIT License 6 votes vote down vote up
def draw_boxes_frame(frame, frame_size, boxes_dicts, class_names, input_size):
  """Draws detected boxes in a video frame"""
  boxes_dict = boxes_dicts[0]
  # Scale factors mapping network-input coordinates back to frame pixels.
  resize = (frame_size[0] / input_size[1], frame_size[1] / input_size[0])
  color = (0, 0, 255)
  for cls in range(len(class_names)):
    boxes = boxes_dict[cls]
    if np.size(boxes) == 0:
      continue
    for box in boxes:
      # x coordinates scale by resize[0], y coordinates by resize[1].
      xy = [int(box[i] * resize[i % 2]) for i in range(4)]
      cv2.rectangle(frame, (xy[0], xy[1]), (xy[2], xy[3]), color[::-1], 2)
      (label_w, label_h), baseline = cv2.getTextSize(class_names[cls],
                                                     cv2.FONT_HERSHEY_SIMPLEX,
                                                     0.75, 1)
      # Filled strip above the box as a background for the label text.
      cv2.rectangle(frame,
                    (xy[0], xy[1]),
                    (xy[0] + label_w, xy[1] - label_h - baseline),
                    color[::-1],
                    thickness=cv2.FILLED)
      cv2.putText(frame, class_names[cls], (xy[0], xy[1] - baseline),
                  cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1)
Example 9
Project: faceUnionVoiceRecognition   Author: tsstss123   File: verification.py    GNU General Public License v3.0 6 votes vote down vote up
def flush_video_display(self):
        """paint to QLabel Widget

        Grabs a frame from ``self.capture``, mirrors and resizes it, draws a
        green box around each of the two detected faces in ``self.dets`` plus
        their landmark shapes, and pushes the result into ``self.image_label``.
        """
        # Return flag of read() is ignored; assumes the capture always yields
        # a frame — TODO confirm upstream guarantees this.
        _, frame = self.capture.read()

        # Convert to RGB for Qt display and mirror horizontally (selfie view).
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.flip(frame, 1)

        frame = cv2.resize(frame, (1600, 450))

        # Only draw when exactly two faces are present (left/right pair).
        if len(self.dets) == 2:
            for k, d in enumerate(self.dets):
                cv2.rectangle(frame, (d.left(), d.top()), (d.right(), d.bottom()), (0, 255, 0))
            # `sp` is presumably a landmark/shape predictor — confirm.
            shape_left = sp(frame, self.dets[0])
            shape_right = sp(frame, self.dets[1])
            draw_two_shape(frame, shape_left, shape_right, 30)

        image = QImage(frame.tobytes(), frame.shape[1], frame.shape[0], QImage.Format_RGB888)

        self.image_label.setPixmap(QPixmap.fromImage(image))
Example 10
Project: MobileNetv2-SSDLite   Author: PINTO0309   File: demo_caffe.py    MIT License 6 votes vote down vote up
def detect(imgfile):
    """Run the SSD net on one image, draw detections, and show the result.

    Returns False when the user presses ESC in the preview window, True
    otherwise (callers use this to stop a processing loop).
    """
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)

    # HWC uint8 -> CHW float32 expected by the Caffe input blob.
    img = img.astype(np.float32).transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward()
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
        top_left = (box[i][0], box[i][1])
        bottom_right = (box[i][2], box[i][3])
        cv2.rectangle(origimg, top_left, bottom_right, (0, 255, 0))
        # Keep the caption at least 15 px inside the image.
        anchor = (max(top_left[0], 15), max(top_left[1], 15))
        title = "%s:%.2f" % (COCO_CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, anchor, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)

    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27:
        return False
    return True
Example 11
Project: MobileNetv2-SSDLite   Author: PINTO0309   File: demo_caffe_voc.py    MIT License 6 votes vote down vote up
def detect(imgfile):
    """Run the SSD VOC net on `imgfile` and display boxed detections.

    Returns False if the user pressed ESC, True otherwise.
    """
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)

    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))  # HWC -> CHW for the Caffe input blob

    net.blobs['data'].data[...] = img
    out = net.forward()
    box, conf, cls = postprocess(origimg, out)

    for b, c, k in zip(box, conf, cls):
        p1, p2 = (b[0], b[1]), (b[2], b[3])
        cv2.rectangle(origimg, p1, p2, (0, 255, 0))
        # Clamp the caption anchor so it stays visible near image borders.
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (CLASSES[int(k)], c)
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)

    key = cv2.waitKey(0) & 0xff
    return key != 27  # False when ESC pressed
Example 12
Project: pytorch-lstd   Author: JiasiWang   File: eval.py    MIT License 6 votes vote down vote up
def vis_detections(im, index, dets, thresh=0.3):
    """Visual debugging of detections.

    Draws a green rectangle for each of the first (up to) 10 detections whose
    score exceeds `thresh`, then writes the image to ./vis/test<index>.jpg.

    Args:
        im: image array to draw on (modified in place).
        index: string used to build the output file name.
        dets: array with rows [x1, y1, x2, y2, score].
        thresh: minimum score for a detection to be drawn.
    """
    # (Removed a block of dead, commented-out matplotlib preview code that
    # was previously parked in the docstring position.)
    for i in range(np.minimum(10, dets.shape[0])):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        score = dets[i, -1]
        if score > thresh:
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 204, 0), 2)
    cv2.imwrite('./vis/test' + index + '.jpg', im)
Example 13
Project: pytorch-lstd   Author: JiasiWang   File: eval_rpn.py    MIT License 6 votes vote down vote up
def vis_detections(im, index, dets, thresh=0.8):
    """Debug helper: preview the image tensor with matplotlib.

    NOTE(review): the cv2 drawing/imwrite logic at the bottom is disabled —
    it sits inside the triple-quoted string and never executes. Only the
    matplotlib preview at the top runs.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    # Tensor -> numpy, shifted/scaled for display.
    npimg = im.cpu().numpy()
    npimg = (npimg + 128)/255
    print(npimg)
    print('here')
    # CHW -> HWC for imshow.
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()
    '''
    im = im.transpose(1,2,0)
    im = im.copy()
    """Visual debugging of detections."""
    for i in range(np.minimum(10, dets.shape[0])):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        score = dets[i, -1]
        if score > thresh:
            cv2.rectangle(im, (bbox[0],bbox[1]), (bbox[2], bbox[3]), (0, 204, 0), 2)
    cv2.imwrite('./vis/test'+index+'.jpg', im)
    '''
Example 14
Project: pytorch-lstd   Author: JiasiWang   File: multibox_loss.py    MIT License 6 votes vote down vote up
def vis(self, img, prior, proposal, truth, labels, cls_t, idx):
        """Debug visualization: draw ground-truth boxes (green) and positive
        proposals (blue in BGR) on the image and save it under ./vis/.

        Args:
            img: CHW image tensor, converted to an HWC numpy array below.
            prior: unused in this method.
            proposal: (P, 4) proposal boxes; drawn without scaling here,
                while truth is scaled by 300 — presumably proposals are
                already in pixels; confirm (the disabled variant scaled
                proposals by 300 too).
            truth: (T, 4) ground-truth boxes, scaled by the fixed 300 when
                drawn.
            labels: per-truth labels (read but not used for drawing).
            cls_t: per-proposal class targets; > 0 marks a positive proposal.
            idx: index used in the output file name.
        """
        # Tensor (C,H,W) -> numpy (H,W,C), then RGB -> BGR channel order.
        im = img.cpu().data
        im = im.numpy()
        im = im.transpose(1,2,0) 
        img = im[:, :, (2, 1, 0)]
        im = img.copy()
        write_flag = False
        for i in range(truth.size(0)):
            bbox = truth[i,:]
            label = labels[i]
            cv2.rectangle(im, (int(bbox[0]*300), int(bbox[1]*300)),(int(bbox[2]*300), int(bbox[3]*300)),(0,255,0),1)
        '''    
        for j in range(proposal.size(0)):
            write_flag = True
            cv2.rectangle(im, (int(proposal[j][0]*300), int(proposal[j][1]*300)), (int(proposal[j][2]*300), int(proposal[j][3]*300)),(255,255,255),1)
        '''
        # Draw only proposals with a positive class target; annotate each with
        # its target value at the box center.
        for j in range(proposal.size(0)): 
            if cls_t[j]>0:
                write_flag = True
                cv2.rectangle(im, (int(proposal[j][0]), int(proposal[j][1])), (int(proposal[j][2]), int(proposal[j][3])),(255,0,0),2)   
                cv2.putText(im, str(cls_t[j]), (int(proposal[j][0]) + int((proposal[j][2] - proposal[j][0])/2) ,int(proposal[j][1]) + int((proposal[j][3] - proposal[j][1])/2)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255)) 
        # Only write an output image when at least one proposal was drawn.
        if write_flag:
            cv2.imwrite('./vis/'+str(idx)+'.jpg', im)
            #cv2.imshow('./vis/'+str(idx)+'.jpg', im)
Example 15
Project: object-detection   Author: cristianpb   File: ssd_detection.py    MIT License 6 votes vote down vote up
def draw_boxes(self, image, df):
        """Draw one labeled box per detection row of the dataframe; return image."""
        for _, row in df.iterrows():
            print('--> Detected: ({}:{}) - Score: {:.3f}'
                  .format(row['class_id'],
                          row['class_name'],
                          row['confidence'])
                  )
            box_color = self.colors[int(row['class_id'])]
            corner_a = (row['x1'], row['y1'])
            corner_b = (row['x2'], row['y2'])
            cv2.rectangle(image, corner_a, corner_b, box_color, 6)
            # Label sits just above the top-left corner of the box.
            cv2.putText(image, row['label'], (row['x1'], row['y1'] - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 2)
        return image
Example 16
Project: face-attendance-machine   Author: matiji66   File: facerec_from_webcam_faster.py    Apache License 2.0 5 votes vote down vote up
def face_process():
    """Detect, encode and name faces in the current frame, draw boxes and
    labels on it, log timings, and display the annotated frame.

    Relies on module globals: ``rgb_small_frame`` (quarter-size RGB frame used
    for detection) and ``frame`` (full-size frame drawn on and shown).
    """
    myprint("face process start",time.time())
    # Find all the faces and face encodings in the current frame of video
    # face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
    myprint('face_locations start', time.time())
    face_locations = face_recognition.face_locations(rgb_small_frame, model="hog")
    myprint('face_locations end', time.time())
    myprint('face_encodings start', time.time())
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    myprint('face_encodings end', time.time())
    face_names = []
    for face_encoding in face_encodings:
        # optimize start: KNN-style vote — accumulate rank*weight per class,
        # then sort and take the top-1 class
        name, dis = vote_class(face_encoding)
        # optimize end (same rank*weight accumulation and top-1 selection)
        face_names.append(name)  # collect the recognized name for this face

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        myprint('putText start', time.time())
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        myprint("putText end " + name, time.time())
        # say hello and save record to file
        myprint('process_face_records start', time.time())
        process_face_records(name)
        myprint('process_face_records end', time.time())

    # Display the resulting image
    cv2.imshow('Video', frame)
    myprint("face process end", time.time())
Example 17
Project: yolov3-tf2   Author: zzh8829   File: utils.py    MIT License 5 votes vote down vote up
def draw_outputs(img, outputs, class_names):
    """Draw boxes and 'class score' captions for the first image in a batch."""
    boxes, objectness, classes, nums = outputs
    boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0]
    # (width, height) scale for the normalized box coordinates.
    wh = np.flip(img.shape[0:2])
    for i in range(nums):
        top_left = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        bottom_right = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, top_left, bottom_right, (255, 0, 0), 2)
        caption = '{} {:.4f}'.format(class_names[int(classes[i])], objectness[i])
        img = cv2.putText(img, caption, top_left,
                          cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
    return img
Example 18
Project: yolov3-tf2   Author: zzh8829   File: utils.py    MIT License 5 votes vote down vote up
def draw_labels(x, y, class_names):
    """Draw ground-truth boxes and class names on an image.

    Args:
        x: image tensor (converted via .numpy(); drawn on in place).
        y: label tensor whose last axis is [x1, y1, x2, y2, class].
        class_names: list mapping class index -> display name.

    Returns:
        The annotated image array.
    """
    img = x.numpy()
    boxes, classes = tf.split(y, (4, 1), axis=-1)
    classes = classes[..., 0]
    # (width, height) scale for the normalized box coordinates.
    wh = np.flip(img.shape[0:2])
    for i in range(len(boxes)):
        x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
        x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
        # Cast the class element to int before list indexing, matching
        # draw_outputs above (a raw tensor element cannot index a list).
        img = cv2.putText(img, class_names[int(classes[i])],
                          x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL,
                          1, (0, 0, 255), 2)
    return img
Example 19
Project: YOLOv1_tensorflow_windows   Author: FatherRen   File: main.py    GNU General Public License v3.0 5 votes vote down vote up
def draw_result(self, img, result):
        """Draw each result as a green box with a grey caption strip above it.

        Each result row is [class_name, cx, cy, w, h, score]: box center plus
        full width/height in pixels.
        """
        for item in result:
            cx, cy = int(item[1]), int(item[2])
            half_w, half_h = int(item[3] / 2), int(item[4] / 2)
            cv2.rectangle(img, (cx - half_w, cy - half_h),
                          (cx + half_w, cy + half_h), (0, 255, 0), 2)
            # Filled grey strip above the box as a caption background.
            cv2.rectangle(img, (cx - half_w, cy - half_h - 20),
                          (cx + half_w, cy - half_h), (125, 125, 125), -1)
            cv2.putText(img, item[0] + ' : %.2f' % item[5],
                        (cx - half_w + 5, cy - half_h - 7),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 0, 0), 1, cv2.LINE_AA)
Example 20
Project: vehicle_blind_spot_monitoring   Author: abdullahalsaidi16   File: blind spot monitoring.py    GNU General Public License v3.0 5 votes vote down vote up
def my_meanshift():
 """Track the ROI in the current global frame with mean-shift.

 Reads/updates module globals (frame, track_window, roi_hist, term_crit,
 n_chnl, hsv, x, y) and returns the frame with the tracked window drawn.
 """
 global term_crit , n_chnl , frame , track_window , roi_hist ,hsv,x,y
 # Work in HSV; equalize the tracked channel to stabilize back-projection.
 hsv = cv2.cvtColor(frame , cv2.COLOR_BGR2HSV)
 hsv[:,:,n_chnl] = cv2.equalizeHist(hsv[:,:,n_chnl])
 # Back-project the ROI histogram to get the probability map for mean-shift.
 dst = cv2.calcBackProject([hsv] , [n_chnl] , roi_hist , [ 0,180 ], 1)

 ret , track_window = cv2.meanShift(dst,track_window , term_crit)
 x , y , w , h = track_window
 # Draw the updated tracking window on the original frame.
 img = cv2.rectangle(frame , (x,y) , (x+w ,y+h) , 255 , 2)
 return img
Example 21
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: demo.py    Apache License 2.0 5 votes vote down vote up
def draw_detection(frame, det, class_names):
    """Draw a single normalized detection tuple on `frame` with its caption."""
    klass, score, x0, y0, x1, y1 = det
    label = class_names[int(klass)]
    height = frame.shape[0]
    width = frame.shape[1]
    # denormalize detections from [0,1] to the frame size
    top_left = tuple(map(int, (x0 * width, y0 * height)))
    bottom_right = tuple(map(int, (x1 * width, y1 * height)))
    logging.info("detection: %s %s", label, score)
    cv2.rectangle(frame, top_left, bottom_right, (0, 0, 255), 2)
    # Caption goes a few pixels above the top-left corner.
    caption_pos = (top_left[0], top_left[1] - 5)
    cv2.putText(frame, "{} {}".format(label, score), caption_pos,
                cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5, (0, 0, 255))
Example 22
Project: pypriv   Author: soeaver   File: vis_mask_rcnn.py    MIT License 5 votes vote down vote up
def vis_class(img, pos, class_str, font_scale=0.75):
    """Visualizes the class: text on a filled green background at `pos`."""
    anchor_x, anchor_y = int(pos[0]), int(pos[1])
    font = cv2.FONT_HERSHEY_SIMPLEX
    (txt_w, txt_h), _ = cv2.getTextSize(class_str, font, font_scale, 1)
    # Filled background slightly taller than the text itself.
    cv2.rectangle(img,
                  (anchor_x, anchor_y - int(1.3 * txt_h)),
                  (anchor_x + txt_w, anchor_y),
                  _GREEN, -1)
    # Baseline sits a bit above the anchor so the text is centered in the box.
    cv2.putText(img, class_str, (anchor_x, anchor_y - int(0.3 * txt_h)),
                font, font_scale, _GRAY, lineType=cv2.LINE_AA)
    return img
Example 23
Project: pypriv   Author: soeaver   File: vis_mask_rcnn.py    MIT License 5 votes vote down vote up
def vis_bbox(img, bbox, thick=2):
    """Visualizes a bounding box given as (x, y, width, height)."""
    x, y, w, h = bbox
    top_left = (int(x), int(y))
    bottom_right = (int(x + w), int(y + h))
    cv2.rectangle(img, top_left, bottom_right, _GREEN, thickness=thick)
    return img
Example 24
Project: pypriv   Author: soeaver   File: visualize.py    MIT License 5 votes vote down vote up
def draw_bbox(im, objs, max_obj=100, draw_text=True):
    """Draw up to `max_obj` object boxes (and optional captions) on `im`.

    Each obj dict provides 'bbox' [x1, y1, x2, y2], a top-level 'color' used
    for the rectangle, and an 'attribute' dict with 'color' and 'class_name'
    used for the caption.

    NOTE(review): the rectangle uses objs[i]['color'] while the caption uses
    objs[i]['attribute']['color'] — confirm this mismatch is intentional.
    Python 2 style (`xrange`) is kept as-is.
    """
    # im = im.astype(np.float32, copy=True)
    for i in xrange(min(len(objs), max_obj)):
        bbox = objs[i]['bbox']
        color = objs[i]['attribute']['color']
        class_name = objs[i]['attribute']['class_name']
        cv2.rectangle(im, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), objs[i]['color'], 2)
        if draw_text:
            # Caption drawn just inside the top-left corner of the box.
            cv2.putText(im, '{:s} {:.3f}'.format(str(class_name), objs[i]['bbox_confidence']),
                        (int(bbox[0] + 5), int(bbox[1] + 15)), CVFONT0, 0.5, color, thickness=1)

    return im
Example 25
Project: pypriv   Author: soeaver   File: visualize.py    MIT License 5 votes vote down vote up
def draw_fancybbox(im, objs, max_obj=100, alpha=0.4, attri=None):
    """Draw translucent boxes with caption bars via PIL and return the
    alpha-blended result as a numpy array.

    Boxes are drawn on two PIL layers — outlines on `vis`, filled caption
    bars on `mask` — which are blended together at the end.

    :param im: numpy image array.
    :param objs: list of dicts with 'bbox', 'bbox_confidence' and an
        'attribute' dict holding 'color' and 'class_name'.
    :param max_obj: draw at most this many objects.
    :param alpha: blend factor between the two layers.
    :param attri: optional list of attribute keys to print in a side panel.
    """
    vis = Image.fromarray(im)
    draw1 = ImageDraw.Draw(vis)
    mask = Image.fromarray(im.copy())
    draw2 = ImageDraw.Draw(mask)
    for i in xrange(min(len(objs), max_obj)):
        bbox = objs[i]['bbox']
        color = objs[i]['attribute']['color']
        class_name = objs[i]['attribute']['class_name']
        # Outline on the base layer, filled 24-px caption bar on the mask layer.
        draw1.rectangle((int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])), outline=tuple(color))
        draw2.rectangle((int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[1]) + 24), fill=tuple(color))
        draw2.text((int(bbox[0] + 5), int(bbox[1]) + 2),
                   '{:s} {:.3f}'.format(str(class_name), objs[i]['bbox_confidence']), fill=(255, 255, 255), font=FONT20)

        if attri:
            # Shift the attribute panel up if it would run past the bottom edge.
            y_shift = min(im.shape[0] - (int(bbox[1]) + 25 + 25 * len(attri)), 0)
            left_top = (int(bbox[0]) - 110, int(bbox[1]) + 25 + y_shift)
            right_bottom = (int(bbox[0]) - 10, int(bbox[1]) + 25 + y_shift + 25 * len(attri))
            draw1.rectangle((left_top[0], left_top[1], right_bottom[0], right_bottom[1]), fill=(32, 32, 32))
            for j in xrange(len(attri)):
                try:
                    draw1.text((left_top[0] + 5, left_top[1] + 2 + j * 25), '{}: {}'.
                               format(attri[j], objs[i]['attribute'][attri[j]]), fill=(255, 255, 255), font=FONT20)
                except:
                    # NOTE(review): bare except silently skips missing/odd
                    # attribute values — deliberate best-effort, kept as-is.
                    pass

    return np.array(Image.blend(vis, mask, alpha))
Example 26
Project: NAO   Author: AISTLAB   File: wsNaoVisionMT.py    MIT License 5 votes vote down vote up
def run(self):
    """Vision monitor loop: grab frames, threshold ball/gate ROIs, compute
    bounding info, and (optionally) render debug windows until stopped.

    Runs while ``self._startMonitor`` is true; window display is gated by
    ``self._showWindows``.
    """
    if self._showWindows:
      cv2.namedWindow("raw")
      cv2.namedWindow("hsv")
      cv2.namedWindow("ball")
      cv2.namedWindow("gate")
      cv2.setMouseCallback("hsv",self.on_mouse)
    else:
      cv2.destroyAllWindows()
    while self._startMonitor:
      self.getRawImage()
      # Throttle the loop to roughly 10 Hz.
      time.sleep(0.1)
      self.getHSV()
      # Threshold masks for the ball and gate color ranges.
      self._threshBall=self.getROI(self._ball_min,self._ball_max)
      self._threshGate=self.getROI(self._gate_min,self._gate_max)
      gx1,gy1,gw1,gh1=self.getBoundingRectangle(self._threshGate.copy())
      
      self.getLines(self.getROI(self._ball_min,self._ball_max))
      
      self._gateBounding=(gx1,gy1,gw1,gh1)
      # Nearest obstacle rectangle, derived from the same gate mask.
      gx2,gy2,gw2,gh2=self.getNearstRectangle(self._threshGate)
      self._nearstObstacleBounding=(gx2,gy2,gw2,gh2)
      
      x,y,r=self.getBallImageInfo()
      self._ballImageInfo=(x,y,r)
      self._ballSpaceDistance=self.getSpaceBallDistance()
      
      if self._showWindows:
        # Ball circle + center dot, gate box (green), obstacle box (red),
        # obstacle stats text, detected line, then all debug windows.
        cv2.circle(self._raw,(x,y),r,(255,255,0),2)
        cv2.circle(self._raw,(x,y),2,(0,255,255),2)
        cv2.rectangle(self._raw,(gx1,gy1),(gx1+gw1,gy1+gh1),(0,255,0),2)
        cv2.rectangle(self._raw,(gx2,gy2),(gx2+gw2,gy2+gh2),(0,0,255),2)
        cv2.putText(self._raw,"%.2f %.2f %.2f %.2f"%(gx2,gy2,gw2,gh2),\
                    (10,20),cv2.FONT_HERSHEY_PLAIN,1.2,(0,0,255))
        cv2.line(self._raw, self._line[0],self._line[1], (0, 0, 255),2)
        cv2.imshow("raw",self._raw)
        cv2.imshow("hsv",self._hsv)
        cv2.imshow("ball",self._threshBall)
        cv2.imshow("gate",self._threshGate)
        cv2.waitKey(10)
Example 27
Project: ARPET   Author: juliagarriga   File: cmt_tracker.py    MIT License 5 votes vote down vote up
def track(self, out):
        """Main tracking loop: detect a person in each frame, hand off to the
        object tracker, and display frames until ESC or the tracker stops.

        :param out: output sink passed through to ``track_object``.
        """
        cv2.namedWindow(self.window_name)

        stop = False

        while not stop:

            # Obtain new frame
            frame, t = self.frames.next()

            if frame is not None:

                found_persons = self.locate(frame)

                if found_persons:
                    # TODO: IF MORE THAN ONE PERSON IS FOUND, CHECK FOR SIMILARITIES BETWEEN THE TRACKING
                    (startX, startY, endX, endY) = found_persons[0].astype(int)

                    # Detection bounding box
                    cv2.rectangle(frame, (startX, startY), (endX, endY), (0,255,0), 2, 1)

                    # Convert corners to (x, y, width, height) for the tracker.
                    rect = np.array([startX, startY, endX - startX, endY - startY])

                    stop = self.track_object(frame, out, rect)

                # Upscale 3x and convert color order for display.
                frame = cv2.cvtColor(cv2.resize(frame, (0, 0), fx=3, fy=3), cv2.COLOR_RGB2BGR)
                cv2.imshow(self.window_name, frame)

                # Exit if ESC pressed
                k = cv2.waitKey(1) & 0xff
                if k == 27 : break

        cv2.destroyWindow(self.window_name)
Example 28
Project: ARPET   Author: juliagarriga   File: util.py    MIT License 5 votes vote down vote up
def get_rect(im, title='get_rect'):
	"""Interactively select a rectangle on `im` with the mouse.

	Opens a window, lets the user click-drag a rectangle, and returns the
	normalized (top-left, bottom-right) corner pair once the drag finishes.
	"""
	mouse_params = {'tl': None, 'br': None, 'current_pos': None,
		'released_once': False}

	cv2.namedWindow(title)
	cv2.moveWindow(title, 100, 100)

	def onMouse(event, x, y, flags, param):
		# Track the cursor; 'tl' is set on first press, 'br' only after the
		# button has been released at least once (completing the drag).
		param['current_pos'] = (x, y)

		if param['tl'] is not None and not (flags & cv2.EVENT_FLAG_LBUTTON):
			param['released_once'] = True

		if flags & cv2.EVENT_FLAG_LBUTTON:
			if param['tl'] is None:
				param['tl'] = param['current_pos']
			elif param['released_once']:
				param['br'] = param['current_pos']

	cv2.setMouseCallback(title, onMouse, mouse_params)
	cv2.imshow(title, im)

	# Redraw a preview rectangle until the selection is completed.
	while mouse_params['br'] is None:
		im_draw = np.copy(im)

		if mouse_params['tl'] is not None:
			cv2.rectangle(im_draw, mouse_params['tl'],
				mouse_params['current_pos'], (255, 0, 0))

		cv2.imshow(title, im_draw)
		_ = cv2.waitKey(10)

	cv2.destroyWindow(title)

	# Normalize so tl is the min corner and br the max corner, regardless of
	# drag direction.
	tl = (min(mouse_params['tl'][0], mouse_params['br'][0]),
		min(mouse_params['tl'][1], mouse_params['br'][1]))
	br = (max(mouse_params['tl'][0], mouse_params['br'][0]),
		max(mouse_params['tl'][1], mouse_params['br'][1]))

	return (tl, br)
Example 29
Project: ARPET   Author: juliagarriga   File: tracker.py    MIT License 5 votes vote down vote up
def track(self, out=None):
        """Detection/tracking loop: show each frame, optionally record it,
        and hand detected persons to the object tracker until ESC or stop.

        :param out: optional video writer; frames are written when provided.
        """
        cv2.namedWindow(self.window_name)

        stop = False

        while not stop:

            # Obtain new frame
            frame, t = self.frames.next()

            if frame is not None:

                found_persons = self.locate(frame)

                # Upscale 3x and convert color order for display/recording.
                # NOTE(review): unlike cmt_tracker.track, the frame is shown
                # and written BEFORE the detection box is drawn on it.
                frame = cv2.cvtColor(cv2.resize(frame, (0, 0), fx=3, fy=3), cv2.COLOR_RGB2BGR)
                cv2.imshow(self.window_name, frame)

                if out is not None:
                    out.write(frame) 

                if found_persons:
                    (startX, startY, endX, endY) = found_persons[0].astype(int)

                    # Detection bounding box
                    cv2.rectangle(frame, (startX, startY), (endX, endY), (0,255,0), 2, 1)

                    # Convert corners to (x, y, width, height) for the tracker.
                    rect = np.array([startX, startY, endX - startX, endY - startY])           

                    stop = self.track_object(frame, out, rect)

                # Exit if ESC pressed
                k = cv2.waitKey(1) & 0xff
                if k == 27 : break

        cv2.destroyWindow(self.window_name)
Example 30
Project: motion-tracking   Author: dansbecker   File: crop_vis.py    MIT License 5 votes vote down vote up
def show_img(img, boxes=None, window_name="Happy Dance Image", msec_to_show_for=1500, 
             save=False, filepath='None'):
    """Show an image (or save it to disk), optionally with bounding boxes.

    Args:
    ----
        img: np.ndarray
        boxes (optional): dct of bounding boxes where the keys hold the name
            (actual or predicted) and the values the coordinates of the boxes
        window_name (optional): str
        msec_to_show_for (optional): int
        save (optional): bool; when True, write to `filepath` instead of showing
        filepath (optional): str
    """
    # Drawing is in place — work on a copy to protect the caller's image.
    canvas = img.copy()
    if boxes:
        palette = {'actual': (125, 255, 0), 'predicted': (0, 25, 255)}
        for box_kind, coords in boxes.items():
            cv2.rectangle(canvas,
                          pt1=(coords[0], coords[1]),
                          pt2=(coords[2], coords[3]),
                          color=palette[box_kind],
                          thickness=2)
    if save:
        cv2.imwrite(filepath, canvas)
    else:
        cv2.imshow(window_name, canvas)
        cv2.waitKey(msec_to_show_for)
        cv2.destroyWindow(window_name)
Example 31
Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: net_utils.py    MIT License 5 votes vote down vote up
def vis_detections(im, class_name, dets, thresh=0.8):
    """Visual debugging of detections: green boxes plus 'name: score' text."""
    limit = np.minimum(10, dets.shape[0])
    for i in range(limit):
        score = dets[i, -1]
        if score <= thresh:
            continue
        corners = tuple(int(np.round(v)) for v in dets[i, :4])
        cv2.rectangle(im, corners[0:2], corners[2:4], (0, 204, 0), 2)
        # Caption just inside the top-left corner of the box.
        cv2.putText(im, '%s: %.3f' % (class_name, score),
                    (corners[0], corners[1] + 15),
                    cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 255), thickness=1)
    return im

# Borrow from matterport mask R-CNN implementation 
Example 32
Project: Deformable-ConvNets   Author: guanfuchen   File: tester.py    MIT License 5 votes vote down vote up
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :param threshold: minimum score for a detection to be drawn
    :return: BGR image with boxes and captions drawn
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            # list() keeps this working on Python 3, where map() returns a
            # lazy iterator that cannot be indexed.
            bbox = list(map(int, bbox))
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example 33
Project: Deformable-ConvNets   Author: guanfuchen   File: tester.py    MIT License 5 votes vote down vote up
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    Visualize all detections in one image.

    :param im_array: [b=1 c h w] image in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: factor the boxes are multiplied by before drawing
    :param cfg: config object providing network.PIXEL_MEANS for un-normalization
    :param threshold: minimum score for a detection to be drawn
    :return: BGR numpy image with boxes and '<class> <score>' labels drawn
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            # list(...) is required on Python 3: map() returns a one-shot
            # iterator there, and bbox[0] below would raise TypeError.
            bbox = list(map(int, bbox))
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example 34
Project: Deformable-ConvNets   Author: guanfuchen   File: tester.py    MIT License 5 votes vote down vote up
def draw_all_detection(im_array, detections, class_names, scale, cfg, threshold=1e-1):
    """
    Visualize all detections in one image.

    :param im_array: [b=1 c h w] image in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: factor the boxes are multiplied by before drawing
    :param cfg: config object providing network.PIXEL_MEANS for un-normalization
    :param threshold: minimum score for a detection to be drawn
    :return: BGR numpy image with boxes and '<class> <score>' labels drawn
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            if score < threshold:
                continue
            # list(...) is required on Python 3: map() returns a one-shot
            # iterator there, and bbox[0] below would raise TypeError.
            bbox = list(map(int, bbox))
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example 35
Project: Automatic-Identification-and-Counting-of-Blood-Cells   Author: MahmudulAlam   File: misc.py    GNU General Public License v3.0 5 votes vote down vote up
def show2(im, allobj):
    """Draw every object's box on ``im`` in red and display the result.

    Each obj is indexable as (_, x1, y1, x2, y2, ...); the window stays open
    until any key is pressed, then all windows are closed.
    """
    for obj in allobj:
        top_left = (obj[1], obj[2])
        bottom_right = (obj[3], obj[4])
        cv2.rectangle(im, top_left, bottom_right, (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example 36
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: boxing.py    MIT License 5 votes vote down vote up
def draw():
    """Draw annotated boxes on every image listed in ``jpglist.txt``.

    For each image name read from ``box_path + 'jpglist.txt'``, the matching
    label file (same basename, ``.txt``, under ``image_path``) is parsed; each
    line holds comma-separated quad coordinates (8 values) plus a transcript.
    The axis-aligned box (x1, y1)-(x3, y3) is drawn in blue and the annotated
    image is written to ``box_path``.
    """
    # 'with' guarantees both file handles are closed (the original leaked them)
    with open(box_path + 'jpglist.txt') as f:
        for line_num, line in enumerate(f, start=1):
            print('Image:', line_num)
            name = line.strip('\n')
            img = cv2.imread(image_path + name)

            # NOTE: the original used name.strip('.jpg'), which strips the
            # character set {., j, p, g} from both ends and can mangle names
            # such as 'g123.jpg'; rsplit removes exactly the extension.
            label_name = name.rsplit('.', 1)[0]

            # read each coordinate and draw box
            with open(image_path + label_name + '.txt') as f_txt:
                for line_txt in f_txt:
                    coor = line_txt.split(',')
                    x1 = int(coor[0].strip('\''))
                    y1 = int(coor[1].strip('\''))
                    x3 = int(coor[4].strip('\''))
                    y3 = int(coor[5].strip('\''))
                    cv2.rectangle(img, (x1, y1), (x3, y3), (255, 0, 0), 1)
            cv2.imwrite(box_path + name, img)
Example 37
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: data_provider.py    MIT License 5 votes vote down vote up
def generator(vis=False):
    """Endlessly yield ([image], bboxes, im_info) training samples.

    Images are visited in a freshly shuffled order on every epoch. Samples
    whose label file is missing or empty are skipped, as is any sample that
    raises while loading (the exception is printed and iteration continues).
    With ``vis=True`` each image is shown with its ground-truth boxes first.
    """
    image_list = np.array(get_training_data())
    print('{} training images in {}'.format(image_list.shape[0], DATA_FOLDER))
    order = np.arange(0, image_list.shape[0])
    while True:
        np.random.shuffle(order)
        for idx in order:
            try:
                im_fn = image_list[idx]
                img = cv2.imread(im_fn)
                height, width, channels = img.shape
                im_info = np.array([height, width, channels]).reshape([1, 3])

                # locate the ground-truth label file next to DATA_FOLDER/label
                base = os.path.splitext(os.path.split(im_fn)[1])[0]
                txt_fn = os.path.join(DATA_FOLDER, "label", base + '.txt')
                if not os.path.exists(txt_fn):
                    print("Ground truth for image {} not exist!".format(im_fn))
                    continue
                bbox = load_annoataion(txt_fn)
                if len(bbox) == 0:
                    print("Ground truth for image {} empty!".format(im_fn))
                    continue

                if vis:
                    # draw each ground-truth box in red, then show BGR->RGB
                    for p in bbox:
                        cv2.rectangle(img, (p[0], p[1]), (p[2], p[3]), color=(0, 0, 255), thickness=1)
                    fig, axs = plt.subplots(1, 1, figsize=(30, 30))
                    axs.imshow(img[:, :, ::-1])
                    axs.set_xticks([])
                    axs.set_yticks([])
                    plt.tight_layout()
                    plt.show()
                    plt.close()
                yield [img], bbox, im_info

            except Exception as e:
                # best-effort: report the bad sample and keep going
                print(e)
                continue
Example 38
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: main.py    MIT License 5 votes vote down vote up
def draw():
    """Overlay task-3 boxes and labels on the original test images.

    For every ``for_task3/*.txt`` annotation file, each CSV row holds eight
    integer quad coordinates followed by a text label; the (x1, y1)-(x3, y3)
    corners are drawn in green with the upper-cased label at the top-left,
    and the annotated image is saved under ``task2_result_draw/``.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    annotation_files = [os.path.splitext(f)[0] + ".txt" for f in glob.glob("for_task3/*.txt")]
    for txt in annotation_files:
        stem = txt.split('/')[1].split('.')[0]
        image = cv2.imread('test_original/' + stem + '.jpg', cv2.IMREAD_COLOR)
        with open(txt, 'r') as txt_file:
            for line in csv.reader(txt_file):
                box = [int(string, 10) for string in line[0:8]]
                if len(line) < 9:
                    # malformed row: report the file (label access below will fail)
                    print(txt)
                cv2.rectangle(image, (box[0], box[1]), (box[4], box[5]), (0, 255, 0), 2)
                cv2.putText(image, line[8].upper(), (box[0], box[1]), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.imwrite('task2_result_draw/' + stem + '.jpg', image)
Example 39
Project: deep_sort   Author: nwojke   File: image_viewer.py    GNU General Public License v3.0 5 votes vote down vote up
def rectangle(self, x, y, w, h, label=None):
        """Draw a rectangle.

        Parameters
        ----------
        x : float | int
            Top left corner of the rectangle (x-axis).
        y : float | int
            Top left corner of the rectangle (y-axis).
        w : float | int
            Width of the rectangle.
        h : float | int
            Height of the rectangle.
        label : Optional[str]
            A text label that is placed at the top left corner of the
            rectangle.

        """
        top_left = int(x), int(y)
        bottom_right = int(x + w), int(y + h)
        cv2.rectangle(self.image, top_left, bottom_right, self._color, self.thickness)
        if label is None:
            return
        # size the filled label box to fit the rendered text plus padding
        (text_w, text_h), _ = cv2.getTextSize(
            label, cv2.FONT_HERSHEY_PLAIN, 1, self.thickness)
        text_origin = top_left[0] + 5, top_left[1] + 5 + text_h
        label_corner = top_left[0] + 10 + text_w, top_left[1] + 10 + text_h
        cv2.rectangle(self.image, top_left, label_corner, self._color, -1)
        cv2.putText(self.image, label, text_origin, cv2.FONT_HERSHEY_PLAIN,
                    1, (255, 255, 255), self.thickness)
Example 40
Project: Tensorflow-YOLOv3   Author: kcosta42   File: utils.py    MIT License 5 votes vote down vote up
def draw_boxes(img_name, boxes_dict, class_names, input_size):
  """Draw detected boxes with class/confidence labels and save the result.

  boxes_dict maps class index -> array of [x1, y1, x2, y2, confidence] rows in
  network-input coordinates. The annotated image is written to
  ./detections/image_output.jpg and then displayed.
  """
  img = Image.open(img_name)
  draw = ImageDraw.Draw(img)
  font = ImageFont.truetype(font="./data/Roboto-Black.ttf", size=(img.size[0] + img.size[1]) // 100)
  # map network-input coordinates back to the original image size
  resize_factor = (img.size[0] / input_size[0], img.size[1] / input_size[1])

  for cls, cls_name in enumerate(class_names):
    boxes = boxes_dict[cls]
    if np.size(boxes) == 0:
      continue

    for box in boxes:
      xy, confidence = box[:4], box[4]
      xy = [xy[i] * resize_factor[i % 2] for i in range(4)]
      x0, y0 = xy[0], xy[1]
      thickness = (img.size[0] + img.size[1]) // 300

      # fake a thick outline by drawing progressively inset rectangles
      # (xy is mutated cumulatively on purpose)
      for t in np.linspace(0, 1, thickness):
        xy[0], xy[1] = xy[0] + t, xy[1] + t
        xy[2], xy[3] = xy[2] - t, xy[3] - t
        draw.rectangle(xy, outline="blue")

      text = f"{cls_name} {(confidence * 100):.1f}%"
      text_size = draw.textsize(text, font=font)
      draw.rectangle([x0, y0 - text_size[1], x0 + text_size[0], y0], fill="blue")
      draw.text((x0, y0 - text_size[1]), text, fill="black", font=font)

      print(text)

  rgb_img = img.convert('RGB')
  rgb_img.save('./detections/image_output.jpg')
  print("Image Saved at \"" + './detections/image_output.jpg' + "\"")
  rgb_img.show()
Example 41
Project: mtcnn-face-detect   Author: ResByte   File: webcam.py    MIT License 5 votes vote down vote up
def main():
    """Run MTCNN face detection on live webcam frames until 'q' is pressed."""
    # Capture device. Usually 0 will be webcam and 1 will be usb cam.
    video_capture = cv2.VideoCapture(0)
    video_capture.set(3, 640)  # property 3: frame width
    video_capture.set(4, 480)  # property 4: frame height

    minsize = 25                  # minimum size of face
    threshold = [0.6, 0.7, 0.7]   # three steps's threshold
    factor = 0.709                # scale factor

    sess = tf.Session()
    with sess.as_default():
        pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
        while True:
            ret, frame = video_capture.read()
            if not ret:
                break
            # detect on the first three channels, draw on the original frame
            img = frame[:, :, 0:3]
            boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
            print(boxes)
            for box in boxes:
                top_left = (int(box[0]), int(box[1]))
                bottom_right = (int(box[2]), int(box[3]))
                cv2.rectangle(frame, top_left, bottom_right, color=(0, 255, 0))
            cv2.imshow('Video', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    video_capture.release()
    cv2.destroyAllWindows()
Example 42
Project: faceUnionVoiceRecognition   Author: tsstss123   File: identityInput.py    GNU General Public License v3.0 5 votes vote down vote up
def display_video_stream(self):
        """Read frame from camera and repaint QLabel widget.

        Also runs face detection on the mirrored frame and drives the UI
        (status text, buttons) from the shared ``checking_state`` value.
        """
        _, frame = self.capture.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # mirror horizontally for a selfie-style preview
        frame = cv2.flip(frame, 1)
        dets = detector(frame, 1)
        # outline every detected face in green
        for k, d in enumerate(dets):
            cv2.rectangle(frame, (d.left(), d.top()), (d.right(), d.bottom()), (0, 255, 0))
        image = QImage(frame, frame.shape[1], frame.shape[0], 
                       frame.strides[0], QImage.Format_RGB888)

        # State machine on checking_state:
        # 0 = idle; enrollment allowed only with exactly one face visible
        #     ('identity enrollment possible' / 'no unique face detected')
        if self.checking_state.value == 0:
            if len(dets) == 1:
                self.text_label.setText('可以进行身份录入')
                self.check_button.setEnabled(True)
            else:
                self.text_label.setText('未检测到唯一人脸')
                self.check_button.setDisabled(True)
        # 1 = busy: recording audio
        elif self.checking_state.value == 1:
            self.text_label.setText('录音中')
            self.check_button.setDisabled(True)
        # 2 = busy: extracting features
        elif self.checking_state.value == 2:
            self.text_label.setText('提取特征中')
            self.check_button.setDisabled(True)
        # 3 = busy: saving
        elif self.checking_state.value == 3:
            self.text_label.setText('保存中')
            self.check_button.setDisabled(True)
        # 4 = done: reset to idle, re-enable buttons, rotate the salt
        elif self.checking_state.value == 4:
            self.checking_state.value = 0
            self.check_button.setEnabled(True)
            self.write_button.setEnabled(True)
            change_salt()

        self.salt_label.setText(salt)
        self.image_label.setPixmap(QPixmap.fromImage(image))
        self.center()
Example 43
Project: faceUnionVoiceRecognition   Author: tsstss123   File: verification.py    GNU General Public License v3.0 5 votes vote down vote up
def draw_rectangle(frame, det, color=(100, 200, 100)):
    """Outline dlib detection ``det`` on ``frame`` in the given BGR color."""
    top_left = (det.left(), det.top())
    bottom_right = (det.right(), det.bottom())
    cv2.rectangle(frame, top_left, bottom_right, color)
Example 44
Project: Fall-Detection-with-CNN   Author: munnam77   File: subtraction.py    MIT License 5 votes vote down vote up
def find_background_image(class_name, path):
	"""Given a directory and a class name, search for a viable background image.

	Returns the path of the first image in which the HOG person detector finds
	nobody; implicitly returns None when every image contains a person.
	Based on: http://www.pyimagesearch.com/2015/11/09/pedestrian-detection-opencv/
	"""
	# initialize the HOG descriptor/person detector
	hog = cv2.HOGDescriptor()
	hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

	# loop over the image paths for this class
	class_path = os.path.join(path, class_name)
	for imagePath in paths.list_images(class_path):
		# load and shrink the image for a [faster, more accurate] pass
		candidate = cv2.imread(imagePath)
		candidate = imutils.resize(candidate, width=min(400, candidate.shape[1]))
		debug_copy = candidate.copy()

		# detect people in the image
		rects, _ = hog.detectMultiScale(candidate, winStride=(4, 4),
			padding=(8, 8), scale=1.05)

		# draw bounding boxes on a scratch copy (kept for debugging, never returned)
		for (x, y, w, h) in rects:
			cv2.rectangle(debug_copy, (x, y), (x + w, y + h), (0, 0, 255), 2)

		# no people detected -> this image works as a background
		if len(rects) == 0:
			return imagePath
Example 45
Project: selfieexpression   Author: andrewjtimmons   File: face.py    MIT License 5 votes vote down vote up
def _draw_rectangle(self, image, pt1, pt2, color, thickness = 2):
    """Outline the region of interest between ``pt1`` and ``pt2`` in ``color``."""
    cv2.rectangle(image, pt1, pt2, color, thickness)
Example 46
Project: selfieexpression   Author: andrewjtimmons   File: face.py    MIT License 5 votes vote down vote up
def _draw_rectangle(self, image, pt1, pt2, color, thickness = 2):
    """Draw a box of the given color and thickness around a region of interest."""
    cv2.rectangle(image, pt1, pt2, color, thickness)
Example 47
Project: pytorch-lstd   Author: JiasiWang   File: box_utils.py    MIT License 5 votes vote down vote up
def vis(img, proposal, truth, idx):
    """Write image ``idx`` to ./vis/ with ground-truth and proposal boxes.

    Ground-truth boxes are drawn in green, proposals in white. Coordinates
    are multiplied by 300 (presumably normalized to [0, 1] for a 300x300
    input -- confirm against the caller).
    """
    # CHW tensor -> HWC numpy, RGB -> BGR; .copy() keeps it contiguous for cv2
    array = img.cpu().data.numpy()
    array = array.transpose(1, 2, 0)
    canvas = array[:, :, (2, 1, 0)].copy()
    for i in range(truth.size(0)):
        gt = truth[i, :]
        cv2.rectangle(canvas,
                      (int(gt[0] * 300), int(gt[1] * 300)),
                      (int(gt[2] * 300), int(gt[3] * 300)),
                      (0, 255, 0), 1)
    for j in range(proposal.size(0)):
        cv2.rectangle(canvas,
                      (int(proposal[j][0] * 300), int(proposal[j][1] * 300)),
                      (int(proposal[j][2] * 300), int(proposal[j][3] * 300)),
                      (255, 255, 255), 1)
    cv2.imwrite('./vis/' + str(idx) + '.jpg', canvas)
Example 48
Project: exposure   Author: yuanming-hu   File: filters.py    MIT License 5 votes vote down vote up
def visualize_filter(self, debug_info, canvas):
    """Render the gamma parameter as a 'G 1/x' badge on the debug canvas."""
    gamma = debug_info['filter_parameters']
    # white backdrop rectangle behind the text
    cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
    label = 'G 1/%.2f' % (1.0 / gamma)
    cv2.putText(canvas, label, (8, 48),
                cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0))
Example 49
Project: exposure   Author: yuanming-hu   File: filters.py    MIT License 5 votes vote down vote up
def visualize_filter(self, debug_info, canvas):
    """Paint a centered filled swatch whose color shows the per-channel scaling."""
    scaling = debug_info['filter_parameters']
    side = canvas.shape[0]
    top_left = (int(side * 0.2), int(side * 0.4))
    bottom_right = (int(side * 0.8), int(side * 0.6))
    swatch_color = list(map(float, scaling))
    cv2.rectangle(canvas, top_left, bottom_right, swatch_color, cv2.FILLED)
Example 50
Project: exposure   Author: yuanming-hu   File: filters.py    MIT License 5 votes vote down vote up
def visualize_filter(self, debug_info, canvas):
    brightness = float(debug_info['filter_parameters'][0])
    cv2.rectangle(canvas, (8, 40), (56, 52), (brightness, brightness,
                                              brightness), cv2.FILLED)