Python cv2.groupRectangles() Examples

The following are 5 code examples of cv2.groupRectangles(). You can go to the original project or source file by following the links above each example.
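Before the examples, a minimal sketch of the call itself may help (the rectangle values below are made up for illustration). cv2.groupRectangles(rectList, groupThreshold[, eps]) clusters rectangles whose positions and sizes agree within eps, replaces each cluster with its average rectangle, rejects clusters containing groupThreshold or fewer members, and returns a new (rectangles, weights) pair rather than modifying its argument in place.

import cv2

# Three near-duplicate boxes in (x, y, w, h) form plus one isolated box.
rects = [[10, 10, 50, 50], [12, 11, 48, 51], [11, 9, 51, 49],
         [300, 300, 40, 40]]

# groupThreshold=1 rejects clusters with a single member, so the three
# near-duplicates merge into one averaged box and the outlier is dropped.
grouped, weights = cv2.groupRectangles(rects, 1, 0.2)
print(grouped)   # one averaged rectangle near (11, 10, 49, 50)
print(weights)   # per-group cluster sizes, here [3]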
Example #1
Source File: facerec_train.py    From deepvisualminer with MIT License
import cv2
import numpy as np


def detect(img_file, detector_xml_path, dest_img_file):
    img = cv2.imread(img_file)
    
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    
    detector = cv2.CascadeClassifier(detector_xml_path)
    
    min_size = (min(50, gray_img.shape[0] // 10), min(50, gray_img.shape[1] // 10))
    # scaleFactor=1.1, minNeighbors=4, flags=0 (unused), minSize=min_size
    hits = detector.detectMultiScale(gray_img, 1.1, 4, 0, min_size)
    # Note: cv2.groupRectangles returns a new (rectangles, weights) pair and
    # does not modify its argument, so this call as written would be a no-op:
    #cv2.groupRectangles(hits, 2)
    print(hits)
    
    hits_img = np.copy(img)
    for (x,y,w,h) in hits:
        cv2.rectangle(hits_img, (x,y), (x+w, y+h), (0,0,255), 2)
    cv2.imwrite(dest_img_file, hits_img) 
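A working version of the commented-out grouping call would have to capture the return value. A hedged sketch using this example's names: the hits are duplicated first so that lone detections can reach the cluster-size threshold, and grouped_hits would then be drawn instead of hits.

# hits is an (N, 4) array of (x, y, w, h) boxes; duplicating the list
# lets lone detections survive groupThreshold=1 while overlaps merge.
hits_list = np.asarray(hits).reshape(-1, 4).tolist() * 2
grouped_hits, weights = cv2.groupRectangles(hits_list, 1, 0.2)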
Example #2
Source File: proposal_helpers.py    From raster-deep-learning with Apache License 2.0
def filterRois(rects, img_w, img_h, roi_min_area, roi_max_area, roi_min_side, roi_max_side, roi_max_aspect_ratio):
    filteredRects = []
    filteredRectsSet = set()
    for rect in rects:
        if tuple(rect) in filteredRectsSet:  # skip rectangles with identical coordinates
            continue

        x, y, x2, y2 = rect
        w = x2 - x
        h = y2 - y
        assert w >= 0 and h >= 0

        # apply filters
        if h == 0 or w == 0 or \
           x2 > img_w or y2 > img_h or \
           w < roi_min_side or h < roi_min_side or \
           w > roi_max_side or h > roi_max_side or \
           w * h < roi_min_area or w * h > roi_max_area or \
           w / h > roi_max_aspect_ratio or h / w > roi_max_aspect_ratio:
               continue
        filteredRects.append(rect)
        filteredRectsSet.add(tuple(rect))

    # Overlapping rectangles could be combined here using non-maximum
    # suppression or merged with cv2.groupRectangles (np.float was removed
    # in NumPy 1.24, so plain float is used in these sketches):
    # groupedRectangles, weights = cv2.groupRectangles(np.asanyarray(rectsInput, float).tolist(), 1, 0.3)
    # groupedRectangles = nms_python(np.asarray(rectsInput, float), 0.5)
    assert len(filteredRects) > 0
    return filteredRects 
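One caveat on the grouping lines above: cv2.groupRectangles expects rectangles in (x, y, w, h) form, while this helper works with (x, y, x2, y2) corners, so a conversion step would be needed first. A minimal sketch, assuming corner-format input (group_corner_boxes is a hypothetical helper, not part of the project):

import cv2

def group_corner_boxes(rects_xyxy, group_threshold=1, eps=0.3):
    # Convert (x, y, x2, y2) corners to the (x, y, w, h) form
    # that cv2.groupRectangles expects.
    xywh = [[x, y, x2 - x, y2 - y] for (x, y, x2, y2) in rects_xyxy]
    # Duplicate the list so isolated boxes are not rejected by the threshold.
    grouped, weights = cv2.groupRectangles(xywh * 2, group_threshold, eps)
    return grouped, weights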
Example #3
Source File: facerec_train.py    From deepvisualminer with MIT License
import os

import cv2
import imageio
import numpy as np


def detectvideo(vid_file, detector_xml_path, dest_img_dir):
    
    if not os.path.exists(dest_img_dir):
        os.makedirs(dest_img_dir)

    detector = cv2.CascadeClassifier(detector_xml_path)
    
    vid = imageio.get_reader(vid_file, 'ffmpeg')
    # If size and source_size are not equal, then device was probably
    # rotated (like a mobile) and we should compensate for the rotation.
    # Images will have 'source_size' dimensions but we need 'size'.
    metadata = vid.get_meta_data()
    rotate = False
    if metadata['source_size'] != metadata['size']:
        print('Rotating')
        rotate = True
    
    for i, img in enumerate(vid):
        if rotate:
            #img = np.transpose(img, axes=(1, 0, 2)).copy()
            img = np.rot90(img).copy()
            
        print('Frame', i, img.shape)

        # imageio yields frames in RGB order, so convert from RGB, not BGR.
        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        
        min_size = (min(20, gray_img.shape[0] // 10), min(20, gray_img.shape[1] // 10))
        hits = detector.detectMultiScale(gray_img, 1.1, 3, 0, min_size)
        # As in Example #1, cv2.groupRectangles returns a new list, so this
        # call would be a no-op without capturing its result:
        #cv2.groupRectangles(hits, 2)
        print(len(hits), ' hits')

        hits_img = np.copy(img)
        
        if len(hits) > 0:
            for (x,y,w,h) in hits:
                cv2.rectangle(hits_img, (x,y), (x+w, y+h), (0,0,255), 2)

        # The frame is RGB but cv2.imwrite expects BGR, so convert on write.
        cv2.imwrite(os.path.join(dest_img_dir, 'frame-%d.png' % i),
                    cv2.cvtColor(hits_img, cv2.COLOR_RGB2BGR))
Example #4
Source File: read_image.py    From image_text_reader with MIT License
import cv2


def get_text_with_location(boxed_image, contours, img):
    image_text_dict = {}
    for contour in contours:
        # get rectangle bounding contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # cv2.groupRectangles could merge overlapping boxes here
        # (see the sketch after this example).

        # draw the bounding box (with a 10 px margin) around the contour
        # on the original image

        # if w < 20 or h < 20:
        #     continue
        if w > 300:
            continue

        cv2.rectangle(
            boxed_image, (x, y), (x + w + 10, y + h + 10),
            thickness=2,
            color=(0, 123, 123))

        # extract_image_from_location is a helper defined elsewhere in this project.
        box_read = extract_image_from_location(img, x, y, w, h)
        box_read = box_read.strip()
        image_text_dict[(x, y)] = box_read

    return image_text_dict 
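The cv2.groupRectangles comment above suggests merging overlapping contour boxes before reading text from them. A minimal sketch of that step (merged_contour_boxes is a hypothetical helper; contours as returned by cv2.findContours):

import cv2

def merged_contour_boxes(contours, eps=0.3):
    # cv2.boundingRect returns boxes in (x, y, w, h) form.
    boxes = [list(cv2.boundingRect(c)) for c in contours]
    # Duplicate the list so single boxes survive groupThreshold=1.
    grouped, _ = cv2.groupRectangles(boxes * 2, 1, eps)
    return grouped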
Example #5
Source File: tracking.py    From OpenCV-Computer-Vision-Projects-with-Python with MIT License
def advance_frame(self, frame, proto_objects_map):
        """Advances the algorithm by a single frame

            This method tracks all objects via the following steps:
             - adds all bounding boxes from saliency map as potential
               targets
             - finds bounding boxes from previous frame in current frame
               via mean-shift tracking
             - combines the two lists by removing duplicates

            Certain targets are discarded:
             - targets that are too small
             - targets that don't move

            :param frame: New input RGB frame
            :param proto_objects_map: corresponding proto-objects map of the
                                      frame
            :returns: frame annotated with bounding boxes around all objects
                      that are being tracked
        """
        self.tracker = copy.deepcopy(frame)

        # build a list of all bounding boxes
        box_all = []

        # append to the list all bounding boxes found from the
        # current proto-objects map
        box_all = self._append_boxes_from_saliency(proto_objects_map, box_all)

        # find all bounding boxes extrapolated from last frame
        # via mean-shift tracking
        box_all = self._append_boxes_from_meanshift(frame, box_all)

        # only keep those that are both salient and in mean shift
        if len(self.object_roi) == 0:
            group_thresh = 0  # no previous frame: keep all boxes from saliency
        else:
            group_thresh = 1  # previous frame + saliency
        box_grouped, _ = cv2.groupRectangles(box_all, group_thresh, 0.1)

        # update mean-shift bookkeeping for remaining boxes
        self._update_mean_shift_bookkeeping(frame, box_grouped)

        # draw remaining boxes
        for (x, y, w, h) in box_grouped:
            cv2.rectangle(self.tracker, (x, y), (x + w, y + h),
                          (0, 255, 0), 2)

        return self.tracker
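This example shows the grouping threshold doing real work: with no previous frame, each salient box appears only once, so group_thresh = 0 keeps singletons; once tracking has started, a box must be proposed by both saliency and mean-shift (i.e., appear at least twice in box_all) to survive group_thresh = 1. The eps argument decides how similar two boxes must be to fall into the same cluster; a hedged illustration with made-up boxes:

import cv2

a, b = [10, 10, 50, 50], [16, 16, 50, 50]  # slightly offset duplicates

# With a tiny eps the boxes land in separate singleton clusters and both
# are rejected at groupThreshold=1; a larger eps clusters and merges them.
print(cv2.groupRectangles([a, b], 1, 0.01)[0])  # empty: nothing survives
print(cv2.groupRectangles([a, b], 1, 0.3)[0])   # one averaged rectangle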