Python cv2.error Examples

The following are 20 code examples of cv2.error, the exception class that OpenCV's Python bindings raise when a native call fails. The project and source file for each example are given in the header above it; functions that appear verbatim in several mirrored repositories are listed once, with the mirrors noted in the header.
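A typical pattern, used throughout the examples below, is to attempt an OpenCV call, catch cv2.error, and substitute a fallback value. A minimal sketch of that pattern (the file name is hypothetical):

import cv2
import numpy as np

img = cv2.imread('photo.jpg')  # imread returns None if the file cannot be read
try:
    grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
except cv2.error:
    # raised with a message like "(-215:Assertion failed) !_src.empty() in function 'cvtColor'"
    grey = np.zeros((480, 640), dtype=np.uint8)  # fall back to a blank frame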
Example #1
Source File: videos_to_tfrecords.py    From object_detection_with_tensorflow with MIT License (identical copies appear in multilabel-image-classification-tensorflow, yolo_v2, Gun-Detector, g-tensorflow-models, and models)
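# Requires (Python 2): from PIL import ImageFile; from StringIO import StringIO
# (under Python 3, io.BytesIO would replace StringIO for binary JPEG data)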
def JpegString(image, jpeg_quality=90):
  """Returns given PIL.Image instance as jpeg string.

  Args:
    image: A PIL image.
    jpeg_quality: The image quality, on a scale from 1 (worst) to 95 (best).

  Returns:
    a jpeg_string.
  """
  # This fix to PIL makes sure that we don't get an error when saving large
  # jpeg files. This is a workaround for a bug in PIL. The value should be
  # substantially larger than the size of the image being saved.
  ImageFile.MAXBLOCK = 640 * 512 * 64

  output_jpeg = StringIO()
  image.save(output_jpeg, 'jpeg', quality=jpeg_quality, optimize=True)
  return output_jpeg.getvalue() 
Example #2
Source File: tracking_engine.py    From DetectAndTrack with Apache License 2.0
def run_farneback(frames):
    try:
        return cv2.calcOpticalFlowFarneback(
            frames[0], frames[1],
            # options, defaults
            None,  # output
            0.5,  # pyr_scale, 0.5
            10,  # levels, 3
            min(frames[0].shape[:2]) // 5,  # winsize, 15
            10,  # iterations, 3
            7,  # poly_n, 5
            1.5,  # poly_sigma, 1.2
            cv2.OPTFLOW_FARNEBACK_GAUSSIAN,  # flags, 0
        )
    except cv2.error:
        return None 
Example #3
Source File: thug.py    From thug-memes with MIT License
def _draw_on_top(self, img, x, y, sub_img, sub_name=''):
        h, w, _ = sub_img.shape
        mask = sub_img[:, :, 3]
        mask_inv = cv2.bitwise_not(mask)
        sub_img_ = sub_img[:, :, :3]

        background = img[y:y + h, x:x + w]
        try:
            background = cv2.bitwise_and(background, background, mask=mask_inv)
        except cv2.error as e:
            raise ThugError(
                'Cannot draw {}, please try with a smaller {}.'.format(
                    sub_name, sub_name)) from e
        foreground = cv2.bitwise_and(sub_img_, sub_img_, mask=mask)
        sum_ = cv2.add(background, foreground)

        img[y:y + h, x:x + w] = sum_ 
Example #4
Source File: thug.py    From thug-memes with MIT License
def create(self, res_file, show=True):
        img = cv2.imread(self._img_path)

        for thug in self._thugs:
            if thug.eyes_available:
                try:
                    self._draw_glasses(img, thug)
                except ThugError as e:
                    logger.error(e)

                if thug.mouth_available:
                    try:
                        self._draw_cigar(img, thug)  # depends also on eyes
                    except ThugError as e:
                        logger.error(e)

        cv2.imwrite(res_file, img)
        self._img_path = res_file

        return super().create(res_file, show) 
Example #5
Source File: TableRecognition.py    From OTR with GNU General Public License v3.0
def does_page_have_valid_table(self, min_fract_area=.2, min_cells=50):
        """
        Analyzes whether the image contains a table by evaluating the
        coarse table outline and its children
        """
        try: # Some CV2 operations may fail e.g. if no correct supernode has been recognized
            # Check fractional area of table compared to image
            img_area = self.imgshape[0] * self.imgshape[1]
            supernode_area = cv2.contourArea(self.supernode_bbox)
            if supernode_area < img_area * min_fract_area:
                return False
            # Check minimum number of cells (ncells = degree of coarse outline node)
            ncells = self.g.degree(self.supernode_idx)
            return ncells >= min_cells
        except cv2.error:
            return False 
Example #6
Source File: ImageDataGenerator.py    From Kaggle-Avito-NN with MIT License
def __data_generation(self, item_ids_temp):
        """Generates data containing batch_size samples."""
        # Initialization
        X = np.empty((len(item_ids_temp), *self.dim, self.n_channels))

        # Generate data
        for i, item_id in enumerate(item_ids_temp):
            image_id = self.image_ids[item_id]
           
            fname = f'{self.dir}/{image_id}.jpg'
            if os.path.isfile(fname):
                img = cv2.imread(fname)
                try:
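                    # resize raises cv2.error when imread returned None (unreadable or corrupt file)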
                    img = cv2.resize(img, self.dim, interpolation = cv2.INTER_LINEAR)
                except cv2.error as e:
                    img = np.zeros([*self.dim, self.n_channels])
            else:
                img = np.zeros([*self.dim, self.n_channels])

            X[i,] = img

        return X
Example #7
Source File: ImageDataGenerator.py    From Kaggle-Avito-NN with MIT License
def _load_image(self, item_id):
        image_id = self.image_ids[item_id]
        try:
            fname = f'{self.dir}/{image_id}.jpg'
            img = cv2.imread(fname)
            img = cv2.resize(img, self.dim, interpolation = cv2.INTER_LINEAR)
            return img
        except cv2.error:
            return np.zeros([*self.dim, self.n_channels])
        except Exception:
            return np.zeros([*self.dim, self.n_channels])
Example #8
Source File: videos_to_tfrecords.py    From object_detection_with_tensorflow with MIT License (identical copies appear in g-tensorflow-models, models, multilabel-image-classification-tensorflow, and Gun-Detector)
def ParallelPreprocessing(args):
  """Parallel preprocessing: rotation, resize and jpeg encoding to string."""
  (vid_path, timestep, num_timesteps, view) = args
  try:
    image = GetSpecificFrame(vid_path, timestep)

    # Resizing.
    resize_str = ''
    if FLAGS.resize_min_edge > 0:
      resize_str += ', resize ' + shapestring(image)
      image = cv2resizeminedge(image, FLAGS.resize_min_edge)
      resize_str += ' => ' + shapestring(image)

    # Rotating.
    rotate = None
    if FLAGS.rotate:
      rotate = FLAGS.rotate
      if FLAGS.rotate_if_matching is not None:
        rotate = None
        patt = re.compile(FLAGS.rotate_if_matching)
        if patt.match(vid_path) is not None:
          rotate = FLAGS.rotate
      if rotate is not None:
        image = cv2rotateimage(image, FLAGS.rotate)

    # Jpeg encoding.
    image = Image.fromarray(image)
    im_string = bytes_feature([JpegString(image)])

    if timestep % FLAGS.log_frequency == 0:
      tf.logging.info('Loaded frame %d / %d for %s (rotation %s%s) from %s' %
                      (timestep, num_timesteps, view, str(rotate), resize_str,
                       vid_path))
    return im_string
  except cv2.error as e:
    tf.logging.error('Error while loading frame %d of %s: %s' %
                     (timestep, vid_path, str(e)))
    return None 
Example #9
Source File: train.py    From face with MIT License
def _load_recognizer(recognizer, filename=config.RECOGNIZER_OUTPUT_FILE):
    try:
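        # read() raises cv2.error when the trained-model file is missing or malformed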
        recognizer.read(filename)
        return True
    except cv2.error:
        return False 
Example #10
Source File: toolbox.py    From findit with MIT License (an identical copy appears in stagesepx, also MIT License)
def turn_grey(old: np.ndarray) -> np.ndarray:
    try:
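        # cvtColor raises cv2.error when the input is already single-channel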
        return cv2.cvtColor(old, cv2.COLOR_RGB2GRAY)
    except cv2.error:
        return old 
Example #11
Source File: trainer_matches.py    From Yugioh-bot with MIT License
def get_matches(self, train, corr):
        train_img = cv2.imread(train, 0)
        query_img = self.query
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(train_img, None)
        kp2, des2 = sift.detectAndCompute(query_img, None)
        if des1 is None or des2 is None:
            return False
        # create BFMatcher object
        bf = cv2.BFMatcher()
        try:
            matches = bf.knnMatch(des1, des2, k=2)
        except cv2.error:
            return False
        good_matches = []
        cluster = []
        for m, n in matches:
            img2_idx = m.trainIdx
            img1_idx = m.queryIdx
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt
            # print("Comare %d to %d and %d to %d" % (x1,x2,y1,y2))
            if m.distance < 0.8 * n.distance and y2 > self.yThreshold and x2 < self.xThreshold:
                good_matches.append([m])
                cluster.append([int(x2), int(y2)])
        if len(cluster) <= corr:
            return False
        self.kmeans = KMeans(n_clusters=1, random_state=0).fit(cluster)
        new_cluster, new_matches = self.compare_distances(train_img, cluster, good_matches)
        if len(new_cluster) == 0 or len(new_cluster) / len(cluster) < .5:
            return False
        img3 = cv2.drawMatchesKnn(
            train_img, kp1, query_img, kp2, new_matches, None, flags=2)
        if self._debug:
            self.images.append(img3)
            self.debug_matcher(img3)
        return True 
Example #12
Source File: trainer_matches.py    From Yugioh-bot with MIT License
def get_matches(self, train, corr):
        train_img = cv2.imread(train, 0)
        query_img = self.query
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(train_img, None)
        kp2, des2 = sift.detectAndCompute(query_img, None)
        if des1 is None or des2 is None:
            return False
        # create BFMatcher object
        bf = cv2.BFMatcher()
        try:
            matches = bf.knnMatch(des1, des2, k=2)
        except cv2.error:
            return False
        good_matches = []
        cluster = []
        for m, n in matches:
            img2_idx = m.trainIdx
            img1_idx = m.queryIdx
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt
            # print("Comare %d to %d and %d to %d" % (x1,x2,y1,y2))
            if m.distance < 0.8 * n.distance and self.in_box(x2, y2):
                good_matches.append([m])
                cluster.append([int(x2), int(y2)])
        if len(cluster) <= corr:
            return False
        self.kmeans = KMeans(n_clusters=1, random_state=0).fit(cluster)
        new_cluster, new_matches = self.compare_distances(train_img, cluster, good_matches)
        if len(new_cluster) == 0 or len(new_cluster) / len(cluster) < .5:
            return False
        img3 = cv2.drawMatchesKnn(
            train_img, kp1, query_img, kp2, new_matches, None, flags=2)
        if self._debug:
            self.images.append(img3)
            self.debug_matcher(img3)
        return True 
Example #13
Source File: pytorch_cnn_features.py    From DetectAndTrack with Apache License 2.0
def prepare_image(im):
    im = im[..., (2, 1, 0)]  # convert to rgb
    try:
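        # resize raises cv2.error when the cropped patch is empty (zero width or height)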
        im = cv2.resize(im, (224, 224))
    except cv2.error:
        im = np.zeros((224, 224, 3))  # dummy image
        logger.warning('Invalid patch, replaced with 0 image.')
    im = im.transpose(2, 0, 1)
    mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)  # ImageNet mean
    std = np.array([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)  # ImageNet std
    im = (im / 255.0 - mean) / std
    im = torch.FloatTensor(im).cuda()
    im = torch.autograd.Variable(im, volatile=True)
    return im 
Example #14
Source File: toolbox.py    From findit with MIT License
def load_grey_from_cv2_object(pic_object: np.ndarray) -> np.ndarray:
    """ preparation for cv2 object (force turn it into gray) """
    pic_object = pic_object.astype(np.uint8)
    try:
        # try to turn it into grey
        grey_pic = cv2.cvtColor(pic_object, cv2.COLOR_BGR2GRAY)
    except cv2.error:
        # already grey
        return pic_object
    return grey_pic 
Example #15
Source File: app_unsynced.py    From cvcalib with Apache License 2.0
def __seek_calib_limit(self, video, frame_range, max_miss_count=3, verbose=True):
        frame_range_signed_length = frame_range[1] - frame_range[0]
        sample_interval_frames = frame_range_signed_length // 2
        failed_attempts = 0
        while sample_interval_frames != 0:
            miss_count = 0
            try:
                if verbose:
                    if sample_interval_frames < 0:
                        print("\nSampling every {:d} frames within {:s}, backwards."
                              .format(-sample_interval_frames, str((frame_range[1], frame_range[0]))))
                    else:
                        print("\nSampling every {:d} frames within {:s}.".format(sample_interval_frames,
                                                                                 str(frame_range)))
                for i_frame in range(frame_range[0], frame_range[1], sample_interval_frames):
                    video.read_at_pos(i_frame)
                    if verbose:
                        print('.', end="", flush=True)
                    if video.try_approximate_corners(self.board_dims):
                        frame_range[0] = i_frame
                        miss_count = 0
                    else:
                        miss_count += 1
                        if miss_count > max_miss_count:
                            # too many frames w/o calibration board, highly unlikely those are all bad frames,
                            # go to finer scan
                            frame_range[1] = i_frame
                            break
                sample_interval_frames = round(sample_interval_frames / 2)
            except cv2.error as e:
                failed_attempts += 1
                if failed_attempts > 2:
                    raise RuntimeError("Too many failed attempts. Frame index: " + str(i_frame))
                print("FFmpeg hickup, attempting to reopen video.")
                video.reopen()  # workaround for ffmpeg AVC/H.264 bug
        return frame_range[0] 
Example #16
Source File: active_weather.py    From aggregation with Apache License 2.0
def __region_mask__(self,reference_image,horizontal_grid,vertical_grid):
        """
        use the first and last horizontal/vertical grid lines to make a mask around the desired region/table
        :return:
        """
        reference_shape = reference_image.shape
        # [:2] in case the image was read in colour format - no need to raise an error for that
        # the first mask will be an outline of the region, sort of like #. The second mask will fill in the
        # central interior box
        mask = np.zeros(reference_shape[:2],np.uint8)
        mask2 = np.zeros(mask.shape,np.uint8)
        # draw the first and last horizontal/vertical grid lines to create a box
        cv2.drawContours(mask,horizontal_grid,0,255,-1)
        cv2.drawContours(mask,horizontal_grid,len(horizontal_grid)-2,255,-1)
        cv2.drawContours(mask,vertical_grid,0,255,-1)
        cv2.drawContours(mask,vertical_grid,len(vertical_grid)-1,255,-1)

        # find the (hopefully) one interior contour - should be our mask
        _,contours, hier = cv2.findContours(mask.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

        assert len(contours) == 1
        for c,h in zip(contours,hier[0]):
            if h[-1] == -1:
                continue

            cv2.drawContours(mask2,[c],0,255,-1)

        return mask2 
Example #17
Source File: vis_tool.py    From ya_mxdet with MIT License
def show_detection_result(data, label, bboxes, cls_scores, class_name_list):
    data = data[0].as_in_context(mx.cpu(0))
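    # undo the ImageNet normalization, channel by channel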
    data[0] = data[0] * 0.229 + 0.485
    data[1] = data[1] * 0.224 + 0.456
    data[2] = data[2] * 0.225 + 0.406
    label = label[0].asnumpy()
    img = data.asnumpy()
    img = np.array(np.round(img * 255), dtype=np.uint8)
    img = np.transpose(img, (1, 2, 0))
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    bboxes = bboxes.asnumpy()
    cls_scores = cls_scores.asnumpy()

    # Show ground truth
    for item in label:
        cv2.rectangle(img, (int(item[0]), int(item[1])), (int(item[2]), int(item[3])), color=(255, 0, 0), thickness=2)
        cv2.putText(img, class_name_list[int(item[4])], (int(item[0]), int(item[3])),0, 0.5,(0, 255, 0))

    # NMS by class
    for cls_id in range(1, len(class_name_list)):
        cur_scores = cls_scores[:, cls_id]
        bboxes_pick = bboxes[:, cls_id * 4: (cls_id+1)*4]
        cur_scores, bboxes_pick = nms(cur_scores, bboxes_pick, cfg.rcnn_nms_thresh)
        for i in range(len(cur_scores)):
            if cur_scores[i] >= cfg.rcnn_score_thresh:
                bbox = bboxes_pick[i]
                cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color=(0, 0, 255), thickness=1)
                cv2.putText(img, "{}: {:.4}".format(class_name_list[cls_id], cur_scores[i]), (int(bbox[0]), int(bbox[3])),0, 0.5,(255, 255, 0))
    try:
        cv2.imshow("Img", img)
        cv2.waitKey(0)
    except cv2.error:
        cv2.imwrite("det_result.jpg", img)
        print("imshow() is not supported! Saved result to det_result.jpg.")
        input() 
Example #18
Source File: opencv.py    From stytra with GNU General Public License v3.0
def read(self):
        """ """
        try:
            ret, frame = self.cam.read()
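            # frame is None when no camera delivers images; cvtColor then raises cv2.error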
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        except cv2.error:
            raise cv2.error("OpenCV can't find a camera!")
        if self.bw:
            return np.mean(rgb, 2).astype(rgb.dtype)
        else:
            return rgb 
Example #19
Source File: align_facescrub.py    From InsightFace-PyTorch with Apache License 2.0
def detect_face(data):
    from retinaface.detector import detector
    from utils import align_face

    src_path = data['src_path']
    dst_path = data['dst_path']
    boxB = np.array(data['boxB'])

    img = cv.imread(src_path)
    if img is not None:
        img, ratio = resize(img)
        boxB = boxB * ratio

        try:
            bboxes, landmarks = detector.detect_faces(img)

            if len(bboxes) > 0:
                i = select_face(bboxes, boxB)
                bbox, landms = bboxes[i], landmarks[i]
                img = align_face(img, [landms])
                dirname = os.path.dirname(dst_path)
                os.makedirs(dirname, exist_ok=True)
                cv.imwrite(dst_path, img)
        except ValueError as err:
            print(err)
        except cv.error as err:
            print(err)

    return True 
Example #20
Source File: app_unsynced.py    From cvcalib with Apache License 2.0
def find_camera_poses(self, verbose=False):
        ix_cam = 0
        for video in self.videos:
            camera = self.cameras[ix_cam]
            if verbose:
                print("Finding camera poses for video {:s} ... (this may take awhile)".format(video.name))
            video.poses = []
            rotations, translations = calibrate_intrinsics(camera, video.image_points,
                                                           self.board_object_corner_set,
                                                           self.args.use_rational_model,
                                                           self.args.use_tangential_coeffs,
                                                           self.args.use_thin_prism,
                                                           fix_radial=True,
                                                           fix_thin_prism=True,
                                                           max_iterations=1,
                                                           use_existing_guess=True,
                                                           test=True)
            if verbose:
                print("Camera pose reprojection error for video {:s}: {:.4f}"
                      .format(video.name, camera.intrinsics.error))
            for ix_pose in range(len(rotations)):
                translation = translations[ix_pose]
                rotation = rotations[ix_pose]
                pose = Pose(rotation=rotation, translation_vector=translation)
                video.poses.append(pose)

            ix_cam += 1 