Python cv2.IMWRITE_JPEG_QUALITY Examples

The following are 30 code examples of cv2.IMWRITE_JPEG_QUALITY, the OpenCV flag that sets JPEG compression quality (0-100) when saving or encoding images with cv2.imwrite or cv2.imencode. The project and source file each example was taken from are listed above it.
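For quick reference, here is a minimal, self-contained sketch of how the flag is passed to cv2.imwrite and cv2.imencode; the file names and the quality value of 90 are illustrative only.

import cv2

# Illustrative input path; substitute a real image.
img = cv2.imread('input.png')

# Write a JPEG at quality 90 (0-100; higher means better quality and a larger file).
cv2.imwrite('output.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 90])

# The same flag works for in-memory encoding; decoding the buffer yields the re-compressed image.
ok, buf = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
if ok:
    recompressed = cv2.imdecode(buf, cv2.IMREAD_COLOR)

The examples below show the same pattern in real projects: saving results to disk, simulating JPEG artifacts for data augmentation, and packing encoded buffers into datasets.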
Example #1
Source File: decode.py    From blind-watermark with MIT License
def decode(ori_path, img_path, res_path, alpha):
    ori = cv2.imread(ori_path)
    img = cv2.imread(img_path)
    ori_f = np.fft.fft2(ori)
    img_f = np.fft.fft2(img)
    height, width = ori.shape[0], ori.shape[1]
    watermark = (ori_f - img_f) / alpha
    watermark = np.real(watermark)
    res = np.zeros(watermark.shape)
    random.seed(height + width)
    # Use integer division and materialize the ranges as lists so random.shuffle works on Python 3.
    x = list(range(height // 2))
    y = list(range(width))
    random.shuffle(x)
    random.shuffle(y)
    for i in range(height // 2):
        for j in range(width):
            res[x[i]][y[j]] = watermark[i][j]
    cv2.imwrite(res_path, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 
Example #2
Source File: test_tiny_yolo.py    From object-detection with MIT License
def image_detection(sess, image_path, image_file, colors):
    # Preprocess your image
    image, image_data = preprocess_image(image_path + image_file, model_image_size = (416, 416))
    
    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input:image_data, K.learning_phase():0})

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    
    # Draw bounding boxes on the image file
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the predicted bounding box on the image
    #image.save(os.path.join("out", image_file), quality=90)
    cv2.imwrite(os.path.join("out", "tiny_yolo_" + image_file), image, [cv2.IMWRITE_JPEG_QUALITY, 90])
    
    return out_scores, out_boxes, out_classes 
Example #3
Source File: preprocess.py    From jama16-retina-replication with MIT License
def resize(images_paths, size=299):
    """
    Function for resizing images.

    :param images_paths:
        Required. Paths to images.

    :param size:
        Optional. Size to which resize to. Defaults to 299.

    :return:
        Nothing.
    """
    for image_path in images_paths:
        image = cv2.imread(image_path)

        # Resize the image.
        image = cv2.resize(image, (size, size))

        # Save the image.
        cv2.imwrite(image_path, image, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 
Example #4
Source File: inference.py    From DMPR-PS with GNU General Public License v3.0
def detect_image(detector, device, args):
    """Demo for detecting images."""
    timer = Timer()
    while True:
        image_file = input('Enter image file path: ')
        image = cv.imread(image_file)
        timer.tic()
        pred_points = detect_marking_points(
            detector, image, args.thresh, device)
        slots = None
        if pred_points and args.inference_slot:
            marking_points = list(list(zip(*pred_points))[1])
            slots = inference_slots(marking_points)
        timer.toc()
        plot_points(image, pred_points)
        plot_slots(image, pred_points, slots)
        cv.imshow('demo', image)
        cv.waitKey(1)
        if args.save:
            cv.imwrite('save.jpg', image, [int(cv.IMWRITE_JPEG_QUALITY), 100]) 
Example #5
Source File: format_dataset.py    From MassImageRetrieval with Apache License 2.0
def format_OxBuild():
    src_image_save_dir = "/Volumes/projects/ImageRetireval/dataset/OxBuild/src/"
    oxbuild_index_file = src_image_save_dir + "index_file.csv"
    index_writer = open(oxbuild_index_file, "w")

    src_image_dir = "/Volumes/projects/ImageRetireval/dataset/OxBuild/not_deal_src/"
    for image_name in os.listdir(src_image_dir):
        if image_name.startswith("."):
            continue
        class_name = image_name.split("_")[0]
        image_data = cv2.imread(src_image_dir + image_name)
        resize_image_data = cv2.resize(image_data, (224, 224), interpolation=cv2.INTER_CUBIC)
        new_image_save_path = src_image_save_dir + image_name
        cv2.imwrite(new_image_save_path, resize_image_data, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
        index_writer.write("{},{}\n".format(image_name, class_name))

# format_OxBuild() 
Example #6
Source File: container.py    From visual_dynamics with MIT License
def add_datum(self, *inds, **datum_dict):
        other_dict = dict([item for item in datum_dict.items() if not item[0].endswith('image')])
        super(ImageDataContainer, self).add_datum(*inds, **other_dict)
        image_dict = dict([item for item in datum_dict.items() if item[0].endswith('image')])
        for image_name, image in image_dict.items():
            if image_name in self.datum_shapes_dict and self.datum_shapes_dict[image_name] != image.shape:
                raise ValueError('unable to add datum %s with shape %s since the shape %s was expected' %
                                 (image_name, image.shape, self.datum_shapes_dict[image_name]))
            self.datum_shapes_dict[image_name] = image.shape
            image_fname = self._get_image_fname(*(inds + (image_name,)))
            if image.dtype == np.uint8:
                if image.ndim == 3 and image.shape[2] == 3:
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            else:
                image = math_utils.pack_image(image)
            cv2.imwrite(image_fname, image, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 
Example #7
Source File: DFLJPG.py    From DeepFaceLab with GNU General Public License v3.0
def set_xseg_mask(self, mask_a):
        if mask_a is None:
            self.dfl_dict['xseg_mask'] = None
            return

        mask_a = imagelib.normalize_channels(mask_a, 1)
        img_data = np.clip( mask_a*255, 0, 255 ).astype(np.uint8)

        data_max_len = 4096

        ret, buf = cv2.imencode('.png', img_data)

        if not ret or len(buf) > data_max_len:
            for jpeg_quality in range(100,-1,-1):
                ret, buf = cv2.imencode( '.jpg', img_data, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality] )
                if ret and len(buf) <= data_max_len:
                    break

        if not ret:
            raise Exception("set_xseg_mask: unable to generate image data for set_xseg_mask")

        self.dfl_dict['xseg_mask'] = buf 
Example #8
Source File: test_yolov3.py    From object-detection with MIT License
def image_detection(sess, image_path, image_file, colors):
    # Preprocess your image
    image, image_data = preprocess_image(image_path + image_file, model_image_size = (416, 416))
    
    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolov3.input:image_data, K.learning_phase():0})

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    
    # Draw bounding boxes on the image file
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the predicted bounding box on the image
    #image.save(os.path.join("out", image_file), quality=90)
    cv2.imwrite(os.path.join("out", "yolov3_" + image_file), image, [cv2.IMWRITE_JPEG_QUALITY, 90])
    
    return out_scores, out_boxes, out_classes 
Example #9
Source File: soja_resize_image.py    From soja-box with MIT License
def resize_image(img_path, mini_size=480, jpeg_quality=80):
    """
    缩放image
    :param img_path: image的路径
    :param mini_size: 最小边的尺寸
    :param jpeg_quality: jpeg图片的质量
    """
    org_img = cv2.imread(img_path)
    img_w = org_img.shape[0]
    img_h = org_img.shape[1]
    if max(img_w, img_h) > mini_size:
        if img_w > img_h:
            img_w = mini_size * img_w // img_h
            img_h = mini_size
        else:
            img_h = mini_size * img_h // img_w
            img_w = mini_size
    dist_size = (img_h, img_w)
    r_image = cv2.resize(org_img, dist_size, interpolation=cv2.INTER_AREA)
    params = [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality]
    img_name = img_path + '_New.jpg'
    # Pass the [flag, value] list built above directly as the encoding parameters.
    cv2.imwrite(img_name, r_image, params)
Example #10
Source File: encode.py    From blind-watermark with MIT License
def encode(img_path, wm_path, res_path, alpha):
    img = cv2.imread(img_path)
    img_f = np.fft.fft2(img)
    height, width, channel = np.shape(img)
    watermark = cv2.imread(wm_path)
    wm_height, wm_width = watermark.shape[0], watermark.shape[1]
    # Use integer division and materialize the ranges as lists so random.shuffle works on Python 3.
    x, y = list(range(height // 2)), list(range(width))
    random.seed(height + width)
    random.shuffle(x)
    random.shuffle(y)
    tmp = np.zeros(img.shape)
    for i in range(height // 2):
        for j in range(width):
            if x[i] < wm_height and y[j] < wm_width:
                tmp[i][j] = watermark[x[i]][y[j]]
                tmp[height - 1 - i][width - 1 - j] = tmp[i][j]
    res_f = img_f + alpha * tmp
    res = np.fft.ifft2(res_f)
    res = np.real(res)
    cv2.imwrite(res_path, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 
Example #11
Source File: resize_image.py    From soja-box with MIT License
def resize_image(img_path, mini_size=480, jpeg_quality=80):
    """
    缩放image
    :param img_path: image的路径
    :param mini_size: 最小边的尺寸
    :param jpeg_quality: jpeg图片的质量
    """
    org_img = cv2.imread(img_path)
    img_w = org_img.shape[0]
    img_h = org_img.shape[1]
    if max(img_w, img_h) > mini_size:
        if img_w > img_h:
            img_w = mini_size * img_w // img_h
            img_h = mini_size
        else:
            img_h = mini_size * img_h // img_w
            img_w = mini_size
    dist_size = (img_h, img_w)
    r_image = cv2.resize(org_img, dist_size, interpolation=cv2.INTER_AREA)
    params = [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality]
    img_name = img_path + '_New.jpg'
    # Pass the [flag, value] list built above directly as the encoding parameters.
    cv2.imwrite(img_name, r_image, params)
Example #12
Source File: io.py    From ethoscope with GNU General Public License v3.0
def flush(self, t, img):
        """

        :param t: the time since start of the experiment, in ms
        :param img: an array representing an image.
        :type img: np.ndarray
        :return:
        """

        tick = int(round((t/1000.0)/self._period))
        if tick == self._last_tick:
            return

        cv2.imwrite(self._tmp_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50])

        with open(self._tmp_file, "rb") as f:
            bstring = f.read()

        cmd = 'INSERT INTO ' + self._table_name + '(id,t,img) VALUES (%s,%s,%s)'

        args = (0, int(t), bstring)

        self._last_tick = tick

        return cmd, args 
Example #13
Source File: noise.py    From dataflow with Apache License 2.0
def _augment(self, img, q):
        enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
        return cv2.imdecode(enc, 1).astype(img.dtype) 
Example #14
Source File: pickle_provider.py    From A-Light-and-Fast-Face-Detector-for-Edge-Devices with MIT License
def __init__(self,
                 pickle_file_path,
                 encode_quality=90,
                 data_adapter=None):
        ProviderBaseclass.__init__(self)

        if data_adapter:  # write data

            self.data_adapter = data_adapter
            self.data = {}
            self.counter = 0
            self.pickle_file_path = pickle_file_path

        else:  # read data

            self.data = pickle.load(open(pickle_file_path, 'rb'))
            # get positive and negative indices
            self._positive_index = []
            self._negative_index = []
            for k, v in self.data.items():
                if v[1] == 0:  # negative
                    self._negative_index.append(k)
                else:  # positive
                    self._positive_index.append(k)

        self.compression_mode = '.jpg'
        self.encode_params = [cv2.IMWRITE_JPEG_QUALITY, encode_quality] 
Example #15
Source File: opencv.py    From faceswap with GNU General Public License v3.0
def get_save_args(self):
        """ Return the save parameters for the file format """
        filetype = self.config["format"]
        args = list()
        if filetype == "jpg" and self.config["jpg_quality"] > 0:
            args = (cv2.IMWRITE_JPEG_QUALITY,  # pylint: disable=no-member
                    self.config["jpg_quality"])
        if filetype == "png" and self.config["png_compress_level"] > -1:
            args = (cv2.IMWRITE_PNG_COMPRESSION,  # pylint: disable=no-member
                    self.config["png_compress_level"])
        logger.debug(args)
        return args 
Example #16
Source File: Util.py    From DeepFaceLab with GNU General Public License v3.0
def add_landmarks_debug_images(input_path):
    io.log_info ("Adding landmarks debug images...")

    for filepath in io.progress_bar_generator( pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        img = cv2_imread(str(filepath))

        dflimg = DFLIMG.load (filepath)

        if dflimg is None or not dflimg.has_data():
            io.log_err (f"{filepath.name} is not a dfl image file")
            continue
        
        if img is not None:
            face_landmarks = dflimg.get_landmarks()
            face_type = FaceType.fromString ( dflimg.get_face_type() )
            
            if face_type == FaceType.MARK_ONLY:
                rect = dflimg.get_source_rect()
                LandmarksProcessor.draw_rect_landmarks(img, rect, face_landmarks, FaceType.FULL )
            else:
                LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True )

            output_file = '{}{}'.format( str(Path(str(input_path)) / filepath.stem),  '_debug.jpg')
            cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50] ) 
Example #17
Source File: trans.py    From ocr.pytorch with MIT License
def tranfun(self, image):
        img = trans_utils.getcvimage(image)
        param = [int(cv2.IMWRITE_JPEG_QUALITY), random.randint(self.lower, self.upper)]
        img_encode = cv2.imencode('.jpeg', img, param)
        img_decode = cv2.imdecode(img_encode[1], cv2.IMREAD_COLOR)
        pil_img = trans_utils.cv2pil(img_decode)
        if len(image.split())==1:
            pil_img = pil_img.convert('L')
        return pil_img 
Example #18
Source File: prepare_dataset.py    From DMPR-PS with GNU General Public License v3.0
def write_image_and_label(name, image, centralied_marks, name_list):
    """Write image and label with given name."""
    name_list.append(os.path.basename(name))
    print("Processing NO.%d samples: %s..." % (len(name_list), name_list[-1]))
    image = cv.resize(image, (512, 512))
    cv.imwrite(name + '.jpg', image, [int(cv.IMWRITE_JPEG_QUALITY), 100])
    with open(name + '.json', 'w') as file:
        json.dump(generalize_marks(centralied_marks), file) 
Example #19
Source File: pickle_provider.py    From A-Light-and-Fast-Face-Detector-for-Edge-Devices with MIT License
def __init__(self,
                 pickle_file_path,
                 encode_quality=90,
                 data_adapter=None):
        ProviderBaseclass.__init__(self)

        if data_adapter:  # write data

            self.data_adapter = data_adapter
            self.data = {}
            self.counter = 0
            self.pickle_file_path = pickle_file_path

        else:  # read data

            self.data = pickle.load(open(pickle_file_path, 'rb'))
            # get positive and negative indices
            self._positive_index = []
            self._negative_index = []
            for k, v in self.data.items():
                if v[1] == 0:  # negative
                    self._negative_index.append(k)
                else:  # positive
                    self._positive_index.append(k)

        self.compression_mode = '.jpg'
        self.encode_params = [cv2.IMWRITE_JPEG_QUALITY, encode_quality] 
Example #20
Source File: pickle_provider.py    From lffd-pytorch with MIT License
def __init__(self,
                 pickle_file_path,
                 encode_quality=90,
                 data_adapter=None):
        ProviderBaseclass.__init__(self)

        if data_adapter:  # write data

            self.data_adapter = data_adapter
            self.data = {}
            self.counter = 0
            self.pickle_file_path = pickle_file_path

        else:  # read data

            self.data = pickle.load(open(pickle_file_path, 'rb'))
            # get positive and negative indices
            self._positive_index = []
            self._negative_index = []
            for k, v in self.data.items():
                if v[1] == 0:  # negative
                    self._negative_index.append(k)
                else:  # positive
                    self._positive_index.append(k)

        self.compression_mode = '.jpg'
        self.encode_params = [cv2.IMWRITE_JPEG_QUALITY, encode_quality] 
Example #21
Source File: evaluate.py    From face-parsing.PyTorch with MIT License
def vis_parsing_maps(im, parsing_anno, stride, save_im=False, save_path='vis_results/parsing_map_on_im.jpg'):
    # Colors for all 20 parts
    part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
                   [255, 0, 85], [255, 0, 170],
                   [0, 255, 0], [85, 255, 0], [170, 255, 0],
                   [0, 255, 85], [0, 255, 170],
                   [0, 0, 255], [85, 0, 255], [170, 0, 255],
                   [0, 85, 255], [0, 170, 255],
                   [255, 255, 0], [255, 255, 85], [255, 255, 170],
                   [255, 0, 255], [255, 85, 255], [255, 170, 255],
                   [0, 255, 255], [85, 255, 255], [170, 255, 255]]

    im = np.array(im)
    vis_im = im.copy().astype(np.uint8)
    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)
    vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255

    num_of_class = np.max(vis_parsing_anno)

    for pi in range(1, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]

    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
    # print(vis_parsing_anno_color.shape, vis_im.shape)
    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)

    # Save result or not
    if save_im:
        cv2.imwrite(save_path, vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    # return vis_im 
Example #22
Source File: test.py    From face-parsing.PyTorch with MIT License
def vis_parsing_maps(im, parsing_anno, stride, save_im=False, save_path='vis_results/parsing_map_on_im.jpg'):
    # Colors for all 20 parts
    part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],
                   [255, 0, 85], [255, 0, 170],
                   [0, 255, 0], [85, 255, 0], [170, 255, 0],
                   [0, 255, 85], [0, 255, 170],
                   [0, 0, 255], [85, 0, 255], [170, 0, 255],
                   [0, 85, 255], [0, 170, 255],
                   [255, 255, 0], [255, 255, 85], [255, 255, 170],
                   [255, 0, 255], [255, 85, 255], [255, 170, 255],
                   [0, 255, 255], [85, 255, 255], [170, 255, 255]]

    im = np.array(im)
    vis_im = im.copy().astype(np.uint8)
    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)
    vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255

    num_of_class = np.max(vis_parsing_anno)

    for pi in range(1, num_of_class + 1):
        index = np.where(vis_parsing_anno == pi)
        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]

    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
    # print(vis_parsing_anno_color.shape, vis_im.shape)
    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)

    # Save result or not
    if save_im:
        cv2.imwrite(save_path[:-4] +'.png', vis_parsing_anno)
        cv2.imwrite(save_path, vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    # return vis_im 
Example #23
Source File: noise.py    From ADL with MIT License
def _augment(self, img, q):
        enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
        return cv2.imdecode(enc, 1).astype(img.dtype) 
Example #24
Source File: collector.py    From costar_plan with Apache License 2.0
def _rgbdCb(self, rgb_msg, depth_msg):
        if rgb_msg is None:
            rospy.logwarn("_rgbdCb: rgb_msg is None !!!!!!!!!")
        try:
            # max out at 10 hz assuming 30hz data source
            # TODO(ahundt) make mod value configurable
            if rgb_msg.header.seq % 3 == 0:
                cv_image = self._bridge.imgmsg_to_cv2(rgb_msg, "rgb8")

                # decode the data, this will take some time

                rospy.loginfo('rgb color cv_image shape: ' + str(cv_image.shape) + ' depth sequence number: ' + str(depth_msg.header.seq))
                # print('rgb color cv_image shape: ' + str(cv_image.shape))
                cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
                # encode the jpeg with high quality
                encode_params = [cv2.IMWRITE_JPEG_QUALITY, 99]
                rgb_img = cv2.imencode('.jpg', cv_image, encode_params)[1].tobytes()
                # rgb_img = GetJpeg(np.asarray(cv_image))

                cv_depth_image = self._bridge.imgmsg_to_cv2(depth_msg, desired_encoding="passthrough")
                depth_encoded_as_rgb_numpy = encode_depth_numpy(cv_depth_image)
                bytevalues = cv2.imencode('.png', depth_encoded_as_rgb_numpy)[1].tobytes()

                with self.mutex:
                    self.rgb_time = rgb_msg.header.stamp
                    self.rgb_img = rgb_img
                    # self.depth_info = depth_camera_info
                    # self.rgb_info = rgb_camera_info
                    self.depth_img_time = depth_msg.header.stamp
                    # self.depth_img = np_image
                    # self.depth_img = img_str
                    self.depth_img = bytevalues
            #print(self.rgb_img)
        except CvBridgeError as e:
            rospy.logwarn(str(e)) 
Example #25
Source File: noise.py    From petridishnn with MIT License
def _augment(self, img, q):
        enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
        return cv2.imdecode(enc, 1).astype(img.dtype) 
Example #26
Source File: augmentation_utils.py    From GIFT with Apache License 2.0
def jpeg_compress(img,quality_low=15,quality_high=75):
    quality=np.random.randint(quality_low,quality_high)
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
    result, encimg = cv2.imencode('.jpg', img, encode_param)
    img=cv2.imdecode(encimg,1)
    return img 
Example #27
Source File: recordio.py    From mxnet-lambda with Apache License 2.0
def pack_img(header, img, quality=95, img_fmt='.jpg'):
    """Pack an image into ``MXImageRecord``.

    Parameters
    ----------
    header : IRHeader
        Header of the image record.
        ``header.label`` can be a number or an array. See more detail in ``IRHeader``.
    img : numpy.ndarray
        Image to be packed.
    quality : int
        Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9.
    img_fmt : str
        Encoding of the image (.jpg for JPEG, .png for PNG).

    Returns
    -------
    s : str
        The packed string.

    Examples
    --------
    >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
    >>> id = 2574
    >>> header = mx.recordio.IRHeader(0, label, id, 0)
    >>> img = cv2.imread('test.jpg')
    >>> packed_s = mx.recordio.pack_img(header, img)
    """
    assert cv2 is not None
    jpg_formats = ['.JPG', '.JPEG']
    png_formats = ['.PNG']
    encode_params = None
    if img_fmt.upper() in jpg_formats:
        encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
    elif img_fmt.upper() in png_formats:
        encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality]

    ret, buf = cv2.imencode(img_fmt, img, encode_params)
    assert ret, 'failed to encode image'
    return pack(header, buf.tostring()) 
Example #28
Source File: save_image.py    From detectron2-pipeline with MIT License
def map(self, data):
        image = data[self.src]
        image_id = data["image_id"]

        # Prepare output for image based on image_id
        output = image_id.split(os.path.sep)
        dirname = output[:-1]
        if len(dirname) > 0:
            dirname = os.path.join(*dirname)
            dirname = os.path.join(self.path, dirname)
            os.makedirs(dirname, exist_ok=True)
        else:
            dirname = self.path
        filename = f"{output[-1].rsplit('.', 1)[0]}.{self.image_ext}"
        path = os.path.join(dirname, filename)

        if self.image_ext == "jpg":
            cv2.imwrite(path, image,
                        (cv2.IMWRITE_JPEG_QUALITY, self.jpg_quality) if self.jpg_quality else None)
        elif self.image_ext == "png":
            cv2.imwrite(path, image,
                        (cv2.IMWRITE_PNG_COMPRESSION, self.png_compression) if self.png_compression else None)
        else:
            raise Exception("Unsupported image format")

        return data 
Example #29
Source File: control_thread.py    From ethoscope with GNU General Public License v3.0
def _update_info(self):
        if self._monit is None:
            return
        t = self._monit.last_time_stamp

        frame_idx = self._monit.last_frame_idx
        wall_time = time.time()
        dt = wall_time - self._last_info_t_stamp
        df = float(frame_idx - self._last_info_frame_idx)

        if self._last_info_t_stamp == 0 or dt > 0:
            f = round(df/dt, 2)
        else:
            f = "NaN"

        if t is not None:  # and p is not None:
            self._info["monitor_info"] = {
                            # "last_positions":pos,
                            "last_time_stamp":t,
                            "fps": f
                            }

        frame = self._drawer.last_drawn_frame
        if frame is not None:
            cv2.imwrite(self._info["last_drawn_img"], frame, [int(cv2.IMWRITE_JPEG_QUALITY), 50])


        self._last_info_t_stamp = wall_time
        self._last_info_frame_idx = frame_idx 
Example #30
Source File: pickle_provider.py    From lffd-pytorch with MIT License
def __init__(self,
                 pickle_file_path,
                 encode_quality=90,
                 data_adapter=None):
        ProviderBaseclass.__init__(self)

        if data_adapter:  # write data

            self.data_adapter = data_adapter
            self.data = {}
            self.counter = 0
            self.pickle_file_path = pickle_file_path

        else:  # read data

            self.data = pickle.load(open(pickle_file_path, 'rb'))
            # get positive and negative indices
            self._positive_index = []
            self._negative_index = []
            for k, v in self.data.items():
                if v[1] == 0:  # negative
                    self._negative_index.append(k)
                else:  # positive
                    self._positive_index.append(k)

        self.compression_mode = '.jpg'
        self.encode_params = [cv2.IMWRITE_JPEG_QUALITY, encode_quality]