Python cv2.IMWRITE_JPEG_QUALITY Examples

The following are 30 code examples showing how to use the cv2.IMWRITE_JPEG_QUALITY flag. These examples are extracted from open source projects; the project, author, file, and license are noted above each example.
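
Before the project examples, here is a minimal, self-contained sketch of the flag's basic usage with cv2.imwrite and cv2.imencode (the file names and quality value are placeholders, not taken from any of the projects below):

import cv2

# Read any image from disk (the path is a placeholder).
img = cv2.imread("input.png")

# IMWRITE_JPEG_QUALITY is passed as a [flag, value] pair; JPEG quality
# ranges from 0 to 100 (higher means better quality and a larger file).
cv2.imwrite("output_q90.jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), 90])

# The same flag works for in-memory encoding via cv2.imencode.
ok, buf = cv2.imencode(".jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), 90])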

Example 1
Project: blind-watermark   Author: linyacool   File: decode.py    License: MIT License
def decode(ori_path, img_path, res_path, alpha):
    ori = cv2.imread(ori_path)
    img = cv2.imread(img_path)
    ori_f = np.fft.fft2(ori)
    img_f = np.fft.fft2(img)
    height, width = ori.shape[0], ori.shape[1]
    watermark = (ori_f - img_f) / alpha
    watermark = np.real(watermark)
    res = np.zeros(watermark.shape)
    random.seed(height + width)
    x = list(range(height // 2))
    y = list(range(width))
    random.shuffle(x)
    random.shuffle(y)
    for i in range(height // 2):
        for j in range(width):
            res[x[i]][y[j]] = watermark[i][j]
    cv2.imwrite(res_path, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 
Example 2
Project: blind-watermark   Author: linyacool   File: encode.py    License: MIT License
def encode(img_path, wm_path, res_path, alpha):
    img = cv2.imread(img_path)
    img_f = np.fft.fft2(img)
    height, width, channel = np.shape(img)
    watermark = cv2.imread(wm_path)
    wm_height, wm_width = watermark.shape[0], watermark.shape[1]
    x, y = list(range(height // 2)), list(range(width))
    random.seed(height + width)
    random.shuffle(x)
    random.shuffle(y)
    tmp = np.zeros(img.shape)
    for i in range(height // 2):
        for j in range(width):
            if x[i] < wm_height and y[j] < wm_width:
                tmp[i][j] = watermark[x[i]][y[j]]
                tmp[height - 1 - i][width - 1 - j] = tmp[i][j]
    res_f = img_f + alpha * tmp
    res = np.fft.ifft2(res_f)
    res = np.real(res)
    cv2.imwrite(res_path, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 
Example 3
Project: object-detection   Author: kaka-lin   File: test_tiny_yolo.py    License: MIT License
def image_detection(sess, image_path, image_file, colors):
    # Preprocess your image
    image, image_data = preprocess_image(image_path + image_file, model_image_size = (416, 416))
    
    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input:image_data, K.learning_phase():0})

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    
    # Draw bounding boxes on the image file
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the predicted bounding box on the image
    #image.save(os.path.join("out", image_file), quality=90)
    cv2.imwrite(os.path.join("out", "tiny_yolo_" + image_file), image, [cv2.IMWRITE_JPEG_QUALITY, 90])
    
    return out_scores, out_boxes, out_classes 
Example 4
Project: object-detection   Author: kaka-lin   File: test_yolov3.py    License: MIT License
def image_detection(sess, image_path, image_file, colors):
    # Preprocess your image
    image, image_data = preprocess_image(image_path + image_file, model_image_size = (416, 416))
    
    # Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
    # You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolov3.input:image_data, K.learning_phase():0})

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    
    # Draw bounding boxes on the image file
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the predicted bounding box on the image
    #image.save(os.path.join("out", image_file), quality=90)
    cv2.imwrite(os.path.join("out", "yolov3_" + image_file), image, [cv2.IMWRITE_JPEG_QUALITY, 90])
    
    return out_scores, out_boxes, out_classes 
Example 5
Project: MassImageRetrieval   Author: liuguiyangnwpu   File: format_dataset.py    License: Apache License 2.0
def format_OxBuild():
    src_image_save_dir = "/Volumes/projects/ImageRetireval/dataset/OxBuild/src/"
    oxbuild_index_file = src_image_save_dir + "index_file.csv"
    index_writer = open(oxbuild_index_file, "w")

    src_image_dir = "/Volumes/projects/ImageRetireval/dataset/OxBuild/not_deal_src/"
    for image_name in os.listdir(src_image_dir):
        if image_name.startswith("."):
            continue
        class_name = image_name.split("_")[0]
        image_data = cv2.imread(src_image_dir + image_name)
        resize_image_data = cv2.resize(image_data, (224, 224), interpolation=cv2.INTER_CUBIC)
        new_image_save_path = src_image_save_dir + image_name
        cv2.imwrite(new_image_save_path, resize_image_data, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
        index_writer.write("{},{}\n".format(image_name, class_name))

# format_OxBuild() 
Example 6
Project: jama16-retina-replication   Author: mikevoets   File: preprocess.py    License: MIT License
def resize(images_paths, size=299):
    """
    Function for resizing images.

    :param images_paths:
        Required. Paths to images.

    :param size:
        Optional. Size to which resize to. Defaults to 299.

    :return:
        Nothing.
    """
    for image_path in images_paths:
        image = cv2.imread(image_path)

        # Resize the image.
        image = cv2.resize(image, (size, size))

        # Save the image.
        cv2.imwrite(image_path, image, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 
Example 7
Project: visual_dynamics   Author: alexlee-gk   File: container.py    License: MIT License
def add_datum(self, *inds, **datum_dict):
        other_dict = dict([item for item in datum_dict.items() if not item[0].endswith('image')])
        super(ImageDataContainer, self).add_datum(*inds, **other_dict)
        image_dict = dict([item for item in datum_dict.items() if item[0].endswith('image')])
        for image_name, image in image_dict.items():
            if image_name in self.datum_shapes_dict and self.datum_shapes_dict[image_name] != image.shape:
                raise ValueError('unable to add datum %s with shape %s since the shape %s was expected' %
                                 (image_name, image.shape, self.datum_shapes_dict[image_name]))
            self.datum_shapes_dict[image_name] = image.shape
            image_fname = self._get_image_fname(*(inds + (image_name,)))
            if image.dtype == np.uint8:
                if image.ndim == 3 and image.shape[2] == 3:
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            else:
                image = math_utils.pack_image(image)
            cv2.imwrite(image_fname, image, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 
Example 8
Project: ethoscope   Author: gilestrolab   File: io.py    License: GNU General Public License v3.0
def flush(self, t, img):
        """

        :param t: the time since start of the experiment, in ms
        :param img: an array representing an image.
        :type img: np.ndarray
        :return:
        """

        tick = int(round((t/1000.0)/self._period))
        if tick == self._last_tick:
            return

        cv2.imwrite(self._tmp_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50])

        with open(self._tmp_file, "rb") as f:
                bstring = f.read()
                
        cmd = 'INSERT INTO ' + self._table_name + '(id,t,img) VALUES (%s,%s,%s)'

        args = (0, int(t), bstring)

        self._last_tick = tick

        return cmd, args 
Example 9
Project: DMPR-PS   Author: Teoge   File: inference.py    License: GNU General Public License v3.0
def detect_image(detector, device, args):
    """Demo for detecting images."""
    timer = Timer()
    while True:
        image_file = input('Enter image file path: ')
        image = cv.imread(image_file)
        timer.tic()
        pred_points = detect_marking_points(
            detector, image, args.thresh, device)
        slots = None
        if pred_points and args.inference_slot:
            marking_points = list(list(zip(*pred_points))[1])
            slots = inference_slots(marking_points)
        timer.toc()
        plot_points(image, pred_points)
        plot_slots(image, pred_points, slots)
        cv.imshow('demo', image)
        cv.waitKey(1)
        if args.save:
            cv.imwrite('save.jpg', image, [int(cv.IMWRITE_JPEG_QUALITY), 100]) 
Example 10
Project: soja-box   Author: itaa   File: soja_resize_image.py    License: MIT License
def resize_image(img_path, mini_size=480, jpeg_quality=80):
    """
    Resize an image.
    :param img_path: path to the image
    :param mini_size: size of the shorter side
    :param jpeg_quality: JPEG quality of the output image
    """
    org_img = cv2.imread(img_path)
    img_w = org_img.shape[0]
    img_h = org_img.shape[1]
    if max(img_w, img_h) > mini_size:
        if img_w > img_h:
            img_w = mini_size * img_w // img_h
            img_h = mini_size
        else:
            img_h = mini_size * img_h // img_w
            img_w = mini_size
    dist_size = (img_h, img_w)
    r_image = cv2.resize(org_img, dist_size, interpolation=cv2.INTER_AREA)
    params = [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality]
    img_name = img_path + '_New.jpg'
    cv2.imwrite(img_name, r_image, params)
Example 11
Project: soja-box   Author: itaa   File: resize_image.py    License: MIT License
def resize_image(img_path, mini_size=480, jpeg_quality=80):
    """
    Resize an image.
    :param img_path: path to the image
    :param mini_size: size of the shorter side
    :param jpeg_quality: JPEG quality of the output image
    """
    org_img = cv2.imread(img_path)
    img_w = org_img.shape[0]
    img_h = org_img.shape[1]
    if max(img_w, img_h) > mini_size:
        if img_w > img_h:
            img_w = mini_size * img_w // img_h
            img_h = mini_size
        else:
            img_h = mini_size * img_h // img_w
            img_w = mini_size
    dist_size = (img_h, img_w)
    r_image = cv2.resize(org_img, dist_size, interpolation=cv2.INTER_AREA)
    params = [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality]
    img_name = img_path + '_New.jpg'
    cv2.imwrite(img_name, r_image, params)
Example 12
Project: DeepFaceLab   Author: iperov   File: DFLJPG.py    License: GNU General Public License v3.0
def set_xseg_mask(self, mask_a):
        if mask_a is None:
            self.dfl_dict['xseg_mask'] = None
            return

        mask_a = imagelib.normalize_channels(mask_a, 1)
        img_data = np.clip( mask_a*255, 0, 255 ).astype(np.uint8)

        data_max_len = 4096

        ret, buf = cv2.imencode('.png', img_data)

        if not ret or len(buf) > data_max_len:
            for jpeg_quality in range(100,-1,-1):
                ret, buf = cv2.imencode( '.jpg', img_data, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality] )
                if ret and len(buf) <= data_max_len:
                    break

        if not ret:
            raise Exception("set_xseg_mask: unable to generate image data for set_xseg_mask")

        self.dfl_dict['xseg_mask'] = buf 
Example 13
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: recordio.py    License: Apache License 2.0
def pack_img(header, img, quality=95, img_fmt='.jpg'):
    """Pack an image into ``MXImageRecord``.

    Parameters
    ----------
    header : IRHeader
        Header of the image record.
        ``header.label`` can be a number or an array. See more detail in ``IRHeader``.
    img : numpy.ndarray
        Image to be packed.
    quality : int
        Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9.
    img_fmt : str
        Encoding of the image (.jpg for JPEG, .png for PNG).

    Returns
    -------
    s : str
        The packed string.

    Examples
    --------
    >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
    >>> id = 2574
    >>> header = mx.recordio.IRHeader(0, label, id, 0)
    >>> img = cv2.imread('test.jpg')
    >>> packed_s = mx.recordio.pack_img(header, img)
    """
    assert cv2 is not None
    jpg_formats = ['.JPG', '.JPEG']
    png_formats = ['.PNG']
    encode_params = None
    if img_fmt.upper() in jpg_formats:
        encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
    elif img_fmt.upper() in png_formats:
        encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality]

    ret, buf = cv2.imencode(img_fmt, img, encode_params)
    assert ret, 'failed to encode image'
    return pack(header, buf.tostring()) 
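
The docstring above distinguishes JPEG quality from the PNG compression level. A minimal sketch of the two encode-parameter lists in isolation, assuming a synthetic NumPy image rather than any project data:

import cv2
import numpy as np

img = np.zeros((64, 64, 3), dtype=np.uint8)  # placeholder image

# JPEG: cv2.IMWRITE_JPEG_QUALITY takes 0-100; higher keeps more detail.
ok_jpg, jpg_buf = cv2.imencode(".jpg", img, [cv2.IMWRITE_JPEG_QUALITY, 95])

# PNG: cv2.IMWRITE_PNG_COMPRESSION takes 0-9; higher compresses harder.
ok_png, png_buf = cv2.imencode(".png", img, [cv2.IMWRITE_PNG_COMPRESSION, 9])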
Example 14
Project: dataflow   Author: tensorpack   File: noise.py    License: Apache License 2.0
def _augment(self, img, q):
        enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
        return cv2.imdecode(enc, 1).astype(img.dtype) 
Example 15
Project: DPC   Author: TengdaHan   File: extract_frame.py    License: MIT License
def extract_video_opencv(v_path, f_root, dim=240):
    '''v_path: single video path;
       f_root: root to store frames'''
    v_class = v_path.split('/')[-2]
    v_name = os.path.basename(v_path)[0:-4]
    out_dir = os.path.join(f_root, v_class, v_name)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    vidcap = cv2.VideoCapture(v_path)
    nb_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)   # float
    height = vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
    if (width == 0) or (height==0): 
        print(v_path, 'not successfully loaded, drop ..'); return
    new_dim = resize_dim(width, height, dim)

    success, image = vidcap.read()
    count = 1
    while success:
        image = cv2.resize(image, new_dim, interpolation = cv2.INTER_LINEAR)
        cv2.imwrite(os.path.join(out_dir, 'image_%05d.jpg' % count), image,
                    [cv2.IMWRITE_JPEG_QUALITY, 80])# quality from 0-100, 95 is default, high is good
        success, image = vidcap.read()
        count += 1
    if nb_frames > count:
        print('/'.join(out_dir.split('/')[-2::]), 'NOT extracted successfully: %df/%df' % (count, nb_frames))
    vidcap.release() 
Example 16
Project: derplearning   Author: notkarol   File: util.py    License: MIT License
def encode_jpg(image, quality):
    return cv2.imencode(".jpg", image, [cv2.IMWRITE_JPEG_QUALITY, quality])[1].tostring() 
Example 17
Project: vidgear   Author: abhiTronix   File: webgear.py    License: Apache License 2.0
async def __producer(self):
        """
        An asynchronous frame producer/generator for the WebGear application.
        """
        # loop over frames
        while self.__isrunning:
            # read frame
            frame = self.stream.read()
            # break if NoneType
            if frame is None:
                break
            # reducer frames size if specified
            if self.__frame_size_reduction:
                frame = await reducer(frame, percentage=self.__frame_size_reduction)
            # handle JPEG encoding
            encodedImage = cv2.imencode(
                ".jpg",
                frame,
                [
                    cv2.IMWRITE_JPEG_QUALITY,
                    self.__jpeg_quality,
                    cv2.IMWRITE_JPEG_PROGRESSIVE,
                    self.__jpeg_progressive,
                    cv2.IMWRITE_JPEG_OPTIMIZE,
                    self.__jpeg_optimize,
                ],
            )[1].tobytes()
            # yield frame in byte format
            yield (
                b"--frame\r\nContent-Type:image/jpeg\r\n\r\n" + encodedImage + b"\r\n"
            )
            await asyncio.sleep(0.01) 
Example 18
Project: rpitelecine   Author: Alexamder   File: tc-run.py    License: BSD 3-Clause "New" or "Revised" License
def parse_commandline():
    # Command line arguments
    global job_name, start_frame, end_frame, frames_count
    global current_frame, capture_direction, capture_ext, reverse, brackets
    parser = argparse.ArgumentParser()
    parser.add_argument('jobname', help='Name of the telecine job')
    parser.add_argument('-s','--start', type=int, help='Start frame number')
    parser.add_argument('-e','--end', type=int, help='End frame number')
    parser.add_argument('-j','--jpeg', help='Save Jpeg images',	action='store_true')
    parser.add_argument('-r','--reverse', help='Run backwards', action='store_true')
    parser.add_argument('-b','--brackets', help='Bracket exposures', action='store_true')

    args = parser.parse_args()
    
    job_name = sanitise_job_name(args.jobname)

    start_frame = args.start if args.start else 0
    end_frame = args.end if args.end else 0
    frames_count = abs(end_frame - start_frame)+1
    if frames_count == 0:
        print('Job needs to know how many frames')
        quit()
    capture_direction = 1 if end_frame > start_frame else -1
    current_frame = start_frame
    if args.jpeg:
        print('Saving as jpeg')
        capture_ext = 'jpg'
        fileSaveParams = [int(cv2.IMWRITE_JPEG_QUALITY), 95]
    brackets = args.brackets
    if args.brackets:
        print('Bracketing on')
    reverse = args.reverse
    if args.reverse:
        print('Reverse capture') 
Example 19
Project: object-detection   Author: kaka-lin   File: test_ssdlite_mobilenet_v2.py    License: MIT License
def image_object_detection(interpreter, colors):
    image = cv2.imread('images/dog.jpg')
    image_data = preprocess_image_for_tflite(image, model_image_size=300)
    out_scores, out_boxes, out_classes = run_detection(image_data, interpreter)

    # Draw bounding boxes on the image file
    result = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    cv2.imwrite(os.path.join("out", "ssdlite_mobilenet_v2_dog.jpg"), result, [cv2.IMWRITE_JPEG_QUALITY, 90]) 
Example 20
Project: object-detection   Author: kaka-lin   File: test_ssd_mobilenet_v1.py    License: MIT License
def image_object_detection(image_path, sess, colors):
    image = cv2.imread(image_path)

    image_data = preprocess_image(image, model_image_size=(300,300))
    out_scores, out_boxes, out_classes = run_detection(image_data, sess)

    # Draw bounding boxes on the image file
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image_name = os.path.basename(image_path)
    cv2.imwrite(os.path.join("out/", "ssd_mobilenet_v1_" + image_name), image, [cv2.IMWRITE_JPEG_QUALITY, 90]) 
Example 21
Project: opencv-engine   Author: thumbor   File: engine_cv3.py    License: MIT License
def read(self, extension=None, quality=None):
        if quality is None:
            quality = self.context.config.QUALITY

        options = None
        extension = extension or self.extension
        try:
            if FORMATS[extension] == 'JPEG':
                options = [cv2.IMWRITE_JPEG_QUALITY, quality]
        except KeyError:
            # default is JPEG so
            options = [cv2.IMWRITE_JPEG_QUALITY, quality]

        try:
            if FORMATS[extension] == 'WEBP':
                options = [cv2.IMWRITE_WEBP_QUALITY, quality]
        except KeyError:
            options = [cv2.IMWRITE_JPEG_QUALITY, quality]

        success, buf = cv2.imencode(extension, self.image, options or [])
        data = buf.tostring()

        if FORMATS[extension] == 'JPEG' and self.context.config.PRESERVE_EXIF_INFO:
            if hasattr(self, 'exif') and self.exif != None:
                img = JpegFile.fromString(data)
                img._segments.insert(0, ExifSegment(self.exif_marker, None, self.exif, 'rw'))
                data = img.writeString()

        return data 
Example 22
Project: MassImageRetrieval   Author: liuguiyangnwpu   File: format_dataset.py    License: Apache License 2.0
def foramt_CIFAR100(step="train"):
    cifar100_save_dir = "/Volumes/projects/ImageRetireval/dataset/CIFAR-100/src/"
    cifar100_save_dir = "/home/ai-i-liuguiyang/ImageRetireval/dataset/CIFAR-100/src/"
    cifar100_dir = "/Volumes/projects/ImageRetireval/dataset/CIFAR-100/cifar-100-python/"
    cifar100_dir = "/home/ai-i-liuguiyang/ImageRetireval/dataset/CIFAR-100/cifar-100-python/"
    cifar100_meta = unpickle(cifar100_dir + "meta")
    cifar100_file_path = cifar100_dir + step
    cifar100_data = unpickle(cifar100_file_path)

    # ['filenames', 'batch_label', 'fine_labels', 'coarse_labels', 'data']
    print(cifar100_data.keys())
    print(len(cifar100_data["filenames"]))
    print(cifar100_data["data"].shape)

    image_datas = cifar100_data["data"]
    image_names = cifar100_data["filenames"]

    cifar100_index_file = cifar100_save_dir + "index_file.csv"
    index_writer = open(cifar100_index_file, "a+")

    n_images = image_datas.shape[0]
    for idx in range(n_images):
        single_image = image_datas[idx]
        r = single_image[:1024].reshape(32, 32)
        r = np.expand_dims(r, axis=2)
        g = single_image[1024:1024*2].reshape(32, 32)
        g = np.expand_dims(g, axis=2)
        b = single_image[1024*2:].reshape(32, 32)
        b = np.expand_dims(b, axis=2)
        image = np.concatenate((b, g, r), axis=2)
        resize_image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_CUBIC)
        image_label = step + "_" + image_names[idx][:-3]+"jpg"
        cv2.imwrite(cifar100_save_dir + image_label, resize_image, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
        fine_label_id = cifar100_data["fine_labels"][idx]
        fine_label = cifar100_meta["fine_label_names"][fine_label_id]
        index_writer.write("{},{}\n".format(image_label, fine_label))
    index_writer.close()


# foramt_CIFAR100("train")
# foramt_CIFAR100("test") 
Example 23
Project: MassImageRetrieval   Author: liuguiyangnwpu   File: format_dataset.py    License: Apache License 2.0
def format_Caltech_101():
    src_image_save_dir = "/Volumes/projects/ImageRetireval/dataset/Caltech_101/src/"
    caltech_101_index_file = src_image_save_dir + "index_file.csv"
    index_writer = open(caltech_101_index_file, "w")

    src_dataset_dir = "/Volumes/projects/ImageRetireval/dataset/Caltech_101/"
    src_image_dir = src_dataset_dir + "101_ObjectCategories/"
    tpl_src_annotation_dir = src_dataset_dir + "Annotations/" + "{}/annotation_{}.mat"
    categories = os.listdir(src_image_dir)
    for class_name in categories:
        if class_name.startswith("."):
            continue
        src_class_image_dir = src_image_dir + class_name + "/"
        image_name_list = os.listdir(src_class_image_dir)
        for image_name in image_name_list:
            if image_name.startswith("."):
                continue
            src_image_file_path = src_class_image_dir + image_name
            image_data = cv2.imread(src_image_file_path)
            resize_image_data = cv2.resize(image_data, (224, 224), interpolation=cv2.INTER_CUBIC)
            new_image_save_path = src_image_save_dir + class_name + "_" + image_name
            cv2.imwrite(new_image_save_path, resize_image_data, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
            index_writer.write("{},{}\n".format(class_name + "_" + image_name, class_name))
            # image_id = ".".join(image_name.split(".")[:-1]).split("_")[-1]
            # image_anno_file_path = tpl_src_annotation_dir.format(class_name, image_id)
            # print(image_anno_file_path)
            # data = sio.loadmat(image_anno_file_path)
            # print(data)
            # sys.exit(0)

# format_Caltech_101() 
Example 24
Project: DDRL   Author: anonymous-author1   File: noise.py    License: Apache License 2.0
def _augment(self, img, q):
        enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
        return cv2.imdecode(enc, 1) 
Example 25
Project: visual_dynamics   Author: alexlee-gk   File: visualize_reset_states.py    License: MIT License
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('reset_states_fname', type=str)
    parser.add_argument('--output_dir', '-o', type=str)
    args = parser.parse_args()

    with open(args.reset_states_fname, 'r') as reset_state_file:
        reset_state_config = from_yaml(reset_state_file)
    env = from_config(reset_state_config['environment_config'])
    reset_states = reset_state_config['reset_states']

    if args.output_dir:
        os.makedirs(args.output_dir, exist_ok=True)
    for traj_iter, reset_state in enumerate(reset_states):
        print(traj_iter)
        obs = env.reset(reset_state)
        image = cv2.cvtColor(obs['image'], cv2.COLOR_RGB2BGR)
        if args.output_dir:
            image_fname = os.path.join(args.output_dir, 'image_%03d.jpg' % traj_iter)
            cv2.imwrite(image_fname, image, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
        cv2.imshow("image", image)
        key = cv2.waitKey(0)
        key &= 255
        if key == 27 or key == ord('q'):
            print("Pressed ESC or q, exiting")
            break 
Example 26
Project: Jalali-Lab-Implementation-of-RAISR   Author: JalaliLabUCLA   File: Functions.py    License: GNU General Public License v3.0
def Prepare(im, patchSize, R):
    patchMargin = floor(patchSize/2)
    H, W = im.shape
    imL = imresize(im, 1 / R, interp='bicubic')
    # cv2.imwrite('Compressed.jpg', imL, [int(cv2.IMWRITE_JPEG_QUALITY), 85])
    # imL = cv2.imread('Compressed.jpg')
    # imL = imL[:,:,0]   # Optional: Compress the image
    imL = imresize(imL, (H, W), interp='bicubic')
    imL = im2double(imL)
    im_LR = imL
    return im_LR 
Example 27
Project: deepstar   Author: zerofox-oss   File: pad_transform_set_select_extract_plugin.py    License: BSD 3-Clause Clear License
def _pad(self, transform_set_path, transform_id, frame_id, metadata,
             target_set_id, size):
        """
        This method pads a transform.

        :param str transform_set_path: The transform set path.
        :param int transform_id: The transform ID.
        :param int frame_id: The frame ID.
        :param str metadata: Metadata for the transform.
        :param int target_set_id: The new transform set ID.
        :param int size: The size to which to pad.
        :rtype: None
        """

        transform_path = TransformFile().path(transform_set_path, transform_id,
                                              'jpg')
        img = cv2.imread(transform_path)
        img_height, img_width = img.shape[:2]

        img_padded = np.zeros((size, size, 3), dtype=np.uint8)
        img_padded[:img_height, :img_width, :] = img.copy()

        target_id = TransformModel().insert(target_set_id, frame_id,
                                            json.dumps(metadata), 0)
        output_path = TransformFile.path(
            TransformSetSubDir.path(target_set_id), target_id, 'jpg')

        cv2.imwrite(output_path, img_padded, [cv2.IMWRITE_JPEG_QUALITY, 100])

        debug(f'Transform with ID {target_id:08d} at {output_path} extracted '
              f'from transform with ID {transform_id:08d} at {transform_path}',
              4) 
Example 28
def _resize(self, transform_set_path, transform_id, frame_id, metadata,
                target_set_id, max_size):
        """
        This method resizes a transform.

        :param str transform_set_path: The transform set path.
        :param int transform_id:  The transform ID.
        :param int frame_id: The frame ID.
        :param str metadata: Metadata for the transform.
        :param int target_set_id: The new transform set ID.
        :param int max_size: The max size.
        :rtype: None
        """

        transform_path = TransformFile().path(transform_set_path, transform_id,
                                              'jpg')
        img = cv2.imread(transform_path)
        img_height, img_width = img.shape[:2]

        if img_height > max_size or img_width > max_size:
            if img_height > img_width:
                img = imutils.resize(img, height=max_size)
            else:
                img = imutils.resize(img, width=max_size)

        target_id = TransformModel().insert(target_set_id, frame_id,
                                            json.dumps(metadata), 0)
        output_path = TransformFile.path(
            TransformSetSubDir.path(target_set_id), target_id, 'jpg')

        cv2.imwrite(output_path, img, [cv2.IMWRITE_JPEG_QUALITY, 100])

        debug(f'Transform with ID {target_id:08d} at {output_path} extracted '
              f'from transform with ID {transform_id:08d} at {transform_path}',
              4) 
Example 29
Project: petastorm   Author: uber   File: codecs.py    License: Apache License 2.0
def encode(self, unischema_field, value):
        """Encodes the image using OpenCV."""
        if unischema_field.numpy_dtype != value.dtype:
            raise ValueError("Unexpected type of {} feature, expected {}, got {}".format(
                unischema_field.name, unischema_field.numpy_dtype, value.dtype
            ))

        if not _is_compliant_shape(value.shape, unischema_field.shape):
            raise ValueError("Unexpected dimensions of {} feature, expected {}, got {}".format(
                unischema_field.name, unischema_field.shape, value.shape
            ))

        if len(value.shape) == 2:
            # Greyscale image
            image_bgr_or_gray = value
        elif len(value.shape) == 3 and value.shape[2] == 3:
            # Convert RGB to BGR
            image_bgr_or_gray = value[:, :, (2, 1, 0)]
        else:
            raise ValueError('Unexpected image dimensions. Supported dimensions are (H, W) or (H, W, 3). '
                             'Got {}'.format(value.shape))

        _, contents = cv2.imencode(self._image_codec,
                                   image_bgr_or_gray,
                                   [int(cv2.IMWRITE_JPEG_QUALITY), self._quality])
        return bytearray(contents) 
Example 30
Project: niryo_one_ros   Author: NiryoRobotics   File: image_functions.py    License: GNU General Public License v3.0
def compress_image(img, quality=90):
    """
    Compress OpenCV image
    :param img: OpenCV Image
    :param quality: integer between 1 - 100. The higher it is, the less information will be lost,
    but the heavier the compressed image will be
    :return: string representing compressed image
    """
    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
    if not result:
        return False, None

    return True, np.array(encimg).tostring()