Python cv2.COLOR_BGR2RGB Examples

The following are 30 code examples of cv2.COLOR_BGR2RGB, the flag passed to cv2.cvtColor() to convert an image from OpenCV's default BGR channel order to RGB. The originating project and source file are noted above each example.
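Before the examples, a minimal sketch of the conversion itself (the file name is hypothetical): OpenCV reads and displays images in BGR order, while most other libraries (matplotlib, PIL, deep-learning frameworks) expect RGB, so this one-line conversion appears wherever the two meet.

import cv2

img_bgr = cv2.imread('example.jpg')                  # hypothetical path; OpenCV loads BGR
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)   # reorder channels to RGB
# the inverse flag is cv2.COLOR_RGB2BGR, used before cv2.imshow() or cv2.imwrite()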
Example #1
Source File: object_detection_multithreading.py    From object_detector_app with MIT License
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_q.put(detect_objects(frame_rgb, sess, detection_graph))

    # NOTE: with "while True" above and no break, these cleanup calls are unreachable as written.
    fps.stop()
    sess.close()
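The worker above blocks on input_q.get() forever; a minimal, hypothetical harness for driving it (the queue sizes and threading setup are assumptions, not part of the original file):

from queue import Queue
from threading import Thread

input_q = Queue(maxsize=5)   # BGR frames from a capture loop
output_q = Queue()           # detection results

Thread(target=worker, args=(input_q, output_q), daemon=True).start()
# producer side: input_q.put(frame)    consumer side: result = output_q.get()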
Example #2
Source File: utils_image.py    From KAIR with MIT License
def imread_uint(path, n_channels=3):
    #  input: path
    # output: HxWx3(RGB or GGG), or HxWx1 (G)
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
    return img


# --------------------------------------------
# matlab's imwrite
# -------------------------------------------- 
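A quick usage sketch for imread_uint above (the path is hypothetical):

img3 = imread_uint('test.png', n_channels=3)  # HxWx3 uint8, RGB (grayscale inputs replicated to GGG)
img1 = imread_uint('test.png', n_channels=1)  # HxWx1 uint8, single grayscale channel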
Example #3
Source File: facerec_from_webcam_mult_thread.py    From face-attendance-machine with Apache License 2.0
def worker(input_q, output_q):
    # Pull frames from input_q, run face_process on them, and push results to output_q.
    fps = FPS().start()
    while True:
        myprint("updata start ", time.time())
        fps.update()
        myprint("updata end ", time.time())
        # global lock
        # if lock.acquire():
        #    lock.release()

        frame = input_q.get()
        myprint("out queue {} and input que size {} after input_q get".format(output_q.qsize(), input_q.qsize()), time.time())
        myprint("out queue {} and input que size {} after lock release ".format(output_q.qsize(), input_q.qsize()), time.time())
        myprint("face process start", time.time())
        # frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        out_frame = face_process(frame)
        myprint("out queue {} and input que size {}".format(output_q.qsize(), input_q.qsize()), time.time())
        output_q.put(out_frame)
        myprint("out queue {} and input que size {} ".format(output_q.qsize(), input_q.qsize()), time.time())

    fps.stop() 
Example #4
Source File: data.py    From kuzushiji-recognition with MIT License
def __getitem__(self, index, to_tensor=True):
    fn = self.image_fns[index]
    img = cv2.cvtColor(cv2.imread(fn, 1), cv2.COLOR_BGR2RGB)

    img, pad_top, pad_left = KuzushijiDataset.pad_to_ratio(img, ratio=1.5)
    h, w = img.shape[:2]
    # print(h / w, pad_left, pad_top)
    assert img.ndim == 3
    scaled_imgs = []
    for scale in self.scales:
      h_scale = int(scale * self.height)
      w_scale = int(scale * self.width)
      simg = cv2.resize(img, (w_scale, h_scale))

      if to_tensor:
        assert simg.ndim == 3, simg.ndim
        simg = simg.transpose((2, 0, 1))
        simg = th.from_numpy(simg.copy())

      scaled_imgs.append(simg)

    return scaled_imgs + [fn] 
Example #5
Source File: face_recognition_tester.py    From TripletLossFace with MIT License
def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
		min_im, image, all_frames = self.detect_which(path, get_face)

		for (confidence, who), frame in zip(min_im, all_frames):
			color = self.colors[who]
			x1, x2, y1, y2 = frame
			cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
			cv2.putText(image, f"{who}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA)  # -{round(float(confidence), 2)}

		if turn_rgb:
			image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

		if show:
			cv2.imshow("a", image)
			cv2.waitKey(0)

		return image 
Example #6
Source File: main_engine.py    From TripletLossFace with MIT License
def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
		min_im, image, all_frames = self.index_image(path, get_face)

		for (confidence, who), frame in zip(min_im, all_frames):
			try:
				color = self.colors[str(who)]
				x1, x2, y1, y2 = frame
				cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
				cv2.putText(image, f"id: {str(who)}- conf:{abs(round(float(confidence), 2))}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA)
			except KeyError:
				continue

		if turn_rgb:
			image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

		if show:
			cv2.imshow("a", image)
			cv2.waitKey(1)

		return image, min_im, all_frames 
Example #7
Source File: object_detection_app.py    From object_detector_app with MIT License
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_q.put(detect_objects(frame_rgb, sess, detection_graph))

    # NOTE: as in Example #1, these cleanup calls are unreachable because the loop above never breaks.
    fps.stop()
    sess.close()
Example #8
Source File: image_descriptor.py    From netvlad_tf_open with MIT License
def describeAllJpegsInPath(self, path, batch_size, verbose=False):
        ''' returns a list of descriptors '''
        jpeg_paths = sorted(glob.glob(os.path.join(path, '*.jpg')))
        descs = []
        for batch_offset in range(0, len(jpeg_paths), batch_size):
            images = []
            for i in range(batch_offset, batch_offset + batch_size):
                if i == len(jpeg_paths):
                    break
                if verbose:
                    print('%d/%d' % (i, len(jpeg_paths)))
                if self.is_grayscale:
                    image = cv2.imread(jpeg_paths[i], cv2.IMREAD_GRAYSCALE)
                    images.append(np.expand_dims(
                            np.expand_dims(image, axis=0), axis=-1))
                else:
                    image = cv2.imread(jpeg_paths[i])
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    images.append(np.expand_dims(image, axis=0))
            batch = np.concatenate(images, 0)
            descs = descs + list(self.sess.run(
                    self.net_out, feed_dict={self.tf_batch: batch}))
        return descs 
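A hedged usage sketch (the instance name, path, and batch size are hypothetical):

descs = descriptor.describeAllJpegsInPath('/data/frames', batch_size=8)
print(len(descs))  # one descriptor per .jpg, in sorted filename order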
Example #9
Source File: Main.py    From bjtu_BinocularCameraRecord with MIT License
def loop2(self,text,w=1280,h=720):
        cap = cv2.VideoCapture(int(text))
        cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))  # property 6 = FOURCC
        global capnum2
        capnum2 = int(text)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
        global update2
        update2 = 1
        global shotmark2

        while (update2 == 1):
            ret, frame = cap.read()
            if not ret:
                continue  # skip frames the camera failed to deliver
            if shotmark2 == 1:
                fn = self.lineEdit.text()
                name = "photo/2_"+fn + "video.jpg"
                if os.path.exists(name):
                    name = "photo/2_" + fn + "video"+str(int(time.time()))+".jpg"
                cv2.imwrite(name, frame)
                shotmark2 = 0
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.original2_image.updateImage(frame)
        # cap.release()
        cv_img_rgb = np.zeros((700,700,3))
        self.original2_image.updateImage(cv_img_rgb) 
Example #10
Source File: surface.py    From License-Plate-Recognition with MIT License
def get_imgtk(self, img_bgr):
		img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
		im = Image.fromarray(img)
		imgtk = ImageTk.PhotoImage(image=im)
		wide = imgtk.width()
		high = imgtk.height()
		if wide > self.viewwide or high > self.viewhigh:
			wide_factor = self.viewwide / wide
			high_factor = self.viewhigh / high
			factor = min(wide_factor, high_factor)
			wide = int(wide * factor)
			if wide <= 0: wide = 1
			high = int(high * factor)
			if high <= 0: high = 1
			im = im.resize((wide, high), Image.ANTIALIAS)
			imgtk = ImageTk.PhotoImage(image=im)
		return imgtk 
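One practical note when using a helper like get_imgtk: Tkinter keeps no reference of its own to a PhotoImage, so the caller must hold one or the image is garbage-collected and the widget goes blank. A hedged usage sketch (the widget and instance names are hypothetical):

imgtk = surface.get_imgtk(frame_bgr)
label.configure(image=imgtk)
label.image = imgtk  # keep a reference so Tk doesn't garbage-collect it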
Example #11
Source File: surface.py    From License-Plate-Recognition with MIT License
def show_roi(self, r, roi, color):
		if r:
			roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
			roi = Image.fromarray(roi)
			self.imgtk_roi = ImageTk.PhotoImage(image=roi)
			self.roi_ctl.configure(image=self.imgtk_roi, state='enable')
			self.r_ctl.configure(text=str(r))
			self.update_time = time.time()
			try:
				c = self.color_transform[color]
				self.color_ctl.configure(text=c[0], background=c[1], state='enable')
			except KeyError:
				self.color_ctl.configure(state='disabled')
		elif self.update_time + 8 < time.time():
			self.roi_ctl.configure(state='disabled')
			self.r_ctl.configure(text="")
			self.color_ctl.configure(state='disabled') 
Example #12
Source File: test.py    From yolo_tensorflow with MIT License
def detect(self, img):
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0  # scale pixel values to [-1, 1]
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

        for i in range(len(result)):  # rescale box coordinates back to the original image size
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result 
Example #13
Source File: face_model.py    From insightface with MIT License
def get_input(self, face_img):
    ret = self.detector.detect_face(face_img, det_type=self.args.det)
    if ret is None:
      return None
    bbox, points = ret
    if bbox.shape[0]==0:
      return None
    bbox = bbox[0,0:4]
    points = points[0,:].reshape((2,5)).T
    #print(bbox)
    #print(points)
    nimg = face_preprocess.preprocess(face_img, bbox, points, image_size='112,112')
    nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
    aligned = np.transpose(nimg, (2,0,1))
    input_blob = np.expand_dims(aligned, axis=0)
    data = mx.nd.array(input_blob)
    db = mx.io.DataBatch(data=(data,))
    return db 
Example #14
Source File: eval.py    From yolo2-pytorch with GNU Lesser General Public License v3.0
def debug_visualize(self, data_yx_min, data_yx_max, yx_min, yx_max, c, tp, path):
        canvas = cv2.imread(path)
        canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)
        size = np.reshape(np.array(canvas.shape[:2], np.float32), [1, 2])
        data_yx_min, data_yx_max, yx_min, yx_max = (np.reshape(t.cpu().numpy(), [-1, 2]) * size for t in (data_yx_min, data_yx_max, yx_min, yx_max))
        canvas = self.draw_bbox(canvas, data_yx_min, data_yx_max, colors=['g'])
        canvas = self.draw_bbox(canvas, *(a[tp] for a in (yx_min, yx_max)), colors=['w'])
        fp = ~tp
        canvas = self.draw_bbox(canvas, *(a[fp] for a in (yx_min, yx_max)), colors=['k'])
        fig = plt.figure()
        ax = fig.gca()
        ax.imshow(canvas)
        ax.set_title('tp=%d, fp=%d' % (np.sum(tp), np.sum(fp)))
        fig.canvas.set_window_title(self.category[c] + ': ' + path)
        plt.show()
        plt.close(fig) 
Example #15
Source File: face_genderage.py    From insightface with MIT License
def get(self, img):
        assert self.param_file and self.model
        assert img.shape[2]==3 and img.shape[0:2]==self.image_size
        data = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        data = np.transpose(data, (2,0,1))
        data = np.expand_dims(data, axis=0)
        data = mx.nd.array(data)
        db = mx.io.DataBatch(data=(data,))
        self.model.forward(db, is_train=False)
        ret = self.model.get_outputs()[0].asnumpy()
        g = ret[:,0:2].flatten()
        gender = np.argmax(g)  # two-way gender logits
        a = ret[:,2:202].reshape( (100,2) )
        a = np.argmax(a, axis=1)  # 100 binary age-bin decisions
        age = int(sum(a))  # predicted age = number of positive decisions
        return gender, age 
Example #16
Source File: dataset.py    From rcan-tensorflow with MIT License
def convert_to_img(self):
        def to_img(i):
            # BUG in the original source: cv2.COLOR_BGR2RGB is an integer flag,
            # not an image, so this call cannot write a valid file. The intent
            # was presumably to convert the i-th raw image to RGB with
            # cv2.cvtColor(...) and write that instead.
            cv2.imwrite('imgHQ%05d.png' % i, cv2.COLOR_BGR2RGB)
            return True

        raw_data_shape = self.raw_data.shape  # (N, H * W * C)

        try:
            assert os.path.exists(self.save_file_name)
        except AssertionError:
            print("[-] There's no %s :(" % self.save_file_name)
            print("[*] Make directory at %s... " % self.save_file_name)
            os.mkdir(self.save_file_name)

        ii = [i for i in range(raw_data_shape[0])]

        pool = Pool(self.n_threads)
        print(pool.map(to_img, ii)) 
Example #17
Source File: create_hdf5.py    From iGAN with MIT License
def ProcessImage(img, channel=3):  # assumes img is a (width, width, 3) uint8 array; `width` is a module-level global
    if channel == 1:
        img = 255 - cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).reshape(1, width, width, 1)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).reshape(1, width, width, 3)
    return img 
Example #18
Source File: utils.py    From posenet-pytorch with Apache License 2.0
def _process_input(source_img, scale_factor=1.0, output_stride=16):
    target_width, target_height = valid_resolution(
        source_img.shape[1] * scale_factor, source_img.shape[0] * scale_factor, output_stride=output_stride)
    scale = np.array([source_img.shape[0] / target_height, source_img.shape[1] / target_width])

    input_img = cv2.resize(source_img, (target_width, target_height), interpolation=cv2.INTER_LINEAR)
    input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB).astype(np.float32)
    input_img = input_img * (2.0 / 255.0) - 1.0
    input_img = input_img.transpose((2, 0, 1)).reshape(1, 3, target_height, target_width)
    return input_img, source_img, scale 
Example #19
Source File: my_api.py    From Python-Tensorflow-Face-v2.0 with Apache License 2.0
def photo_read(self, path, num):
        # Use dlib's frontal_face_detector (wrapped by AlignDlib) as the feature extractor
        detector = align_dlib.AlignDlib(self.PREDICTOR_PATH)
        path = self.input_dir + '/' + path
        print(path + " 正在处理...")
        name_file = str(num) + '_' + path.split('/')[-1]
        name_file = self.output_dir + '/' + name_file
        # create the output directory if it does not exist
        if not os.path.exists(name_file):
            os.makedirs(name_file)
        index = 1

        for filename in os.listdir(path):
            if filename.endswith('.jpg'):
                img_path = path + '/' + filename
                print(img_path)
                # read the image from file
                img_bgr = cv2.imread(img_path)  # OpenCV reads BGR
                img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)  # convert to RGB
                face_align = detector.align(size, img_rgb)
                if face_align is not None:
                    face_align = cv2.cvtColor(face_align, cv2.COLOR_RGB2BGR)  # convert back to BGR
                    # save the aligned face
                    cv2.imwrite(name_file + '/' + str(index) + '.jpg', face_align)
                    index += 1
Example #20
Source File: dataset.py    From kaggle-aptos2019-blindness-detection with MIT License
def __getitem__(self, index):
        img_path, label = self.img_paths[index], self.labels[index]

        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # img = imread(img_path)
        img = Image.fromarray(img)

        if self.transform is not None:
            img = self.transform(img)

        return img, label 
Example #21
Source File: utils.py    From iGAN with MIT License
def CVShow(im, im_name='', wait=1):
    if len(im.shape) >= 3 and im.shape[2] == 3:
        im_show = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    else:
        im_show = im

    cv2.imshow(im_name, im_show)
    cv2.waitKey(wait)
    return im_show 
Example #22
Source File: my_api.py    From Python-Tensorflow-Face-v2.0 with Apache License 2.0
def get_one_image(self, x, detector):
        path_name_x = self.path + self.path_array[x-1]
        try:
            img_x = cv2.imread(path_name_x)
        except IndexError:
            print(path_name_x)
            print('error')
        else:
            img_x_rgb = cv2.cvtColor(img_x, cv2.COLOR_BGR2RGB)  # convert to RGB
            face_align_rgb_x = detector.align(size, img_x_rgb)
            if face_align_rgb_x is None:
                det = dlib.get_frontal_face_detector()
                gray_img = cv2.cvtColor(img_x, cv2.COLOR_BGR2GRAY)
                # run face detection with the dlib detector
                dets = det(gray_img, 1)
                if len(dets) > 0:
                    # note: x1/y1 are the top/bottom row bounds, x2/y2 the left/right column bounds
                    x1 = dets[0].top() if dets[0].top() > 0 else 0
                    y1 = dets[0].bottom() if dets[0].bottom() > 0 else 0
                    x2 = dets[0].left() if dets[0].left() > 0 else 0
                    y2 = dets[0].right() if dets[0].right() > 0 else 0
                    face = img_x[x1:y1, x2:y2]
                else:
                    face = cv2.resize(img_x, (size, size))
                face_align_x = cv2.resize(face, (size, size))
            else:
                face_align_x = cv2.cvtColor(face_align_rgb_x, cv2.COLOR_RGB2BGR)  # convert back to BGR
            x_img = np.array(face_align_x)
            x_img = x_img.astype('float32') / 255.0
            return x_img 
Example #23
Source File: pascal_voc.py    From yolo_tensorflow with MIT License
def image_read(self, imname, flipped=False):
        image = cv2.imread(imname)
        image = cv2.resize(image, (self.image_size, self.image_size))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image = (image / 255.0) * 2.0 - 1.0
        if flipped:
            image = image[:, ::-1, :]  # horizontal flip
        return image 
Example #24
Source File: test.py    From ImageColorTheme with MIT License
def getPixData(imgfile='imgs/avatar_282x282.png'):
    return cv.cvtColor(cv.imread(imgfile, 1), cv.COLOR_BGR2RGB) 
Example #25
Source File: image.py    From yolo2-pytorch with GNU Lesser General Public License v3.0
def __call__(self, image):
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 
Example #26
Source File: imgproc.py    From graph_distillation with Apache License 2.0
def imread_rgb(dset, path):
  if dset == 'ucf-101':
    rgb = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    return rgb[:, :-1]  # oflow is 1px smaller than rgb in ucf-101
  elif dset == 'ntu-rgbd' or dset == 'pku-mmd' or dset == 'cad-60':
    rgb = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    return rgb
  else:
    assert False, 'unknown dataset: %s' % dset
Example #27
Source File: cvm1.py    From Traffic-Signs-and-Object-Detection with GNU General Public License v3.0
def display_camera_stream(self):
        val, frame = self.capture.read()

        frame = cv2.flip(frame, 1)
        self.frame_changed.emit(frame)

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if self.face_rect is not None:
            x, y, w, h = self.face_rect
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

        image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
        self.image_label.setPixmap(QPixmap.fromImage(image)) 
Example #28
Source File: create_dataset.py    From tensorflow-data with MIT License
def load_image(addr):
    # read an image and resize to (224, 224)
    # cv2 load images as BGR, convert it to RGB
    img = cv2.imread(addr)
    if img is None:
        return None
    img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img 
Example #29
Source File: MTCNN_detect.py    From speech_separation with MIT License
def face_detect(file, detector, frame_path=frame_path, cat_train=cat_train):
    name = file.replace('.jpg', '').split('-')
    log = cat_train.iloc[int(name[0])]
    x = log['pos_x']
    y = log['pos_y']

    img = cv2.imread('%s%s'%(frame_path,file))
    x = img.shape[1] * x
    y = img.shape[0] * y
    faces = detector.detect_faces(img)
    # check whether any face was detected
    if len(faces) == 0:
        print('no face detect: ' + file)
        return  # no face
    bounding_box = bounding_box_check(faces,x,y)
    if bounding_box is None:
        print('face is not related to given coord: '+file)
        return
    print(file," ",bounding_box)
    print(file," ",x, y)
    crop_img = img[bounding_box[1]:bounding_box[1] + bounding_box[3],bounding_box[0]:bounding_box[0]+bounding_box[2]]
    crop_img = cv2.resize(crop_img,(160,160))
    cv2.imwrite('%s/frame_'%output_dir + name[0] + '_' + name[1] + '.jpg', crop_img)
    #crop_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB)
    #plt.imshow(crop_img)
    #plt.show() 
Example #30
Source File: CVTransforms.py    From ext_portrait_segmentation with MIT License
def __call__(self, image, label):
        if random.random() < set_ratio:
            return image, label
        image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

        random_factor = np.random.randint(4, 17) / 10.
        color_image = ImageEnhance.Color(image).enhance(random_factor)
        random_factor = np.random.randint(4, 17) / 10.
        brightness_image = ImageEnhance.Brightness(color_image).enhance(random_factor)
        random_factor = np.random.randint(6, 15) / 10.
        contrast_image = ImageEnhance.Contrast(brightness_image).enhance(random_factor)
        random_factor = np.random.randint(8, 13) / 10.
        image = ImageEnhance.Sharpness(contrast_image).enhance(random_factor)
        return np.uint8(np.array(image)[:,:,::-1]), label  # back to BGR channel order