Python cv2.COLOR_RGB2GRAY Examples

The following are 30 code examples of cv2.COLOR_RGB2GRAY, collected from open-source projects. The originating project, source file, and license are listed above each example.
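For orientation, here is a minimal, self-contained sketch of the conversion every example below relies on; the file name is a placeholder.

import cv2

bgr = cv2.imread("example.jpg")               # OpenCV loads image files as BGR
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)    # reorder to RGB if the rest of the pipeline expects RGB
gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)  # single-channel result, same dtype as the input
print(gray.shape, gray.dtype)                 # e.g. (720, 1280) uint8

Note that applying COLOR_RGB2GRAY directly to a BGR image swaps the red and blue weights in the 0.299 R + 0.587 G + 0.114 B luma formula, which is why some projects convert to RGB first while others simply accept the small difference.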
Example #1
Source File: plate_locate.py    From EasyPR-python with Apache License 2.0
def sobelOperT(self, img, blursize, morphW, morphH):
        '''
            No different from sobelOper?
        '''
        blur = cv2.GaussianBlur(img, (blursize, blursize), sigmaX=0, sigmaY=0, borderType=cv2.BORDER_DEFAULT)  # keywords keep the trailing values off the positional dst slot

        if len(blur.shape) == 3:
            gray = cv2.cvtColor(blur, cv2.COLOR_RGB2GRAY)
        else:
            gray = blur

        x = cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=3)  # ksize must be a keyword; the 5th positional slot is dst
        absX = cv2.convertScaleAbs(x)
        grad = cv2.addWeighted(absX, 1, absX, 0, 0)  # src2 must be an array; with beta=0 this is simply absX

        _, threshold = cv2.threshold(grad, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

        element = cv2.getStructuringElement(cv2.MORPH_RECT, (morphW, morphH))
        threshold = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, element)

        return threshold 
Example #2
Source File: object_detection_2d_photometric_ops.py    From data_generator_object_detection_2d with GNU General Public License v3.0
def __call__(self, image, labels=None):
        if self.current == 'RGB' and self.to == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        elif self.current == 'RGB' and self.to == 'GRAY':
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
            if self.keep_3ch:
                image = np.stack([image] * 3, axis=-1)
        elif self.current == 'HSV' and self.to == 'RGB':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
        elif self.current == 'HSV' and self.to == 'GRAY':
            # NOTE: cv2 does not appear to define a COLOR_HSV2GRAY constant, so this branch
            # fails at runtime; the usual workaround is HSV -> RGB followed by RGB -> GRAY.
            image = cv2.cvtColor(image, cv2.COLOR_HSV2GRAY)
            if self.keep_3ch:
                image = np.stack([image] * 3, axis=-1)
        if labels is None:
            return image
        else:
            return image, labels 
Example #3
Source File: atari_wrapper.py    From tf2rl with MIT License
def observation(self, obs):
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]

        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height), interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            frame = np.expand_dims(frame, -1)

        if self._key is None:
            obs = frame
        else:
            obs = obs.copy()
            obs[self._key] = frame
        return obs 
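Examples #3, #5, and #14 below share this same frame-warping logic. As a standalone sketch (the function name and the 84x84 default are illustrative, not taken from any of these projects):

import cv2
import numpy as np

def warp_frame(frame: np.ndarray, width: int = 84, height: int = 84) -> np.ndarray:
    # Grayscale an RGB frame, shrink it with INTER_AREA, and restore the channel axis.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    resized = cv2.resize(gray, (width, height), interpolation=cv2.INTER_AREA)
    return np.expand_dims(resized, -1)  # shape (height, width, 1), ready for frame stacking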
Example #4
Source File: rule_based.py    From PythonPilot with Apache License 2.0
def __apply_canny(self, src, ksize=7, sigma=1.2, low_th=10, high_th=70):
        """Apply canny edge detection.

        Args:
            src (int): Input image BGR.
                       numpy.ndarray, (720, 1280, 3), 0~255

        Returns:
            dst (int): Output image.
                       numpy.ndarray, (720, 1280), 0~1

        """
        gray = cv2.cvtColor(src, cv2.COLOR_RGB2GRAY)
        blur_gray = cv2.GaussianBlur(gray, (ksize, ksize), sigma)
        dst = cv2.Canny(blur_gray, low_th, high_th) // 255

        return dst 
Example #5
Source File: atari_wrappers.py    From torchbeast with Apache License 2.0
def observation(self, obs):
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]

        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height), interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            frame = np.expand_dims(frame, -1)

        if self._key is None:
            obs = frame
        else:
            obs = obs.copy()
            obs[self._key] = frame
        return obs 
Example #6
Source File: functional.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def adjust_saturation(img, saturation_factor):
    """Adjust color saturation of an image.

    Args:
        img (CV Image): CV Image to be adjusted.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        CV Image: Saturation adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))

    im = img.astype(np.float32)
    degenerate = cv2.cvtColor(
        cv2.cvtColor(
            im,
            cv2.COLOR_RGB2GRAY),
        cv2.COLOR_GRAY2RGB)
    im = (1 - saturation_factor) * degenerate + saturation_factor * im
    im = im.clip(min=0, max=255)
    return im.astype(img.dtype) 
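A quick usage sketch for adjust_saturation, with a synthetic image standing in for real data:

import numpy as np

img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)  # stand-in RGB image
half_sat = adjust_saturation(img, 0.5)   # blend halfway toward the grayscale version
no_sat = adjust_saturation(img, 0.0)     # fully desaturated: all three channels equal
assert half_sat.shape == img.shape and half_sat.dtype == img.dtype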
Example #7
Source File: functional.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an Image.

    Args:
        img (CV Image): CV Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        CV Image: Contrast adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))

    im = img.astype(np.float32)
    mean = round(cv2.cvtColor(im, cv2.COLOR_RGB2GRAY).mean())
    im = (1 - contrast_factor) * mean + contrast_factor * im
    im = im.clip(min=0, max=255)
    return im.astype(img.dtype) 
Example #8
Source File: imgproc.py    From dataflow with Apache License 2.0
def _augment(self, img, r):
        old_dtype = img.dtype

        if img.ndim == 3:
            if self.rgb is not None:
                m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
                grey = cv2.cvtColor(img.astype('float32'), m)
                mean = np.mean(grey)
            else:
                mean = np.mean(img, axis=(0, 1), keepdims=True)
        else:
            mean = np.mean(img)

        img = img * r + mean * (1 - r)
        if self.clip or old_dtype == np.uint8:
            img = np.clip(img, 0, 255)
        return img.astype(old_dtype) 
Example #9
Source File: opencv_functional.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License
def to_grayscale(img, num_output_channels=1):
    """Convert image to grayscale version of image.
    Args:
        img (numpy ndarray): Image to be converted to grayscale.
    Returns:
        numpy ndarray: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel
            if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))

    if num_output_channels==1:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)[:,:,np.newaxis]
    elif num_output_channels==3:
        # much faster than doing cvtColor to go back to gray
        img = np.broadcast_to(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)[:,:,np.newaxis], img.shape)
    return img 
Example #10
Source File: functional.py    From opencv_transforms with MIT License
def to_grayscale(img, num_output_channels=1):
    """Convert image to grayscale version of image.
    Args:
        img (numpy ndarray): Image to be converted to grayscale.
    Returns:
        numpy ndarray: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel
            if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy ndarray. Got {}'.format(type(img)))

    if num_output_channels==1:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)[:,:,np.newaxis]
    elif num_output_channels==3:
        # much faster than doing cvtColor to go back to gray
        img = np.broadcast_to(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)[:,:,np.newaxis], img.shape) 
    return img 
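One caveat for the 3-channel branch in Examples #9 and #10: np.broadcast_to returns a read-only view, so the result has to be copied before any in-place writes. A hypothetical usage:

import numpy as np

rgb = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)  # stand-in RGB image
gray3 = to_grayscale(rgb, num_output_channels=3)
gray3 = gray3.copy()   # the broadcast view is not writeable
gray3[..., 0] = 0      # safe only after the copy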
Example #11
Source File: CVFeatures.py    From videofeatures with MIT License
def computeFeatures(self, video):
    descriptor_array = []
    for i in range(video.shape[0]):
      frame = cv2.cvtColor(video[i], cv2.COLOR_RGB2GRAY).astype('uint8')
      _, descriptors = cv2.xfeatures2d.SURF_create().detectAndCompute(frame, None)

      # make sure that descriptors have shape (n_descriptor, 64)
      if descriptors is not None:
        if descriptors.shape[0] < self.n_descriptors:
          descriptors = np.concatenate([descriptors, np.zeros((self.n_descriptors - descriptors.shape[0], 64))],
                                       axis=0)
        else:
          descriptors = descriptors[:self.n_descriptors]
      else:
        descriptors = np.zeros((self.n_descriptors, 64))

      assert descriptors.shape == (self.n_descriptors, 64)
      descriptor_array.append(descriptors)

    return np.concatenate(descriptor_array, axis=0) 
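Note that cv2.xfeatures2d is only available in the opencv-contrib-python builds, and SURF is a patented "non-free" algorithm that recent OpenCV releases keep disabled unless the library was compiled with OPENCV_ENABLE_NONFREE.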
Example #12
Source File: CVFeatures.py    From videofeatures with MIT License
def computeFeatures(self, video):
    """
    todo: improve documentation
    Computes SIFT features for a single video.
    :param video: a video of shape (n_frames, width, height, channel)
    :return: the features, shape ()
    """
    descriptor_array = []
    for i in range(video.shape[0]):
      frame = cv2.cvtColor(video[i], cv2.COLOR_RGB2GRAY).astype('uint8')
      _, descriptors = cv2.xfeatures2d.SIFT_create(nfeatures=self.n_descriptors).detectAndCompute(frame, None)

      if descriptors is not None:
        if descriptors.shape[0] < self.n_descriptors:
          descriptors = np.concatenate([descriptors, np.zeros((self.n_descriptors - descriptors.shape[0], 128))], axis=0)
        else:
          descriptors = descriptors[:self.n_descriptors]
      else:
        descriptors = np.zeros((self.n_descriptors, 128))

      assert descriptors.shape == (self.n_descriptors, 128)
      descriptor_array.append(descriptors)
    features = np.concatenate(descriptor_array, axis=0)
    return features 
Example #13
Source File: dataset_scene.py    From Decoupled-attention-network with MIT License
def keepratio_resize(self, img):
        cur_ratio = img.size[0] / float(img.size[1])

        mask_height = self.img_height
        mask_width = self.img_width
        img = np.array(img)
        if len(img.shape) == 3:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        if cur_ratio > self.target_ratio:
            cur_target_height = self.img_height
            cur_target_width = self.img_width
        else:
            cur_target_height = self.img_height
            cur_target_width = int(self.img_height * cur_ratio)
        img = cv2.resize(img, (cur_target_width, cur_target_height))
        start_x = int((mask_height - img.shape[0])/2)
        start_y = int((mask_width - img.shape[1])/2)
        mask = np.zeros([mask_height, mask_width]).astype(np.uint8)
        mask[start_x : start_x + img.shape[0], start_y : start_y + img.shape[1]] = img
        img = mask        
        return img 
Example #14
Source File: dqn_atari.py    From cleanrl with MIT License
def observation(self, obs):
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]

        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height), interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            frame = np.expand_dims(frame, -1)

        if self._key is None:
            obs = frame
        else:
            obs = obs.copy()
            obs[self._key] = frame
        return obs 
Example #15
Source File: device.py    From fitch with MIT License
def screen_shot_to_object(self) -> np.ndarray:
        """ screen shot and return numpy array (data saved in memory) """
        pic_path = self.screen_shot()
        # temp file will be automatically removed after usage
        # NOTE: cv2.COLOR_RGB2GRAY is a cvtColor conversion code, not an imread flag;
        # cv2.IMREAD_GRAYSCALE is the flag that actually requests a grayscale load.
        data = cv2.imread(pic_path, cv2.COLOR_RGB2GRAY)
        os.remove(pic_path)
        return data 
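If a grayscale read is what is actually intended here, a corrected sketch would pass an imread flag rather than a cvtColor code (the path is a placeholder):

import cv2

data = cv2.imread("screenshot.png", cv2.IMREAD_GRAYSCALE)   # grayscale load
# or read in color (BGR) and convert explicitly:
# data = cv2.cvtColor(cv2.imread("screenshot.png"), cv2.COLOR_BGR2GRAY)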
Example #16
Source File: agc_demos.py    From ICML2019-TREX with MIT License
def GrayScaleWarpImage(image):
    """Warp frames to 84x84 as done in the Nature paper and later work."""
    width=84
    height=84
    frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
    #frame = np.expand_dims(frame, -1)
    return frame 
Example #17
Source File: env.py    From Street-fighter-A3C-ICM-pytorch with MIT License
def process_frame(frame):
    if frame is not None:
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (168, 168))[None, :, :] / 255.
        return frame
    else:
        return np.zeros((1, 168, 168)) 
Example #18
Source File: atari_wrappers.py    From ICML2019-TREX with MIT License
def observation(self, frame):
        if self.grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        if self.grayscale:
            frame = np.expand_dims(frame, -1)
        return frame 
Example #19
Source File: atari_wrappers.py    From ICML2019-TREX with MIT License
def observation(self, frame):
        if self.grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        if self.grayscale:
            frame = np.expand_dims(frame, -1)
        return frame 
Example #20
Source File: utils.py    From Deep-Q-Learning-Paper-To-Code with MIT License
def observation(self, obs):
        new_frame = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        resized_screen = cv2.resize(new_frame, self.shape[1:],
                                    interpolation=cv2.INTER_AREA)

        new_obs = np.array(resized_screen, dtype=np.uint8).reshape(self.shape)
        new_obs = np.swapaxes(new_obs, 2,0)
        new_obs = new_obs / 255.0
        return new_obs 
Example #21
Source File: utils.py    From Deep-Q-Learning-Paper-To-Code with MIT License
def observation(self, obs):
        new_frame = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        resized_screen = cv2.resize(new_frame, self.shape[1:],
                                    interpolation=cv2.INTER_AREA)

        new_obs = np.array(resized_screen, dtype=np.uint8).reshape(self.shape)
        new_obs = np.swapaxes(new_obs, 2,0)
        new_obs = new_obs / 255.0
        return new_obs 
Example #22
Source File: rule_based.py    From PythonPilot with Apache License 2.0
def __apply_multi_threshold(self, src):
        """Apply multi thresholding using LAB, HLS and HSV.

        Args:
            src (int): Input image BGR.
                       numpy.ndarray, (720, 1280, 3), 0~255

        Returns:
            dst (int): Output image.
                       numpy.ndarray, (720, 1280), 0~1

        """
        settings = []
        settings.append({'cspace': 'LAB', 'channel': 2, 'clipLimit': 2.0, 'threshold': 190})
        settings.append({'cspace': 'HLS', 'channel': 1, 'clipLimit': 1.0, 'threshold': 200})
        settings.append({'cspace': 'HSV', 'channel': 2, 'clipLimit': 3.0, 'threshold': 230})

        gray = cv2.cvtColor(src, cv2.COLOR_RGB2GRAY)
        dst = np.zeros_like(gray)
        for s in settings:
            color_t = getattr(cv2, 'COLOR_RGB2{}'.format(s['cspace']))
            gray = cv2.cvtColor(src, color_t)[:,:,s['channel']]
            
            clahe = cv2.createCLAHE(s['clipLimit'], tileGridSize=(8,8))
            norm_img = clahe.apply(gray)
            
            binary = np.zeros_like(norm_img)
            binary[(norm_img >= s['threshold']) & (norm_img <= 255)] = 1
            dst[(dst == 1) | (binary == 1)] = 1

        return dst 
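The loop in the example above builds each binary mask from a CLAHE-equalized single channel. A minimal standalone sketch of one iteration, with illustrative values in place of the settings dict:

import cv2
import numpy as np

channel = np.random.randint(0, 256, size=(720, 1280), dtype=np.uint8)  # stand-in for one color channel
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
equalized = clahe.apply(channel)
binary = np.zeros_like(equalized)
binary[equalized >= 190] = 1   # threshold value taken from the LAB setting above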
Example #23
Source File: utils.py    From Deep-Q-Learning-Paper-To-Code with MIT License
def observation(self, obs):
        new_frame = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        resized_screen = cv2.resize(new_frame, self.shape[1:],
                                    interpolation=cv2.INTER_AREA)

        new_obs = np.array(resized_screen, dtype=np.uint8).reshape(self.shape)
        new_obs = np.swapaxes(new_obs, 2,0)
        new_obs = new_obs / 255.0
        return new_obs 
Example #24
Source File: utils.py    From Deep-Q-Learning-Paper-To-Code with MIT License
def observation(self, obs):
        new_frame = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        resized_screen = cv2.resize(new_frame, self.shape[1:],
                                    interpolation=cv2.INTER_AREA)
        new_obs = np.array(resized_screen, dtype=np.uint8).reshape(self.shape)
        new_obs = new_obs / 255.0
        return new_obs 
Example #25
Source File: utils.py    From Deep-Q-Learning-Paper-To-Code with MIT License
def observation(self, obs):
        new_frame = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        resized_screen = cv2.resize(new_frame, self.shape[1:],
                                    interpolation=cv2.INTER_AREA)
        new_obs = np.array(resized_screen, dtype=np.uint8).reshape(self.shape)
        new_obs = new_obs / 255.0

        return new_obs 
Example #26
Source File: datagen.py    From Convolutional-Pose-Machine-tf with GNU Lesser General Public License v3.0
def test(self, toWait=0.2):
        """ TESTING METHOD
        You can run it to see if the preprocessing is well done.
        Wait a few seconds for loading, then a slideshow appears with the image and highlighted joints
        /!\ Use Esc to quit
        Args:
            toWait : In sec, time between pictures
        """
        self._create_train_table()
        self._create_sets()
        for i in range(len(self.train_set)):
            img = self.open_img(self.train_set[i])
            w = self.data_dict[self.train_set[i]]['weights']
            padd, box = self._crop_data(img.shape[0], img.shape[1], self.data_dict[self.train_set[i]]['box'],
                                        self.data_dict[self.train_set[i]]['joints'], boxp=0.0)
            new_j = self._relative_joints(box, padd, self.data_dict[self.train_set[i]]['joints'], to_size=self.in_size)
            rhm = self._generate_hm(self.in_size, self.in_size, new_j, self.in_size, w)
            rimg = self._crop_img(img, padd, box)
            # See Error in self._generator
            # rimg = cv2.resize(rimg, (self.in_size,self.in_size))
            rimg = scm.imresize(rimg, (self.in_size, self.in_size))
            # rhm = np.zeros((self.in_size,self.in_size,16))
            # for i in range(16):
            #	rhm[:,:,i] = cv2.resize(rHM[:,:,i], (self.in_size,self.in_size))
            grimg = cv2.cvtColor(rimg, cv2.COLOR_RGB2GRAY)
            cv2.imshow('image', grimg / 255 + np.sum(rhm, axis=2))
            # Wait
            time.sleep(toWait)
            if cv2.waitKey(1) == 27:
                print('Ended')
                cv2.destroyAllWindows()
                break

    # ------------------------------- PCK METHODS------------------------------- 
Example #27
Source File: toolbox.py    From stagesepx with MIT License
def turn_grey(old: np.ndarray) -> np.ndarray:
    try:
        return cv2.cvtColor(old, cv2.COLOR_RGB2GRAY)
    except cv2.error:
        return old 
Example #28
Source File: toolbox.py    From findit with MIT License
def turn_grey(old: np.ndarray) -> np.ndarray:
    try:
        return cv2.cvtColor(old, cv2.COLOR_RGB2GRAY)
    except cv2.error:
        return old 
Example #29
Source File: visual_augmentation.py    From face_landmark with Apache License 2.0
def gray(src):
    g_img=cv2.cvtColor(src,cv2.COLOR_RGB2GRAY)
    src[:,:,0]=g_img
    src[:,:,1]=g_img
    src[:,:,2]=g_img
    return src 
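Note that this helper overwrites the channels of src in place and also returns it, so callers that need to keep the original color image should pass in a copy.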
Example #30
Source File: atari_wrappers_deprecated.py    From learning2run with MIT License
def process(frame):
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (84, 84), interpolation=cv2.INTER_AREA)
        return frame.reshape(84, 84, 1)