Python cv2.COLOR_RGB2HLS Examples

The following are 20 code examples that use the cv2.COLOR_RGB2HLS conversion code with cv2.cvtColor(), collected from open-source projects. The source file, project, and license for each example are listed above the code.
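Before the examples, a minimal conversion sketch (not taken from any of the projects below): for 8-bit input, OpenCV orders the output channels as H, L, S, with hue in [0, 179] and lightness/saturation in [0, 255].

import cv2
import numpy as np

# Convert an RGB uint8 image to HLS and back.
rgb = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
hls = cv2.cvtColor(rgb, cv2.COLOR_RGB2HLS)
h, l, s = hls[:, :, 0], hls[:, :, 1], hls[:, :, 2]  # H in [0, 179], L and S in [0, 255]
rgb_back = cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)     # inverse conversion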
Example #1
Source File: image_process.py    From Advanced_Lane_Lines with MIT License
def color_grid_thresh(img, s_thresh=(170,255), sx_thresh=(20, 100)):
	img = np.copy(img)
	# Convert to HLS color space and separate the L and S channels
	hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
	l_channel = hls[:,:,1]
	s_channel = hls[:,:,2]
	# Sobel x
	sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
	abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines
	scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

	# Threshold x gradient
	sxbinary = np.zeros_like(scaled_sobel)
	sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

	# Threshold color channel
	s_binary = np.zeros_like(s_channel)
	s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

	# Combine the two binary masks
	binary = sxbinary | s_binary

	# Stack each channel (for visually checking the pixel source)
	# color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary,s_binary)) * 255
	return binary 
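A minimal usage sketch for the helper above; the image path is hypothetical, and the BGR-to-RGB conversion is needed because cv2.imread returns BGR while the function expects RGB.

import cv2

bgr = cv2.imread("test_images/test1.jpg")  # hypothetical path; any RGB road image works
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
binary = color_grid_thresh(rgb, s_thresh=(170, 255), sx_thresh=(20, 100))
cv2.imwrite("binary.png", binary * 255)    # scale the 0/1 mask for viewing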
Example #2
Source File: image_transforms.py    From PyTorch-MFNet with MIT License
def __call__(self, data):
        h, w, c = data.shape
        assert c%3 == 0, "input channel = %d, illegal"%c

        random_vars = [int(round(self.rng.uniform(-x, x))) for x in self.vars]

        base = len(random_vars)
        augmented_data = np.zeros(data.shape)

        # Convert each 3-channel RGB frame in the stacked input to HLS
        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(data[:,:,3*i_im:(3*i_im+3)], cv2.COLOR_RGB2HLS)

        # Jitter each channel and clamp to the valid 8-bit HLS ranges (H <= 180, L/S <= 255)
        hls_limits = [180, 255, 255]
        for ic in range(0, c):
            var = random_vars[ic%base]
            limit = hls_limits[ic%base]
            augmented_data[:,:,ic] = np.minimum(np.maximum(augmented_data[:,:,ic] + var, 0), limit)

        # Convert the jittered frames back to RGB
        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(augmented_data[:,:,3*i_im:(3*i_im+3)].astype(np.uint8), \
                        cv2.COLOR_HLS2RGB)

        return augmented_data 
Example #3
Source File: image_transforms.py    From GloRe with MIT License
def __call__(self, data, idx=None, copy_id=0):
        h, w, c = data.shape
        assert c%3 == 0, "input channel = %d, illegal"%c

        random_vars = [int(self.rng.uniform(-x, x)) for x in self.vars]

        base = len(random_vars)
        augmented_data = np.zeros(data.shape, )

        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(data[:,:,3*i_im:(3*i_im+3)], cv2.COLOR_RGB2HLS)

        hls_limits = [180, 255, 255]
        for ic in range(0, c):
            var = random_vars[ic%base]
            limit = hls_limits[ic%base]
            augmented_data[:,:,ic] = np.minimum(np.maximum(augmented_data[:,:,ic] + var, 0), limit)

        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(augmented_data[:,:,3*i_im:(3*i_im+3)].astype(np.uint8), \
                        cv2.COLOR_HLS2RGB)

        return augmented_data 
Example #4
Source File: image_transforms.py    From dmc-net with MIT License
def __call__(self, data):
        h, w, c = data.shape
        
        assert c%3 == 0, "input channel = %d, illegal"%c
        random_vars = [int(round(self.rng.uniform(-x, x))) for x in self.vars]

        base = len(random_vars)
        augmented_data = np.zeros(data.shape, )
        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(data[:,:,3*i_im:(3*i_im+3)], cv2.COLOR_RGB2HLS)

        hls_limits = [180, 255, 255]
        for ic in range(0, c):
            var = random_vars[ic%base]
            limit = hls_limits[ic%base]
            augmented_data[:,:,ic] = np.minimum(np.maximum(augmented_data[:,:,ic] + var, 0), limit)

        for i_im in range(0, int(c/3)):
            augmented_data[:,:,3*i_im:(3*i_im+3)] = \
                    cv2.cvtColor(augmented_data[:,:,3*i_im:(3*i_im+3)].astype(np.uint8), \
                        cv2.COLOR_HLS2RGB)

        return augmented_data 
Example #5
Source File: image_process.py    From Advanced_Lane_Lines with MIT License
def test_thresh_image(image, s_thresh, sx_thresh):
	"""
	Adjust the threshold parameters by visual inspection.
	"""
	img = mpimg.imread(image)
	img_threshed = color_grid_thresh(img, s_thresh=s_thresh, sx_thresh=sx_thresh)

	# Convert to HLS color space and separate the L and S channels
	hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
	l_channel = hls[:,:,1]
	s_channel = hls[:,:,2]
	# Sobel x
	sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
	abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines
	scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

	# Threshold x gradient
	sxbinary = np.zeros_like(scaled_sobel)
	sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

	# Threshold color channel
	s_binary = np.zeros_like(s_channel)
	s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

	# Combine the two binary masks
	binary = sxbinary | s_binary

	
	plt.figure(),plt.imshow(img),plt.title("original")
	plt.figure(),plt.imshow(sxbinary, cmap='gray'),plt.title("x-gradient")
	plt.figure(),plt.imshow(s_binary, cmap='gray'),plt.title("color-threshed")
	plt.figure(),plt.imshow(s_channel, cmap='gray'),plt.title("s_channel")

	plt.figure(),plt.imshow(img_threshed, cmap='gray'),plt.title("combined-threshed")
	plt.show() 
Example #6
Source File: image_process.py    From Advanced_Lane_Lines with MIT License
def yellow_grid_thresh(img, y_low=(10,50,0), y_high=(30,255,255), sx_thresh=(20, 100)):
	img = np.copy(img)
	# Convert to HLS color space and separate the L and S channels
	hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
	l_channel = hls[:,:,1]
	s_channel = hls[:,:,2]
	# Sobel x
	sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
	abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines
	scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

	# Threshold x gradient
	sxbinary = np.zeros_like(scaled_sobel)
	sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

	# # Threshold color channel
	# s_binary = np.zeros_like(s_channel)
	# s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
	
	yellow_filtered = yellow_filter(img, y_low, y_high)
	yellow_filtered[yellow_filtered > 0] = 1 # convert to a binary mask

	# Combine the two binary masks: x-gradient on the right, yellow filter on the left
	sxbinary[:,:640] = 0 # keep only the right side of sxbinary
	yellow_filtered[:,640:] = 0 # keep only the left side of the yellow filter


	binary = sxbinary | yellow_filtered

	# Stack each channel (for visually checking the pixel source)
	# color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary,s_binary)) * 255
	return binary 
Example #7
Source File: test_color.py    From imgaug with MIT License
def test_random_colorspace(self):
        def _images_to_cspaces(images, choices):
            result = np.full((len(images),), -1, dtype=np.int32)
            for i, image_aug in enumerate(images):
                for j, choice in enumerate(choices):
                    if np.array_equal(image_aug, choice):
                        result[i] = j
                        break
            assert np.all(result != -1)
            return result

        image = np.arange(6*6*3).astype(np.uint8).reshape((6, 6, 3))
        expected_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)[:, :, 2:2+1]
        expected_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)[:, :, 1:1+1]

        child = _BatchCapturingDummyAugmenter()
        aug = iaa.WithBrightnessChannels(
            children=child,
            to_colorspace=[iaa.CSPACE_HSV, iaa.CSPACE_HLS])

        images = [np.copy(image) for _ in sm.xrange(100)]

        _ = aug(images=images)
        images_aug1 = child.last_batch.images

        _ = aug(images=images)
        images_aug2 = child.last_batch.images

        cspaces1 = _images_to_cspaces(images_aug1, [expected_hsv, expected_hls])
        cspaces2 = _images_to_cspaces(images_aug2, [expected_hsv, expected_hls])

        assert np.any(cspaces1 != cspaces2)
        assert len(np.unique(cspaces1)) > 1
        assert len(np.unique(cspaces2)) > 1 
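For reference, a sketch of how the augmenter under test might be used outside the test harness; iaa.Add is assumed here as the child augmenter.

import numpy as np
import imgaug.augmenters as iaa

# Jitter the brightness-like channel (V of HSV or L of HLS, chosen at random per image).
aug = iaa.WithBrightnessChannels(
    iaa.Add((-30, 30)),
    to_colorspace=[iaa.CSPACE_HSV, iaa.CSPACE_HLS])
image = np.arange(6*6*3).astype(np.uint8).reshape((6, 6, 3))
image_aug = aug(image=image)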
Example #8
Source File: test_weather.py    From imgaug with MIT License
def test_basic_functionality(self):
        # basic functionality test
        aug = iaa.FastSnowyLandscape(
            lightness_threshold=100,
            lightness_multiplier=2.0)
        image = np.arange(0, 6*6*3).reshape((6, 6, 3)).astype(np.uint8)
        image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
        mask = (image_hls[..., 1] < 100)
        expected = np.copy(image_hls).astype(np.float32)
        expected[..., 1][mask] *= 2.0
        expected = np.clip(np.round(expected), 0, 255).astype(np.uint8)
        expected = cv2.cvtColor(expected, cv2.COLOR_HLS2RGB)
        observed = aug.augment_image(image)
        assert np.array_equal(observed, expected) 
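A sketch of the same augmenter applied directly, mirroring the expected computation verified by the test above.

import numpy as np
import imgaug.augmenters as iaa

aug = iaa.FastSnowyLandscape(lightness_threshold=100, lightness_multiplier=2.0)
image = np.arange(0, 6*6*3).reshape((6, 6, 3)).astype(np.uint8)
snowy = aug.augment_image(image)  # pixels with HLS lightness < 100 get brightened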
Example #9
Source File: test_weather.py    From imgaug with MIT License
def test_vary_lightness_threshold(self):
        # test when varying lightness_threshold between images
        image = np.arange(0, 6*6*3).reshape((6, 6, 3)).astype(np.uint8)
        image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)

        aug = iaa.FastSnowyLandscape(
            lightness_threshold=_TwoValueParam(75, 125),
            lightness_multiplier=2.0)

        mask = (image_hls[..., 1] < 75)
        expected1 = np.copy(image_hls).astype(np.float64)
        expected1[..., 1][mask] *= 2.0
        expected1 = np.clip(np.round(expected1), 0, 255).astype(np.uint8)
        expected1 = cv2.cvtColor(expected1, cv2.COLOR_HLS2RGB)

        mask = (image_hls[..., 1] < 125)
        expected2 = np.copy(image_hls).astype(np.float64)
        expected2[..., 1][mask] *= 2.0
        expected2 = np.clip(np.round(expected2), 0, 255).astype(np.uint8)
        expected2 = cv2.cvtColor(expected2, cv2.COLOR_HLS2RGB)

        observed = aug.augment_images([image] * 4)

        assert np.array_equal(observed[0], expected1)
        assert np.array_equal(observed[1], expected2)
        assert np.array_equal(observed[2], expected1)
        assert np.array_equal(observed[3], expected2) 
Example #10
Source File: test_weather.py    From imgaug with MIT License
def test_vary_lightness_multiplier(self):
        # test when varying lightness_multiplier between images
        image = np.arange(0, 6*6*3).reshape((6, 6, 3)).astype(np.uint8)
        image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)

        aug = iaa.FastSnowyLandscape(
            lightness_threshold=100,
            lightness_multiplier=_TwoValueParam(1.5, 2.0))

        mask = (image_hls[..., 1] < 100)
        expected1 = np.copy(image_hls).astype(np.float64)
        expected1[..., 1][mask] *= 1.5
        expected1 = np.clip(np.round(expected1), 0, 255).astype(np.uint8)
        expected1 = cv2.cvtColor(expected1, cv2.COLOR_HLS2RGB)

        mask = (image_hls[..., 1] < 100)
        expected2 = np.copy(image_hls).astype(np.float64)
        expected2[..., 1][mask] *= 2.0
        expected2 = np.clip(np.round(expected2), 0, 255).astype(np.uint8)
        expected2 = cv2.cvtColor(expected2, cv2.COLOR_HLS2RGB)

        observed = aug.augment_images([image] * 4)

        assert np.array_equal(observed[0], expected1)
        assert np.array_equal(observed[1], expected2)
        assert np.array_equal(observed[2], expected1)
        assert np.array_equal(observed[3], expected2) 
Example #11
Source File: utils.py    From VerifAI with BSD 3-Clause "New" or "Revised" License
def random_shadow(image):
    """
    Generates and adds random shadow
    """
    # (x1, y1) and (x2, y2) forms a line
    # xm, ym gives all the locations of the image
    x1, y1 = IMAGE_WIDTH * np.random.rand(), 0
    x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT
    xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH]

    # mathematically speaking, we want to set 1 below the line and zero otherwise
    # our coordinate system is upside down, so "above the line" means:
    # (ym-y1)/(xm-x1) > (y2-y1)/(x2-x1)
    # since x2 == x1 would cause a division by zero, we rewrite it as:
    # (ym-y1)*(x2-x1) - (y2-y1)*(xm-x1) > 0
    mask = np.zeros_like(image[:, :, 1])
    mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1

    # choose which side should have the shadow and pick a darkening ratio
    cond = mask == np.random.randint(2)
    s_ratio = np.random.uniform(low=0.2, high=0.5)

    # darken the Lightness channel (index 1) in HLS (Hue, Lightness, Saturation)
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio
    return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB) 
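A minimal sketch of calling random_shadow, assuming it is defined in the same module as these constants; the sizes below are illustrative, and the real values live elsewhere in utils.py.

import numpy as np

IMAGE_HEIGHT, IMAGE_WIDTH = 66, 200  # hypothetical values
image = np.random.randint(0, 256, size=(IMAGE_HEIGHT, IMAGE_WIDTH, 3), dtype=np.uint8)
shadowed = random_shadow(image)      # darkens the HLS lightness on one side of a random line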
Example #12
Source File: data_augmentation.py    From uois with GNU General Public License v3.0
def random_color_warp(image, d_h=None, d_s=None, d_l=None):
    """ Given an RGB image [H x W x 3], add random hue, saturation and luminosity to the image

        Code adapted from: https://github.com/yuxng/PoseCNN/blob/master/lib/utils/blob.py
    """
    H, W, _ = image.shape

    image_color_warped = np.zeros_like(image)

    # Draw random hue, lightness and saturation offsets in (-0.1, 0.1), scaled to the 8-bit range
    if d_h is None:
        d_h = (random.random() - 0.5) * 0.2 * 256
    if d_l is None:
        d_l = (random.random() - 0.5) * 0.2 * 256
    if d_s is None:
        d_s = (random.random() - 0.5) * 0.2 * 256

    # Convert the RGB to HLS
    hls = cv2.cvtColor(image.round().astype(np.uint8), cv2.COLOR_RGB2HLS)
    h, l, s = cv2.split(hls)

    # Add the values to the image H, L, S
    new_h = (np.round((h + d_h)) % 256).astype(np.uint8)
    new_l = np.round(np.clip(l + d_l, 0, 255)).astype(np.uint8)
    new_s = np.round(np.clip(s + d_s, 0, 255)).astype(np.uint8)

    # Convert the HLS to RGB
    new_hls = cv2.merge((new_h, new_l, new_s)).astype(np.uint8)
    new_im = cv2.cvtColor(new_hls, cv2.COLOR_HLS2RGB)

    image_color_warped = new_im.astype(np.float32)

    return image_color_warped 
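A minimal usage sketch, assuming random_color_warp (above) and its module imports are in scope; the input is an RGB image with values in [0, 255].

import numpy as np

rgb = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.float32)
warped = random_color_warp(rgb)                                      # random H/L/S offsets
warped_fixed = random_color_warp(rgb, d_h=10.0, d_s=-5.0, d_l=5.0)  # explicit offsets
print(warped.dtype, warped.shape)                                    # float32 (64, 64, 3)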
Example #13
Source File: utils.py    From car-behavioral-cloning with MIT License
def random_shadow(image):
    """
    Generates and adds random shadow
    """
    # (x1, y1) and (x2, y2) forms a line
    # xm, ym gives all the locations of the image
    x1, y1 = IMAGE_WIDTH * np.random.rand(), 0
    x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT
    xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH]

    # mathematically speaking, we want to set 1 below the line and zero otherwise
    # our coordinate system is upside down, so "above the line" means:
    # (ym-y1)/(xm-x1) > (y2-y1)/(x2-x1)
    # since x2 == x1 would cause a division by zero, we rewrite it as:
    # (ym-y1)*(x2-x1) - (y2-y1)*(xm-x1) > 0
    mask = np.zeros_like(image[:, :, 1])
    mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1

    # choose which side should have the shadow and pick a darkening ratio
    cond = mask == np.random.randint(2)
    s_ratio = np.random.uniform(low=0.2, high=0.5)

    # darken the Lightness channel (index 1) in HLS (Hue, Lightness, Saturation)
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio
    return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB) 
Example #14
Source File: image_process.py    From Advanced_Lane_Lines with MIT License
def color_grid_thresh_dynamic(img, s_thresh=(170,255), sx_thresh=(20, 100)):
	img = np.copy(img)
	height = img.shape[0]
	width = img.shape[1]
	# Convert to HLS color space and separate the L and S channels
	hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
	l_channel = hls[:,:,1]
	s_channel = hls[:,:,2]
	# Sobel x
	sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
	abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines
	scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))

	# Threshold x gradient
	sxbinary = np.zeros_like(scaled_sobel)
	sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

	# Threshold color channel
	s_binary = np.zeros_like(s_channel)
	s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

	sxbinary[:, :width//2] = 0	# keep only the right side of sxbinary
	s_binary[:,width//2:] = 0 # keep only the left side of s_binary

	# Combine the two binary masks
	binary = sxbinary | s_binary

	# Stack each channel (for visually checking the pixel source)
	# color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary,s_binary)) * 255
	return binary 
Example #15
Source File: functional.py    From albumentations with MIT License
def add_shadow(img, vertices_list):
    """Add shadows to the image.

    From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library

    Args:
        img (numpy.ndarray): Image.
        vertices_list (list): Shadow polygons, each an int32 array of vertices for cv2.fillPoly.

    Returns:
        numpy.ndarray:

    """
    non_rgb_warning(img)
    input_dtype = img.dtype
    needs_float = False

    if input_dtype == np.float32:
        img = from_float(img, dtype=np.dtype("uint8"))
        needs_float = True
    elif input_dtype not in (np.uint8, np.float32):
        raise ValueError("Unexpected dtype {} for RandomShadow augmentation".format(input_dtype))

    image_hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    mask = np.zeros_like(img)

    # draw all shadow polygons on an empty mask; the scalar 255 fills only the red channel
    for vertices in vertices_list:
        cv2.fillPoly(mask, vertices, 255)

    # where the red channel is set, the image's Lightness channel is halved
    red_max_value_ind = mask[:, :, 0] == 255
    image_hls[:, :, 1][red_max_value_ind] = image_hls[:, :, 1][red_max_value_ind] * 0.5

    image_rgb = cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)

    if needs_float:
        image_rgb = to_float(image_rgb, max_value=255)

    return image_rgb 
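A sketch of the vertices_list format, assuming add_shadow (above) and its module helpers are in scope: each entry is an int32 array of polygon vertices in the shape cv2.fillPoly expects.

import numpy as np

img = np.random.randint(0, 256, size=(100, 100, 3), dtype=np.uint8)
shadow_poly = np.array([[(10, 10), (90, 10), (90, 60), (10, 60)]], dtype=np.int32)
shadowed = add_shadow(img, [shadow_poly])  # HLS lightness halved inside the polygon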
Example #16
Source File: histogram_intersection.py    From exposure with MIT License
def get_statistics(img):
    img = np.clip(img, a_min=0.0, a_max=1.0)
    HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    lum = img[:, :, 0] * 0.27 + img[:, :, 1] * 0.67 + img[:, :, 2] * 0.06  # luminance as a weighted RGB sum
    sat = HLS[:, :, 2].mean()  # mean saturation from the HLS S channel
    return [lum.mean(), lum.std() * 2, sat] 
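A quick usage sketch, assuming get_statistics (above) is in scope; the function expects a float RGB image in [0, 1], and cv2.cvtColor requires float32 rather than float64.

import numpy as np

img = np.random.rand(64, 64, 3).astype(np.float32)
mean_lum, lum_spread, mean_sat = get_statistics(img)
print(mean_lum, lum_spread, mean_sat)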
Example #17
Source File: test_color.py    From imgaug with MIT License
def test_every_colorspace(self):
        def _image_to_channel(image, cspace):
            if cspace == iaa.CSPACE_YCrCb:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB)
                return image_cvt[:, :, 0:0+1]
            elif cspace == iaa.CSPACE_HSV:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
                return image_cvt[:, :, 2:2+1]
            elif cspace == iaa.CSPACE_HLS:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
                return image_cvt[:, :, 1:1+1]
            elif cspace == iaa.CSPACE_Lab:
                if hasattr(cv2, "COLOR_RGB2Lab"):
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)
                else:
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
                return image_cvt[:, :, 0:0+1]
            elif cspace == iaa.CSPACE_Luv:
                if hasattr(cv2, "COLOR_RGB2Luv"):
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Luv)
                else:
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
                return image_cvt[:, :, 0:0+1]
            else:
                assert cspace == iaa.CSPACE_YUV
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
                return image_cvt[:, :, 0:0+1]

        # Max differences between input image and image after augmentation
        # when no child augmenter is used (for the given example image below).
        # For some colorspaces the conversion to input colorspace isn't
        # perfect.
        # Values were manually checked.
        max_diff_expected = {
            iaa.CSPACE_YCrCb: 1,
            iaa.CSPACE_HSV: 0,
            iaa.CSPACE_HLS: 0,
            iaa.CSPACE_Lab: 2,
            iaa.CSPACE_Luv: 4,
            iaa.CSPACE_YUV: 1
        }

        image = np.arange(6*6*3).astype(np.uint8).reshape((6, 6, 3))

        for cspace in self.valid_colorspaces:
            with self.subTest(colorspace=cspace):
                child = _BatchCapturingDummyAugmenter()
                aug = iaa.WithBrightnessChannels(
                    children=child,
                    to_colorspace=cspace)

                image_aug = aug(image=image)

                expected = _image_to_channel(image, cspace)
                diff = np.abs(
                    image.astype(np.int32) - image_aug.astype(np.int32))
                assert np.all(diff <= max_diff_expected[cspace])
                assert np.array_equal(child.last_batch.images[0], expected) 
Example #18
Source File: functional.py    From albumentations with MIT License
def add_snow(img, snow_point, brightness_coeff):
    """Bleaches out pixels, imitation snow.

    From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library

    Args:
        img (numpy.ndarray): Image.
        snow_point: Number of snow points.
        brightness_coeff: Brightness coefficient.

    Returns:
        numpy.ndarray: Image.

    """
    non_rgb_warning(img)

    input_dtype = img.dtype
    needs_float = False

    snow_point *= 127.5  # = 255 / 2
    snow_point += 85  # = 255 / 3

    if input_dtype == np.float32:
        img = from_float(img, dtype=np.dtype("uint8"))
        needs_float = True
    elif input_dtype not in (np.uint8, np.float32):
        raise ValueError("Unexpected dtype {} for RandomSnow augmentation".format(input_dtype))

    image_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    image_HLS = np.array(image_HLS, dtype=np.float32)

    image_HLS[:, :, 1][image_HLS[:, :, 1] < snow_point] *= brightness_coeff

    image_HLS[:, :, 1] = clip(image_HLS[:, :, 1], np.uint8, 255)

    image_HLS = np.array(image_HLS, dtype=np.uint8)

    image_RGB = cv2.cvtColor(image_HLS, cv2.COLOR_HLS2RGB)

    if needs_float:
        image_RGB = to_float(image_RGB, max_value=255)

    return image_RGB 
Example #19
Source File: functional.py    From albumentations with MIT License
def add_rain(img, slant, drop_length, drop_width, drop_color, blur_value, brightness_coefficient, rain_drops):
    """

    From https://github.com/UjjwalSaxena/Automold--Road-Augmentation-Library

    Args:
        img (numpy.ndarray): Image.
        slant (int):
        drop_length:
        drop_width:
        drop_color:
        blur_value (int): Rainy views are blurry.
        brightness_coefficient (float): Rainy days are usually shady.
        rain_drops:

    Returns:
        numpy.ndarray: Image.

    """
    non_rgb_warning(img)

    input_dtype = img.dtype
    needs_float = False

    if input_dtype == np.float32:
        img = from_float(img, dtype=np.dtype("uint8"))
        needs_float = True
    elif input_dtype not in (np.uint8, np.float32):
        raise ValueError("Unexpected dtype {} for RandomRain augmentation".format(input_dtype))

    image = img.copy()

    for (rain_drop_x0, rain_drop_y0) in rain_drops:
        rain_drop_x1 = rain_drop_x0 + slant
        rain_drop_y1 = rain_drop_y0 + drop_length

        cv2.line(image, (rain_drop_x0, rain_drop_y0), (rain_drop_x1, rain_drop_y1), drop_color, drop_width)

    image = cv2.blur(image, (blur_value, blur_value))  # rainy views are blurry
    image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float32)
    image_hls[:, :, 1] *= brightness_coefficient

    image_rgb = cv2.cvtColor(image_hls.astype(np.uint8), cv2.COLOR_HLS2RGB)

    if needs_float:
        image_rgb = to_float(image_rgb, max_value=255)

    return image_rgb 
Example #20
Source File: functional.py    From albumentations with MIT License
def iso_noise(image, color_shift=0.05, intensity=0.5, random_state=None, **kwargs):
    """
    Apply Poisson noise to the image to simulate camera sensor noise.

    Args:
        image (numpy.ndarray): Input image, currently, only RGB, uint8 images are supported.
        color_shift (float):
        intensity (float): Multiplication factor for noise values. Values of ~0.5 produce a noticeable,
                   yet acceptable level of noise.
        random_state:
        **kwargs:

    Returns:
        numpy.ndarray: Noised image

    """
    if image.dtype != np.uint8:
        raise TypeError("Image must have uint8 channel type")
    if is_grayscale_image(image):
        raise TypeError("Image must be RGB")

    if random_state is None:
        random_state = np.random.RandomState(42)

    one_over_255 = float(1.0 / 255.0)
    image = np.multiply(image, one_over_255, dtype=np.float32)
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    _, stddev = cv2.meanStdDev(hls)

    luminance_noise = random_state.poisson(stddev[1] * intensity * 255, size=hls.shape[:2])
    color_noise = random_state.normal(0, color_shift * 360 * intensity, size=hls.shape[:2])

    hue = hls[..., 0]
    hue += color_noise
    hue[hue < 0] += 360
    hue[hue > 360] -= 360

    luminance = hls[..., 1]
    luminance += (luminance_noise / 255) * (1.0 - luminance)

    image = cv2.cvtColor(hls, cv2.COLOR_HLS2RGB) * 255
    return image.astype(np.uint8)
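For completeness, the higher-level albumentations transform that wraps this function can be used as below; the parameter ranges are illustrative, not prescriptive.

import albumentations as A
import numpy as np

transform = A.ISONoise(color_shift=(0.01, 0.05), intensity=(0.1, 0.5), p=1.0)
image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
noised = transform(image=image)["image"]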