Python cv2.COLOR_BGR2LAB Examples

The following are 30 code examples of cv2.COLOR_BGR2LAB(), drawn from open-source projects. The project and source file for each example are listed above its code. You may also want to check out the other available functions and classes of the cv2 module.
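As a quick reference before the examples, here is a minimal round-trip sketch (the input path is hypothetical). For 8-bit images, OpenCV scales L to the 0-255 range and offsets the a and b channels by 128.

import cv2

img_bgr = cv2.imread("photo.jpg")                # OpenCV reads images as BGR
lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)   # 8-bit: L in 0..255, a/b offset by 128
l, a, b = cv2.split(lab)                         # access the individual channels
img_back = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)  # inverse conversion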
Example #1
Source File: neural_style.py    From neural-style-tf with GNU General Public License v3.0
def convert_to_original_colors(content_img, stylized_img):
  content_img  = postprocess(content_img)
  stylized_img = postprocess(stylized_img)
  if args.color_convert_type == 'yuv':
    cvt_type = cv2.COLOR_BGR2YUV
    inv_cvt_type = cv2.COLOR_YUV2BGR
  elif args.color_convert_type == 'ycrcb':
    cvt_type = cv2.COLOR_BGR2YCR_CB
    inv_cvt_type = cv2.COLOR_YCR_CB2BGR
  elif args.color_convert_type == 'luv':
    cvt_type = cv2.COLOR_BGR2LUV
    inv_cvt_type = cv2.COLOR_LUV2BGR
  elif args.color_convert_type == 'lab':
    cvt_type = cv2.COLOR_BGR2LAB
    inv_cvt_type = cv2.COLOR_LAB2BGR
  content_cvt = cv2.cvtColor(content_img, cvt_type)
  stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
  # Keep the luminance of the stylized image and the chrominance
  # (color) of the original content image
  c1, _, _ = cv2.split(stylized_cvt)
  _, c2, c3 = cv2.split(content_cvt)
  merged = cv2.merge((c1, c2, c3))
  dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
  dst = preprocess(dst)
  return dst 
Example #2
Source File: utils.py    From PHiSeg-code with Apache License 2.0
def histogram_equalization(img):

    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

    # -----Splitting the LAB image to different channels-------------------------
    l, a, b = cv2.split(lab)

    # -----Applying CLAHE to L-channel-------------------------------------------
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)

    # -----Merge the CLAHE enhanced L-channel with the a and b channel-----------
    limg = cv2.merge((cl, a, b))

    # -----Converting image from LAB color model back to BGR---------------------
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)

    return final 
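A usage sketch for the function above (the input path is hypothetical):

img = cv2.imread("scan.png")
equalized = histogram_equalization(img)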
Example #3
Source File: phiseg_makegif_samples.py    From PHiSeg-code with Apache License 2.0
def histogram_equalization(img):

    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

    # -----Splitting the LAB image to different channels-------------------------
    l, a, b = cv2.split(lab)

    # -----Applying CLAHE to L-channel-------------------------------------------
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)

    # -----Merge the CLAHE enhanced L-channel with the a and b channel-----------
    limg = cv2.merge((cl, a, b))

    # -----Converting image from LAB color model back to BGR---------------------
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)

    return final 
Example #4
Source File: image.py    From surface-crack-detection with MIT License
def equalize_light(image, limit=3, grid=(7,7), gray=False):
    if (len(image.shape) == 2):
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        gray = True
    
    clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=grid)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)

    cl = clahe.apply(l)
    limg = cv2.merge((cl,a,b))

    image = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    if gray: 
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    return np.uint8(image) 
Example #5
Source File: omnirobot_simulator_server.py    From robotics-rl-srl with MIT License
def renderEnvLuminosityNoise(self, origin_image, noise_var=0.1, in_RGB=False, out_RGB=False):
    """
    Render the environment under randomly varied luminosity and color.
    """
    # Scale each LAB channel by a random factor around 1.0. Work in
    # float32 so the scaling cannot overflow uint8, then clip back to
    # the 8-bit range before the inverse conversion.
    origin_image_LAB = cv2.cvtColor(
        origin_image, cv2.COLOR_RGB2LAB if in_RGB else cv2.COLOR_BGR2LAB).astype(np.float32)
    origin_image_LAB[:, :, 0] *= np.random.randn() * noise_var + 1.0
    origin_image_LAB[:, :, 1] *= np.random.randn() * noise_var + 1.0
    origin_image_LAB[:, :, 2] *= np.random.randn() * noise_var + 1.0
    origin_image_LAB = np.clip(origin_image_LAB, 0, 255).astype(np.uint8)
    out_image = cv2.cvtColor(
        origin_image_LAB, cv2.COLOR_LAB2RGB if out_RGB else cv2.COLOR_LAB2BGR)
    return out_image
Example #6
Source File: predict_densenet_oof.py    From dsb2018_topcoders with MIT License
def bgr_to_lab(img):
    # Equalize the lightness channel with CLAHE and return it as a
    # single-channel image; bright-background images are inverted so the
    # foreground convention stays consistent across the dataset
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(17, 17))
    lab = clahe.apply(lab[:, :, 0])
    if lab.mean() > 127:
        lab = 255 - lab
    return lab[..., np.newaxis]
Example #7
Source File: tune_densenet_softmax_final.py    From dsb2018_topcoders with MIT License
def bgr_to_lab(img):
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(17, 17))
    lab = clahe.apply(lab[:, :, 0])
    if lab.mean() > 127:
        lab = 255 - lab
    return lab[..., np.newaxis] 
Example #8
Source File: rgb2gray_lab.py    From plantcv with MIT License
def rgb2gray_lab(rgb_img, channel):
    """Convert image from RGB colorspace to LAB colorspace. Returns the specified subchannel as a gray image.

    Inputs:
    rgb_img   = RGB image data
    channel   = color subchannel (l = lightness, a = green-magenta, b = blue-yellow)

    Returns:
    l | a | b = grayscale image from one LAB color channel

    :param rgb_img: numpy.ndarray
    :param channel: str
    :return channel: numpy.ndarray
    """
    # Auto-increment the device counter
    params.device += 1
    # The allowable channel inputs are l, a or b
    names = {"l": "lightness", "a": "green-magenta", "b": "blue-yellow"}
    channel = channel.lower()
    if channel not in names:
        fatal_error("Channel " + str(channel) + " is not l, a or b!")

    # Convert the input BGR image to LAB colorspace
    lab = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2LAB)
    # Split LAB channels
    l, a, b = cv2.split(lab)
    # Create a channel dictionary for lookups by channel name
    channels = {"l": l, "a": a, "b": b}

    if params.debug == "print":
        print_image(channels[channel], os.path.join(params.debug_outdir,
                                                    str(params.device) + "_lab_" + names[channel] + ".png"))
    elif params.debug == "plot":
        plot_image(channels[channel], cmap="gray")

    return channels[channel] 
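A usage sketch for the function above (the input path is hypothetical):

rgb_img = cv2.imread("plant.png")
a_channel = rgb2gray_lab(rgb_img, "a")  # green-magenta channel as a gray image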
Example #9
Source File: common.py    From pytorch-faster-rcnn with MIT License
def light(im1_name, im2_name):
    # Match the L-channel mean/std of the second image to the first
    # im1
    im = cv2.imread(im1_name)
    im = im.astype(np.float32)
    im /= 255.
    im_lab = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)
    l = im_lab[:, :, 0]
    L1_mean = np.mean(l)
    L1_std = np.std(l)

    # im2
    im = cv2.imread(im2_name)
    im = im.astype(np.float32)
    im /= 255.
    im_lab = cv2.cvtColor(im, cv2.COLOR_BGR2LAB)
    l = im_lab[:, :, 0]
    L2_mean = np.mean(l)
    L2_std = np.std(l)

    if L2_std != 0:
        l = (l - L2_mean) / L2_std * L1_std + L1_mean
    l = l[:, :, np.newaxis]
    im_lab = np.concatenate((l, im_lab[:, :, 1:]), axis=2)
    im = cv2.cvtColor(im_lab, cv2.COLOR_LAB2BGR)
    im *= 255.
    return im 
Example #10
Source File: image.py    From OverwatchDataAnalysis with GNU General Public License v3.0
def increase_contrast(img):
    """
    Increase contrast of a BGR image by applying CLAHE to its L channel
    @Author: Appcell
    @param img: image to be processed
    @return: a numpy.ndarray object of this image
    """
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(4, 4))
    cl = clahe.apply(l)
    limg = cv2.merge((cl,a,b))
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    return final 
Example #11
Source File: color_transfer.py    From DeepFaceLab with GNU General Public License v3.0
def color_transfer_mix(img_src,img_trg):
    img_src = np.clip(img_src*255.0, 0, 255).astype(np.uint8)
    img_trg = np.clip(img_trg*255.0, 0, 255).astype(np.uint8)

    img_src_lab = cv2.cvtColor(img_src, cv2.COLOR_BGR2LAB)
    img_trg_lab = cv2.cvtColor(img_trg, cv2.COLOR_BGR2LAB)

    rct_light = np.clip ( linear_color_transfer(img_src_lab[...,0:1].astype(np.float32)/255.0,
                                                img_trg_lab[...,0:1].astype(np.float32)/255.0 )[...,0]*255.0,
                          0, 255).astype(np.uint8)

    img_src_lab[...,0] = (np.ones_like (rct_light)*100).astype(np.uint8)
    img_src_lab = cv2.cvtColor(img_src_lab, cv2.COLOR_LAB2BGR)

    img_trg_lab[...,0] = (np.ones_like (rct_light)*100).astype(np.uint8)
    img_trg_lab = cv2.cvtColor(img_trg_lab, cv2.COLOR_LAB2BGR)

    img_rct = color_transfer_sot( img_src_lab.astype(np.float32), img_trg_lab.astype(np.float32) )
    img_rct = np.clip(img_rct, 0, 255).astype(np.uint8)

    img_rct = cv2.cvtColor(img_rct, cv2.COLOR_BGR2LAB)
    img_rct[...,0] = rct_light
    img_rct = cv2.cvtColor(img_rct, cv2.COLOR_LAB2BGR)

    return (img_rct / 255.0).astype(np.float32) 
Example #12
Source File: inference_utils.py    From rpg_e2vid with GNU General Public License v3.0
def upsample_color_image(grayscale_highres, color_lowres_bgr, colorspace='LAB'):
    """
    Generate a high res color image from a high res grayscale image, and a low res color image,
    using the trick described in:
    http://www.planetary.org/blogs/emily-lakdawalla/2013/04231204-image-processing-colorizing-images.html
    """
    assert(len(grayscale_highres.shape) == 2)
    assert(len(color_lowres_bgr.shape) == 3 and color_lowres_bgr.shape[2] == 3)

    if colorspace == 'LAB':
        # convert color image to LAB space
        lab = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2LAB)
        # replace lightness channel with the highres image
        lab[:, :, 0] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=lab, code=cv2.COLOR_LAB2BGR)
    elif colorspace == 'HSV':
        # convert color image to HSV space
        hsv = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2HSV)
        # replace value channel with the highres image
        hsv[:, :, 2] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=hsv, code=cv2.COLOR_HSV2BGR)
    elif colorspace == 'HLS':
        # convert color image to HLS space
        hls = cv2.cvtColor(src=color_lowres_bgr, code=cv2.COLOR_BGR2HLS)
        # replace lightness channel with the highres image
        hls[:, :, 1] = grayscale_highres
        # convert back to BGR
        color_highres_bgr = cv2.cvtColor(src=hls, code=cv2.COLOR_HLS2BGR)
    else:
        raise ValueError('Unsupported colorspace: {}'.format(colorspace))

    return color_highres_bgr
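A usage sketch (file names hypothetical): the low-resolution color image must first be resized to the grayscale resolution, since the function replaces the lightness channel element-wise.

gray_hr = cv2.imread("frame_gray.png", cv2.IMREAD_GRAYSCALE)
color_lr = cv2.imread("frame_color.png")
color_lr = cv2.resize(color_lr, (gray_hr.shape[1], gray_hr.shape[0]),
                      interpolation=cv2.INTER_CUBIC)
color_hr = upsample_color_image(gray_hr, color_lr, colorspace='LAB')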
Example #13
Source File: preprocess.py    From pytorch-segmentation with MIT License
def clahe(img, clip=2, grid=8):
    # Apply CLAHE to the L channel in LAB space
    img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    _clahe = cv2.createCLAHE(clipLimit=clip, tileGridSize=(grid, grid))
    img_lab[:, :, 0] = _clahe.apply(img_lab[:, :, 0])
    img_equ = cv2.cvtColor(img_lab, cv2.COLOR_LAB2BGR)
    return img_equ
Example #14
Source File: color_transfer.py    From pyimagesearch with GNU General Public License v3.0
def color_transfer(source, target):

    source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
    target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")

    (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
    (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)

    (l, a, b) = cv2.split(target)
    l -= lMeanTar
    a -= aMeanTar
    b -= bMeanTar

    l = (lStdTar / lStdSrc) * l
    a = (aStdTar / aStdSrc) * a
    b = (bStdTar / bStdSrc) * b

    l += lMeanSrc
    a += aMeanSrc
    b += bMeanSrc

    l = np.clip(l, 0, 255)
    a = np.clip(a, 0, 255)
    b = np.clip(b, 0, 255)

    transfer = cv2.merge([l, a, b])
    transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)

    return transfer 
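A usage sketch (file names hypothetical): the color statistics of source are imposed on target, so the result keeps target's content with source's palette.

source = cv2.imread("sunset.jpg")
target = cv2.imread("beach.jpg")
result = color_transfer(source, target)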
Example #15
Source File: augment.py    From kaggle-dsb2018 with Apache License 2.0
def keep_L_channel(images):
    for index, image in enumerate(images):
        if image.shape[2] == 3:
            bgr = image[:,:,[2,1,0]] # flip r and b
            lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
            images[index] = lab[:,:,0]
            images[index] = images[index][:,:, np.newaxis]
    return images 
Example #16
Source File: image_processing.py    From kaggle-dsb2018 with Apache License 2.0
def rgb_clahe(in_rgb_img):
    bgr = in_rgb_img[:,:,[2,1,0]] # flip r and b
    lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    lab[:,:,0] = clahe.apply(lab[:,:,0])
    bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    return bgr[:,:,[2,1,0]] 
Example #17
Source File: image_processing.py    From kaggle-dsb2018 with Apache License 2.0
def rgb_clahe_justl(in_rgb_img):
    bgr = in_rgb_img[:,:,[2,1,0]] # flip r and b
    lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    img = clahe.apply(lab[:,:,0])
    return img 
Example #18
Source File: train_inception_softmax.py    From dsb2018_topcoders with MIT License
def bgr_to_lab(img):
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(17, 17))
    lab = clahe.apply(lab[:, :, 0])
    if lab.mean() > 127:
        lab = 255 - lab
    return lab[..., np.newaxis] 
Example #19
Source File: merge_faces_larger.py    From df with Mozilla Public License 2.0
def transfer_avg_color(img_old,img_new):
  assert(img_old.shape==img_new.shape) 
  source = cv2.cvtColor(img_old, cv2.COLOR_BGR2LAB).astype("float32")
  target = cv2.cvtColor(img_new, cv2.COLOR_BGR2LAB).astype("float32")

  (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
  (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)

  (l, a, b) = cv2.split(target)

  l -= lMeanTar
  a -= aMeanTar
  b -= bMeanTar

  l = (lStdTar / lStdSrc) * l
  a = (aStdTar / aStdSrc) * a
  b = (bStdTar / bStdSrc) * b

  l += lMeanSrc
  a += aMeanSrc
  b += bMeanSrc

  l = numpy.clip(l, 0, 255)
  a = numpy.clip(a, 0, 255)
  b = numpy.clip(b, 0, 255)

  transfer = cv2.merge([l, a, b])
  transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)

  return transfer 
Example #20
Source File: predict_inception_oof.py    From dsb2018_topcoders with MIT License
def bgr_to_lab(img):
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(17, 17))
    lab = clahe.apply(lab[:, :, 0])
    if lab.mean() > 127:
        lab = 255 - lab
    return lab[..., np.newaxis] 
Example #21
Source File: color_space.py    From PyIntroduction with MIT License
def showImageLab(image_file):
    image_bgr = cv2.imread(image_file)
    image_Lab = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2LAB)

    L = image_Lab[:, :, 0]
    a = image_Lab[:, :, 1]
    b = image_Lab[:, :, 2]

    plt.subplot(1, 3, 1)
    plt.title('L')
    plt.gray()
    plt.imshow(L)
    plt.axis('off')

    plt.subplot(1, 3, 2)
    plt.title('a')
    plt.gray()
    plt.imshow(a)
    plt.axis('off')

    plt.subplot(1, 3, 3)
    plt.title('b')
    plt.gray()
    plt.imshow(b)
    plt.axis('off')

    plt.show() 
Example #22
Source File: predict_inception.py    From dsb2018_topcoders with MIT License
def bgr_to_lab(img):
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(17, 17))
    lab = clahe.apply(lab[:, :, 0])
    if lab.mean() > 127:
        lab = 255 - lab
    return lab[..., np.newaxis] 
Example #23
Source File: predict_densenet.py    From dsb2018_topcoders with MIT License
def bgr_to_lab(img):
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(17, 17))
    lab = clahe.apply(lab[:, :, 0])
    if lab.mean() > 127:
        lab = 255 - lab
    return lab[..., np.newaxis] 
Example #24
Source File: tune_inception_softmax_final.py    From dsb2018_topcoders with MIT License
def bgr_to_lab(img):
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(17, 17))
    lab = clahe.apply(lab[:, :, 0])
    if lab.mean() > 127:
        lab = 255 - lab
    return lab[..., np.newaxis] 
Example #25
Source File: img_util.py    From CvStudio with MIT License
def correct_lightness(img: np.ndarray):
    if len(np.shape(img)) == 3:
        img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(img_lab)
        clahe = cv2.createCLAHE(clipLimit=40.0, tileGridSize=(4, 4))
        l = clahe.apply(l)
        img = cv2.merge((l, a, b))
        img = cv2.cvtColor(img, cv2.COLOR_LAB2BGR)
    return img
Example #26
Source File: light_remover.py    From drowsiness-detection with MIT License
def light_removing(frame):
    # Blend the grayscale frame with the inverted, heavily blurred
    # lightness channel to suppress uneven illumination
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
    L = lab[:,:,0]
    med_L = cv2.medianBlur(L, 99)  # median filter
    invert_L = cv2.bitwise_not(med_L)  # invert lightness
    composed = cv2.addWeighted(gray, 0.75, invert_L, 0.25, 0)
    return L, composed
Example #27
Source File: image.py    From BirdCLEF-Baseline with MIT License
def lightness(img, amount=0.25):

    try:
        # Only works with BGR images; work in float32 so the random
        # scaling cannot overflow uint8
        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype('float32')
        lab[:, :, 0] *= RANDOM.uniform(1 - amount, 1 + amount)
        # clip() returns a new array, so assign the result back
        lab[:, :, 0] = lab[:, :, 0].clip(0, 255)
        img = cv2.cvtColor(lab.astype('uint8'), cv2.COLOR_LAB2BGR)
    except:
        pass

    return img
Example #28
Source File: pipline_test.py    From Face-skin-hair-segmentaiton-and-skin-color-evaluation with Apache License 2.0
def color_moments(image, mask, color_space):
    """
    function: Color Moment Features
    image: raw image
    mask: image mask
    color_space: 'rgb' or 'lab' or 'ycrcb' or 'hsv'
    """
    assert image.shape[:2] == mask.shape
    assert color_space.lower() in ['lab', 'rgb', 'ycrcb', 'hsv']

    if color_space.lower() == 'rgb':
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    elif color_space.lower() == 'hsv':
        image = cv.cvtColor(image, cv.COLOR_BGR2HSV)
    elif color_space.lower() == 'lab':
        image = cv.cvtColor(image, cv.COLOR_BGR2LAB)
    elif color_space.lower() == 'ycrcb':
        image = cv.cvtColor(image, cv.COLOR_BGR2YCrCb)
    else:
        raise ValueError("Color space error...")

    # Split image channels info
    c1, c2, c3 = cv.split(image)
    color_feature = []

    # Only process mask != 0 channel region
    c1 = c1[np.where(mask != 0)]
    c2 = c2[np.where(mask != 0)]
    c3 = c3[np.where(mask != 0)]

    # Extract mean
    mean_1 = np.mean(c1)
    mean_2 = np.mean(c2)
    mean_3 = np.mean(c3)

    # Extract variance
    variance_1 = np.std(c1)
    variance_2 = np.std(c2)
    variance_3 = np.std(c3)

    # Extract skewness
    skewness_1 = np.mean(np.abs(c1 - mean_1) ** 3) ** (1. / 3)
    skewness_2 = np.mean(np.abs(c2 - mean_2) ** 3) ** (1. / 3)
    skewness_3 = np.mean(np.abs(c3 - mean_3) ** 3) ** (1. / 3)

    color_feature.extend(
        [mean_1, mean_2, mean_3, variance_1, variance_2,
            variance_3, skewness_1, skewness_2, skewness_3])

    return color_feature 
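A usage sketch (file names hypothetical; the mask is any single-channel image whose nonzero pixels mark the region of interest). The function returns nine values: mean, standard deviation, and skewness for each of the three channels.

image = cv.imread("face.jpg")
mask = cv.imread("skin_mask.png", cv.IMREAD_GRAYSCALE)
features = color_moments(image, mask, "lab")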
Example #29
Source File: cal_moments.py    From Face-skin-hair-segmentaiton-and-skin-color-evaluation with Apache License 2.0
def color_moments(image, mask, color_space):
    """
    function: Color Moment Features
    image: raw image
    mask: image mask
    color_space: 'rgb' or 'lab' or 'ycrcb' or 'hsv'
    """
    assert image.shape[:2] == mask.shape
    assert color_space.lower() in ['lab', 'rgb', 'ycrcb', 'hsv']

    if color_space.lower() == 'rgb':
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    elif color_space.lower() == 'hsv':
        image = cv.cvtColor(image, cv.COLOR_BGR2HSV)
    elif color_space.lower() == 'lab':
        image = cv.cvtColor(image, cv.COLOR_BGR2LAB)
    elif color_space.lower() == 'ycrcb':
        image = cv.cvtColor(image, cv.COLOR_BGR2YCrCb)
    else:
        raise ValueError("Color space error...")

    # Split image channels info
    c1, c2, c3 = cv.split(image)
    color_feature = []

    # Only process mask != 0 channel region
    c1 = c1[np.where(mask != 0)]
    c2 = c2[np.where(mask != 0)]
    c3 = c3[np.where(mask != 0)]

    # Extract mean
    mean_1 = np.mean(c1)
    mean_2 = np.mean(c2)
    mean_3 = np.mean(c3)

    # Extract variance
    variance_1 = np.std(c1)
    variance_2 = np.std(c2)
    variance_3 = np.std(c3)

    # Extract skewness
    skewness_1 = np.mean(np.abs(c1 - mean_1) ** 3) ** (1. / 3)
    skewness_2 = np.mean(np.abs(c2 - mean_2) ** 3) ** (1. / 3)
    skewness_3 = np.mean(np.abs(c3 - mean_3) ** 3) ** (1. / 3)

    color_feature.extend(
        [mean_1, mean_2, mean_3, variance_1, variance_2,
            variance_3, skewness_1, skewness_2, skewness_3])

    return color_feature 
Example #30
Source File: trackbar.py    From PyIntroduction with MIT License
def cvEdgeDetection(image_file):
    image_bgr = cv2.imread(image_file)
    image_Lab = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2LAB)
    L = image_Lab[:, :, 0]

    win_name = "CannyEdge"
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.imshow(win_name, image_bgr)

    display_modes = ['Original', 'Gray', 'CannyEdge']
    display_mode = display_modes[2]

    low_threshold_trackbar = CVTrackbar("LowThr", win_name, 0, 200)
    low_threshold_trackbar.setValue(20)

    upper_threshold_trackbar = CVTrackbar("UpperThr", win_name, 0, 300)
    upper_threshold_trackbar.setValue(40)

    while True:
        key = cv2.waitKey(30) & 0xFF

        # Switch the display mode
        if key == ord('1'):
            display_mode = display_modes[0]
            cv2.imshow(win_name, image_bgr)

        elif key == ord('2'):
            display_mode = display_modes[1]
            cv2.imshow(win_name, L)

        elif key == ord('3'):
            display_mode = display_modes[2]

        elif key == ord('q'):
            break

        # Interactively update the Canny edge rendering
        if display_mode == display_modes[2]:
            edge = cv2.Canny(L, low_threshold_trackbar.value(), upper_threshold_trackbar.value())
            cv2.imshow(win_name, edge)

    cv2.destroyAllWindows()


# Demo: Canny edge extraction with Matplotlib + OpenCV trackbars