Python cv2.COLOR_RGB2LAB Examples

The following are 15 code examples of cv2.COLOR_RGB2LAB(), collected from open-source projects. You can go to the original project or source file by following the links above each example.
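Before the project examples, a minimal sketch of the conversion itself (synthetic data, illustrative names): for 8-bit images OpenCV stores L scaled from [0, 100] up to [0, 255] and shifts a and b by +128, which is why several examples below divide L by 2.55 or subtract 128 from the chromatic channels.

import cv2
import numpy as np

rgb = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
lab = cv2.cvtColor(rgb, cv2.COLOR_RGB2LAB)   # uint8 LAB: L*255/100, a+128, b+128
L, a, b = cv2.split(lab)
back = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)  # round trip is close, but not exact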
Example #1
Source File: luminosity_threshold_tissue_locator.py    From StainTools with MIT License
def get_tissue_mask(I, luminosity_threshold=0.8):
        """
        Get a binary mask where true denotes pixels with a luminosity less than the specified threshold.
        Typically used to identify tissue in the image and exclude the bright white background.

        :param I: RGB uint8 image.
        :param luminosity_threshold: Luminosity threshold.
        :return: Binary mask.
        """
        assert is_uint8_image(I), "Image should be RGB uint8."
        I_LAB = cv.cvtColor(I, cv.COLOR_RGB2LAB)
        L = I_LAB[:, :, 0] / 255.0  # Convert to range [0,1].
        mask = L < luminosity_threshold

        # Check it's not empty
        if mask.sum() == 0:
            raise TissueMaskException("Empty tissue mask computed")

        return mask 
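A quick usage sketch on a synthetic image (assuming get_tissue_mask and the StainTools helpers it relies on are importable): the white half is rejected as background and the darker half is kept as tissue.

import numpy as np

img = np.full((10, 10, 3), 255, dtype=np.uint8)   # white background
img[:, :5] = 120                                  # darker region stands in for tissue
mask = get_tissue_mask(img, luminosity_threshold=0.8)
assert mask[:, :5].all() and not mask[:, 5:].any()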
Example #2
Source File: reinhard_color_normalizer.py    From StainTools with MIT License
def lab_split(I):
        """
        Convert from RGB uint8 to LAB and split into channels.

        :param I: Image RGB uint8.
        :return: L, A and B channels as float32 arrays.
        """
        assert is_uint8_image(I), "Should be an RGB uint8 image"
        I = cv.cvtColor(I, cv.COLOR_RGB2LAB)
        I_float = I.astype(np.float32)
        I1, I2, I3 = cv.split(I_float)
        I1 /= 2.55  # should now be in range [0,100]
        I2 -= 128.0  # should now be in range [-128,127]
        I3 -= 128.0  # should now be in range [-128,127]
        return I1, I2, I3 
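StainTools pairs this split with a merge step that undoes the rescaling; a minimal sketch of the inverse under the same conventions (the function name here is illustrative):

def lab_merge(I1, I2, I3):
    # Undo the per-channel rescaling, clip back to uint8 and convert LAB -> RGB.
    I = np.clip(cv.merge((I1 * 2.55, I2 + 128.0, I3 + 128.0)), 0, 255).astype(np.uint8)
    return cv.cvtColor(I, cv.COLOR_LAB2RGB)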
Example #3
Source File: luminosity_standardizer.py    From StainTools with MIT License
def standardize(I, percentile=95):
        """
        Transform image I to standard brightness.
        Modifies the luminosity channel such that a fixed percentile is saturated.

        :param I: Image uint8 RGB.
        :param percentile: Percentile for luminosity saturation. After the transform, at least (100 - percentile)% of pixels are fully luminous (white).
        :return: Image uint8 RGB with standardized brightness.
        """
        assert is_uint8_image(I), "Image should be RGB uint8."
        I_LAB = cv.cvtColor(I, cv.COLOR_RGB2LAB)
        L_float = I_LAB[:, :, 0].astype(float)
        p = np.percentile(L_float, percentile)
        I_LAB[:, :, 0] = np.clip(255 * L_float / p, 0, 255).astype(np.uint8)
        I = cv.cvtColor(I_LAB, cv.COLOR_LAB2RGB)
        return I 
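Usage sketch, again assuming the StainTools helpers are importable: brighten a dim image so that its 95th-percentile luminosity maps to full white.

import numpy as np

dim = (np.random.rand(32, 32, 3) * 180).astype(np.uint8)  # no pixel brighter than ~180
bright = standardize(dim, percentile=95)                  # roughly the top 5% of L values now saturate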
Example #4
Source File: omnirobot_simulator_server.py    From robotics-rl-srl with MIT License
def renderEnvLuminosityNoise(self, origin_image, noise_var=0.1, in_RGB=False, out_RGB=False):
        """
        Render the environment with randomly varied luminosity and color.
        """
        # Convert to LAB and scale each channel in float32 so the random
        # factors are not truncated by uint8 arithmetic.
        origin_image_LAB = cv2.cvtColor(
            origin_image, cv2.COLOR_RGB2LAB if in_RGB else cv2.COLOR_BGR2LAB).astype(np.float32)
        for channel in range(3):
            origin_image_LAB[:, :, channel] *= np.random.randn() * noise_var + 1.0
        out_image = cv2.cvtColor(
            np.clip(origin_image_LAB, 0, 255).astype(np.uint8),
            cv2.COLOR_LAB2RGB if out_RGB else cv2.COLOR_LAB2BGR)
        return out_image 
Example #5
Source File: train101_9ch_fold.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def clahe(img, clipLimit=2.0, tileGridSize=(5,5)):
    # Equalize only the L (lightness) channel in LAB space
    img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_lab[:, :, 0] = clahe.apply(img_lab[:, :, 0])
    img_output = cv2.cvtColor(img_lab, cv2.COLOR_LAB2RGB)
    return img_output 
Example #6
Source File: functional.py    From albumentations with MIT License
def clahe(img, clip_limit=2.0, tile_grid_size=(8, 8)):
    if img.dtype != np.uint8:
        raise TypeError("clahe supports only uint8 inputs")

    clahe_mat = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)

    if len(img.shape) == 2 or img.shape[2] == 1:
        img = clahe_mat.apply(img)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
        img[:, :, 0] = clahe_mat.apply(img[:, :, 0])
        img = cv2.cvtColor(img, cv2.COLOR_LAB2RGB)

    return img 
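A usage sketch for the function above, on a synthetic low-contrast image:

import numpy as np

flat = np.full((32, 32, 3), 100, dtype=np.uint8)
flat[8:24, 8:24] = 110                       # faint square, barely visible
enhanced = clahe(flat, clip_limit=2.0, tile_grid_size=(8, 8))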
Example #7
Source File: functional.py    From dsb2018_topcoders with MIT License
def clahe(img, clipLimit=2.0, tileGridSize=(8,8)):
    # Equalize only the L (lightness) channel in LAB space
    img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    img_lab[:, :, 0] = clahe.apply(img_lab[:, :, 0])
    img_output = cv2.cvtColor(img_lab, cv2.COLOR_LAB2RGB)
    return img_output 
Example #8
Source File: functional.py    From dsb2018_topcoders with MIT License
def add_channel(img):
    # Equalize the LAB L channel with CLAHE, invert it for bright images,
    # and append it to the RGB input as a fourth channel.
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(21, 21))
    lab = clahe.apply(lab[:, :, 0])
    if lab.mean() > 127:
        lab = 255 - lab
    return np.dstack((img, lab)) 
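Usage sketch: the output stacks the processed L channel onto the RGB input as a fourth channel.

import numpy as np

rgb = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
rgb4 = add_channel(rgb)
assert rgb4.shape == (128, 128, 4)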
Example #9
Source File: MR.py    From mr_saliency with GNU General Public License v2.0
def __MR_readimg(self,img):
        if isinstance(img,str): # an image path
            # _cv2_LOAD_IMAGE_COLOR is a compatibility alias defined
            # elsewhere in MR.py; note that cv2.imread returns BGR, so
            # the COLOR_RGB2LAB below swaps the red and blue channels.
            img = cv2.imread(img, _cv2_LOAD_IMAGE_COLOR)
        img = cv2.cvtColor(img,cv2.COLOR_RGB2LAB).astype(float)/255
        h = 100
        w = int(float(h)/float(img.shape[0])*float(img.shape[1]))
        return cv2.resize(img,(w,h)) 
Example #10
Source File: image.py    From GuidedFilter with MIT License
def rgb2Lab(img):
    img_rgb = rgb(img)
    Lab = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2LAB)
    return Lab
Example #11
Source File: FilterCvQtContainer.py    From bjtu_BinocularCameraRecord with MIT License
def process(self, cv_before, name):

        k = self.k[0]
        kernel = np.ones((k, k), np.uint8)

        if name == 'Invert':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.bitwise_not(cv_before)
        elif name == 'Histogram Equalization':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
            cv_after = clahe.apply(cv_before)
        elif name == 'Threshold':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            ret, cv_after = cv2.threshold(
                cv_before, k, 255, cv2.THRESH_BINARY)
        elif name == 'Gaussian Threshold':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.adaptiveThreshold(cv_before, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                             cv2.THRESH_BINARY, k, 2)  # blockSize k must be odd and > 1
        elif name == 'HSV':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2HSV)
            lower_color = np.array([k - 35, 0, 0])
            upper_color = np.array([k + 35, 255, 255])
            cv_after = cv2.inRange(cv_before, lower_color, upper_color)
        elif name == 'LAB':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2LAB)
            L, a, b = cv2.split(cv_before)
            ret, cv_after = cv2.threshold(L, k, 255, cv2.THRESH_BINARY)
        elif name == 'Erosion':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.erode(cv_before, kernel, iterations=1)
        elif name == 'Dilation':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.dilate(cv_before, kernel, iterations=1)
        elif name == 'Opening':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.morphologyEx(
                cv_before, cv2.MORPH_OPEN, kernel)
        elif name == 'Closing':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.morphologyEx(
                cv_before, cv2.MORPH_CLOSE, kernel)
        elif name == 'Top Hat':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.morphologyEx(
                cv_before, cv2.MORPH_TOPHAT, kernel)
        elif name == 'Black Hat':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.morphologyEx(
                cv_before, cv2.MORPH_BLACKHAT, kernel)
        elif name == 'Canny':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.Canny(cv_before, 100, k)
        elif name == 'Laplacian':
            cv_before = cv2.cvtColor(cv_before, cv2.COLOR_RGB2GRAY)
            cv_after = cv2.Laplacian(cv_before, cv2.CV_64F)
            cv_after = np.absolute(cv_after)
            cv_after = np.uint8(cv_after)
        else:
            cv_after = cv_before  # unknown filter name: pass the frame through unchanged

        return cv_after 
Example #12
Source File: arm_resnet.py    From craves.ai with GNU General Public License v3.0
def __getitem__(self, index):

        if self.is_train:
            ids = self.train[index]
        else:
            ids = self.valid[index]

        images = self.dataset.get_image([self.cam_name], [ids])
        img_path = images[0]

        img = load_image(img_path)  #CxHxW
        target = self.load_angles(img_path)

        original_size = np.array((img.shape[2], img.shape[1]))

        segmasks = self.dataset.get_seg([self.cam_name], [ids])
        segmask = io.imread(segmasks[0])

        binary_arm = vdb.get_obj_mask(segmask, self.color)
        bb = vdb.seg2bb(binary_arm)
        x0, x1, y0, y1 = bb

        c = np.array([(x0+x1), (y0+y1)])/2
        #s = np.sqrt((y1-y0)*(x1-x0))/120.0
        s = np.sqrt((y1-y0)*(x1-x0))/60.0
        r = 0

        #s = max(x1-x0, y1-y0)/125
        if self.is_train:
            c = c + np.array([-30 + 60*random.random(), -30 + 60*random.random()])  # random move
            s *= 0.6*(1+2*random.random())  # random scale

            rf = 15
            r = -rf + 2*random.random()*rf  # random rotation
            #r = torch.randn(1).mul_(rf).clamp(-2*rf, 2*rf)[0] if random.random() <= 0.6 else 0

            # Color
            im_rgb = im_to_numpy(img)
            im_lab = cv2.cvtColor(im_rgb, cv2.COLOR_RGB2LAB)
            im_lab[:,:,0] = np.clip(im_lab[:,:,0]*(random.uniform(0.3, 1.3)), 0, 255)
            img = im_to_torch(cv2.cvtColor(im_lab, cv2.COLOR_LAB2RGB))

            if random.random() <= 0.5:
                img = torch.from_numpy(fliplr(img.numpy())).float()

        inp = crop(img, c, s, [self.inp_res, self.inp_res], rot=r)
        inp = color_normalize(inp, self.mean, self.std)

        return inp, target 
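The LAB brightness jitter in the middle of __getitem__ is worth isolating; a minimal standalone sketch (the function name is hypothetical):

import random

import cv2
import numpy as np

def jitter_luminosity(rgb, lo=0.3, hi=1.3):
    # Scale the L channel by a random factor and clip back into uint8 range.
    lab = cv2.cvtColor(rgb, cv2.COLOR_RGB2LAB)
    lab[:, :, 0] = np.clip(lab[:, :, 0] * random.uniform(lo, hi), 0, 255)
    return cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)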
Example #13
Source File: test_color.py    From imgaug with MIT License
def test_every_colorspace(self):
        def _image_to_channel(image, cspace):
            if cspace == iaa.CSPACE_YCrCb:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB)
                return image_cvt[:, :, 0:0+1]
            elif cspace == iaa.CSPACE_HSV:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
                return image_cvt[:, :, 2:2+1]
            elif cspace == iaa.CSPACE_HLS:
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
                return image_cvt[:, :, 1:1+1]
            elif cspace == iaa.CSPACE_Lab:
                if hasattr(cv2, "COLOR_RGB2Lab"):
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Lab)
                else:
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
                return image_cvt[:, :, 0:0+1]
            elif cspace == iaa.CSPACE_Luv:
                if hasattr(cv2, "COLOR_RGB2Luv"):
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2Luv)
                else:
                    image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
                return image_cvt[:, :, 0:0+1]
            else:
                assert cspace == iaa.CSPACE_YUV
                image_cvt = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
                return image_cvt[:, :, 0:0+1]

        # Max differences between input image and image after augmentation
        # when no child augmenter is used (for the given example image below).
        # For some colorspaces the conversion to input colorspace isn't
        # perfect.
        # Values were manually checked.
        max_diff_expected = {
            iaa.CSPACE_YCrCb: 1,
            iaa.CSPACE_HSV: 0,
            iaa.CSPACE_HLS: 0,
            iaa.CSPACE_Lab: 2,
            iaa.CSPACE_Luv: 4,
            iaa.CSPACE_YUV: 1
        }

        image = np.arange(6*6*3).astype(np.uint8).reshape((6, 6, 3))

        for cspace in self.valid_colorspaces:
            with self.subTest(colorspace=cspace):
                child = _BatchCapturingDummyAugmenter()
                aug = iaa.WithBrightnessChannels(
                    children=child,
                    to_colorspace=cspace)

                image_aug = aug(image=image)

                expected = _image_to_channel(image, cspace)
                diff = np.abs(
                    image.astype(np.int32) - image_aug.astype(np.int32))
                assert np.all(diff <= max_diff_expected[cspace])
                assert np.array_equal(child.last_batch.images[0], expected) 
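The max_diff_expected table above reflects that uint8 round trips through some colorspaces are lossy; a quick standalone check for Lab:

import cv2
import numpy as np

img = np.arange(6 * 6 * 3, dtype=np.uint8).reshape((6, 6, 3))
rt = cv2.cvtColor(cv2.cvtColor(img, cv2.COLOR_RGB2LAB), cv2.COLOR_LAB2RGB)
diff = np.abs(img.astype(np.int32) - rt.astype(np.int32)).max()
# diff is small but typically nonzero, on the order of the table's Lab entry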
Example #14
Source File: lane_tracker.py    From lane_tracker with GNU General Public License v3.0
def filter_lane_points(self,
                           img,
                           filter_type='bilateral',
                           ksize_r=25,
                           C_r=8,
                           ksize_b=35,
                           C_b=5,
                           mask_noise=False,
                           ksize_noise=65,
                           C_noise=10,
                           noise_thresh=135):
        '''
        Filter an image to isolate lane lines and return a binary version.

        All image color space conversion, thresholding, filtering and morphing
        happens inside this method. It takes an RGB color image as input and
        returns a binary filtered version.
        '''

        # Define structuring elements for cv2 functions
        strel_lab_b = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(55,55))
        strel_rgb_r = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(29,29))
        strel_open = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(5,5))
        # Extract RGB R-channel and LAB B-channel
        rgb_r_channel = img[:,:,0]
        lab_b_channel = (cv2.cvtColor(img, cv2.COLOR_RGB2LAB))[:,:,2]
        # Apply tophat morphology
        rgb_r_tophat = cv2.morphologyEx(rgb_r_channel, cv2.MORPH_TOPHAT, strel_rgb_r, iterations=1)
        lab_b_tophat = cv2.morphologyEx(lab_b_channel, cv2.MORPH_TOPHAT, strel_lab_b, iterations=1)
        if filter_type == 'bilateral':
            # Apply bilateral adaptive color thresholding
            rgb_r_thresh = bilateral_adaptive_threshold(rgb_r_tophat, ksize=ksize_r, C=C_r)
            lab_b_thresh = bilateral_adaptive_threshold(lab_b_tophat, ksize=ksize_b, C=C_b)
        elif filter_type == 'neighborhood':
            rgb_r_thresh = cv2.adaptiveThreshold(rgb_r_channel, 255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C, thresholdType=cv2.THRESH_BINARY, blockSize=ksize_r, C=-C_r)
            lab_b_thresh = cv2.adaptiveThreshold(lab_b_channel, 255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C, thresholdType=cv2.THRESH_BINARY, blockSize=ksize_b, C=-C_b)
        else:
            raise ValueError("Unexpected filter mode. Expected modes are 'bilateral' or 'neighborhood'.")
        if mask_noise: # Merge both color channels and the noise mask
            # Create a mask to filter out noise such as trees and other greenery based on the LAB B-channel
            noise_mask_part1 = cv2.inRange(lab_b_channel, noise_thresh, 255) # This catches the noise, but unfortunately also the yellow line, therefore...
            noise_mask_part2 = bilateral_adaptive_threshold(lab_b_channel, ksize=ksize_noise, C=C_noise) # ...this brings the yellow line back...
            noise_bool = np.logical_or(np.logical_not(noise_mask_part1), noise_mask_part2) # ...once we combine the two.
            noise_mask = np.zeros_like(rgb_r_channel, dtype=np.uint8)
            noise_mask[noise_bool] = 255

            merged_bool = np.logical_and(np.logical_or(rgb_r_thresh, lab_b_thresh), noise_mask)
            merged = np.zeros_like(rgb_r_channel, dtype=np.uint8)
            merged[merged_bool] = 255
        else: # Only merge the two color channels
            merged_bool = np.logical_or(rgb_r_thresh, lab_b_thresh)
            merged = np.zeros_like(rgb_r_channel, dtype=np.uint8)
            merged[merged_bool] = 255

        # Apply open morphology
        opened = cv2.morphologyEx(merged, cv2.MORPH_OPEN, strel_open, iterations=1)

        return opened 
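Why this method reaches for the LAB B-channel: yellow sits high on LAB's blue-yellow axis, so a yellow lane line separates cleanly from gray asphalt. A quick standalone check:

import cv2
import numpy as np

yellow = np.uint8([[[255, 255, 0]]])     # RGB yellow patch
gray = np.uint8([[[128, 128, 128]]])     # asphalt-like gray
b_yellow = cv2.cvtColor(yellow, cv2.COLOR_RGB2LAB)[0, 0, 2]
b_gray = cv2.cvtColor(gray, cv2.COLOR_RGB2LAB)[0, 0, 2]
assert b_yellow > b_gray                 # roughly 222 vs 128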
Example #15
Source File: saliency_map.py    From deepgaze with MIT License
def returnMask(self, image, tot_bins=8, format='BGR2LAB'):
        """ Return the saliency mask of the input image.
        
        @param: image the image to process
        @param: tot_bins the number of bins used in the histogram
        @param: format the color conversion to apply, one of the following:
            BGR2LAB, BGR2RGB, RGB2LAB, RGB, BGR, LAB
        @return: the saliency mask
        """
        if format == 'BGR2LAB':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
        elif format == 'BGR2RGB':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        elif format == 'RGB2LAB':
            image = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
        elif format == 'RGB' or format == 'BGR' or format == 'LAB':
            pass
        else:
            raise ValueError('[DEEPGAZE][SALIENCY-MAP][ERROR] the input format of the image is not supported.')
        if DEBUG: start = timer()
        self._calculate_histogram(image, tot_bins=tot_bins)
        if DEBUG: end = timer()
        if DEBUG: print("--- %s calculate_histogram seconds ---" % (end - start))
        if DEBUG: start = timer()
        number_of_colors = self._precompute_parameters()
        if DEBUG: end = timer()
        if DEBUG: print("--- number of colors: " + str(number_of_colors) + " ---")
        if DEBUG: print("--- %s precompute_paramters seconds ---" % (end - start))
        if DEBUG: start = timer()
        self._bilateral_filtering()
        if DEBUG: end = timer()
        if DEBUG: print("--- %s bilateral_filtering seconds ---" % (end - start))
        if DEBUG: start = timer()
        self._calculate_probability()
        if DEBUG: end = timer()
        if DEBUG: print("--- %s calculate_probability seconds ---" % (end - start))
        if DEBUG: start = timer()
        self._compute_saliency_map()
        if DEBUG: end = timer()
        if DEBUG: print("--- %s compute_saliency_map seconds ---" % (end - start))
        if DEBUG: start = timer()
        it = np.nditer(self.salient_image, flags=['multi_index'], op_flags=['writeonly'])
        while not it.finished:
            # This part takes 0.1 seconds
            y = it.multi_index[0]
            x = it.multi_index[1]
            #L_id = self.L_id_matrix[y, x]
            #A_id = self.A_id_matrix[y, x]
            #B_id = self.B_id_matrix[y, x]
            index = self.image_quantized[y, x]
            # These operations take 0.1 seconds
            index = self.map_3d_1d[index[0], index[1], index[2]]
            it[0] = self.saliency[index]
            it.iternext()

        if DEBUG: end = timer()
        # ret, self.salient_image = cv2.threshold(self.salient_image, 150, 255, cv2.THRESH_BINARY)
        if DEBUG: print("--- %s returnMask 'iteration part' seconds ---" % (end - start))
        return self.salient_image
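The nditer loop above is flagged in its own comments as slow; a vectorized sketch of the same lookup, assuming the attribute shapes implied by the code (image_quantized holds HxWx3 bin indices, map_3d_1d maps a bin triple to a color index, saliency is one-dimensional):

q = self.image_quantized
color_idx = self.map_3d_1d[q[..., 0], q[..., 1], q[..., 2]]
self.salient_image = self.saliency[color_idx].astype(self.salient_image.dtype)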