Python cv2.createCLAHE() Examples

The following are 30 code examples of cv2.createCLAHE(), collected from open-source projects. Each example lists its source file, project, and license.
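
As a quick orientation before the project-specific examples, here is a minimal sketch of the basic cv2.createCLAHE() workflow on a grayscale image. The file names are placeholders, and clipLimit=2.0 with tileGridSize=(8, 8) are simply the common starting values that appear throughout the examples below.

import cv2

# Load an 8-bit grayscale image ("input.png" is a placeholder path).
gray = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)

# clipLimit caps how much the contrast in each tile may be amplified;
# tileGridSize is the number of tiles the image is divided into.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
equalized = clahe.apply(gray)

cv2.imwrite("output_clahe.png", equalized)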
Example #1
Source File: tkinter_functions.py    From simba with GNU Lesser General Public License v3.0
def clahe(filename):
    os.chdir(os.path.dirname(filename))
    print('Applying CLAHE, this might take a while...')

    currentVideo = os.path.basename(filename)
    fileName, fileEnding = os.path.splitext(currentVideo)
    saveName = 'CLAHE_' + fileName + '.avi'
    cap = cv2.VideoCapture(currentVideo)
    imageWidth = int(cap.get(3))
    imageHeight = int(cap.get(4))
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(saveName, fourcc, fps, (imageWidth, imageHeight), 0)
    claheFilter = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16))
    try:
        while True:
            ret, image = cap.read()
            if ret:
                im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                claheCorrectedFrame = claheFilter.apply(im)
                out.write(claheCorrectedFrame)
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
            else:
                print('Completed video ' + saveName)
                break
    except Exception as e:
        print('CLAHE not applied: ' + str(e))
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    return saveName 
Example #2
Source File: training_data.py    From faceswap with GNU General Public License v3.0
def _random_clahe(self, batch):
        """ Randomly perform Contrast Limited Adaptive Histogram Equalization on
        a batch of images """
        base_contrast = self._constants["clahe_base_contrast"]

        batch_random = np.random.rand(self._batchsize)
        indices = np.where(batch_random > self._config.get("color_clahe_chance", 50) / 100)[0]

        grid_bases = np.rint(np.random.uniform(0,
                                               self._config.get("color_clahe_max_size", 4),
                                               size=indices.shape[0])).astype("uint8")
        contrast_adjustment = (grid_bases * (base_contrast // 2))
        grid_sizes = contrast_adjustment + base_contrast
        logger.trace("Adjusting Contrast. Grid Sizes: %s", grid_sizes)

        clahes = [cv2.createCLAHE(clipLimit=2.0,  # pylint: disable=no-member
                                  tileGridSize=(grid_size, grid_size))
                  for grid_size in grid_sizes]

        for idx, clahe in zip(indices, clahes):
            batch[idx, :, :, 0] = clahe.apply(batch[idx, :, :, 0])
        return batch 
Example #3
Source File: phiseg_makegif_samples.py    From PHiSeg-code with Apache License 2.0
def histogram_equalization(img):

    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

    # -----Splitting the LAB image to different channels-------------------------
    l, a, b = cv2.split(lab)

    # -----Applying CLAHE to L-channel-------------------------------------------
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)

    # -----Merge the CLAHE enhanced L-channel with the a and b channel-----------
    limg = cv2.merge((cl, a, b))

    # -----Converting the image from the LAB color model back to BGR-------------
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)

    return final 
Example #4
Source File: image.py    From ImageAnalysis with MIT License
def load_rgb(self, equalize=False):
        # print("Loading:", self.image_file)
        try:
            img_rgb = cv2.imread(self.image_file, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
            if equalize:
                # equalize val (essentially gray scale level)
                clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
                hsv = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2HSV)
                hue, sat, val = cv2.split(hsv)
                aeq = clahe.apply(val)
                # recombine
                hsv = cv2.merge((hue,sat,aeq))
                # convert back to rgb
                img_rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            h, w = img_rgb.shape[:2]
            self.node.setInt('height', h)
            self.node.setInt('width', w)
            return img_rgb

        except:
            print(self.image_file + ":\n" + "  rgb load error: " \
                + str(sys.exc_info()[1]))
            return None 
Example #5
Source File: helpers.py    From kaggle_ndsb2 with Apache License 2.0
def prepare_cropped_sax_image(sax_image, clahe=True, intermediate_crop=0, rotate=0):
    if rotate != 0:
        rot_mat = cv2.getRotationMatrix2D((sax_image.shape[0] / 2, sax_image.shape[0] / 2), rotate, 1)
        sax_image = cv2.warpAffine(sax_image, rot_mat, (sax_image.shape[0], sax_image.shape[1]))

    if intermediate_crop == 0:
        res = sax_image[settings.CROP_INDENT_Y:settings.CROP_INDENT_Y + settings.TARGET_CROP, settings.CROP_INDENT_X:settings.CROP_INDENT_X + settings.TARGET_CROP]
    else:
        crop_indent_y = settings.CROP_INDENT_Y - ((intermediate_crop - settings.TARGET_CROP) // 2)
        crop_indent_x = settings.CROP_INDENT_X - ((intermediate_crop - settings.TARGET_CROP) // 2)
        res = sax_image[crop_indent_y:crop_indent_y + intermediate_crop, crop_indent_x:crop_indent_x + intermediate_crop]
        res = cv2.resize(res, (settings.TARGET_CROP, settings.TARGET_CROP))

    if clahe:
        clahe = cv2.createCLAHE(tileGridSize=(1, 1))
        res = clahe.apply(res)
    return res 
Example #6
Source File: rpotter.py    From rpotter with MIT License
def FindWand():
    global rval, old_frame, old_gray, p0, mask, color, ig, img, frame
    try:
        rval, old_frame = cam.read()
        cv2.flip(old_frame, 1, old_frame)
        old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
        old_gray = cv2.equalizeHist(old_gray)
        old_gray = cv2.GaussianBlur(old_gray, (9, 9), 1.5)
        dilate_kernel = np.ones(dilation_params, np.uint8)
        old_gray = cv2.dilate(old_gray, dilate_kernel, iterations=1)
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
        old_gray = clahe.apply(old_gray)
        #TODO: trained image recognition
        p0 = cv2.HoughCircles(old_gray, cv2.HOUGH_GRADIENT, 3, 50, param1=240, param2=8, minRadius=4, maxRadius=15)
        if p0 is not None:
            p0.shape = (p0.shape[1], 1, p0.shape[2])
            p0 = p0[:, :, 0:2]
            mask = np.zeros_like(old_frame)
            ig = [[0] for x in range(20)]
        print("finding...")
        threading.Timer(3, FindWand).start()
    except Exception as e:
        print("Error: %s" % e) 
Example #7
Source File: file_function.py    From gradcam.pytorch with MIT License
def resize_and_contrast(in_dir, out_dir, target_size):
    check_and_mkdir(out_dir)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))

    for subdir, dirs, files in os.walk(in_dir):
        for f in files:
            file_path = subdir + os.sep + f
            if (is_image(f)):
                img = cv2.imread(file_path, 0)
                resized_img = cv2.resize(img, (target_size, target_size), interpolation = cv2.INTER_CUBIC)
                class_dir = out_dir + os.sep + file_path.split("/")[-2]
                check_and_mkdir(class_dir)

                file_name = class_dir + os.sep + file_path.split("/")[-1]
                print(file_name)

                norm_image = cv2.normalize(resized_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) * 256
                # norm_image = clahe.apply(resized_img)
                cv2.imwrite(file_name, norm_image)

# count the direct one-step sub directories (which will represent the class name) 
Example #8
Source File: clahe_histogram_equalization.py    From Mastering-OpenCV-4-with-Python with MIT License
def equalize_clahe_color(img):
    """Equalize the image splitting the image applying CLAHE to each channel
    and merging the results
    """

    cla = cv2.createCLAHE(clipLimit=4.0)
    channels = cv2.split(img)
    eq_channels = []
    for ch in channels:
        eq_channels.append(cla.apply(ch))

    eq_image = cv2.merge(eq_channels)
    return eq_image


# Create the dimensions of the figure and set title: 
Example #9
Source File: image.py    From surface-crack-detection with MIT License
def equalize_light(image, limit=3, grid=(7,7), gray=False):
    if (len(image.shape) == 2):
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        gray = True
    
    clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=grid)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)

    cl = clahe.apply(l)
    limg = cv2.merge((cl,a,b))

    image = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    if gray: 
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    return np.uint8(image) 
Example #10
Source File: 03_hist_equalize.py    From Practical-Computer-Vision with MIT License
def main():
    # read an image 
    img = cv2.imread('../figures/_DSC2126.jpg')
    img = cv2.resize(img, (600,400))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    
    
    # hist,bins = np.histogram(img[100:400, 100:400].flatten(),256,[0,256])
    # cdf = hist.cumsum()
    # cdf_normalized = cdf * hist.max()/ cdf.max()
    
    # # plot hist normalized 
    # plot_hist_cdf(cdf_normalized, img[100:400, 100:400])
        
    equ = cv2.equalizeHist(gray)
    
    # create a CLAHE object (Arguments are optional).
    clahe = cv2.createCLAHE()
    cl1 = clahe.apply(gray)
    
    plot_gray(gray, equ, cl1) 
Example #11
Source File: utils.py    From PHiSeg-code with Apache License 2.0
def histogram_equalization(img):

    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

    # -----Splitting the LAB image to different channels-------------------------
    l, a, b = cv2.split(lab)

    # -----Applying CLAHE to L-channel-------------------------------------------
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)

    # -----Merge the CLAHE enhanced L-channel with the a and b channel-----------
    limg = cv2.merge((cl, a, b))

    # -----Converting the image from the LAB color model back to BGR-------------
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)

    return final 
Example #12
Source File: utils_3d.py    From Kaggle-DSB with MIT License
def clahe_equalized(imgs):
    assert (len(imgs.shape)==4)  #4D arrays
    assert (imgs.shape[1]==1)  #check the channel is 1
    #create a CLAHE object (Arguments are optional).
    clahe = cv2.createCLAHE(clipLimit=2.3, tileGridSize=(8,8))
    imgs_equalized = np.empty(imgs.shape)
    for i in range(imgs.shape[0]):
        imgs_equalized[i,0] = clahe.apply(np.array(imgs[i,0], dtype = np.uint8))
    return imgs_equalized 
Example #13
Source File: render4geotiff.py    From ImageAnalysis with MIT License
def aeq_value(self, bgr):
        hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
        hue,sat,val = cv2.split(hsv)
        # adaptive histogram equalization on 'value' channel
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
        aeq = clahe.apply(val)
        # recombine
        hsv = cv2.merge((hue,sat,aeq))
        # convert back to rgb
        result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        return result 
Example #14
Source File: functional.py    From albumentations with MIT License
def clahe(img, clip_limit=2.0, tile_grid_size=(8, 8)):
    if img.dtype != np.uint8:
        raise TypeError("clahe supports only uint8 inputs")

    clahe_mat = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)

    if len(img.shape) == 2 or img.shape[2] == 1:
        img = clahe_mat.apply(img)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
        img[:, :, 0] = clahe_mat.apply(img[:, :, 0])
        img = cv2.cvtColor(img, cv2.COLOR_LAB2RGB)

    return img 
Example #15
Source File: ColorAugmenters.py    From impy with Apache License 2.0
def histogramEqualization(self, frame = None, equalizationType = None):
		"""
		Args:
			frame: A tensor that contains an image.
			equalizationType: An int that defines what type of histogram
						equalization algorithm to use.
		Returns:
			A frame whose channels have been equalized.
		"""
		# Assertions
		if (self.assertion.assertNumpyType(frame) == False):
			raise ValueError("Frame has to be a numpy array.")
		if (len(frame.shape) != 3):
			raise ValueError("Frame needs to have at least 3 channels.")
		if (equalizationType == None):
			equalizationType = 0
		if (type(equalizationType) != int):
			raise TypeError("ERROR: equalizationType has to be of type int.")
		# Local variables
		equ = np.zeros(frame.shape, np.uint8)
		# Equalize hist
		if (equalizationType == 0):
			for channel in range(3):
				equ[:, :, channel] = cv2.equalizeHist(frame[:, :, channel])
		elif (equalizationType == 1):
			clahe = cv2.createCLAHE(clipLimit=2.0)
			for channel in range(3):
				equ[:, :, channel] = clahe.apply(frame[:, :, channel])
		else:
			raise ValueError("ERROR: equalizationType not understood.")
		if (not (equ.dtype == np.uint8)):
			print("WARNING: Image is not dtype uint8. Forcing type.")
			equ = equ.astype(np.uint8)
		return equ 
Example #16
Source File: ca1pc.py    From sima with GNU General Public License v2.0
def _clahe(image, x_tile_size=10, y_tile_size=10, clip_limit=20):
    """Perform contrast limited adaptive histogram equalization (CLAHE)."""
    if not cv2_available:
        raise ImportError('OpenCV >= 2.4.8 required')
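    # tileGridSize is the number of tiles, so the image dimensions are divided
    # by the desired tile size in pixels.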
    transform = cv2.createCLAHE(clipLimit=clip_limit,
                                tileGridSize=(
                                    int(image.shape[1] // float(x_tile_size)),
                                    int(image.shape[0] // float(y_tile_size))))
    return transform.apply(sima.misc.to8bit(image)) 
Example #17
Source File: ac3d.py    From ImageAnalysis with MIT License
def make_textures_opencv(src_dir, project_dir, image_list, resolution=256):
    dst_dir = os.path.join(project_dir, 'models')
    if not os.path.exists(dst_dir):
        print("Notice: creating texture directory =", dst_dir)
        os.makedirs(dst_dir)
    for image in image_list:
        src = image.image_file
        dst = os.path.join(dst_dir, image.name + '.JPG')
        if not os.path.exists(dst):
            print(src)
            src = cv2.imread(src, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
            height, width = src.shape[:2]
            # downscale image first
            method = cv2.INTER_AREA
            scale = cv2.resize(src, (0,0),
                               fx=resolution/float(width),
                               fy=resolution/float(height),
                               interpolation=method)
            # convert to hsv color space
            hsv = cv2.cvtColor(scale, cv2.COLOR_BGR2HSV)
            hue,sat,val = cv2.split(hsv)
            # adaptive histogram equalization on 'value' channel
            clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
            aeq = clahe.apply(val)
            # recombine
            hsv = cv2.merge((hue,sat,aeq))
            # convert back to rgb
            result = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            cv2.imwrite(dst, result)
            print("Texture %dx%d %s" % (resolution, resolution, dst)) 
Example #18
Source File: video.py    From open_model_zoo with Apache License 2.0
def __init__(self, clip_limit=.5, tile_size=16):
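        # "cv" here is the OpenCV module (presumably imported in this file as "import cv2 as cv").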
        self.clahe = cv.createCLAHE(clipLimit=clip_limit,
                                    tileGridSize=(tile_size, tile_size)) 
Example #19
Source File: clahe_histogram_equalization.py    From Mastering-OpenCV-4-with-Python with MIT License
def equalize_clahe_color_yuv(img):
    """Equalize the image splitting it after conversion to YUV and applying CLAHE
    to the Y channel and merging the channels and convert back to BGR
    """

    cla = cv2.createCLAHE(clipLimit=4.0)
    Y, U, V = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2YUV))
    eq_Y = cla.apply(Y)
    eq_image = cv2.cvtColor(cv2.merge([eq_Y, U, V]), cv2.COLOR_YUV2BGR)
    return eq_image 
Example #20
Source File: clahe_histogram_equalization.py    From Mastering-OpenCV-4-with-Python with MIT License
def equalize_clahe_color_lab(img):
    """Equalize the image splitting it after conversion to LAB and applying CLAHE
    to the L channel and merging the channels and convert back to BGR
    """

    cla = cv2.createCLAHE(clipLimit=4.0)
    L, a, b = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2Lab))
    eq_L = cla.apply(L)
    eq_image = cv2.cvtColor(cv2.merge([eq_L, a, b]), cv2.COLOR_Lab2BGR)
    return eq_image 
Example #21
Source File: clahe_histogram_equalization.py    From Mastering-OpenCV-4-with-Python with MIT License
def equalize_clahe_color_hsv(img):
    """Equalize the image splitting it after conversion to HSV and applying CLAHE
    to the V channel and merging the channels and convert back to BGR
    """

    cla = cv2.createCLAHE(clipLimit=4.0)
    H, S, V = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    eq_V = cla.apply(V)
    eq_image = cv2.cvtColor(cv2.merge([H, S, eq_V]), cv2.COLOR_HSV2BGR)
    return eq_image 
Example #22
Source File: unet_utils.py    From Kaggle-DSB with MIT License
def clahe_equalized(imgs):
    assert (len(imgs.shape)==4)  #4D arrays
    assert (imgs.shape[1]==1)  #check the channel is 1
    #create a CLAHE object (Arguments are optional).
    clahe = cv2.createCLAHE(clipLimit=2.3, tileGridSize=(8,8))
    imgs_equalized = np.empty(imgs.shape)
    for i in range(imgs.shape[0]):
        imgs_equalized[i,0] = clahe.apply(np.array(imgs[i,0], dtype = np.uint8))
    return imgs_equalized 
Example #23
Source File: image.py    From OverwatchDataAnalysis with GNU General Public License v3.0
def increase_contrast(img):
    """
    Increase contrast of an RGB image
    @Author: Appcell
    @param img: image to be processed
    @return: a numpy.ndarray object of this image
    """
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(4, 4))
    cl = clahe.apply(l)
    limg = cv2.merge((cl,a,b))
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    return final 
Example #24
Source File: FeatureExtractor.py    From adviser with GNU General Public License v3.0
def __init__(self, domain: Domain = ""):
        Service.__init__(self, domain=domain)
        self.module_dir = os.path.dirname(os.path.abspath(__file__))
        # CLAHE (Contrast Limited Adaptive Histogram Equalization)
        self.CLAHE = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        # for detecting faces (returns coordinates of rectangle(s) of face area(s))
        self.DETECTOR = dlib.get_frontal_face_detector()
        # facial landmark predictor
        predictor_file = os.path.abspath(os.path.join(self.module_dir, '..', '..', '..', 'resources', 'models', 'video', 'shape_predictor_68_face_landmarks.dat'))
        self.PREDICTOR = dlib.shape_predictor(predictor_file) 
Example #25
Source File: card.py    From idmatch with MIT License
def remove_borders(image):
    image = cv2.imread(image)
    orig = image.copy()
    ratio = image.shape[0] / 500.0
    image = resize(image, height=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(7,7),0)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    contrasted = clahe.apply(blur)
    im = Image.fromarray(contrasted)
    
    edged = cv2.Canny(blur, 20, 170)
    _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    largest_area = 0
    for c in cnts:
        r = cv2.minAreaRect(c)
        area = r[1][0]*r[1][1]
        if area > largest_area:
            largest_area = area
            rect = r

    screenCnt = np.int0(cv2.boxPoints(rect))
    im = Image.fromarray(edged)

    cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
    im = Image.fromarray(image)

    if screenCnt is not None and len(screenCnt) > 0:
        return four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    return orig 
Example #26
Source File: image_processing.py    From kaggle-dsb2018 with Apache License 2.0
def rgb_clahe(in_rgb_img):
    bgr = in_rgb_img[:,:,[2,1,0]] # flip r and b
    lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    lab[:,:,0] = clahe.apply(lab[:,:,0])
    bgr = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    return bgr[:,:,[2,1,0]] 
Example #27
Source File: yarn_common_functions.py    From TextileDefectDetection with GNU Affero General Public License v3.0
def showPoints(self, image=None):
        fig, ax = plt.subplots(figsize=(10, 10))
        axes = plt.gca()
        axes.invert_yaxis()

        if image is not None:
            plt.axis('on')
            gray_im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
            cl1 = clahe.apply(gray_im)
            plt.imshow(cl1, cmap='gray')
            axes.set_xlim(left=0, right=1900)
            axes.set_ylim(top=0, bottom=1900)

        for myFloatPoint in self:
            if myFloatPoint.label == "warp_float":
                ax.plot(myFloatPoint.x, myFloatPoint.y, '.b', markersize=10)
            elif myFloatPoint.label == "weft_float":
                ax.plot(myFloatPoint.x, myFloatPoint.y, '.g', markersize=10)
            else:
                ax.plot(myFloatPoint.x, myFloatPoint.y, '.r', markersize=10)
            x1 = myFloatPoint.x
            y1 = myFloatPoint.y

            if myFloatPoint.lower_n:
                x2 = myFloatPoint.lower_n.x
                y2 = myFloatPoint.lower_n.y
                plt.plot([x1, x2], [y1, y2], 'r')
            if myFloatPoint.right_n:
                x2 = myFloatPoint.right_n.x
                y2 = myFloatPoint.right_n.y
                plt.plot([x1, x2], [y1, y2], 'g') 
Example #28
Source File: _base.py    From faceswap with GNU General Public License v3.0
def _normalize_clahe(face):
        """ Perform Contrast Limited Adaptive Histogram Equalization """
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4, 4))
        for chan in range(3):
            face[:, :, chan] = clahe.apply(face[:, :, chan])
        return face 
Example #29
Source File: dsbowl_utils.py    From Kaggle-DSB with MIT License
def clahe_equalized(imgs):
    assert (len(imgs.shape)==4)  #4D arrays
    assert (imgs.shape[1]==1)  #check the channel is 1
    #create a CLAHE object (Arguments are optional).
    clahe = cv2.createCLAHE(clipLimit=2.3, tileGridSize=(8,8))
    imgs_equalized = np.empty(imgs.shape)
    for i in range(imgs.shape[0]):
        imgs_equalized[i,0] = clahe.apply(np.array(imgs[i,0], dtype = np.uint8))
    return imgs_equalized 
Example #30
Source File: augmentations.py    From segmentation-networks-benchmark with MIT License
def __call__(self, im):
        img_yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
        clahe = cv2.createCLAHE(clipLimit=self.clipLimit, tileGridSize=self.tileGridSize)
        img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
        img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
        return img_output