Python cv2.COLOR_BGR2GRAY Examples

The following code examples show how to use cv2.COLOR_BGR2GRAY, the colour-conversion code passed to cv2.cvtColor() to convert a BGR image into a single-channel grayscale image. They are taken from open source Python projects.
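
Before the project examples, here is a minimal, self-contained sketch of the basic pattern they all build on; the file names sample.jpg and sample_gray.jpg are placeholders chosen for illustration:

import cv2

# Read an image from disk; OpenCV loads it as a BGR array (sample.jpg is a placeholder path)
img = cv2.imread("sample.jpg")
if img is None:
    raise FileNotFoundError("sample.jpg could not be read")

# cv2.COLOR_BGR2GRAY is a conversion code (a flag), not a function:
# pass it to cv2.cvtColor to obtain a single-channel grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

print(gray.shape, gray.dtype)  # (height, width), uint8 -- the channel axis is dropped
cv2.imwrite("sample_gray.jpg", gray)

Most of the examples below follow this conversion with thresholding, blurring, CLAHE, template matching, or feature detection, since many OpenCV routines expect a single-channel input.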

Example 1
Project: surface-crack-detection   Author: Khoronus   File: image.py    MIT License
def equalize_light(image, limit=3, grid=(7,7), gray=False):
    if (len(image.shape) == 2):
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        gray = True
    
    clahe = cv2.createCLAHE(clipLimit=limit, tileGridSize=grid)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)

    cl = clahe.apply(l)
    limg = cv2.merge((cl,a,b))

    image = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    if gray: 
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    return np.uint8(image) 
Example 2
Project: ConvLSTM   Author: XingguangZhang   File: extract_flow.py    MIT License
def cal_for_frames(video_path):
    cap = cv2.VideoCapture(video_path)
    i = 0
    flow = []
    while(cap.isOpened()):
        ret, curr = cap.read()
        if(not ret): break
        if i == 0:
            prev = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)
        else:
            curr = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)
            tmp_flow = compute_TVL1(prev, curr)
            flow.append(tmp_flow)
            prev = curr
        i += 1
    return flow 
Example 3
Project: pynvr   Author: JFF-Bohdan   File: motion_detection.py    BSD 3-Clause "New" or "Revised" License
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (21, 21), 0)

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        frameDiff = cv.absdiff(gray, self.prevFrame)

        # kernel = np.ones((5, 5), np.uint8)

        opening = cv.morphologyEx(frameDiff, cv.MORPH_OPEN, None)  # noqa
        closing = cv.morphologyEx(frameDiff, cv.MORPH_CLOSE, None)  # noqa

        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        height = np.size(th1, 0)
        width = np.size(th1, 1)

        nb = cv.countNonZero(th1)

        avg = (nb * 100) / (height * width)  # Percentage of non-zero (changed) pixels in the frame

        self.prevFrame = gray

        # cv.DrawContours(currentframe, self.currentcontours, (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
        # cv.imshow("frame", current_frame)

        ret = avg > self.threshold   # Trigger the alarm if the changed-pixel percentage exceeds the threshold

        if ret:
            self.updateMotionDetectionDts()

        return ret 
Example 4
Project: surface-crack-detection   Author: Khoronus   File: cracktile.py    MIT License
def image_preprocessor(image):
    image = im.equalize_light(image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    black_level = im.back_in_black(image)

    image = im.gauss_filter(image, (3,3))
    image = im.light(image, bright=-30, contrast=-30)
    
    if not black_level:
        image = cv2.bitwise_not(image)

    kernel = np.ones((5,5), np.uint8)
    mask = cv2.erode(image, kernel, iterations=1)
    mask = cv2.dilate(mask, kernel, iterations=1)
    
    image = np.subtract(image, mask)

    return im.threshold(image, clip=5) 
Example 5
Project: helloworld   Author: pip-uninstaller-python   File: 7.3.py    GNU General Public License v2.0
def genCharVideo(self, filepath):
        self.charVideo = []
        # Read the video file; the resulting capture object is assigned to cap
        cap = cv2.VideoCapture(filepath)
        # cap.get(3) and cap.get(4) return the video width and height,
        # cap.get(5) returns the frame rate (FPS), and cap.get(7) returns the total frame count
        # Store the playback interval so the character animation later plays at the same frame rate as the source video
        self.timeInterval = round(1 / cap.get(5), 3)
        nf = int(cap.get(7))
        print('Converting every frame to character art, please wait...')
        # Iterating over this generator prints a progress bar in the terminal
        for i in pyprind.prog_bar(range(nf)):
            # cap.read() reads the next frame and returns a two-element tuple: the first element is a bool
            # indicating whether the frame was read correctly, the second is a 2-D numpy array holding the frame data
            rawFrame = cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)
            frame = self.convert(rawFrame, os.get_terminal_size(), fill=True)
            self.charVideo.append(frame)
        cap.release()

    # Export the character animation data in charVideo so it can be loaded and played back next time
Example 6
Project: garden.facelock   Author: kivy-garden   File: __init__.py    MIT License
def face_recognize(self):
        cap = cv2.VideoCapture(self.index)
        
        face_cascade = cv2.CascadeClassifier(self.cascade)
        '''
        face_cascade: the cascade classifier is loaded here for later use.
        '''

        while(True):
            ret, frame = cap.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            '''
            Converts the coloured frame to grayscale.
            '''
            if np.any(face_cascade.detectMultiScale(gray, 1.3, 5)):
                
                print("Cascade found")
                
                self.dispatch('on_match')
                
                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)
                break
            
            else:
                print("Not recognized")

            cv2.imshow('frame', frame)
            # Comment out the statement above to hide the camera window
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print("Forcefully Closed")

                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)
                break
        cap.release() 
Example 7
Project: Latex-Math   Author: 34-Matt   File: labeldata.py    MIT License
def loadDataset(file_name1,file_name2,rate = 0.2): #file_name1 location of all characters, file_name2 dict
	dict = loadDict(file_name2)
	ds1 = os.listdir(file_name1)
	file_count = sum([len(files) for r, d, files in os.walk(file_name1)])
	counter = 0
	X = np.empty((0,45,45),dtype=np.uint8)
	Y = np.empty((0,1),dtype=np.uint8) 
	for d in ds1:
		folder = os.path.join(file_name1,d)
		ds2 = os.listdir(folder)
		d = d.lower()
		for d2 in ds2:
			filei = os.path.join(folder,d2)
			image = cv2.imread(filei)
			image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) # Convert to gray
			npi = np.asarray(image).reshape(45,45) #might need to change
			X = np.append(X, [npi],axis = 0) #might need to change
			Y = np.append(Y,dict[d])
			counter += 1
			output_string = f"Image File {counter} of {file_count}\n"
			sys.stdout.write(output_string)
			sys.stdout.flush()
	#x_train,x_test,y_train,y_test = train_test_split(X,Y,test_size = rate)	
	return X, Y 
Example 8
Project: dataflow   Author: tensorpack   File: imgproc.py    Apache License 2.0
def _augment(self, img, r):
        old_dtype = img.dtype

        if img.ndim == 3:
            if self.rgb is not None:
                m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
                grey = cv2.cvtColor(img.astype('float32'), m)
                mean = np.mean(grey)
            else:
                mean = np.mean(img, axis=(0, 1), keepdims=True)
        else:
            mean = np.mean(img)

        img = img * r + mean * (1 - r)
        if self.clip or old_dtype == np.uint8:
            img = np.clip(img, 0, 255)
        return img.astype(old_dtype) 
Example 9
Project: object-detection   Author: cristianpb   File: test_detection.py    MIT License
def test_motion():
    image = cv2.imread("./imgs/image.jpeg")
    print(image.shape)

    detector = Detector_Motion()

    image2 = cv2.imread("./imgs/image_box.jpg")
    print(image2.shape)
    assert image.shape == image2.shape
    image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    image2 = cv2.GaussianBlur(image2, (21, 21), 0)
    detector.avg = image2.astype(float)

    output = detector.prediction(image)
    df = detector.filter_prediction(output, image)
    image = detector.draw_boxes(image, df)
    print(df)
    assert df.shape[0] == 1

    cv2.imwrite("./imgs/outputcv.jpg", image) 
Example 10
Project: surface-crack-detection   Author: Khoronus   File: simple.py    MIT License
def image_preprocessor(image):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = im.threshold(image, min_limit=127)
    return cv2.bitwise_not(image) 
Example 11
Project: surface-crack-detection   Author: Khoronus   File: crackconcrete.py    MIT License
def image_preprocessor(image):
    image = cv2.bitwise_not(image)

    image = im.gauss_filter(image, (3,3))
    image = im.equalize_light(image)
    image = im.light(image, bright=-20, contrast=-20)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    image = im.threshold(image, min_limit=127)
    return image 
Example 12
Project: tesseract_cardRecognition   Author: KaiJin1995   File: detection.py    Apache License 2.0
def detect(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # 2. Morphological preprocessing to obtain an image in which rectangles can be found
    dilation = morph_process(gray)

    # 3. Find and filter the text regions
    region = findTextRegion(dilation)

    # 4. Draw the detected contours with green lines
    ii = 0
    idImgs = []
    for box in region:
        h = abs(box[0][1] - box[2][1])
        w = abs(box[0][0] - box[2][0])
        Xs = [i[0] for i in box]
        Ys = [i[1] for i in box]
        x1 = min(Xs)
        y1 = min(Ys)
        img2 = img.copy()
        if w > 0 and h > 0:
            idImg = grayImg(img2[y1:y1 + h, x1:x1 + w])
            ii += 1
            idImgs.append(idImg)
    return idImgs 
Example 13
Project: 3D-HourGlass-Network   Author: Naman-ntc   File: my.py    MIT License
def test_heatmaps(heatmaps,img,i):
    heatmaps=heatmaps.numpy()
    #heatmaps=np.squeeze(heatmaps)
    heatmaps=heatmaps[:,:64,:]
    heatmaps=heatmaps.transpose(1,2,0)
    print('heatmap inside shape is',heatmaps.shape)
##    print('----------------here')
##    print(heatmaps.shape)
    img=img.numpy()
    #img=np.squeeze(img)
    img=img.transpose(1,2,0)
    img=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#    print('heatmaps',heatmaps.shape)
    heatmaps = cv2.resize(heatmaps,(0,0), fx=4,fy=4)
#    print('heatmapsafter',heatmaps.shape)
    for j in range(0, 16):
        heatmap = heatmaps[:,:,j]
        heatmap = heatmap.reshape((256,256,1))
        heatmapimg = np.array(heatmap * 255, dtype = np.uint8)
        heatmap = cv2.applyColorMap(heatmapimg, cv2.COLORMAP_JET)
        heatmap = heatmap/255
        plt.imshow(img)
        plt.imshow(heatmap, alpha=0.5)
        plt.show()
        #plt.savefig('hmtestpadh36'+str(i)+js[j]+'.png') 
Example 14
Project: esys-pbi   Author: fsxfreak   File: square_marker_detect.py    MIT License
def bench(folder):
    from os.path import join
    from video_capture.av_file_capture import File_Capture
    cap = File_Capture(join(folder,'marker-test.mp4'))
    markers = []
    detected_count = 0

    for x in range(500):
        frame = cap.get_frame()
        img = frame.img
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        markers = detect_markers_robust(gray_img,5,prev_markers=markers,true_detect_every_frame=1,visualize=True)

        draw_markers(img, markers)
        cv2.imshow('Detected Markers', img)

        # for m in markers:
        #     if 'img' in m:
        #         cv2.imshow('id %s'%m['id'], m['img'])
        #         cv2.imshow('otsu %s'%m['id'], m['otsu'])
        if cv2.waitKey(1) == 27:
           break
        detected_count += len(markers)
    print(detected_count) #2900 #3042 #3021 
Example 15
Project: Mussy-Robot   Author: arnomoonens   File: emotion_recognition.py    MIT License
def emotion_recognition(image):
    #return prediction_land(image)
    return prediction_mouth(image)
    #return prediction_Fisher(image)

    
    
    
# img = cv2.imread("smile.jpg")
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# emotion_recognition(gray)
## Draw a rectangle around the faces
#img = cv2.imread("webcam_normal.jpg")
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#emotion_recognition(gray)
#
#img = cv2.imread("webcam_happy.jpg")
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#emotion_recognition(gray) 
Example 16
Project: speed_estimation   Author: NeilNie   File: helper.py    MIT License
def load_gray_image(image_file):

    img = cv2.imread(image_file)
    img = cv2.resize(img, (configs.IMG_WIDTH, configs.IMG_HEIGHT))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img 
Example 17
Project: visual-asset-generator   Author: Automattic   File: classify.py    GNU General Public License v3.0
def classify(path, classifier):
    img = cv2.imread(path)
    print(path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = classifier.detectMultiScale(gray, 1.1, 5)

    if len(faces) == 0:
        return False

    for f in faces:
        print(f)
        (x,y,w,h) = f
        # if w > 300:
        #     break

    face = {}
    face['x'] = int(x)
    face['y'] = int(y)
    face['w'] = int(w)
    face['h'] = int(h)

    img_h, img_w, channels = img.shape
    img = {}
    img['w'] = int(img_w)
    img['h'] = int(img_h)
    data = { 'path': path, 'face': face, 'img': img }
    return data 
Example 18
Project: ultra_secret_scripts   Author: CharlesDankoff   File: image_search.py    GNU General Public License v3.0
def search_image_in_image(small_image, large_image, precision=0.95):
    template = small_image.astype(np.float32)
    img_rgb = large_image.astype(np.float32)

    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)

    res = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)
    threshold = precision
    loc = np.where(res >= threshold)

    found_positions = list(zip(*loc[::-1]))

    # print("FOUND: {}".format(found_positions))
    return found_positions 
Example 19
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: utils.py    MIT License
def calibrate_camera():
    objp = np.zeros((6*9,3), np.float32)
    objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1,2)

    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    images = glob.glob('camera_cal/calibration*.jpg')

    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        img_size = (img.shape[1], img.shape[0])
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (9,6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
    return mtx, dist 
Example 20
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders4.py    MIT License
def preprocess(observation):
    observation = cv2.cvtColor(cv2.resize(observation, (84, 110)), cv2.COLOR_BGR2GRAY)
    observation = observation[26:110, :]
    ret, observation = cv2.threshold(observation, 1, 255, cv2.THRESH_BINARY)
    return np.reshape(observation, (84, 84, 1)) 
Example 21
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders3.py    MIT License
def preprocess(observation):
    observation = cv2.cvtColor(cv2.resize(observation, (84, 110)), cv2.COLOR_BGR2GRAY)
    observation = observation[26:110, :]
    ret, observation = cv2.threshold(observation, 1, 255, cv2.THRESH_BINARY)
    return np.reshape(observation, (84, 84, 1)) 
Example 22
Project: rl_3d   Author: avdmitry   File: agent_a3c.py    MIT License
def Preprocess(frame):
    if (channels == 1):
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = cv2.resize(frame, (resolution[1], resolution[0]))
    return np.reshape(frame, resolution) 
Example 23
Project: rl_3d   Author: avdmitry   File: agent_dqn.py    MIT License
def Preprocess(img):
    #cv2.imshow("frame-train", img)
    #cv2.waitKey(20)
    if (channels == 1):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, (resolution[1], resolution[0]))
    #cv2.imshow("frame-train", img)
    #cv2.waitKey(200)
    return np.reshape(img, resolution) 
Example 24
Project: pynvr   Author: JFF-Bohdan   File: motion_detection.py    BSD 3-Clause "New" or "Revised" License
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (11, 11), 0)

        if self.prevPrevFrame is None:
            self.prevPrevFrame = gray
            return False

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        cv.normalize(gray, gray, 0, 255, cv.NORM_MINMAX)

        frameDiff = self.diffImg(self.prevPrevFrame, self.prevFrame, gray)
        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        th1 = cv.dilate(th1, None, iterations=15)  # assign the results: dilate/erode return new images rather than modifying th1
        th1 = cv.erode(th1, None, iterations=1)

        delta_count = cv.countNonZero(th1)

        cv.imshow("frame_th1", th1)

        self.prevPrevFrame = self.prevFrame
        self.prevFrame = gray

        ret = delta_count > self.threshold

        if ret:
            self.updateMotionDetectionDts()

        return ret 
Example 25
Project: pynvr   Author: JFF-Bohdan   File: motion_detection.py    BSD 3-Clause "New" or "Revised" License
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (11, 11), 0)

        if (self.multiFrameDetection) and (self.prevPrevFrame is None):
            self.prevPrevFrame = gray
            return False

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        cv.normalize(gray, gray, 0, 255, cv.NORM_MINMAX)

        frameDiff = self.diffImg(self.prevPrevFrame, self.prevFrame, gray)
        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        th1 = cv.dilate(th1, None, iterations=8)
        th1 = cv.erode(th1, None, iterations=4)

        delta_count = cv.countNonZero(th1)

        if self.multiFrameDetection:
            self.prevPrevFrame = self.prevFrame

        self.prevFrame = gray
        if delta_count < self.threshold:
            return False

        if self.multiFrameDetection:
            self.prevPrevFrame = self.prevFrame

        self.prevFrame = gray
        self.updateMotionDetectionDts()
        return True 
Example 26
Project: MeteorTracker   Author: heidtn   File: find_events.py    MIT License
def find_motion_anomaly(self, previous_image, current_image):
        """
        Given two images, this function compares them to find potential
        meteors.

        Parameters
        ----------
        previous_image : cv2.Image
            The previous image to compare against
        current_image : cv2.Image
            The current image to compare against

        # TODO: the diff currently creates two keypoints for one image
        if the last frame has a meteor event in it
        """
        # Our operations on the frame come here
        gray1 = cv2.cvtColor(previous_image, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)

        # gaussian to filter out noise
        gray1 = cv2.GaussianBlur(gray1, (3, 3), 0)
        gray2 = cv2.GaussianBlur(gray2, (3, 3), 0)

        diff = cv2.absdiff(gray1, gray2)
        thresh = cv2.threshold(diff, 35, 255, cv2.THRESH_BINARY)[1]

        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        dilated_image = cv2.dilate(thresh, None, iterations=2)

        keypoints = self.detector.detect(dilated_image)

        im_with_keypoints = current_image.copy()
        self.draw_keypoints(im_with_keypoints, keypoints, (0, 0, 255))

        return keypoints, im_with_keypoints 
Example 27
Project: photo4D   Author: ArcticSnow   File: Image_utils.py    MIT License
def process_clahe(in_path, tileGridSize, grey=False, out_path="", clip_limit=2):
    """
    Apply the CLAHE (contrast limited adaptive histogram equalization) method to an image.
    For more information about CLAHE, see https://docs.opencv.org/3.1.0/d5/daf/tutorial_py_histogram_equalization.html

    Overwriting the input image will raise an error, as the initial image is needed to copy the metadata across.
    :param in_path: input image
    :param tileGridSize: size of the "blocks" used for local histogram equalization
    :param grey: if True, the image will be converted to grayscale
    :param out_path: output path; the folders must exist and the image extension must be valid.
            By default, the output is saved as input_path/input_name_clahe.JPG
    :param clip_limit: contrast limit, used to avoid amplifying too much noise
    """
    if out_path == "":
        out_path = ".".join(in_path.split(".")[:-1]) + "_clahe.JPG"

    # read input
    print("Processing CLAHE method on " + in_path.split("/")[-1])
    img = cv.imread(in_path)

    # convert color to gray
    if grey: img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

    # apply a median filter before clahe
    img = cv.medianBlur(img, 3)  
    
    # create clahe object
    clahe = cv.createCLAHE(clipLimit=clip_limit, tileGridSize=(tileGridSize, tileGridSize))  # CLAHE

    # apply CLAHE for each image channel, and then recreate the full image (only useful if gray==False)
    channels_ini = cv.split(img)
    channels_final = []
    for channel in channels_ini:
        # Apply CLAHE
        channels_final.append(clahe.apply(channel))
    img_final = cv.merge(channels_final)

    # save image and write metadata from initial file
    cv.imwrite(out_path, img_final)
    pyxif.transplant(in_path, out_path) 
Example 28
Project: photo4D   Author: ArcticSnow   File: Image_utils.py    MIT License
def blurr(filename,ksize = 3):
    image_bgr = cv.imread(filename)  # TODO: the conversion to gray should be done at this line
    # image_gray = cv.cvtColor(image_bgr, cv.COLOR_BGR2GRAY)
    return np.log(cv.Laplacian(image_bgr, cv.CV_64F,ksize=ksize).var()) 
Example 29
Project: adversarial_da_icvgip18   Author: abhinavagarwalla   File: gaze_cropper.py    MIT License
def crop_parallel(json_path):
  """ Center Crop and resize the eye image using extracted center from correposding JSON file
  If image already processed, it is skipped
  """
  jpg_path = json_path.replace('json', 'jpg')

  if not os.path.exists(jpg_path):
    return

  if os.path.exists(jpg_path.replace(".jpg", "_cropped.png").replace('imgs', folder_name)):
    return

  try:
    with open(json_path) as json_f:
      img = imread(jpg_path)
      j = json.loads(json_f.read())

      key = "interior_margin_2d"
      j[key] = process_json_list(j[key], img)

      x_min, x_max = int(min(j[key][:,0])), int(max(j[key][:,0]))
      y_min, y_max = int(min(j[key][:,1])), int(max(j[key][:,1]))

      x_center, y_center = (x_min + x_max) // 2, (y_min + y_max) // 2  # integer division keeps the slice indices below integers
      cropped_img = img[y_center-63: y_center+63, x_center-105:x_center+105]

      cropped_img = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2GRAY)
      cropped_img = imresize(cropped_img, (35, 55))
      cropped_img = cv2.normalize(cropped_img, np.zeros_like(cropped_img), alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
      img_path = jpg_path.replace(".jpg", "_cropped.png").replace('imgs', folder_name)

      save_array_to_grayscale_image(cropped_img, img_path)
      print('.')
  except:
    print("Exception raised! missing JSON file: ", json_path) 
Example 30
Project: HardRLWithYoutube   Author: MaxSobolMark   File: train_featurizer.py    MIT License
def preprocess_image(image, width, height):
    """ Changes size, makes image monochromatic """

    image = cv2.resize(image, (width, height))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = np.array(image, dtype=np.uint8)
    image = np.expand_dims(image, -1)
    return image 
Example 31
Project: python--   Author: Leezhen2014   File: BlurDetection.py    GNU General Public License v3.0
def preImgOps(self, imgName):
        """
        Image preprocessing
        :param imgName: name of the image
        :return: the grayscaled and resized image objects
        """
        strPath = self.strDir + imgName

        img = cv2.imread(strPath)  # read the image
        cv2.moveWindow("", 1000, 100)
        # cv2.imshow("original image", img)
        # preprocessing
        reImg = cv2.resize(img, (800, 900), interpolation=cv2.INTER_CUBIC)
        img2gray = cv2.cvtColor(reImg, cv2.COLOR_BGR2GRAY)  # convert the image to a single-channel grayscale image
        return img2gray, reImg 
Example 32
Project: finger_vein_recognition   Author: bmxbmx3   File: FVRT.py    GNU General Public License v3.0
def Denoise(self, OpenSide, ClosedSide, BlurSide):
        im = self.__im
        im = cv2.cvtColor(asarray(im).astype(uint8), cv2.COLOR_GRAY2BGR)
        openkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (OpenSide, OpenSide))
        opening = cv2.morphologyEx(array(im), cv2.MORPH_OPEN, openkernel)
        closedkernel = cv2.getStructuringElement(cv2.MORPH_RECT, (ClosedSide, ClosedSide))
        closed = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, closedkernel)
        result = cv2.medianBlur(closed, BlurSide)
        pix = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
        self.__im = fromarray(pix)

    # Image thinning (skeletonization)
Example 33
Project: Face-Recognition-for-Mobile-Robot   Author: gagolucasm   File: libs.py    MIT License
def read_data(path):
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.endswith('re')]
    images=[]
    labels=[]
    for image_path in image_paths:
        #print image_path
        #cv2.namedWindow('Cargando fotos ...')
        imagen=cv2.imread(image_path)
        imagen=cv2.cvtColor(imagen, cv2.COLOR_BGR2GRAY)
        imagenn=np.array(imagen,'uint8')
        #cv2.imshow("Cargando fotos ...",imagenn)
        #cv2.waitKey(40)
        nbr = (os.path.split(image_path)[1].split(".")[0])
        images.append(imagenn)
        labels.append(nbr)
    id=set(labels)
    #print id
    dictid={}
    pos=0
    idlabel=[]
    for i in id:
        dictid[i]=pos
        pos=pos+1
    for i in labels:
        idlabel.append(dictid[i])
    return images,idlabel,dictid 
Example 34
Project: retina-simulation   Author: duguyue100   File: dataset.py    MIT License
def get_image(image_path, color=True, size=True):
    """Get image by given image path.

    Parameters
    ----------
    image_path : string
        target image absolute path
    color : bool
        if color is True then return a color frame with BGR encoding.
        if color is False then return a grey scale frame.
    size : bool
        if size is True then return the size of the frame.
        if size is False then just return the frame.

    Returns
    -------
    frame : numpy.ndarray
        a frame that contains target image.
    size : tuple
        size of the frame (optional).
    """
    frame = cv2.imread(image_path)

    if color is False:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if size is True:
        return frame, frame.shape
    else:
        return frame 
Example 35
Project: retina-simulation   Author: duguyue100   File: dataset.py    MIT License
def get_video(vid_path, color=True, size=True):
    """Get video by given video path.

    Parameters
    ----------
    vid_path : string
        target video absolute path
    color : bool
        if color is True then return color frames with BGR encoding.
        if color is False then return grey scale frames.
    size : bool
        if size is True then return the size of the frame.
        if size is False then just return the frame.

    Returns
    -------
    frames : list
        a list of frames that contains the video
    size : tuple
        size of the frame (optional).
    """
    vid_container = FFMPEG_VideoReader(vid_path)

    frames = []
    for i in range(vid_container.nframes):
        frame_t = vid_container.read_frame()
        frame_t = cv2.cvtColor(frame_t, cv2.COLOR_RGB2BGR)
        if color is False:
            frame_t = cv2.cvtColor(frame_t, cv2.COLOR_BGR2GRAY)
        frames.append(frame_t)

    if size is True:
        return frames, frames[0].shape
    else:
        return frames 
Example 36
Project: cv_algorithms   Author: ulikoehler   File: TestThinning.py    Apache License 2.0
def __init__(self):
        """Read example image"""
        img = cv2.imread("examples/thinning-example.png")
        # Convert to grayscale
        self.img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        self.img_thresh = cv2.threshold(self.img, 180, 255, cv2.THRESH_BINARY)[1] 
Example 37
Project: senior_design_spring   Author: wodiesan   File: preprocessing_func.py    MIT License
def grayscale(frame):
    '''Convert a frame to grayscale.'''
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # assign the result: cvtColor returns a new image and does not modify frame in place
    return frame 
Example 38
Project: dataflow   Author: tensorpack   File: convert.py    Apache License 2.0
def __init__(self, keepdims=True, rgb=False):
        """
        Args:
            keepdims (bool): return image of shape [H, W, 1] instead of [H, W]
            rgb (bool): interpret input as RGB instead of the default BGR
        """
        mode = cv2.COLOR_RGB2GRAY if rgb else cv2.COLOR_BGR2GRAY
        super(Grayscale, self).__init__(mode, keepdims) 
Example 39
Project: dataflow   Author: tensorpack   File: imgproc.py    Apache License 2.0
def _augment(self, img, v):
        old_dtype = img.dtype
        m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
        grey = cv2.cvtColor(img, m)
        ret = img * v + (grey * (1 - v))[:, :, np.newaxis]
        if old_dtype == np.uint8:
            ret = np.clip(ret, 0, 255)
        return ret.astype(old_dtype) 
Example 40
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: dcgan_pytorch.py    MIT License
def get_image(paths):
    xs = []
    
    for info in paths:
        path, hf, vf = info
        x = cv2.imread(path)
        
        if channel == 1:
            x = cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
        x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
        x = x / 127.5 - 1
        if channel == 3:
            x = x[..., ::-1]

        if hf:
            x = x[:, ::-1]

        if vf:
            x = x[::-1]

        xs.append(x)
                
    xs = np.array(xs, dtype=np.float32)
    
    if channel == 1:
        xs = np.expand_dims(xs, axis=-1)
    
    xs = np.transpose(xs, (0,3,1,2))
    
    return xs


# train 
Example 41
Project: selfieexpression   Author: andrewjtimmons   File: face.py    MIT License
def _create_grayscale_image(self):
    """ Turn color image into grayscale."""
    return cv2.cvtColor(self.color_image, cv2.COLOR_BGR2GRAY) 
Example 42
Project: Super_TF   Author: Dhruv-Mohan   File: Dataset_writer_segmentation.py    MIT License
def getweight(self, mask_mat=None):
        #gray_mask = cv2.cvtColor(mask_mat, cv2.COLOR_BGR2GRAY)
        gray_mask=mask_mat
        ret, bin_mask = cv2.threshold(gray_mask,1,1,0)
        _, contours, _ = cv2.findContours(bin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        weights = np.zeros_like(bin_mask, dtype=np.float)

        weights = cv2.drawContours(weights, contours, -1, (1), 5)
        weights = cv2.GaussianBlur(weights, (41,41), 1000)
        weights = np.multiply(weights,10)+0.6
        return weights 
Example 43
Project: object-detection   Author: cristianpb   File: test_detection.py    MIT License
def test_cascade():
    image = cv2.imread("./imgs/image.jpeg")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    detector = Detector_Cascade()
    output = detector.prediction(image)
    df = detector.filter_prediction(output, image)
    image = detector.draw_boxes(image, df)

    print(df)
    #cv2.imwrite("./imgs/outputcv.jpg", image) 
Example 44
Project: object-detection   Author: cristianpb   File: motion.py    MIT License
def prediction(self, image):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.GaussianBlur(image, (21, 21), 0)
        if self.avg is None:
            self.avg = image.copy().astype(float)
        cv2.accumulateWeighted(image, self.avg, 0.5)
        frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
        thresh = cv2.threshold(
                frameDelta, DELTA_THRESH, 255,
                cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(
                thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        self.avg = image.copy().astype(float)
        return cnts 
Example 45
Project: ml_stuff   Author: chrisranderson   File: dsp.py    MIT License
def grayscale(image):
  return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 
Example 46
Project: ml_stuff   Author: chrisranderson   File: datasets.py    MIT License
def single_image(grayscale=False):
  image = cv2.imread('data/grace.png')

  if grayscale:
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

  return image 
Example 47
Project: surface-crack-detection   Author: Khoronus   File: cracktile.py    MIT License
def label_preprocessor(label):
    label = cv2.cvtColor(label, cv2.COLOR_BGR2GRAY)
    label = im.threshold(label, min_limit=127)
    return label