Python cv2.COLOR_BGR2GRAY Examples

The following are 29 code examples showing how to use cv2.COLOR_BGR2GRAY, the color-conversion code passed to cv2.cvtColor(). The examples are extracted from open source projects; the line above each example names its project, author, source file, and license.

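For orientation, here is a minimal, self-contained sketch of the typical call; the file names are placeholders:

import cv2

# OpenCV reads color images in BGR channel order by default.
img = cv2.imread("input.jpg")

# Convert the 3-channel BGR image to a single-channel grayscale image.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

cv2.imwrite("gray.jpg", gray)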

Example 1
Project: pedestrian-haar-based-detector   Author: felipecorrea   File: histcomparison.py    License: GNU General Public License v2.0
def main():
	imagePath = "img.jpg"
	
	img = cv2.imread(imagePath)
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	
	generate_histogram(gray)
	
	cv2.imwrite("before.jpg", gray)

	gray = cv2.equalizeHist(gray)
	
	generate_histogram(gray)
	
	cv2.imwrite("after.jpg",gray)
	
	return 0 
Example 2
Project: object-detection   Author: cristianpb   File: motion.py    License: MIT License
def prediction(self, image):
        # Grayscale and blur the frame to suppress noise before differencing
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = cv2.GaussianBlur(image, (21, 21), 0)
        # Initialize the running-average background model on the first frame
        if self.avg is None:
            self.avg = image.copy().astype(float)
        # Update the running average, then diff the current frame against it
        cv2.accumulateWeighted(image, self.avg, 0.5)
        frameDelta = cv2.absdiff(image, cv2.convertScaleAbs(self.avg))
        # Threshold the delta image and dilate to fill in holes
        thresh = cv2.threshold(
                frameDelta, DELTA_THRESH, 255,
                cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        # Extract the outer contours of the moving regions
        cnts = cv2.findContours(
                thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        self.avg = image.copy().astype(float)
        return cnts 
Example 3
Project: derplearning   Author: notkarol   File: calibrate_camera.py    License: MIT License
def live_undistort(camera, camera_matrix, distortion_coefficients):
    """ Using a given calibration matrix, display the distorted, undistorted, and cropped frame"""
    scaled_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, distortion_coefficients, camera.size, 1, camera.size
    )
    while True:
        ret, frame = camera.cap.read()
        assert ret
        distorted_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        undistorted_frame = cv2.undistort(
            distorted_frame, camera_matrix, distortion_coefficients, None, scaled_camera_matrix,
        )
        roi_x, roi_y, roi_w, roi_h = roi
        cropped_frame = undistorted_frame[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w]
        cv2.imshow("distorted %s" % (distorted_frame.shape,), distorted_frame)
        cv2.imshow("undistorted %s" % (undistorted_frame.shape,), undistorted_frame)
        cv2.imshow("cropped %s" % (cropped_frame.shape,), cropped_frame)
        cv2.waitKey(10) 
Example 4
Project: pedestrian-haar-based-detector   Author: felipecorrea   File: detect.py    License: GNU General Public License v2.0
def main():
	#IMG PATHS
	imagePath = "test3.jpg"
	cascPath = "cascades/haarcascade_pedestrian.xml"

	pplCascade = cv2.CascadeClassifier(cascPath)
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	
	gray = normalize_grayimage(gray)
	 
	pedestrians = pplCascade.detectMultiScale(
		gray,
		scaleFactor=1.2,
		minNeighbors=10,
		minSize=(32,96),
		flags=cv2.CASCADE_SCALE_IMAGE  # replaces the OpenCV 2.x flag cv2.cv.CV_HAAR_SCALE_IMAGE
	)

	print "Found {0} ppl!".format(len(pedestrians))

	#Draw a rectangle around the detected objects
	for (x, y, w, h) in pedestrians:
		cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)

	cv2.imwrite("saida.jpg", image)
	cv2.imshow("Ppl found", image)
	cv2.waitKey(0)
	
	return 0 
Example 5
Project: Advanced_Lane_Lines   Author: ChengZhongShen   File: camera_calibration.py    License: MIT License
def test():
	"""
	read the pickle file from disk and undistort an image
	show the original and the undistorted image
	"""
	print("Reading the pickle file...")
	pickle_file = open("./camera_cal.p", "rb")
	dist_pickle = pickle.load(pickle_file)
	mtx = dist_pickle["mtx"]  
	dist = dist_pickle["dist"]
	pickle_file.close()

	print("Reading the sample image...")
	img = cv2.imread('corners_founded/corners_found13.jpg')
	img_size = (img.shape[1],img.shape[0])
	dst = cv2.undistort(img, mtx, dist, None, mtx)

	# dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
	# Visualize undistortion
	print("Visulize the result...")
	f, (ax1,ax2) = plt.subplots(1,2, figsize=(20,10))
	ax1.imshow(img), ax1.set_title('Original Image', fontsize=15)
	ax2.imshow(dst), ax2.set_title('Undistorted Image', fontsize=15)
	plt.show() 
Example 6
Project: PiCamNN   Author: PiSimo   File: picam.py    License: MIT License
def movement(mat_1,mat_2):
    mat_1_gray     = cv2.cvtColor(mat_1.copy(),cv2.COLOR_BGR2GRAY)
    mat_1_gray     = cv2.blur(mat_1_gray,(blur1,blur1))
    _,mat_1_gray   = cv2.threshold(mat_1_gray,100,255,0)
    mat_2_gray     = cv2.cvtColor(mat_2.copy(),cv2.COLOR_BGR2GRAY)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur1,blur1))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,100,255,0)
    mat_2_gray     = cv2.bitwise_xor(mat_1_gray,mat_2_gray)
    mat_2_gray     = cv2.blur(mat_2_gray,(blur2,blur2))
    _,mat_2_gray   = cv2.threshold(mat_2_gray,70,255,0)
    mat_2_gray     = cv2.erode(mat_2_gray,np.ones((erodeval,erodeval)))
    mat_2_gray     = cv2.dilate(mat_2_gray,np.ones((4,4)))
    _, contours,__ = cv2.findContours(mat_2_gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0: return True  # movement detected
    return False                       # no movement


#Pedestrian Recognition Thread 
Example 7
Project: face-search   Author: lehgtrung   File: face_detect.py    License: MIT License
def detect_face(img_path, cc_path='../files/haarcascade_frontalface_default.xml'):
    """
    Detect the face from the image, return colored face
    """

    cc = cv2.CascadeClassifier(os.path.abspath(cc_path))
    img_path = os.path.abspath(img_path)
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = cc.detectMultiScale(gray, 1.3, 5)
    roi_color = None

    if len(faces) == 0:
        logging.exception(img_path + ': No face found')
    else:
        x,y,w,h = faces[0]
        _h, _w = compute_size(h, w)
        roi_color = img[y - _h:y + h + _h, x - _w:x + w + _w]

    return roi_color 
Example 8
Project: dataflow   Author: tensorpack   File: imgproc.py    License: Apache License 2.0
def _augment(self, img, r):
        old_dtype = img.dtype

        if img.ndim == 3:
            if self.rgb is not None:
                m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
                grey = cv2.cvtColor(img.astype('float32'), m)
                mean = np.mean(grey)
            else:
                mean = np.mean(img, axis=(0, 1), keepdims=True)
        else:
            mean = np.mean(img)

        img = img * r + mean * (1 - r)
        if self.clip or old_dtype == np.uint8:
            img = np.clip(img, 0, 255)
        return img.astype(old_dtype) 
Example 9
Project: ConvolutionalEmotion   Author: Zebreu   File: emotionclassification.py    License: MIT License
def getPeakFeatures():
    net = DecafNet()

    features = numpy.zeros((number_sequences,feature_length))
    labels = numpy.zeros((number_sequences,1))
    counter = 0
    # Maybe sort them
    for participant in os.listdir(os.path.join(data_dir,image_dir)):
        for sequence in os.listdir(os.path.join(data_dir,image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(os.listdir(os.path.join(data_dir,image_dir, participant,sequence)))
                image_file = image_files[-1]
                print(counter, image_file)
                imarray = cv2.imread(os.path.join(data_dir,image_dir, participant,sequence,image_file))
                imarray = cv2.cvtColor(imarray,cv2.COLOR_BGR2GRAY)
                scores = net.classify(imarray, center_only=True)
                features[counter] = net.feature(feature_level)#.flatten()
                label_file = open(os.path.join(data_dir,label_dir, participant,sequence,image_file[:-4]+"_emotion.txt"))
                labels[counter] = eval(label_file.read())
                label_file.close()
                counter += 1

    numpy.save("featuresPeak5",features)
    numpy.save("labelsPeak5",labels) 
Example 10
Project: object-detection   Author: cristianpb   File: test_detection.py    License: MIT License
def test_motion():
    image = cv2.imread("./imgs/image.jpeg")
    print(image.shape)

    detector = Detector_Motion()

    image2 = cv2.imread("./imgs/image_box.jpg")
    print(image2.shape)
    assert image.shape == image2.shape
    image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    image2 = cv2.GaussianBlur(image2, (21, 21), 0)
    detector.avg = image2.astype(float)

    output = detector.prediction(image)
    df = detector.filter_prediction(output, image)
    image = detector.draw_boxes(image, df)
    print(df)
    assert df.shape[0] == 1

    cv2.imwrite("./imgs/outputcv.jpg", image) 
Example 11
Project: derplearning   Author: notkarol   File: calibrate_camera.py    License: MIT License
def live_calibrate(camera, pattern_shape, n_matches_needed):
    """ Find calibration parameters as the user moves a checkerboard in front of the camera """
    print("Looking for %s checkerboard" % (pattern_shape,))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    example_3d = np.zeros((pattern_shape[0] * pattern_shape[1], 3), np.float32)
    example_3d[:, :2] = np.mgrid[0 : pattern_shape[1], 0 : pattern_shape[0]].T.reshape(-1, 2)
    points_3d = []
    points_2d = []
    while len(points_3d) < n_matches_needed:
        ret, frame = camera.cap.read()
        assert ret
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findCirclesGrid(
            gray_frame, pattern_shape, flags=cv2.CALIB_CB_ASYMMETRIC_GRID
        )
        cv2.imshow("camera", frame)
        if ret:
            points_3d.append(example_3d.copy())
            points_2d.append(corners)
            print("Found calibration %i of %i" % (len(points_3d), n_matches_needed))
            drawn_frame = cv2.drawChessboardCorners(frame, pattern_shape, corners, ret)
            cv2.imshow("calib", drawn_frame)
        cv2.waitKey(10)
    ret, camera_matrix, distortion_coefficients, _, _ = cv2.calibrateCamera(
        points_3d, points_2d, gray_frame.shape[::-1], None, None
    )
    assert ret
    return camera_matrix, distortion_coefficients 
Example 12
Project: 3D-HourGlass-Network   Author: Naman-ntc   File: my.py    License: MIT License
def test_heatmaps(heatmaps,img,i):
    heatmaps = heatmaps.numpy()
    heatmaps = heatmaps[:, :64, :]
    heatmaps = heatmaps.transpose(1, 2, 0)
    print('heatmap inside shape is', heatmaps.shape)
    img = img.numpy()
    img = img.transpose(1, 2, 0)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    heatmaps = cv2.resize(heatmaps, (0, 0), fx=4, fy=4)
    for j in range(0, 16):
        heatmap = heatmaps[:, :, j]
        heatmap = heatmap.reshape((256, 256, 1))
        heatmapimg = np.array(heatmap * 255, dtype=np.uint8)
        heatmap = cv2.applyColorMap(heatmapimg, cv2.COLORMAP_JET)
        heatmap = heatmap / 255
        plt.imshow(img)
        plt.imshow(heatmap, alpha=0.5)
        plt.show()
        #plt.savefig('hmtestpadh36'+str(i)+js[j]+'.png') 
Example 13
def search(self, query_keypoints, query_descriptors):
		# Initialize the dictionary of results
		results = {}

		# Loop over the book cover images
		for path in self.cover_paths:
			# Load the cover image, convert it to grayscale, and extract keypoints and descriptors
			cover = cv2.imread(path)
			gray = cv2.cvtColor(cover, cv2.COLOR_BGR2GRAY)
			(keypoints, descriptors) = self.descriptor.describe(gray)

			# Determine the number of matched, inlier keypoints, and update the results
			score = self.match(query_keypoints, query_descriptors, keypoints, descriptors)
			results[path] = score

		# If matches were found, sort them
		if len(results) > 0:
			results = sorted([(v, k) for (k, v) in results.items() if v > 0], reverse=True)

		# Return the results
		return results 
Example 14
Project: VTuber_Unity   Author: kwea123   File: demo.py    License: MIT License
def get_face(detector, image, cpu=False):
    if cpu:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        try:
            box = detector(image)[0]
            x1 = box.left()
            y1 = box.top()
            x2 = box.right()
            y2 = box.bottom()
            return [x1, y1, x2, y2]
        except IndexError:  # detector found no face
            return None
    else:
        image = cv2.resize(image, None, fx=0.5, fy=0.5)
        box = detector.detect_from_image(image)[0]
        if box is None:
            return None
        return (2*box[:4]).astype(int) 
Example 15
Project: face-search   Author: lehgtrung   File: face.py    License: MIT License
def detect_face(img_path, cc_path='../files/haarcascade_frontalface_default.xml'):
    """
    Detect the face from the image, return colored face
    """

    cc = cv2.CascadeClassifier(os.path.abspath(cc_path))
    img_path = os.path.abspath(img_path)
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = cc.detectMultiScale(gray, 1.3, 5)
    roi_color = None

    if len(faces) == 0:
        logging.exception(img_path + ': No face found')
    else:
        x,y,w,h = faces[0]
        _h, _w = compute_size(h, w)
        roi_color = img[y - _h:y + h + _h, x - _w:x + w + _w]

    return roi_color 
Example 16
Project: ScanSSD   Author: MaliParag   File: stitch_patches_page.py    License: MIT License
def find_blank_rows_h(image):

    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    im_bw = np.zeros(gray_image.shape)
    im_bw[gray_image > 127] = 0
    im_bw[gray_image <= 127] = 1

    row_sum = np.sum(im_bw, axis=1)

    cum_sum = np.zeros(row_sum.shape)

    cum_sum[0] = row_sum[0]

    for i, row in enumerate(row_sum[1:]):
        cum_sum[i + 1] = cum_sum[i] + row

    blank_rows = []
    for i in range(len(cum_sum)):
        if is_blank(cum_sum, i):
            blank_rows.append(i)

    return blank_rows

# check n last rows 
Example 17
Project: OpenCV-Python-Tutorial   Author: makelove   File: coherence.py    License: MIT License
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
    h, w = img.shape[:2]

    for i in range(iter_n):
        print(i)

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3)
        eigen = eigen.reshape(h, w, 3, 2)  # [[e1, e2], v1, v2]
        x, y = eigen[:,:,1,0], eigen[:,:,1,1]

        gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma)
        gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma)
        gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma)
        gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy
        m = gvv < 0

        ero = cv2.erode(img, None)
        dil = cv2.dilate(img, None)
        img1 = ero
        img1[m] = dil[m]
        img = np.uint8(img*(1.0 - blend) + img1*blend)
    print('done')
    return img 
Example 18
Project: ultra_secret_scripts   Author: CharlesDankoff   File: image_search.py    License: GNU General Public License v3.0
def search_image_in_image(small_image, large_image, precision=0.95):
    template = small_image.astype(np.float32)
    img_rgb = large_image.astype(np.float32)

    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)

    res = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)
    threshold = precision
    loc = np.where(res >= threshold)

    found_positions = list(zip(*loc[::-1]))

    # print("FOUND: {}".format(found_positions))
    return found_positions 
Example 19
Project: pynvr   Author: JFF-Bohdan   File: motion_detection.py    License: BSD 3-Clause "New" or "Revised" License
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (21, 21), 0)

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        frameDiff = cv.absdiff(gray, self.prevFrame)

        # kernel = np.ones((5, 5), np.uint8)

        opening = cv.morphologyEx(frameDiff, cv.MORPH_OPEN, None)  # noqa
        closing = cv.morphologyEx(frameDiff, cv.MORPH_CLOSE, None)  # noqa

        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        height = np.size(th1, 0)
        width = np.size(th1, 1)

        nb = cv.countNonZero(th1)

        avg = (nb * 100) / (height * width)  # percentage of changed (non-zero) pixels in the frame

        self.prevFrame = gray

        # cv.DrawContours(currentframe, self.currentcontours, (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
        # cv.imshow("frame", current_frame)

        ret = avg > self.threshold   # If over the ceiling trigger the alarm

        if ret:
            self.updateMotionDetectionDts()

        return ret 
Example 20
Project: pynvr   Author: JFF-Bohdan   File: motion_detection.py    License: BSD 3-Clause "New" or "Revised" License
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (11, 11), 0)

        if self.prevPrevFrame is None:
            self.prevPrevFrame = gray
            return False

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        cv.normalize(gray, gray, 0, 255, cv.NORM_MINMAX)

        frameDiff = self.diffImg(self.prevPrevFrame, self.prevFrame, gray)
        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        th1 = cv.dilate(th1, None, iterations=15)  # keep the returned image; dilate is not in-place
        th1 = cv.erode(th1, None, iterations=1)

        delta_count = cv.countNonZero(th1)

        cv.imshow("frame_th1", th1)

        self.prevPrevFrame = self.prevFrame
        self.prevFrame = gray

        ret = delta_count > self.threshold

        if ret:
            self.updateMotionDetectionDts()

        return ret 
Example 21
Project: pynvr   Author: JFF-Bohdan   File: motion_detection.py    License: BSD 3-Clause "New" or "Revised" License
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (11, 11), 0)

        if (self.multiFrameDetection) and (self.prevPrevFrame is None):
            self.prevPrevFrame = gray
            return False

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        cv.normalize(gray, gray, 0, 255, cv.NORM_MINMAX)

        frameDiff = self.diffImg(self.prevPrevFrame, self.prevFrame, gray)
        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        th1 = cv.dilate(th1, None, iterations=8)
        th1 = cv.erode(th1, None, iterations=4)

        delta_count = cv.countNonZero(th1)

        if self.multiFrameDetection:
            self.prevPrevFrame = self.prevFrame

        self.prevFrame = gray
        if delta_count < self.threshold:
            return False

        self.updateMotionDetectionDts()
        return True 
Example 22
Project: display_ocr   Author: arturaugusto   File: digital_display_ocr.py    License: GNU General Public License v2.0
def cnvt_edged_image(img_arr, should_save=False):
  # ratio = img_arr.shape[0] / 300.0
  image = imutils.resize(img_arr,height=300)
  gray_image = cv2.bilateralFilter(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY),11, 17, 17)
  edged_image = cv2.Canny(gray_image, 30, 200)

  if should_save:
    cv2.imwrite('cntr_ocr.jpg', edged_image)  # imwrite needs the image as its second argument

  return edged_image 
Example 23
Project: display_ocr   Author: arturaugusto   File: digital_display_ocr.py    License: GNU General Public License v2.0
def process_image(orig_image_arr):
  ratio = orig_image_arr.shape[0] / 300.0

  display_image_arr = normalize_contrs(orig_image_arr,crop_display(orig_image_arr))
  #display image is now segmented.
  gry_disp_arr = cv2.cvtColor(display_image_arr, cv2.COLOR_BGR2GRAY)
  gry_disp_arr = exposure.rescale_intensity(gry_disp_arr, out_range= (0,255))

  #thresholding
  ret, thresh = cv2.threshold(gry_disp_arr,127,255,cv2.THRESH_BINARY)
  return thresh 
Example 24
Project: OpenCV-Computer-Vision-Projects-with-Python   Author: PacktPublishing   File: saliency.py    License: MIT License
def calc_magnitude_spectrum(self):
        """Plots the magnitude spectrum

            This method calculates the magnitude spectrum of the image passed
            to the class constructor.

            :returns: magnitude spectrum
        """
        # convert the frame to grayscale if necessary
        if len(self.frame_orig.shape) > 2:
            frame = cv2.cvtColor(self.frame_orig, cv2.COLOR_BGR2GRAY)
        else:
            frame = self.frame_orig

        # expand the image to an optimal size for FFT
        rows, cols = self.frame_orig.shape[:2]
        nrows = cv2.getOptimalDFTSize(rows)
        ncols = cv2.getOptimalDFTSize(cols)
        # copyMakeBorder takes (src, top, bottom, left, right, ...): pad rows
        # at the bottom and columns on the right
        frame = cv2.copyMakeBorder(frame, 0, nrows - rows, 0, ncols - cols,
                                   cv2.BORDER_CONSTANT, value=0)

        # do FFT and get log-spectrum
        img_dft = np.fft.fft2(frame)
        spectrum = np.log10(np.abs(np.fft.fftshift(img_dft)))

        # return for plotting
        return 255*spectrum/np.max(spectrum) 
Example 25
Project: HardRLWithYoutube   Author: MaxSobolMark   File: train_featurizer.py    License: MIT License
def preprocess_image(image, width, height):
    """ Changes size, makes image monochromatic """

    image = cv2.resize(image, (width, height))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = np.array(image, dtype=np.uint8)
    image = np.expand_dims(image, -1)
    return image 
Example 26
Project: python--   Author: Leezhen2014   File: BlurDetection.py    License: GNU General Public License v3.0
def preImgOps(self, imgName):
        """
        图像的预处理操作
        :param imgName: 图像的而明朝
        :return: 灰度化和resize之后的图片对象
        """
        strPath = self.strDir + imgName

        img = cv2.imread(strPath)  # 读取图片
        cv2.moveWindow("", 1000, 100)
        # cv2.imshow("原始图", img)
        # 预处理操作
        reImg = cv2.resize(img, (800, 900), interpolation=cv2.INTER_CUBIC)  #
        img2gray = cv2.cvtColor(reImg, cv2.COLOR_BGR2GRAY)  # 将图片压缩为单通道的灰度图
        return img2gray, reImg 
Example 27
Project: dataflow   Author: tensorpack   File: convert.py    License: Apache License 2.0
def __init__(self, keepdims=True, rgb=False, keepshape=False):
        """
        Args:
            keepdims (bool): return image of shape [H, W, 1] instead of [H, W]
            rgb (bool): interpret input as RGB instead of the default BGR
            keepshape (bool): whether to duplicate the gray image into 3 channels
                so the result has the same shape as input.
        """
        mode = cv2.COLOR_RGB2GRAY if rgb else cv2.COLOR_BGR2GRAY
        if keepshape:
            assert keepdims, "keepdims must be True when keepshape==True"
        super(Grayscale, self).__init__(mode, keepdims)
        self.keepshape = keepshape
        self.rgb = rgb 
Example 28
Project: dataflow   Author: tensorpack   File: imgproc.py    License: Apache License 2.0
def _augment(self, img, v):
        old_dtype = img.dtype
        m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
        grey = cv2.cvtColor(img, m)
        ret = img * v + (grey * (1 - v))[:, :, np.newaxis]
        if self.clip or old_dtype == np.uint8:
            ret = np.clip(ret, 0, 255)
        return ret.astype(old_dtype) 
Example 29
Project: ConvolutionalEmotion   Author: Zebreu   File: emotionclassification.py    License: MIT License
def getPeakFaceFeatures():
    net = DecafNet()
    cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')

    features = numpy.zeros((number_sequences,feature_length))
    labels = numpy.zeros((number_sequences,1))
    counter = 0
    # Maybe sort them
    for participant in os.listdir(os.path.join(data_dir,image_dir)):
        for sequence in os.listdir(os.path.join(data_dir,image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(os.listdir(os.path.join(data_dir,image_dir, participant,sequence)))
                image_file = image_files[-1]
                print(counter, image_file)
                imarray = cv2.imread(os.path.join(data_dir,image_dir, participant,sequence,image_file))
                imarray = cv2.cvtColor(imarray,cv2.COLOR_BGR2GRAY)
                rects = cascade.detectMultiScale(imarray, 1.3, 3, cv2.CASCADE_SCALE_IMAGE, (150,150))  # flag replaces the OpenCV 2.x cv2.cv.CV_HAAR_SCALE_IMAGE
                if len(rects) > 0:
                    facerect=rects[0]
                    imarray = imarray[facerect[1]:facerect[1]+facerect[3], facerect[0]:facerect[0]+facerect[2]]
                scores = net.classify(imarray, center_only=True)
                features[counter] = net.feature(feature_level).flatten()
                label_file = open(os.path.join(data_dir,label_dir, participant,sequence,image_file[:-4]+"_emotion.txt"))
                labels[counter] = eval(label_file.read())
                label_file.close()
                counter += 1

    numpy.save("featuresPeakFace5",features)
    numpy.save("labelsPeakFace5",labels)