Python cv2.cvtColor() Examples

The following are code examples showing how to use cv2.cvtColor(). They are extracted from open source Python projects.
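
As a quick orientation before the examples: cv2.cvtColor(src, code) converts an image between color spaces, and cv2.imread() returns images in BGR channel order, so most conversions start from a COLOR_BGR2* code. A minimal sketch (the file name 'example.jpg' is a placeholder):

import cv2

img = cv2.imread('example.jpg')               # loaded in BGR order (OpenCV default)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # 3 channels -> 1 channel
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)    # hue/saturation/value
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # e.g. before displaying with matplotlib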

Example 1
Project: Speedy-TSLSR   Author: talhaHavadar   File: tslsr.py    (license)
def __bound_contours(roi):
    """
        Returns a modified copy of roi (non-destructive) and the rectangles found by the algorithm.
        @roi region of interest to find contours
        @return (roi, rects)
    """

    roi_copy = roi.copy()
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # filter black color
    mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)

    # mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))

    # Find contours for detected portion of the image
    im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5] # get largest five contour area
    rects = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:
            # if height is enough
            # create rectangle for bounding
            rect = (x, y, w, h)
            rects.append(rect)
            cv2.rectangle(roi_copy, (x, y), (x+w, y+h), (0, 255, 0), 1)

    return (roi_copy, rects) 
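
A note on the cv2.findContours() call above: the three-value unpacking matches OpenCV 3.x, while OpenCV 2.4 and 4.x return only (contours, hierarchy). A version-agnostic sketch:

import cv2

def find_contours_compat(mask):
    # OpenCV 3.x returns (image, contours, hierarchy); 2.4 and 4.x return (contours, hierarchy)
    result = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours, hierarchy = result[-2:]
    return contours, hierarchy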
Example 2
Project: SelfDrivingCar   Author: aguijarro   File: calibration_camera.py    (license)
def get_points():

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (7,5,0)
    objp = np.zeros((6*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1 , 2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8,6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8,6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints 
Example 3
Project: esys-pbi   Author: fsxfreak   File: vis_light_points.py    (license)
def update(self,frame,events):
        falloff = self.falloff

        img = frame.img
        pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]

        overlay = np.ones(img.shape[:-1],dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in pts:
            try:
                overlay[int(gaze_point[1]),int(gaze_point[0])] = 0
            except:
                pass

        out = cv2.distanceTransform(overlay,cv2.DIST_L2, 5)

        # fix for opencv binding inconsistency
        if type(out)==tuple:
            out = out[0]

        overlay =  1/(out/falloff+1)

        img[:] = np.multiply(img, cv2.cvtColor(overlay,cv2.COLOR_GRAY2RGB), casting="unsafe") 
Example 4
Project: speed   Author: keon   File: preprocess.py    (license)
def optical_flow(one, two):
    """
    method taken from (https://chatbotslife.com/autonomous-vehicle-speed-estimation-from-dashboard-cam-ca96c24120e4)
    """
    one_g = cv2.cvtColor(one, cv2.COLOR_RGB2GRAY)
    two_g = cv2.cvtColor(two, cv2.COLOR_RGB2GRAY)
    hsv = np.zeros((120, 320, 3))
    # set saturation
    hsv[:,:,1] = cv2.cvtColor(two, cv2.COLOR_RGB2HSV)[:,:,1]
    # obtain dense optical flow parameters
    flow = cv2.calcOpticalFlowFarneback(one_g, two_g, flow=None,
                                        pyr_scale=0.5, levels=1, winsize=15,
                                        iterations=2,
                                        poly_n=5, poly_sigma=1.1, flags=0)
    # convert from cartesian to polar
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    # hue corresponds to direction
    hsv[:,:,0] = ang * (180/ np.pi / 2)
    # value corresponds to magnitude
    hsv[:,:,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    # convert the HSV image to float32 (cvtColor does not accept float64)
    hsv = np.asarray(hsv, dtype=np.float32)
    rgb_flow = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
    return rgb_flow 
Example 5
Project: Deep360Pilot-optical-flow   Author: yenchenlin   File: flo2img.py    (license)
def convert_wrapper(path, outpath, Debug=False):
    for filename in sorted(os.listdir(path)):
        if filename.endswith('.flo'):
            filename = filename.replace('.flo','')

            flow = read_flow(path, filename)
            flow_img = convert_flow(flow, 2.0)

            # NOTE: Change from BGR (OpenCV format) to RGB (Matlab format) to fit Matlab output
            flow_img = cv2.cvtColor(flow_img, cv2.COLOR_BGR2RGB)

            #print "Saving {}.png with shape: {}".format(filename, flow_img.shape)
            cv2.imwrite(outpath + filename + '.png', flow_img)

            if Debug:
                ret = imchecker(outpath + filename)



# Sanity check and comparison when a Matlab-version image is available
Example 6
Project: Mini-Projects   Author: gaborvecsei   File: Capture_Img_To_Drive.py    (license)
def CaptureImage():
	imageName = 'DontCare.jpg' #Just a random string
	cap = cv2.VideoCapture(0)
	while(True):
	    # Capture frame-by-frame
	    ret, frame = cap.read()

	    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # to capture the image in monochrome
	    rgbImage = frame # note: OpenCV frames are in BGR channel order

	    # Display the resulting frame
	    cv2.imshow('Webcam',rgbImage)
	    #Wait to press 'q' key for capturing
	    if cv2.waitKey(1) & 0xFF == ord('q'):
	        #Set the image name to the date it was captured
	        imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
	        #Save the image
	        cv2.imwrite(imageName, rgbImage)
	        break
	# When everything done, release the capture
	cap.release()
	cv2.destroyAllWindows()
	#Returns the captured image's name
	return imageName 
Example 7
Project: facial_emotion_recognition   Author: adamaulia   File: image_test.py    (license)
def test_image(addr):
    target = ['angry','disgust','fear','happy','sad','surprise','neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX
    
    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,scaleFactor=1.1)
    
    for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2,5)
            face_crop = im[y:y+h,x:x+w]
            face_crop = cv2.resize(face_crop,(48,48))
            face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
            face_crop = face_crop.astype('float32')/255
            face_crop = np.asarray(face_crop)
            face_crop = face_crop.reshape(1, 1,face_crop.shape[0],face_crop.shape[1])
            result = target[np.argmax(model.predict(face_crop))]
            cv2.putText(im,result,(x,y), font, 1, (200,0,0), 3, cv2.LINE_AA)
            
    cv2.imshow('result', im)
    cv2.imwrite('result.jpg',im)
    cv2.waitKey(0) 
Example 8
Project: pyku   Author: dubvulture   File: sudoku_steps.py    (GNU General Public License v3.0)
def __init__(self, filename, folder=None, classifier=None):
        """
        :param filename: image with sudoku
        :param folder: folder where to save debug images
        :param classifier: digit classifier
        """
        self.filename = os.path.basename(filename)
        image = cv2.imread(filename)
        self.image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        self.folder = folder or FOLDER
        os.mkdir(os.path.join(self.folder, 'debug/'))
        self.classifier = classifier or DigitClassifier()
        # Default initial values
        self.perspective = False
        self.debug = True
        self.counter = 0
        self.step = -1 
Example 9
Project: Easitter   Author: TomoyaFujita2016   File: byFaceDetection.py    (license)
def detectFace(image):
    cascadePath = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
    FACE_SHAPE = 0.45
    result = image.copy()
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(cascadePath)
    faceRect = cascade.detectMultiScale(imageGray, scaleFactor=1.1, minNeighbors=1, minSize=(1,1))
    
    if len(faceRect) <= 0:
        return False
    else:
        # confirm face
        imageSize = image.shape[0] * image.shape[1]
        #print("d1")
        filteredFaceRects = []
        for faceR in faceRect:
            faceSize = faceR[2]*faceR[3]
            if FACE_SHAPE > min(faceR[2], faceR[3])/max(faceR[2], faceR[3]):
                break
            filteredFaceRects.append(faceR)
        
        if len(filteredFaceRects) > 0:
            return True
        else:
            return False 
Example 10
Project: camera_calibration_frontend   Author: groundmelon   File: calibrator.py    (license)
def _get_corners(img, board, refine = True, checkerboard_flags=0):
    """
    Get corners for a particular chessboard for an image
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH |
                                              cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
    if not ok:
        return (ok, corners)

    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]):
        ok = False

    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row*board.n_rows + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row*board.n_rows + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
        radius = int(math.ceil(min_distance * 0.5))
        cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1),
                                      ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 ))

    return (ok, corners) 
Example 11
Project: SelfDrivingCar   Author: aguijarro   File: calibration_camera.py    (license)
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret == True:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                                     [img_size[0]-offset, img_size[1]-offset],
                                     [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M 
Example 12
Project: chainer-cyclegan   Author: Aixile   File: lsun_bedroom_line2color.py    (license)
def get_example(self, i):
        id = self.all_keys[i]
        img = None
        val = self.db.get(id.encode())

        img = cv2.imdecode(np.fromstring(val, dtype=np.uint8), 1)
        img = self.do_augmentation(img)

        img_color = img
        img_color = self.preprocess_image(img_color)

        img_line = XDoG(img)
        img_line = cv2.cvtColor(img_line, cv2.COLOR_GRAY2RGB)
        #if img_line.ndim == 2:
        #    img_line = img_line[:, :, np.newaxis]
        img_line = self.preprocess_image(img_line)

        return img_line, img_color 
Example 13
Project: specularity-removal   Author: gmichaeljaison   File: main.py    (GNU General Public License v3.0)
def _resolve_spec(im1, im2):
    im = im1.copy()

    img1 = cv.cvtColor(im1, cv.COLOR_BGR2GRAY)
    img2 = cv.cvtColor(im2, cv.COLOR_BGR2GRAY)

    # Best pixel selection criteria
    #   1. Pixel difference should be more than 20 (just an experimental value, feel free to change it)
    #   2. Best pixel should have less intensity
    #   3. Pixel should not be pure black (just an additional constraint
    #       to remove the black background created by warping)
    mask = np.logical_and((img1 - img2) > DIFF_THRESHOLD, img1 > img2)
    mask = np.logical_and(mask, img2 != 0)

    im[mask] = im2[mask]
    return im 
Example 14
Project: hardware_demo   Author: llSourcell   File: mypivideostream.py    (license)
def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # convert the image to grayscale, load the face cascade detector,
            # and detect faces in the image
            # Using data trained from here:
            #   http://www.pyimagesearch.com/2015/05/11/creating-a-face-detection-api-with-python-and-opencv-in-just-5-minutes/
            image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            detector = cv2.CascadeClassifier(FACE_DETECTOR_PATH)
            rects = detector.detectMultiScale(image, scaleFactor=1.1, minNeighbors=5,minSize=(30, 30), flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
            
            # construct a list of bounding boxes from the detection
            self.rects = [(int(x), int(y), int(x + w), int(y + h)) for (x, y, w, h) in rects]

            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return 
Example 15
Project: FaceDetected   Author: ttchin   File: DetectFaces.py    (license)
def detect_faces_from_picture(pic_file_path):
    print(">>> Let me check this picture: " + pic_file_path)
    frame = cv2.imread(pic_file_path)

    # Detect faces in the frame
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)
    
    # Match the detected faces with the trained model
    if len(faces) > 0:
        print(">>> Someone is in the picture!")
        for (x, y, w, h) in faces:
            face = frame[y:y+h, x:x+w]
            result = model.predict(face)
            for index, name in model.getTrainCfg():
                if result == index:
                    print(">>> Aha, it's %s!" % name) 
Example 16
Project: pyku   Author: dubvulture   File: sudoku_steps.py    (GNU General Public License v3.0)
def extract_corners(self, image):
        """
        Find the 4 corners of a binary image
        :param image: binary image
        :return: 4 main vertices or None
        """
        cnts, _ = cv2.findContours(image.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cnt = cnts[0]
        _, _, h, w = cv2.boundingRect(cnt)
        epsilon = min(h, w) * 0.5
        o_vertices = cv2.approxPolyDP(cnt, epsilon, True)
        vertices = cv2.convexHull(o_vertices, clockwise=True)
        vertices = self.correct_vertices(vertices)

        if self.debug:
            temp = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
            cv2.drawContours(temp, cnts, -1, (0, 255, 0), 10)
            cv2.drawContours(temp, o_vertices, -1, (255, 0, 0), 30)
            cv2.drawContours(temp, vertices, -1, (0, 0, 255), 20)
            self.save2image(temp)

        return vertices 
Example 17
Project: SudokuSolver   Author: Anve94   File: ImageExtractor.py    (license)
def apply_filters(self, image, denoise=False):
        """ This method is used to apply required filters to the
            to extracted regions of interest. Every square in a
            sudoku square is considered to be a region of interest,
            since it can potentially contain a value. """
        # Convert to grayscale
        source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Denoise the grayscale image if requested in the params
        if denoise:
            denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
            source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
            # source_blur = denoised_gray
        else:
            source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
        source_thresh = cv2.adaptiveThreshold(source_blur, 255, 0, 1, 5, 2)
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
        source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
        if ENABLE_PREVIEW_ALL:
            image_preview(source_dilated)
        return source_dilated 
Example 18
Project: pybot   Author: spillai   File: optflow_utils.py    (license)
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
    fx, fy = flow[y,x].T
    m = np.bitwise_and(np.isfinite(fx), np.isfinite(fy))
    lines = np.vstack([x[m], y[m], x[m]+fx[m], y[m]+fy[m]]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis 
Example 19
Project: DmsMsgRcg   Author: bshao001   File: imgreader.py    (Apache License 2.0)
def get_image_features(self, img_file, stride=5, padding=True):
        """
        Take an image file as input, and output an array of image features whose matrix size is
        based on the image size. When padding is False and the image size is smaller than the
        required feature space size (in the x or y direction), the image is not checked, and this
        method returns a tuple of two empty lists; when padding is True and the image size is more
        than 4 pixels smaller than the required feature space size (in the x or y direction), the
        image is not checked either. This method can be used by both the trainer and the predictor.
        Args:
            img_file: The file name of the image.
            stride: Optional. The stride of the sliding.
            padding: Optional. Whether to pad the image to fit the feature space size or to
                discard the extra pixels if padding is False.
        Returns:
            coordinates: A list of coordinates, each of which contains y and x that are the top
                left corner offsets of the sliding window.
            features: A matrix (python list), in which each row contains the features of the
                sampling sliding window, while the number of rows depends on the image size of
                the input.
        """
        img = cv2.imread(img_file)
        img_arr = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        return self.get_image_array_features(img_arr, stride, padding) 
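
A hypothetical usage sketch (`reader` stands for an instance of the class defining get_image_features, and the file name is a placeholder; get_image_array_features is defined elsewhere in the project):

coordinates, features = reader.get_image_features('frame.png', stride=5, padding=True)
for (y, x), row in zip(coordinates, features):
    print(y, x, len(row))  # top-left offset of each sliding window and its feature count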
Example 20
Project: pokedex-as-it-should-be   Author: leotok   File: preprocess.py    (license)
def extract_color_histogram(image, bins=(8, 8, 8)):
    # extract a 3D color histogram from the HSV color space using
    # the supplied number of `bins` per channel
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1, 2], None, bins, [0, 180, 0, 256, 0, 256])

    # handle normalizing the histogram if we are using OpenCV 2.4.X
    if imutils.is_cv2():
        hist = cv2.normalize(hist)

    # otherwise, perform "in place" normalization in OpenCV 3 (I
    # personally hate the way this is done)
    else:
        cv2.normalize(hist, hist)

    # return the flattened histogram as the feature vector
    return hist.flatten() 
Example 21
Project: yonkoma2data   Author: esuji5   File: cut.py    (license)
def homography(self, img, outdir_name=''):
        orig = img
        # convert to grayscale, blur, and detect edges
        gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
        gauss = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(gauss, 50, 150)

        # find contours in the edge image
        contours = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[1]
        # sort contours by area, largest first
        contours.sort(key=cv2.contourArea, reverse=True)

        if len(contours) > 0:
            arclen = cv2.arcLength(contours[0], True)
            # approximate the largest contour with a polygon
            approx = cv2.approxPolyDP(contours[0], 0.01 * arclen, True)
            # warp = approx.copy()
            if len(approx) >= 4:
                self.last_approx = approx.copy()
            elif self.last_approx is not None:
                approx = self.last_approx
        else:
            approx = self.last_approx
        rect = self.get_rect_by_points(approx)
        # warped = self.transform_by4(orig, warp[:, 0, :])
        return orig[rect[0]:rect[1], rect[2]:rect[3]] 
Example 22
Project: reconstruction   Author: microelly2   File: CV.py    (license)
def animpingpong(self):
		print self
		print self.Object
		print self.Object.Name
		obj=self.Object
		img = cv2.imread(obj.imageFile)
		gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
		gray = np.float32(gray)
		dst = cv2.cornerHarris(gray,3,3,0.00001)
		dst = cv2.dilate(dst,None)
		img[dst>0.01*dst.max()]=[0,0,255]

		from matplotlib import pyplot as plt
		plt.subplot(121),plt.imshow(img,cmap = 'gray')
		plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
		plt.subplot(122),plt.imshow(dst,cmap = 'gray')
		plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
		plt.show() 
Example 23
Project: RunescapeBots   Author: lukegarbutt   File: hsv-tuner.py    (license)
def print_img_array(self):
        img = self.take_screenshot('array')
        #converts image to HSV 
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # gets the values from the sliders
        low_hue = self.low_hue.get()
        low_sat = self.low_sat.get()
        low_val = self.low_val.get()
        # gets upper values from sliders
        high_hue = self.high_hue.get()
        high_sat = self.high_sat.get()
        high_val = self.high_val.get()
        lower_color = np.array([low_hue,low_sat,low_val]) 
        upper_color= np.array([high_hue,high_sat,high_val])
        #creates the mask and result
        mask = cv2.inRange(img, lower_color, upper_color)
        mask = np.array(mask)
        print(mask)


# Instance of Tkinter 
Example 24
Project: robik   Author: RecunchoMaker   File: scanner.py    (GNU General Public License v2.0)
def get_color_medio(self, roi, a,b,imprimir = False):
        xl,yl,ch = roi.shape
        roiyuv = cv2.cvtColor(roi,cv2.COLOR_RGB2YUV)
        roihsv = cv2.cvtColor(roi,cv2.COLOR_RGB2HSV)
        h,s,v=cv2.split(roihsv)
        mask=(h<5)
        h[mask]=200
        
        roihsv = cv2.merge((h,s,v))
        std = np.std(roiyuv.reshape(xl*yl,3),axis=0)
        media = np.mean(roihsv.reshape(xl*yl,3), axis=0)-60
        mediayuv = np.mean(roiyuv.reshape(xl*yl,3), axis=0)

        if std[0]<12 and std[1]<12 and std[2]<12:
        #if (std[0]<15 and std[2]<15) or ((media[0]>100 or media[0]<25) and (std[0]>10)):
            media = np.mean(roihsv.reshape(xl*yl,3), axis=0)
            # yellow has saturation around 65 and value above 200
            if media[1]<60: #and (abs(media[0]-30)>10):
                # white
                return [-10,0,0]
            else:
                return media
        else:
            return None 
Example 25
Project: robik   Author: RecunchoMaker   File: scanner.py    (GNU General Public License v2.0)
def get_frame(self):

        ret,frame = self.cap.read(self.camera_id)
        self.frame = cv2.resize(frame,None,fx=self.img_zoomx, fy=self.img_zoomy, \
                interpolation = cv2.INTER_AREA)

        self.frame = cv2.blur(self.frame, (3,3))
        self.hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
        self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)

        self.colors = []
        if self.escaneando:
            self.draw_osd(self.frame)

        return self.frame 
Example 26
Project: yolo_tensorflow   Author: hizhangp   File: test.py    (MIT License)
def detect(self, img):
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result 
Example 27
Project: guided-filter   Author: lisabug   File: main.py    (MIT License)
def test_color():
    image = cv2.imread('data/Lenna.png')
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    noise = (np.random.rand(image.shape[0], image.shape[1], 3) - 0.5) * 50
    image_noise = image + noise

    radius = [1, 2, 4]
    eps = [0.005]

    combs = list(itertools.product(radius, eps))

    vis.plot_single(to_32F(image), title='origin')
    vis.plot_single(to_32F(image_noise), title='noise')

    for r, e in combs:
        GF = GuidedFilter(image, radius=r, eps=e)
        vis.plot_single(to_32F(GF.filter(image_noise)), title='r=%d, eps=%.3f' % (r, e)) 
Example 28
Project: pybot   Author: spillai   File: color_utils.py    (license)
def colormap(im, min_threshold=0.01):
    mask = im<min_threshold
    if im.ndim == 1: 
        print im
        hsv = np.zeros((len(im), 3), dtype=np.uint8)
        hsv[:,0] = (im * 180).astype(np.uint8)
        hsv[:,1] = 255
        hsv[:,2] = 255
        bgr = cv2.cvtColor(hsv.reshape(-1,1,3), cv2.COLOR_HSV2BGR).reshape(-1,3)
        bgr[mask] = 0
    else: 
        hsv = np.zeros((im.shape[0], im.shape[1], 3), np.uint8)
        hsv[...,0] = (im * 180).astype(np.uint8)
        hsv[...,1] = 255
        hsv[...,2] = 255
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        bgr[mask] = 0
    return bgr 
Example 29
Project: Millennium-Eye   Author: Elysium1937   File: Falafel.py    (GNU General Public License v3.0)
def brightnessFiltering(img):
    #this function filters out the darker pixels
    hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    lower_bright = np.array([0,15,220])
    #0,0,220
    upper_bright = np.array([150,150,255])
    #110,5,255
    """cv2.imshow("imago", hsv)
    cv2.waitKey()"""
    mask = cv2.inRange(hsv, lower_bright, upper_bright)
    """cv2.imshow("imagiu", mask)
    cv2.waitKey()"""
    return mask 
Example 30
Project: Millennium-Eye   Author: Elysium1937   File: Falafel Vision Processing.py    (GNU General Public License v3.0)
def brightnessFiltering(img):
    #this function filters out the darker pixels
    hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    lower_bright = numpy.array([0,15,220])
    #0,0,220
    upper_bright = numpy.array([150,150,255])
    #110,5,255
    mask = cv2.inRange(hsv, lower_bright, upper_bright)
    return mask 
Example 31
Project: Tensormodels   Author: asheshjain399   File: image_color_augment.py    (MIT License)
def random_saturation(img, label, lower=0.5, upper=1.5):
    """
    Multiplies saturation with a constant and clips the value between [0,1.0]
    Args:
        img: input image in float32
        label: returns label unchanged
        lower: lower val for sampling
        upper: upper val for sampling
    """
    alpha = lower + (upper - lower) * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # saturation should always be within [0,1.0]
    hsv[:, :, 1] = np.clip(alpha * hsv[:, :, 1], 0.0, 1.0)

    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label 
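
A side note on this helper: it assumes float32 input scaled to [0, 1]. For float32 images, OpenCV's BGR-to-HSV conversion yields H in [0, 360) and S, V in [0, 1], which is why the clipping bounds above differ from the uint8 case (H in [0, 179], S and V in [0, 255]). A minimal check on a random image:

import cv2
import numpy as np

img = np.random.rand(4, 4, 3).astype(np.float32)  # float32 BGR in [0, 1]
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
print(hsv[..., 0].max())   # hue lies in [0, 360)
print(hsv[..., 1:].max())  # saturation and value lie in [0, 1]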
Example 32
Project: Tensormodels   Author: asheshjain399   File: image_color_augment.py    (MIT License)
def random_hue(img, label, max_delta=10):
    """
    Rotates the hue channel
    Args:
        img: input image in float32
        max_delta: Max number of degrees to rotate the hue channel
    """
    # Rotates the hue channel by delta degrees
    delta = -max_delta + 2.0 * max_delta * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hchannel = hsv[:, :, 0]
    hchannel = delta + hchannel

    # hue should always be within [0,360]
    idx = np.where(hchannel > 360)
    hchannel[idx] = hchannel[idx] - 360
    idx = np.where(hchannel < 0)
    hchannel[idx] = hchannel[idx] + 360

    hsv[:, :, 0] = hchannel
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label 
Example 33
Project: chainer-gan-experiments   Author: Aixile   File: datasets_base.py    (MIT License)
def do_random_brightness(self, img):
        if np.random.rand() > 0.7:
            return img
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int)
        hsv[:,:,2] += np.random.randint(-40,70)
        hsv = np.clip(hsv, 0, 255).astype(np.uint8)
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        return img 
Example 34
Project: OCV_Vehicles_Features   Author: dan-masek   File: cars.py    (BSD 3-Clause "New" or "Revised" License)
def process_frame(frame_number, frame, keypoint_data, detector, matcher):
    log = logging.getLogger("process_frame")

    # Create a copy of source frame to draw into
    processed = frame.copy()

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    kp, des = detector.detectAndCompute(frame, None)
    
    # Match descriptors
    matches = matcher.match(keypoint_data.descriptors, des)
    
    # Sort them in order of distance
    matches = sorted(matches, key = lambda x:x.distance)
    
    processed = drawMatches(cv2.imread('car.png',0), keypoint_data.keypoints, gray_frame, kp, matches[:])
    
    return processed
    
# ============================================================================ 
Example 35
Project: Vision2016   Author: Team3309   File: goal_test.py    (Apache License 2.0)
def check_image(name):
    expected_data = json.loads(open('./img/' + name + '.json').read())
    if not expected_data['enabled']:
        return

    expected_targets = expected_data['targets']

    img = cv2.imread('./img/' + name + '.jpg', cv2.IMREAD_COLOR)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    args = config.copy()
    args['img'] = hsv
    args['output_images'] = {}

    actual_targets = find(**args)

    # make sure same number of targets are detected
    assert len(expected_targets) == len(actual_targets)

    # targets is a list of 2-tuples with expected and actual results
    targets = zip(expected_targets, actual_targets)
    # compare all the different features of targets to make sure they match
    for pair in targets:
        expected, actual = pair
        # make sure that the targets are close to where they are supposed to be
        assert is_close(expected['pos']['x'], actual['pos']['x'], 0.02)
        assert is_close(expected['pos']['y'], actual['pos']['y'], 0.02)
        # make sure that the targets are close to the size they are supposed to be
        assert is_close(expected['size']['width'], actual['size']['width'], 0.02)
        assert is_close(expected['size']['height'], actual['size']['height'], 0.02) 
Example 36
Project: Vision2016   Author: Team3309   File: server.py    (Apache License 2.0)
def handle_image(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    new_data_condition.acquire()
    state['img'] = hsv
    args = config['target'].copy()
    args['img'] = hsv
    args['draw_output'] = state['draw_output']
    args['output_images'] = {}

    targets = vision.find(**args)
    state['targets'] = targets
    state['output_images'] = args['output_images']
    new_data_condition.notify_all()
    new_data_condition.release()

    fps, processing_time = update_fps()
    state['fps'] = round(fps, 1)
    print 'Processed in', processing_time, 'ms, max fps =', round(fps_smoothed, 1) 
Example 37
Project: STS-PiLot   Author: mark-orion   File: camera_cv.py    (GNU General Public License v3.0)
def _thread(cls):
        # frame grabber loop
        while cfg.camera_active:
            sbuffer = StringIO.StringIO()
            camtest = False
            while camtest == False:
                camtest, rawimg = cfg.camera.read()
            if cfg.cv_hflip:
                rawimg = cv2.flip(rawimg, 1)
            if cfg.cv_vflip:
                rawimg = cv2.flip(rawimg, 0)
            imgRGB=cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(imgRGB)
            img.save(sbuffer, 'JPEG')
            cls.frame = sbuffer.getvalue()
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds stop the thread
            if time.time() - cls.last_access > 10:
                break 
Example 38
Project: structured-output-ae   Author: sbelharbi   File: facedataset.py    (GNU Lesser General Public License v3.0)
def plot_over_img(self, img, x, y, x_pr, y_pr, bb_gt):
        """Plot the landmarks over the image with the bbox."""
        plt.close("all")
        fig = plt.figure(frameon=False)  # , figsize=(15, 10.8), dpi=200
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), aspect="auto")
        ax.scatter(x, y, s=10, color='r')
        ax.scatter(x_pr, y_pr, s=10, color='g')
        rect = patches.Rectangle(
            (bb_gt[0], bb_gt[1]), bb_gt[2]-bb_gt[0], bb_gt[3]-bb_gt[1],
            linewidth=1, edgecolor='b', facecolor='none')
        ax.add_patch(rect)
        fig.add_axes(ax)

        return fig 
Example 39
Project: onionstack   Author: ntddk   File: onionstack.py    (license)
def repaint_skin(filename):
    import cv2
    shutil.copy(filename, filename + '.bak')
    frame = cv2.imread(filename)
    HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    l = np.array([0, 50, 80], dtype = "uint8")
    u = np.array([23, 255, 255], dtype = "uint8")
    skin_area = cv2.inRange(HSV, l, u)
    not_skin_area = cv2.bitwise_not(frame, frame, mask = skin_area)
    cv2.imwrite(filename, not_skin_area) 
Example 40
Project: ab2016-ros-gazebo   Author: akademikbilisim   File: robot.py    (license)
def camera_callback(self, msg):
        try:
            self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
        except cv_bridge.CvBridgeError:
            return

        gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(blur, 30, 150)

        cv2.imshow("Robot Camera", canny)
        cv2.waitKey(1) 
Example 41
Project: PixivAvatarBot   Author: kophy   File: avatar.py    (license)
def generate_avatar(dir, filename):
    """
    Generate an avatar from the given image and save it as dir/avatar_filename.
    :return: whether the avatar was generated successfully (bool)
    """
    pil_image = numpy.array(Image.open(os.path.join(dir, filename)))
    image = None
    try:
        image = cv2.cvtColor(numpy.array(pil_image), cv2.COLOR_RGB2BGR)
    except:
        image = numpy.array(pil_image)
    avatar = crop_avatar(image)
    if avatar is None:
        return False
    else:
        cv2.imwrite(os.path.join(dir, "avatar_" + filename), avatar)
        return True
Example 42
Project: watermark   Author: lishuaijuly   File: blind_watermark.py    (license)
def embed(self,ori_img, wm, key=10):
        B =  ori_img
        if len(ori_img.shape ) > 2 :
            img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2YUV)
            signature = BlindWatermark._gene_signature(wm,256,key).flatten()
            B= img[:,:,0]
        
        w,h = B.shape[:2]
        if w< 64 or h <64 :
            print('Image must be at least 64x64 pixels; skipping watermark embedding.')
            return ori_img
        
        if len(ori_img.shape ) > 2 :
            img[:,:,0] = self.inner_embed(B,signature)  
            ori_img = cv2.cvtColor(img, cv2.COLOR_YUV2BGR)
        else :
            ori_img = B
        return ori_img 
Example 43
Project: traffic_detection_yolo2   Author: wAuner   File: process_predictions.py    (license)
def create_heatmaps(img, pred):
    """
    Uses objectness probability to draw a heatmap on the image and returns it
    """
    # find anchors with highest prediction
    best_pred = np.max(pred[..., 0], axis=-1)
    # convert probabilities to colormap scale
    best_pred = np.uint8(best_pred * 255)
    # apply color map
    # cv2 colormaps create BGR, not RGB
    cmap = cv2.cvtColor(cv2.applyColorMap(best_pred, cv2.COLORMAP_JET), cv2.COLOR_BGR2RGB)
    # resize the color map to fit image
    cmap = cv2.resize(cmap, img.shape[1::-1], interpolation=cv2.INTER_NEAREST)

    # overlay cmap with image
    return cv2.addWeighted(cmap, 1, img, 0.5, 0) 
Example 44
Project: ssd.pytorch   Author: amdegroot   File: augmentations.py    (license)
def __call__(self, image, boxes=None, labels=None):
        if self.current == 'BGR' and self.transform == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif self.current == 'HSV' and self.transform == 'BGR':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, boxes, labels 
Example 45
Project: Speedy-TSLSR   Author: talhaHavadar   File: tslsr.py    (license)
def tslsr(image):
    """
        Takes an image then returns (mask, circles, rois for each circle)
    """
    image_hsv = cv2.cvtColor(cv2.GaussianBlur(image, (7, 7), 0), cv2.COLOR_BGR2HSV)
    mask = __filterRedColor(image_hsv)
    circles = __findCircles(mask)
    rois = []
    if circles is not None:
        circles = np.round(circles[0, :]).astype("int")
        for (x, y, r) in circles:
            rois.append(__extract_sign_roi(image, (x, y, r)))

    return (mask, circles, rois) 
Example 46
Project: YOLO-Object-Detection-Tensorflow   Author: huseinzol05   File: main.py    (license)
def detect(img):
    img_h, img_w, _ = img.shape
    inputs = cv2.resize(img, (settings.image_size, settings.image_size))
    inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
    inputs = (inputs / 255.0) * 2.0 - 1.0
    inputs = np.reshape(inputs, (1, settings.image_size, settings.image_size, 3))
    result = detect_from_cvmat(inputs)[0]
    print result

    for i in range(len(result)):
        result[i][1] *= (1.0 * img_w / settings.image_size)
        result[i][2] *= (1.0 * img_h / settings.image_size)
        result[i][3] *= (1.0 * img_w / settings.image_size)
        result[i][4] *= (1.0 * img_h / settings.image_size)

    return result 
Example 47
Project: kaggle-dstl-satellite-imagery-feature-detection   Author: u1234x1234   File: b3_data_iter.py    (license)
def get_data(image_id, a_size, m_size, p_size, sf):
    rgb_data = get_rgb_data(image_id)
    rgb_data = cv2.resize(rgb_data, (p_size*sf, p_size*sf),
                          interpolation=cv2.INTER_LANCZOS4)

#    rgb_data = rgb_data.astype(np.float) / 2500.
#    print(np.max(rgb_data), np.mean(rgb_data))

#    rgb_data[:, :, 0] = exposure.equalize_adapthist(rgb_data[:, :, 0], clip_limit=0.04)
#    rgb_data[:, :, 1] = exposure.equalize_adapthist(rgb_data[:, :, 1], clip_limit=0.04)
#    rgb_data[:, :, 2] = exposure.equalize_adapthist(rgb_data[:, :, 2], clip_limit=0.04)    

    A_data = get_spectral_data(image_id, a_size*sf, a_size*sf, bands=['A'])
    M_data = get_spectral_data(image_id, m_size*sf, m_size*sf, bands=['M'])
    P_data = get_spectral_data(image_id, p_size*sf, p_size*sf, bands=['P'])

#    lab_data = cv2.cvtColor(rgb_data, cv2.COLOR_BGR2LAB)
    P_data = np.concatenate([rgb_data, P_data], axis=2)

    return A_data, M_data, P_data 
Example 48
Project: face_detection   Author: chintak   File: plotting.py    (license)
def plot_face_bb(p, bb, scale=True, path=True, plot=True):
    if path:
        im = cv2.imread(p)
    else:
        im = cv2.cvtColor(p, cv2.COLOR_RGB2BGR)
    if scale:
        h, w, _ = im.shape
        cv2.rectangle(im, (int(bb[0] * h), int(bb[1] * w)),
                      (int(bb[2] * h), int(bb[3] * w)),
                      (255, 255, 0), thickness=4)
        # print bb * np.asarray([h, w, h, w])
    else:
        cv2.rectangle(im, (int(bb[0]), int(bb[1])), (int(bb[2]), int(bb[3])),
                      (255, 255, 0), thickness=4)
        print "no"
    if plot:
        plt.figure()
        plt.imshow(im[:, :, ::-1])
    else:
        return im[:, :, ::-1] 
Example 49
Project: reconstruction   Author: microelly2   File: CV_opening.py    (license)
def animpingpong(self):
		obj=self.Object
		img=None
		if not obj.imageFromNode:
			img = cv2.imread(obj.imageFile)
		else:
			print "copy image ..."
			img = obj.imageNode.ViewObject.Proxy.img.copy()
			print "cpied"
		
		print " loaded"
		
		# print (obj.blockSize,obj.ksize,obj.k)
#		edges = cv2.Canny(img,obj.minVal,obj.maxVal)
#		color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
#		edges=color
#

		kernel = np.ones((obj.xsize,obj.ysize),np.uint8)
		
		opening = cv2.morphologyEx(img,cv2.MORPH_OPEN,kernel, iterations = obj.iterations)


		if True:
			print "zeige"
			cv2.imshow(obj.Label,opening)
			print "gezeigt"
		else:
			from matplotlib import pyplot as plt
			plt.subplot(121),plt.imshow(img,cmap = 'gray')
			plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
			plt.subplot(122),plt.imshow(dst,cmap = 'gray')
			plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
			plt.show()
		print "fertig"
		self.img=opening 
Example 50
Project: reconstruction   Author: microelly2   File: CV_canny.py    (license)
def animpingpong(self):
		obj=self.Object
		img=None
		if not obj.imageFromNode:
			img = cv2.imread(obj.imageFile)
		else:
			print "copy image ..."
			img = obj.imageNode.ViewObject.Proxy.img.copy()
			print "cpied"
		
		print " loaded"
		
		# print (obj.blockSize,obj.ksize,obj.k)
		edges = cv2.Canny(img,obj.minVal,obj.maxVal)
		color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
		edges=color

		if True:
			print "zeige"
			cv2.imshow(obj.Label,edges)
			print "gezeigt"
		else:
			from matplotlib import pyplot as plt
			plt.subplot(121),plt.imshow(img,cmap = 'gray')
			plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
			plt.subplot(122),plt.imshow(dst,cmap = 'gray')
			plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
			plt.show()
		print "fertig"
		self.img=edges