Python cv2.waitKey() Examples

The following are 50 code examples showing how to use cv2.waitKey(). They are extracted from open source Python projects.
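
As a quick refresher: cv2.waitKey(delay) runs the HighGUI event loop for at least delay milliseconds (indefinitely when delay is 0 or negative) and returns the code of the pressed key, or -1 if no key was pressed. The sketch below shows the two patterns that recur throughout the examples; the file name 'example.jpg' and camera index 0 are placeholders, not taken from any project.

import cv2

# Pattern 1: show a still image and block until any key is pressed.
img = cv2.imread('example.jpg')  # placeholder path
cv2.imshow('image', img)
cv2.waitKey(0)  # a delay of 0 waits indefinitely
cv2.destroyAllWindows()

# Pattern 2: poll once per frame in a capture loop; mask the return value
# to its low byte (the common "& 0xFF" idiom) before comparing key codes.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit
        break
cap.release()
cv2.destroyAllWindows()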

Example 1
Project: SelfDrivingCar   Author: aguijarro   File: calibration_camera.py
def get_points():

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1 , 2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8,6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8,6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints 
Example 2
Project: CE264-Computer_Vision   Author: RobinCPC   File: gesture_hci.py
def skin_calib(self, raw_yrb):
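        # Threshold the YCrCb frame with the current trackbar bounds and
        # preview the masked skin region until 's' confirms the calibration.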
        mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
        cal_skin = cv2.bitwise_and(raw_yrb, raw_yrb, mask=mask_skin)
        cv2.imshow('YRB_calib', cal_skin)
        k = cv2.waitKey(5) & 0xFF
        if k == ord('s'):
            self.calib_switch = False
            cv2.destroyWindow('YRB_calib')

        ymin = cv2.getTrackbarPos('Ymin', 'YRB_calib')
        ymax = cv2.getTrackbarPos('Ymax', 'YRB_calib')
        rmin = cv2.getTrackbarPos('CRmin', 'YRB_calib')
        rmax = cv2.getTrackbarPos('CRmax', 'YRB_calib')
        bmin = cv2.getTrackbarPos('CBmin', 'YRB_calib')
        bmax = cv2.getTrackbarPos('CBmax', 'YRB_calib')
        self.mask_lower_yrb = np.array([ymin, rmin, bmin])
        self.mask_upper_yrb = np.array([ymax, rmax, bmax])


# Do skin detection with some filtering 
Example 3
Project: facial_emotion_recognition   Author: adamaulia   File: image_test.py
def test_image(addr):
    target = ['angry','disgust','fear','happy','sad','surprise','neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX
    
    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,scaleFactor=1.1)
    
    for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2,5)
            face_crop = im[y:y+h,x:x+w]
            face_crop = cv2.resize(face_crop,(48,48))
            face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
            face_crop = face_crop.astype('float32')/255
            face_crop = np.asarray(face_crop)
            face_crop = face_crop.reshape(1, 1,face_crop.shape[0],face_crop.shape[1])
            result = target[np.argmax(model.predict(face_crop))]
            cv2.putText(im,result,(x,y), font, 1, (200,0,0), 3, cv2.LINE_AA)
            
    cv2.imshow('result', im)
    cv2.imwrite('result.jpg',im)
    cv2.waitKey(0) 
Example 4
Project: Mini-Projects   Author: gaborvecsei   File: Capture_Img_To_Drive.py
def CaptureImage():
	imageName = 'DontCare.jpg' #Just a random string
	cap = cv2.VideoCapture(0)
	while(True):
	    # Capture frame-by-frame
	    ret, frame = cap.read()

	    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Uncomment to capture in monochrome
	    rgbImage = frame # Keep the color frame (OpenCV delivers BGR, not RGB)

	    # Display the resulting frame
	    cv2.imshow('Webcam',rgbImage)
	    #Wait to press 'q' key for capturing
	    if cv2.waitKey(1) & 0xFF == ord('q'):
	        #Set the image name to the date it was captured
	        imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
	        #Save the image
	        cv2.imwrite(imageName, rgbImage)
	        break
	# When everything done, release the capture
	cap.release()
	cv2.destroyAllWindows()
	#Returns the captured image's name
	return imageName 
Example 5
Project: camera_calibration_frontend   Author: groundmelon   File: cameracalibrator.py
def run(self):
        cv2.namedWindow("display", cv2.WINDOW_NORMAL)
        cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
        cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)

        if self.extra_queue:
            cv2.namedWindow("extra", cv2.WINDOW_NORMAL)

        while True:
            # wait for an image (could happen at the very beginning when the queue is still empty)
            while len(self.queue) == 0:
                time.sleep(0.1)
            im = self.queue[0]
            cv2.imshow("display", im)
            k = cv2.waitKey(6) & 0xFF
            if k in [27, ord('q')]:
                rospy.signal_shutdown('Quit')
            elif k == ord('s'):
                self.opencv_calibration_node.screendump(im)
            if self.extra_queue:
                if len(self.extra_queue):
                    extra_img = self.extra_queue[0]
                    cv2.imshow("extra", extra_img) 
Example 6
Project: MultiObjectTracker   Author: alokwhitewolf   File: get_line.py
def run(im):
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name,cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)

    print " Drag across the screen to set lines.\n Do it twice"
    print " After drawing the lines press 'r' to resume\n"

    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)

    list = [l1,l2]

    mouse_down = False
    def callback(event, x, y, flags, param):
        global trigger, mouse_down

        if trigger<2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                list[trigger][0] = (x, y)

            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                list[trigger][1] = (x,y)
                cv2.line(im_disp, (list[trigger][0][0], list[trigger][0][1]),
                         (list[trigger][1][0], list[trigger][1][1]), (255, 0, 0), 2)
                trigger += 1
        else:
            pass
    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name,im_disp)
        key = cv2.waitKey(10) & 0xFF

        if key == ord('r'):
            # Press 'r' after drawing both lines to return them
            return list
Example 7
Project: FaceSwap   Author: Aravind-Suresh   File: main.py    (MIT License)
def videoize(func, args, src = 0, win_name = "Cam", delim_wait = 1, delim_key = 27):
    cap = cv2.VideoCapture(src)
    while(1):
        ret, frame = cap.read()
        # To speed up processing; Almost real-time on my PC
        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
        frame = cv2.flip(frame, 1)
        out = func(frame, args)
        if out is None:
            continue
        out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
        cv2.imshow(win_name, out)
        cv2.moveWindow(win_name, (s_w - out.shape[1])/2, (s_h - out.shape[0])/2)
        k = cv2.waitKey(delim_wait)

        if k == delim_key:
            cv2.destroyAllWindows()
            cap.release()
            return 
Example 8
Project: PyIntroduction   Author: tody411   File: video_capture.py
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)

    if not capture.isOpened():
        raise IOError("IO Error")

    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)

    while True:
        ret, image = capture.read()

        if ret == False:
            continue

        cv2.imshow("Capture", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()


# Display the webcam capture with Matplotlib
Example 9
Project: lan-ichat   Author: Forec   File: vchat.py
def run(self):
        print("VEDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VEDIO client success connected...")
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break 
Example 10
Project: piwall-cvtools   Author: infinnovation   File: interactive_demo_crop.py
def on_mouse(event, x, y, flags, params):
    # global img
    t = time()
    
    if event == cv2.EVENT_LBUTTONDOWN:
        print 'Start Mouse Position: '+str(x)+', '+str(y)
        sbox = [x, y]
        boxes.append(sbox)
             # print count
             # print sbox
             
    elif event == cv2.EVENT_LBUTTONUP:
        print 'End Mouse Position: '+str(x)+', '+str(y)
        ebox = [x, y]
        boxes.append(ebox)
        print boxes
        crop = img[boxes[-2][1]:boxes[-1][1],boxes[-2][0]:boxes[-1][0]]

        cv2.imshow('crop',crop)
        k =  cv2.waitKey(0)
        if ord('r')== k:
            cv2.imwrite('Crop'+str(t)+'.jpg',crop)
            print "Written to file" 
Example 11
Project: yolo_tensorflow   Author: hizhangp   File: test.py    (MIT License)
def camera_detector(self, cap, wait=10):
        detect_timer = Timer()
        ret, _ = cap.read()

        while ret:
            ret, frame = cap.read()
            detect_timer.tic()
            result = self.detect(frame)
            detect_timer.toc()
            print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))

            self.draw_result(frame, result)
            cv2.imshow('Camera', frame)
            cv2.waitKey(wait)

            ret, frame = cap.read() 
Example 12
Project: pybot   Author: spillai   File: uw_rgbd.py
def test_uw_rgbd_scene(version='v1'): 
    from pybot.vision.image_utils import to_color
    from pybot.vision.imshow_utils import imshow_cv

    v1_directory = '/media/spillai/MRG-HD1/data/rgbd-scenes-v1/'
    v2_directory = '/media/spillai/MRG-HD1/data/rgbd-scenes-v2/rgbd-scenes-v2/'

    if version == 'v1': 
        rgbd_data_uw = UWRGBDSceneDataset(version='v1', 
                                          directory=os.path.join(v1_directory, 'rgbd-scenes'), 
                                          aligned_directory=os.path.join(v1_directory, 'rgbd-scenes-aligned'))
    elif version == 'v2': 
        rgbd_data_uw = UWRGBDSceneDataset(version='v2', directory=v2_directory)
    else: 
        raise RuntimeError('''Version %s not supported. '''
                           '''Check dataset and choose v1/v2 scene dataset''' % version)

    for f in rgbd_data_uw.iteritems(every_k_frames=5, with_ground_truth=True): 
        vis = rgbd_data_uw.annotate(f)
        imshow_cv('frame', np.hstack([f.img, vis]), text='Image')
        imshow_cv('depth', (f.depth / 16).astype(np.uint8), text='Depth')
        cv2.waitKey(100)

    return rgbd_data_uw 
Example 13
Project: pybot   Author: spillai   File: imshow_utils.py
def imshow_cv(label, im, block=False, text=None, wait=2): 
    vis = im.copy()
    print_status(vis, text=text)
    window_manager.imshow(label, vis)
    ch = cv2.waitKey(0 if block else wait) & 0xFF
    if ch == ord(' '):
        cv2.waitKey(0)
    if ch == ord('v'):
        print('Entering debug mode, image callbacks active')
        while True: 
            ch = cv2.waitKey(10) & 0xFF
            if ch == ord('q'): 
                print('Exiting debug mode!')
                break
    if ch == ord('s'):
        fn = 'img-%s.png' % time.strftime("%Y-%m-%d-%H-%M-%S")
        print 'Saving %s' % fn
        cv2.imwrite(fn, vis)
    elif ch == 27 or ch == ord('q'):
        sys.exit(1) 
Example 14
Project: Millennium-Eye   Author: Elysium1937   File: Falafel.py    (GNU General Public License v3.0)
def brightnessFiltering(img):
    #this function filters out the darker pixels
    hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    lower_bright = np.array([0,15,220])
    #0,0,220
    upper_bright = np.array([150,150,255])
    #110,5,255
    """cv2.imshow("imago", hsv)
    cv2.waitKey()"""
    mask = cv2.inRange(hsv, lower_bright, upper_bright)
    """cv2.imshow("imagiu", mask)
    cv2.waitKey()"""
    return mask 
Example 15
Project: Millennium-Eye   Author: Elysium1937   File: Falafel.py    (GNU General Public License v3.0)
def findCorners(contour):
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contour, -1, (255, 255, 255))
    rows,cols = img.shape[0], img.shape[1]
    M = cv2.getRotationMatrix2D((cols/2,rows/2),-45,0.5)
    dst = cv2.warpAffine(blank_image,M,(cols,rows))
    cv2.imshow("rotatio", dst)
    cv2.waitKey()"""
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    height_px_1 = box[0][1] - box[3][1]
    height_px_2 = box[1][1] - box[2][1]
    print height_px_1, height_px_2
    if height_px_1 < height_px_2:
        close_height_px = height_px_2
        far_height_px = height_px_1
    else:
        close_height_px = height_px_1
        far_height_px = height_px_2

    return close_height_px, far_height_px 
Example 16
Project: ab2016-ros-gazebo   Author: akademikbilisim   File: robot.py
def camera_callback(self, msg):
        try:
            self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
        except cv_bridge.CvBridgeError:
            return

        gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(blur, 30, 150)

        cv2.imshow("Robot Camera", canny)
        cv2.waitKey(1) 
Example 17
Project: esys-pbi   Author: fsxfreak   File: square_marker_detect.py
def bench(folder):
    from os.path import join
    from video_capture.av_file_capture import File_Capture
    cap = File_Capture(join(folder,'marker-test.mp4'))
    markers = []
    detected_count = 0

    for x in range(500):
        frame = cap.get_frame()
        img = frame.img
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        markers = detect_markers_robust(gray_img,5,prev_markers=markers,true_detect_every_frame=1,visualize=True)

        draw_markers(img, markers)
        cv2.imshow('Detected Markers', img)

        # for m in markers:
        #     if 'img' in m:
        #         cv2.imshow('id %s'%m['id'], m['img'])
        #         cv2.imshow('otsu %s'%m['id'], m['otsu'])
        if cv2.waitKey(1) == 27:
           break
        detected_count += len(markers)
    print(detected_count) #2900 #3042 #3021 
Example 18
Project: Machine-Learning   Author: Jegathis   File: color_quantization.py
def color_quant(input,K,output):
    img = cv2.imread(input)
    Z = img.reshape((-1,3))
    # convert to np.float32
    Z = np.float32(Z)
    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)

    ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))

    cv2.imshow('res2',res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows() 
Example 19
Project: reconstruction   Author: microelly2   File: pathfinder.py
def findpathlist(ed,showPics=True):
	''' generate list of paths '''

	pathlist=[]
	w,l=ed.shape

	for x in range(l):
		for y in range(w):
			if ed[y][x] : 
				path=runpath(ed,x,y)
				if len(path)>4:
					if showPics:
						cv2.imshow('remaining points',ed)
						cv2.waitKey(1)
					Gui.updateGui()
				pathlist.append(path)

	return  pathlist 
Example 20
Project: camera_calibration_frontend   Author: groundmelon   File: kalibr_camera_focus.py
def callback(self, msg):
        #convert image to opencv
        try:
            cv_image = self.bridge.imgmsg_to_cv2(msg)
            np_image= np.array(cv_image)
        except CvBridgeError, e:
            print "Could not convert ros message to opencv image: ", e
            return
    
        #calculate the fft magnitude
        img_float32 = np.float32(np_image)
        dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
        dft_shift = np.fft.fftshift(dft)
        magnitude_spectrum = cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1])
        
        #normalize
        magnitude_spectrum_normalized = magnitude_spectrum / np.sum(magnitude_spectrum)
        
        #frequency domain entropy (-> Entropy Based Measure of Camera Focus. Matej Kristan, Franjo Pernu. University of Ljubljana. Slovenia)
        fde = np.sum( magnitude_spectrum_normalized * np.log(magnitude_spectrum_normalized) )
    
        y = 20; x = 20
        text = "fde: {0}   (minimize this for focus)".format(np.sum(fde))
        np_image = cv2.cvtColor(np_image, cv2.COLOR_GRAY2BGR)
        cv2.putText(np_image, text, (x,y), cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0, 0, 255), thickness=2)
        cv2.imshow(self.windowNameOrig, np_image)
        cv2.waitKey(10) 
Example 21
Project: video_labeler   Author: hahnyuan   File: viewer.py
def run(self):
        stop = False
        _, im = self.cam.read()
        while 1:
            if not self.video_stat.is_drug and not stop:
                _, im = self.cam.read()
                self.video_stat.update(im)
                self.box_saver.save(im,self.video_stat.frame_id,self.video_stat.get_boxes())
            else:
                if self.video_stat.is_drug:
                    self.video_stat.draw_update(im, self.label_stat.get_label_name())
            key = cv2.waitKey(1) & 0xFF
            if key == ord(' '): # press space to pause/resume the frame
                stop = not stop
            if key == 27: # Esc key to exit
                break
Example 22
Project: lan-ichat   Author: Forec   File: vchat.py
def run(self):
        print("VEDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VEDIO client success connected...")
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break 
Example 23
Project: piwall-cvtools   Author: infinnovation   File: piwall.py
def hdSolidBlock(fn = "redHDSolidBlock.jpg", bgr = None):
    '''Generate test images as solid blocks of colour of known size, save to filename fn.'''
    # Create a zero (black) image of HD size with 3 colour dimensions.  Colour space assumed BGR by default.
    h = 1080
    w = 1920
    img = np.zeros((h,w,3),dtype="uint8")
    # Want to set all of the pixels to bgr tuple, default red, 8 bit colour
    if not bgr:
        bgr = [0,0,255]
    img[:,:] = bgr
    vw = ImageViewer(img)
    vw.windowShow()
    #cv2.imshow("zeroes", frame)
    #ch = 0xff & cv2.waitKey(10000)
    #cv2.destroyAllWindows()
    cv2.imwrite(fn, img) 
Example 24
Project: piwall-cvtools   Author: infinnovation   File: piwall.py
def gimpMarkup(self, hints = gimpContours, image = "2x2-red-1.jpg", feature = "top-left-monitor"):
        r = Rectangle(*hints[image][feature])
        contour = r.asContour()
        cv2.drawContours(self.img, [contour], -1, (0, 255, 0), 5 )
        title = self.tgen.next(feature)
        if self.show: ImageViewer(self.img).show(window=title, destroy = self.destroy, info = self.info, thumbnailfn = title)
        roi = r.getRoi(self.img)
        self.rois[feature] = roi
        # Histogram the ROI to get the spread of intensities, in each channel and grayscale
        title = '%s-roi.jpg' % feature
        if self.show: ImageViewer(roi).show(window=title, destroy = self.destroy, info = self.info, thumbnailfn = title)
        colors = ('b','g','r')
        for i,col in enumerate(colors):
            hist = cv2.calcHist([roi], [i], None, [256], [0,256])
            plt.plot(hist, color = col)
            plt.xlim([0,256])
            #plt.hist(roi.ravel(), 256, [0,256])
        plt.show()
        cmap = ColorMapper(roi)
        cmap.mapit(1)
        title = self.tgen.next('colourMapping')
        if self.show: ImageViewer(self.img).show(window=title, destroy = self.destroy, info = self.info, thumbnailfn = title)
        cv2.waitKey() 
Example 25
Project: piwall-cvtools   Author: infinnovation   File: piwall.py
def do_warp(M, warp):
    warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))
    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
    warp = exposure.rescale_intensity(warp, out_range = (0, 255))
    
    # the pokemon we want to identify will be in the top-right
    # corner of the warped image -- let's crop this region out
    (h, w) = warp.shape
    (dX, dY) = (int(w * 0.4), int(h * 0.45))
    crop = warp[10:dY, w - dX:w - 10]
    
    # save the cropped image to file
    cv2.imwrite("cropped.png", crop)
    
    # show our images
    cv2.imshow("image", image)
    cv2.imshow("edge", edged)
    cv2.imshow("warp", imutils.resize(warp, height = 300))
    cv2.imshow("crop", imutils.resize(crop, height = 300))
    cv2.waitKey(0) 
Example 26
Project: logo-detect   Author: sunbinbin1991   File: train.py
def show_cut_img(img_name):
  img = cv2.imread(img_name, 0)

  cut_img = cut(img)

  cv2.imshow('cut image', cut_img)
  cv2.waitKey(0)
  cv2.destroyAllWindows()

  return cut_img
Example 27
Project: tensorflow-yolo   Author: hjimce   File: misc.py
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
            (int(centerx - ww/2), int(centery - hh/2)),
            (int(centerx + ww/2), int(centery + hh/2)),
            (0,0,255), 2)
    cv2.imshow("result", im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
Example 28
Project: cv-lane   Author: kendricktan   File: EyeCanSee.py
def get_hsv(self):
        cv2.namedWindow('hsv_extractor')
        while True:
            self.grab_frame()

            # Bottom ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_BOTTOM-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Top ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_TOP-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Object
            cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), (238, 130, 238), 2)

            self.hsv_frame = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

            # Mouse handler
            cv2.setMouseCallback('hsv_extractor', self.on_mouse, 0)
            cv2.imshow('hsv_extractor', self.img_debug)

            key = cv2.waitKey(0) & 0xFF
            if key == ord('q'):
                break
        self.stop_camera()
        cv2.destroyAllWindows()

    # Starts camera (needs to be called before run) 
Example 29
Project: ATX   Author: NetEaseGame   File: test_android.py
def test_minicap():
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w/2, h/2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100: # d for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview') 
Example 30
Project: traffic_video_analysis   Author: polltooh   File: plot_density.py
def plot_conf_mat(densmap_name):
    fig = plt.figure(figsize = (20,20))
    plt.clf()
    ax = fig.add_subplot(111)
    #ax.set_aspect(1)
    densmap = np.fromfile(densmap_name, np.float32)
    densmap = densmap.reshape(227, 227)
    densmap *= 100
    densmap[densmap > 1] = 1
    res = ax.imshow(densmap, cmap = plt.cm.jet,
            interpolation = 'nearest')

    plt.savefig('density.jpg')
    img = cv2.imread("density.jpg")
    img = cv2.resize(img, (227,227))
    cv2.imshow("i", img)#
    cv2.waitKey(0)
    #plt.show() 
Example 31
Project: nelpy   Author: nelpy   File: homography.py
def pick_corrs(images, n_pts_to_pick=4):
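    # Per-image mutable state shared with corr_picker_callback: slot 0 collects
    # the picked points, slot 5 holds the image, slot 6 the window title.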
    data = [ [[], 0, False, False, False, image, "Image %d" % i, n_pts_to_pick]
            for i, image in enumerate(images)]

    for d in data:
        win_name = d[6]
        cv2.namedWindow(win_name)
        cv2.setMouseCallback(win_name, corr_picker_callback, d)
        cv2.startWindowThread()
        cv2.imshow(win_name, d[5])

    key = None
    while key != '\n' and key != '\r' and key != 'q':
        key = cv2.waitKey(33)
        key = chr(key & 255) if key >= 0 else None

    cv2.destroyAllWindows()

    if key == 'q':
        return None
    else:
        return [d[0] for d in data] 
Example 32
Project: AutomatorX   Author: xiaoyaojjian   File: test_android.py
def test_minicap():
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w/2, h/2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100: # d for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview') 
Example 33
Project: AutomatorX   Author: xiaoyaojjian   File: test_monkey.py
def test_grid():
    m = StupidMonkey({'touch':10})
    poss = []
    while True:
        pos = m.get_touch_point()
        if not pos:
            break
        poss.append(pos)
    print 'grid point count:', len(poss)

    import cv2
    import numpy
    img = numpy.zeros((1920, 1080))
    for x,y in poss:
        img[x,y] = 255
    img = cv2.resize(img, (540, 960))
    cv2.imshow('grid', img)
    cv2.waitKey() 
Example 34
Project: AutomatorX   Author: xiaoyaojjian   File: monkey.py
def main(serial=None, host=None, port=None):
    d = atx.connect(serial, host=host, port=port)
    while True:
        pilimg = d.screenshot()
        cv2img = imutils.from_pillow(pilimg)
        # cv2img = cv2.imread('tmp.png')
        # cv2.imwrite('tmp.png', cv2img)
        cv2img = cv2.resize(cv2img, fx=0.5, fy=0.5, dsize=(0, 0))
        pt = choose_point(cv2img)
        print 'click:', pt
        if pt:
            x, y = pt
            d.click(2*x, 2*y)
        cv2.waitKey(100)
        # import time
        # time.sleep(0.1) 
Example 35
Project: faceNet_RealTime   Author: jack55436001   File: facevalid_real_time.py
def main(args):

	saveFace = None;
	cap = cv2.VideoCapture(0)
	face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
	while(True):
	    # Capture frame-by-frame
	    ret, frame = cap.read()
	    faces = face_cascade.detectMultiScale(frame, 1.3, 5)
	    if len(faces) > 0:
	    	saveFace = frame
	    	break;
	    # Display the resulting frame
	    cv2.imshow('frame',frame)
	    if cv2.waitKey(1) & 0xFF == ord('q'):
	        break

	# When everything done, release the capture
	cap.release()
	cv2.destroyAllWindows()
	cv2.imwrite('C:/Users/USER/Desktop/facenet-RealTime/src/face_data/saveFace.jpg',frame)
	
	mypath = 'C:/Users/USER/Desktop/facenet-RealTime/src/face_data'
	onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
	myImage = []
	for file in onlyfiles:
		isImage = None
		file = mypath + '/' + file
		isImage = imghdr.what(file)
		if isImage != None:
			myImage.append(file)

	#begin facenet
	cp.main(args,myImage); 
Example 36
Project: BlurDetection   Author: whdcumt   File: main.py
def evaluate(img_col, args):
    numpy.seterr(all='ignore')
    assert isinstance(img_col, numpy.ndarray), 'img_col must be a numpy array'
    assert img_col.ndim == 3, 'img_col must be a color image ({0} dimensions currently)'.format(img_col.ndim)
    assert isinstance(args, argparse.Namespace), 'args must be of type argparse.Namespace not {0}'.format(type(args))
    img_gry = cv2.cvtColor(img_col, cv2.COLOR_RGB2GRAY)
    rows, cols = img_gry.shape
    crow, ccol = rows/2, cols/2
    f = numpy.fft.fft2(img_gry)
    fshift = numpy.fft.fftshift(f)
    fshift[crow-75:crow+75, ccol-75:ccol+75] = 0
    f_ishift = numpy.fft.ifftshift(fshift)
    img_fft = numpy.fft.ifft2(f_ishift)
    img_fft = 20*numpy.log(numpy.abs(img_fft))
    if args.display and not args.testing:
        cv2.destroyAllWindows()
        scripts.display('img_fft', img_fft)
        scripts.display('img_col', img_col)
        cv2.waitKey(0)
    result = numpy.mean(img_fft)
    return img_fft, result, result < args.thresh 
Example 37
Project: AVSR-Deep-Speech   Author: pandeydivesh15   File: image_handler.py    (GNU General Public License v2.0)
def visualize_image(image, name="Image", resize=False, save_image=False, path=None):
	"""Helper function to visualize and save any image"""
	image = image.reshape([IMAGE_WIDTH, IMAGE_HEIGHT])
	image = image.astype(np.uint8)

	if resize: 
		image = cv2.resize(image, (IMAGE_WIDTH * 10, IMAGE_HEIGHT * 10))

	cv2.imshow(name, image)
	if cv2.waitKey(0) & 0xFF == ord('q'):
		cv2.destroyAllWindows()

	if save_image:
		assert path is not None
		cv2.imwrite(path, image) 
Example 38
Project: SudokuSolver   Author: Anve94   File: helper_functions.py
def image_preview(image):
    cv2.imshow('Image preview', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 39
Project: SudokuSolver   Author: Anve94   File: helper_functions.py
def display_solution(square_borders, start_grid, solution, image):
    """ Writes the solution to an image and displays said image.
        Params:
            square_borders  -- A list containing the borders of all squares
            start_grid      -- A list containing the sudoku starting values
            solution        -- A list containing the sudoku solution
            image           -- The image to write to """
    cur_row = 0
    cur_col = 0
    for i, b in enumerate(square_borders):
        x, y, x2, y2 = b  # Tuple unpacking
        # Calculate bottom-left position for text
        text_x, text_y = ((x2+x) / 2) - 10, ((y2+y) / 2) + 10
        # Bottom-left corner for text position
        org = (text_x, text_y)
        # Only write text if the position was not set in the start_grid
        if start_grid[cur_row][cur_col] == 0:
            value = str(solution[cur_row][cur_col])
            cv2.putText(
                img=image,
                text=value,
                org=org,
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1,
                color=(0, 255, 0),
                thickness=2)
        cur_col += 1
        if cur_col % 9 == 0:
            cur_row += 1
            cur_col = 0

    cv2.imshow('Solution', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 40
Project: RunescapeBots   Author: lukegarbutt   File: debug.py
def process_image(image):
	array_image = numpy.array(image)
	array_image = cv2.resize(array_image, (0,0), fx=2, fy=2)
	#cv2.imshow('image', array_image)
	#cv2.waitKey(0)
	image = PIL.Image.fromarray(array_image)
	return(image) 
Example 41
Project: human-rl   Author: gsastry   File: human_feedback.py    (MIT License)
def online_receive_frame(viewer_state, episode):
    logger.info('Waiting for message...')
    if not viewer_state.conn.poll(1000.0):
        raise EnvironmentError("Failed to receive message!")
    msg = viewer_state.conn.recv()
    if isinstance(msg, frame_module.Frame):
        viewer_state.current_frame = msg
        episode.frames.append(viewer_state.current_frame)
    elif msg['msg'] == 'init':
        viewer_state.prev_frame = msg
        viewer_state.action_set = msg['action_set']
        viewer_state.episode_num_in_session += 1
        print('Initial message received')
        print('... got action_set: {}'.format(viewer_state.action_set))
        print('... episode_num: {}'.format(viewer_state.episode_num_in_session))
        viewer_state.env_id = msg['env_id']
        viewer_state.skip_frame = True
    elif msg['msg'] == 'close':
        viewer_state.conn.close()
        viewer_state.close = True
    elif msg['msg'] == 'done':
        viewer_state.frame_index = 0
        proceed = input('Proceed to next episode?')

        if proceed != 'y':
            viewer_state.EXIT = True

        while True:
            k = cv2.waitKey(viewer_state.delay) & 0xFF
            if k == 255: break
        viewer_state.skip_episode = True
    else:
        print('Unknown message received: {}'.format(msg))
        viewer_state.skip_frame = True

    return viewer_state, episode 
Example 42
Project: ICGan-tensorflow   Author: zhangqianhui   File: Gan.py    (Apache License 2.0)
def test(self):

        init = tf.global_variables_initializer()

        with tf.Session() as sess:

            sess.run(init)

            self.saver_z.restore(sess, self.encode_z_model)
            self.saver_y.restore(sess, self.encode_y_model)

            realbatch_array, _ = MnistData.getNextBatch(self.ds_train, self.label_y, 0, 50,
                                                        self.batch_size)

            output_image , label_y = sess.run([self.fake_images,self.e_y], feed_dict={self.images: realbatch_array})

            #one-hot
            #label_y = tf.arg_max(label_y, 1)

            print label_y

            save_images(output_image , [8 , 8] , './{}/test{:02d}_{:04d}.png'.format(self.sample_path , 0, 0))
            save_images(realbatch_array , [8 , 8] , './{}/test{:02d}_{:04d}_r.png'.format(self.sample_path , 0, 0))

            gen_img = cv2.imread('./{}/test{:02d}_{:04d}.png'.format(self.sample_path , 0, 0), 0)
            real_img = cv2.imread('./{}/test{:02d}_{:04d}_r.png'.format(self.sample_path , 0, 0), 0)


            cv2.imshow("test_EGan", gen_img)
            cv2.imshow("Real_Image", real_img)

            cv2.waitKey(-1)

            print("Test finish!") 
Example 43
Project: yolo_tensorflow   Author: hizhangp   File: test.py    (MIT License)
def image_detector(self, imname, wait=0):
        detect_timer = Timer()
        image = cv2.imread(imname)

        detect_timer.tic()
        result = self.detect(image)
        detect_timer.toc()
        print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))

        self.draw_result(image, result)
        cv2.imshow('Image', image)
        cv2.waitKey(wait) 
Example 44
Project: moVi   Author: netsecIITK   File: webcam.py    (Apache License 2.0)
def getFrame(self):
        ret = False
        count = 0
        while not ret:
            # Capture frame-by-frame
            count = count + 1
            ret, frame = self.cap.read()

            if cv2.waitKey(1) & 0xFF == ord('q'):
                return (False, None)

            if count > 10:
                return (False, None)

        return (True, frame) 
Example 45
Project: moVi   Author: netsecIITK   File: frame.py    (Apache License 2.0)
def showFrame(self, frame):
        # Display the resulting frame
        cv2.imshow(self.frame_name, frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return False
        else:
            return True 
Example 46
Project: pybot   Author: spillai   File: nyu_rgbd.py
def test_nyu_rgbd(version='v2'): 
    from pybot.vision.image_utils import to_color
    from pybot.vision.imshow_utils import imshow_cv

    v2_directory = '/media/spillai/MRG-HD1/data/nyu_rgbd/'
    v2_gt = os.path.join(v2_directory, 'nyu_depth_v2_labeled.mat')

    # if version == 'v1': 
    #     dataset = UWRGBDSceneDataset(version='v1', 
    #                                       directory=os.path.join(v1_directory, 'rgbd-scenes'), 
    #                                       aligned_directory=os.path.join(v1_directory, 'rgbd-scenes-aligned'))
    if version == 'v2': 
        dataset = NYURGBDDataset(version='v2', directory=v2_directory, ground_truth=v2_gt)
    else: 
        raise RuntimeError('''Version %s not supported. '''
                           '''Check dataset and choose v1/v2 scene dataset''' % version)


    for f in dataset.iteritems(every_k_frames=5): 
        # vis = rgbd_data_uw.annotate(f)
        imshow_cv('frame', f.img, text='Image')
        imshow_cv('depth', (f.depth / 16).astype(np.uint8), text='Depth')
        imshow_cv('instance', (f.instance).astype(np.uint8), text='Instance')
        imshow_cv('label', (f.label).astype(np.uint8), text='Label')
        cv2.waitKey(100)

    return dataset 
Example 47
Project: pybot   Author: spillai   File: sun3d_utils.py
def test_sun_rgbd(): 
    from pybot.vision.image_utils import to_color
    from pybot.vision.imshow_utils import imshow_cv
    from pybot.utils.io_utils import write_video
    from pybot.vision.color_utils import colormap

    directory = '/media/HD1/data/SUNRGBD/'
    dataset = SUNRGBDDataset(directory)

    colors = cv2.imread('data/sun3d/sun.png').astype(np.uint8)
    for (rgb, depth, label) in dataset.segmentationdb(None): 
        cout = np.dstack([label, label, label])
        colored = cv2.LUT(cout, colors)
        cdepth = colormap(depth / 64000.0)
        for j in range(5): 
            write_video('xtion.avi', np.hstack([rgb, cdepth, colored]))

    # for f in dataset.iteritems(every_k_frames=5): 
    #     # vis = rgbd_data_uw.annotate(f)
    #     imshow_cv('frame', f.img, text='Image')
    #     imshow_cv('depth', (f.depth / 16).astype(np.uint8), text='Depth')
    #     imshow_cv('instance', (f.instance).astype(np.uint8), text='Instance')
    #     imshow_cv('label', (f.label).astype(np.uint8), text='Label')
    #     cv2.waitKey(100)

    return dataset 
Example 48
Project: opencv-gui-helper-tool   Author: maunesh   File: guiutils.py    (MIT License)
def __init__(self, image, filter_size=1, threshold1=0, threshold2=0):
        self.image = image
        self._filter_size = filter_size
        self._threshold1 = threshold1
        self._threshold2 = threshold2

        def onchangeThreshold1(pos):
            self._threshold1 = pos
            self._render()

        def onchangeThreshold2(pos):
            self._threshold2 = pos
            self._render()

        def onchangeFilterSize(pos):
            self._filter_size = pos
            self._filter_size += (self._filter_size + 1) % 2
            self._render()

        cv2.namedWindow('edges')

        cv2.createTrackbar('threshold1', 'edges', self._threshold1, 255, onchangeThreshold1)
        cv2.createTrackbar('threshold2', 'edges', self._threshold2, 255, onchangeThreshold2)
        cv2.createTrackbar('filter_size', 'edges', self._filter_size, 20, onchangeFilterSize)

        self._render()

        print "Adjust the parameters as desired.  Hit any key to close."

        cv2.waitKey(0)

        cv2.destroyWindow('edges')
        cv2.destroyWindow('smoothed') 
Example 49
Project: LensCalibrator   Author: 1024jp   File: createimage.py    (MIT License)
def show_image(image, scale=1.0, window_title='Image'):
    """Display given image in a window.

    Arguments:
    image () -- Image to display.
    scale (float) -- Magnification of image.
    window_title (str) -- Title of window.
    """
    scaled_image = scale_image(image, scale)

    cv2.imshow(window_title, scaled_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 50
Project: iNaturalist   Author: phunterlau   File: sub.py
def oversample(images, crop_dims):
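    # 10-crop oversampling: the four corner crops and the center crop,
    # plus horizontally mirrored copies of all five.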

    im_shape = np.array(images.shape)
    crop_dims = np.array(crop_dims)
    im_center = im_shape[:2] / 2.0

    h_indices = (0, im_shape[0] - crop_dims[0])
    w_indices = (0, im_shape[1] - crop_dims[1])
    crops_ix = np.empty((5, 4), dtype=int)
    curr = 0
    for i in h_indices:
        for j in w_indices:
            crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1])
            curr += 1
    crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([
        -crop_dims / 2.0,
         crop_dims / 2.0
    ])
    crops_ix = np.tile(crops_ix, (2, 1))

    # print crops_ix

    # Extract crops
    crops = np.empty((10, crop_dims[0], crop_dims[1],
                      im_shape[-1]), dtype=np.float32)
    ix = 0
    # for im in images:
    im = images
    # print im.shape
    for crop in crops_ix:
        # print crop
        crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :]
        # cv2.imshow('crop', im[crop[0]:crop[2], crop[1]:crop[3], :])
        # cv2.waitKey()
        ix += 1
    crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :]
    # cv2.imshow('crop', crops[0,:,:,:])
    # cv2.waitKey()
    return crops