Python cv2.namedWindow() Examples

The following are 50 code examples showing how to use cv2.namedWindow(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the examples you don't like. You can also save this page to your account.

Example 1
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials   Author: taochenshh   File: get_extrinsics.py    (license) View Source Project 11 votes vote down vote up
def rgb_callback(self, data):
    """ROS image callback for the RGB camera.

    Converts the incoming image message to BGR, shows it, looks for the
    calibration chessboard, and — when found — estimates the camera's
    extrinsic pose (rvec/tvec via solvePnPRansac), prints it, and dumps
    rmat/tvec to a YAML file.

    The original mixed tabs and spaces in the try-block (a SyntaxError on
    Python 3) and never closed the YAML file handle; both fixed here.
    """
    try:
        self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
        # Without a frame there is nothing to process (the original fell
        # through and operated on a stale/undefined self.rgb_img).
        return

    gray = cv2.cvtColor(self.rgb_img, cv2.COLOR_BGR2GRAY)
    rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num, y_num), None)
    cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
    cv2.imshow('rgb_img', self.rgb_img)
    cv2.waitKey(5)
    if rgb_ret:
        rgb_tempimg = self.rgb_img.copy()
        # Refine corner locations to sub-pixel accuracy before pose estimation.
        cv2.cornerSubPix(gray, rgb_corners, (5, 5), (-1, -1), criteria)
        cv2.drawChessboardCorners(rgb_tempimg, (x_num, y_num), rgb_corners, rgb_ret)
        # NOTE(review): OpenCV 2 API — OpenCV 3+ returns (retval, rvec, tvec,
        # inliers) from solvePnPRansac; confirm the cv2 version in use.
        rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
        self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
        print("The world coordinate system's origin in camera's coordinate system:")
        print("===rgb_camera rvec:")
        print(rgb_rvec)
        print("===rgb_camera rmat:")
        print(self.rgb_rmat)
        print("===rgb_camera tvec:")
        print(self.rgb_tvec)
        print("rgb_camera_shape: ")
        print(self.rgb_img.shape)

        print("The camera origin in world coordinate system:")
        print("===camera rmat:")
        print(self.rgb_rmat.T)
        print("===camera tvec:")
        # Camera position in world frame: -R^T * t.
        print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))

        # `with` closes the handle deterministically (the original leaked it).
        pose = {'rmat': self.rgb_rmat.tolist(), 'tvec': self.rgb_tvec.tolist()}
        with open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w") as rgb_stream:
            yaml.dump(pose, rgb_stream)

        cv2.imshow('rgb_img', rgb_tempimg)
        cv2.waitKey(5)
Example 2
Project: camera_calibration_frontend   Author: groundmelon   File: cameracalibrator.py    (license) View Source Project 10 votes vote down vote up
def run(self):
    """Display loop for the calibration UI.

    Creates the main "display" window (with mouse handler and a scale
    trackbar) plus an optional "extra" window, then shows the newest queued
    image forever.  Esc/'q' requests a rospy shutdown; 's' takes a
    screendump of the current image.
    """
    cv2.namedWindow("display", cv2.WINDOW_NORMAL)
    cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
    cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)

    if self.extra_queue:
        cv2.namedWindow("extra", cv2.WINDOW_NORMAL)

    while True:
        # Block until at least one image is queued (can happen at startup).
        while not self.queue:
            time.sleep(0.1)
        frame = self.queue[0]
        cv2.imshow("display", frame)
        pressed = cv2.waitKey(6) & 0xFF
        if pressed in (27, ord('q')):
            rospy.signal_shutdown('Quit')
        elif pressed == ord('s'):
            self.opencv_calibration_node.screendump(frame)
        if self.extra_queue and len(self.extra_queue):
            cv2.imshow("extra", self.extra_queue[0])
Example 3
Project: Kinect-ASUS-Xtion-Pro-Live-Calibration-Tutorials   Author: taochenshh   File: get_extrinsics.py    (license) View Source Project 10 votes vote down vote up
def ir_callback(self, data):
    """ROS image callback for the IR (depth) camera.

    Mirrors rgb_callback: converts the message to a grayscale image, shows
    it, finds the chessboard, estimates the IR camera's extrinsic pose, and
    dumps rmat/tvec to a YAML file.

    The original used tab indentation inside the try-block (a SyntaxError
    on Python 3) and never closed the YAML file handle; both fixed here.
    """
    try:
        self.ir_img = self.mkgray(data)
    except CvBridgeError as e:
        print(e)
        # No frame, nothing to process (original fell through).
        return

    ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num, y_num))
    cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
    cv2.imshow('ir_img', self.ir_img)
    cv2.waitKey(5)
    if ir_ret:
        ir_tempimg = self.ir_img.copy()
        # Refine corners to sub-pixel accuracy before pose estimation.
        cv2.cornerSubPix(ir_tempimg, ir_corners, (11, 11), (-1, -1), criteria)
        cv2.drawChessboardCorners(ir_tempimg, (x_num, y_num), ir_corners, ir_ret)
        # NOTE(review): OpenCV 2 API — OpenCV 3+ returns (retval, rvec, tvec,
        # inliers) from solvePnPRansac; confirm the cv2 version in use.
        ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
        self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)

        print("The world coordinate system's origin in camera's coordinate system:")
        print("===ir_camera rvec:")
        print(ir_rvec)
        print("===ir_camera rmat:")
        print(self.ir_rmat)
        print("===ir_camera tvec:")
        print(self.ir_tvec)
        print("ir_camera_shape: ")
        print(self.ir_img.shape)

        print("The camera origin in world coordinate system:")
        print("===camera rmat:")
        print(self.ir_rmat.T)
        print("===camera tvec:")
        # Camera position in world frame: -R^T * t.
        print(-np.dot(self.ir_rmat.T, self.ir_tvec))

        # `with` closes the handle deterministically (the original leaked it).
        pose = {'rmat': self.ir_rmat.tolist(), 'tvec': self.ir_tvec.tolist()}
        with open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w") as depth_stream:
            yaml.dump(pose, depth_stream)

        cv2.imshow('ir_img', ir_tempimg)
        cv2.waitKey(5)
Example 4
Project: MultiObjectTracker   Author: alokwhitewolf   File: get_line.py    (license) View Source Project 9 votes vote down vote up
def run(im):
    """Let the user drag two lines over a copy of *im* and return them.

    Opens a window, installs a mouse callback that records each drag
    (button-down = first endpoint, button-up = second endpoint) into a
    2x2 uint32 array per line, and returns the list of the two arrays once
    the user presses 'r'.

    Fixes over the original: the local that shadowed the builtin `list` is
    renamed; `mouse_down` is captured with `nonlocal` (the original declared
    it `global`, which silently created/read a module-level variable instead
    of run()'s local); the unreachable `exit()` after `return` is removed;
    Py2 print statements are now print() calls.
    """
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)

    print(" Drag across the screen to set lines.\n Do it twice")
    print(" After drawing the lines press 'r' to resume\n")

    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)
    lines = [l1, l2]  # renamed from `list`, which shadowed the builtin

    mouse_down = False

    def callback(event, x, y, flags, param):
        global trigger          # completed-line counter at module scope
        nonlocal mouse_down     # was `global`, which never saw run()'s local

        if trigger < 2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                lines[trigger][0] = (x, y)

            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                lines[trigger][1] = (x, y)
                cv2.line(im_disp, (lines[trigger][0][0], lines[trigger][0][1]),
                         (lines[trigger][1][0], lines[trigger][1][1]), (255, 0, 0), 2)
                trigger += 1

    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name, im_disp)
        key = cv2.waitKey(10) & 0xFF
        if key == ord('r'):
            # 'r' resumes: hand the two drawn lines back to the caller.
            return lines
Example 5
Project: CVMazeRunner   Author: M-Niedoba   File: setup.py    (license) View Source Project 8 votes vote down vote up
def get_start_points(image):
    """Show the maze and let the user pick start/end points.

    'a' uses the ImageProcessor defaults; 's' lets the user click a start
    and an end point.  Returns (start_x, start_y, end_x, end_y).

    Fixes over the original: dropped the useless `window =` binding
    (cv2.namedWindow returns None), and repaired the garbled user prompts.
    """
    cv2.namedWindow(MAZE_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(MAZE_NAME, 900, 900)
    cv2.imshow(MAZE_NAME, image)
    cv2.moveWindow(MAZE_NAME, 100, 100)
    print("Please press 'A' to use default start and end points, or press 'S' to choose your own")

    while True:
        key = cv2.waitKey(0)
        if key == ord('a'):
            print("Using Default Start and End Points")
            imageProcessor = ImageProcessor(image)
            start_x, start_y = imageProcessor.getDefaultStart(image)
            end_x, end_y = imageProcessor.getDefaultEnd(image)
            print("Start Point: {0}, End Point: {1}".format((start_x, start_y), (end_x, end_y)))
            break
        elif key == ord('s'):
            print("Please select a start point")
            start_x, start_y = get_user_selected_point(image)
            print("Start Point: {0}, please select an end point".format((start_x, start_y)))
            end_x, end_y = get_user_selected_point(image)
            print("End Point: {0}".format((end_x, end_y)))
            break
        else:
            print("Invalid")
            continue
    cv2.destroyAllWindows()
    return start_x, start_y, end_x, end_y
Example 6
Project: PyIntroduction   Author: tody411   File: video_capture.py    (license) View Source Project 7 votes vote down vote up
def cvCaptureVideo():
    """Preview the default camera in a resizable window until 'q' is pressed.

    Raises IOError if the capture device cannot be opened.  The original
    executed `raise("IO Error")`, which raises a TypeError (strings are not
    exceptions) instead of the intended error.
    """
    capture = cv2.VideoCapture(0)

    if not capture.isOpened():
        raise IOError("IO Error")

    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)

    while True:
        ret, image = capture.read()

        # Skip over transient failed grabs instead of crashing.
        if not ret:
            continue

        cv2.imshow("Capture", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()


# Matplot???Web???????????? 
Example 7
Project: lan-ichat   Author: Forec   File: vchat.py    (license) View Source Project 7 votes vote down vote up
def run(self):
        """Accept one remote video client and display its stream.

        Per-frame wire format: a size header packed with struct "L",
        followed by that many bytes of zlib-compressed, pickled frame data.
        Frames are shown in the 'Remote' window until Esc (27) is pressed.

        NOTE(review): struct "L" uses the *native* size (4 or 8 bytes), so
        sender and receiver must run on same-width platforms — confirm.
        SECURITY: pickle.loads on network data executes arbitrary code if
        the peer is untrusted.
        """
        print("VEDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VEDIO client success connected...")
        # Rolling receive buffer (bytes).
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            # Accumulate until the size header is complete.
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            # Accumulate until the whole frame payload has arrived.
            while len(data) < msg_size:
                data += conn.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break 
Example 8
Project: lan-ichat   Author: Forec   File: vchat.py    (license) View Source Project 6 votes vote down vote up
def run(self):
    """Accept one remote video client and display its decompressed stream.

    Each frame arrives as a struct-"L" size header followed by that many
    bytes of zlib-compressed, pickled image data; frames are shown in the
    'Remote' window until Esc (27) is pressed.
    """
    print("VEDIO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VEDIO client success connected...")

    buf = "".encode("utf-8")              # rolling receive buffer
    header_len = struct.calcsize("L")

    cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
    while True:
        # Receive until the size header is complete, then decode it.
        while len(buf) < header_len:
            buf += conn.recv(81920)
        frame_len = struct.unpack("L", buf[:header_len])[0]
        buf = buf[header_len:]

        # Receive until the full frame payload is buffered.
        while len(buf) < frame_len:
            buf += conn.recv(81920)
        frame = pickle.loads(zlib.decompress(buf[:frame_len]))
        buf = buf[frame_len:]

        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
Example 9
Project: cv-lane   Author: kendricktan   File: EyeCanSee.py    (license) View Source Project 6 votes vote down vote up
def get_hsv(self):
    """Interactive HSV inspector: draw the ROI guides on the debug image,
    convert the current frame to HSV, and let `on_mouse` report values at
    clicked pixels.  Press 'q' to quit; releases the camera and windows.
    """
    cv2.namedWindow('hsv_extractor')
    # Register the mouse handler once up front — the original re-registered
    # it on every loop iteration for no benefit.
    cv2.setMouseCallback('hsv_extractor', self.on_mouse, 0)
    while True:
        self.grab_frame()

        # Bottom ROI guide rectangle.
        cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_BOTTOM-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

        # Top ROI guide rectangle.
        cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_TOP-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

        # Object region guide rectangle.
        cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), (238, 130, 238), 2)

        # HSV copy of the current frame, sampled by the mouse callback.
        self.hsv_frame = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

        cv2.imshow('hsv_extractor', self.img_debug)

        # waitKey(0) blocks: one frame is processed per key press.
        key = cv2.waitKey(0) & 0xFF
        if key == ord('q'):
            break
    self.stop_camera()
    cv2.destroyAllWindows()

    # Starts camera (needs to be called before run) 
Example 10
Project: ATX   Author: NetEaseGame   File: test_android.py    (license) View Source Project 6 votes vote down vote up
def test_minicap():
    """Live-preview the Android screen at half size; 'd' dumps a PNG,
    Ctrl-C exits."""
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            # // keeps the target size integral — cv2.resize rejects float
            # dimensions, so w/2 broke this on Python 3.
            img = cv2.resize(d._screen, (w // 2, h // 2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100:  # 'd' for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
Example 11
Project: Stereo-Pose-Machines   Author: ppwwyyxx   File: main.py    (license) View Source Project 6 votes vote down vote up
def dump_2dcoor():
    """Run the CPM pose model on the stereo camera pair for 10 frames,
    saving the 14 per-joint argmax coordinates to ptsN.npy and the left
    frame to frameN.png each iteration.
    """
    camera = libcpm.Camera()
    camera.setup()
    runner = get_parallel_runner('../data/cpm.npy')
    cv2.namedWindow('color')
    cv2.startWindowThread()

    frame_idx = 0
    while True:
        frame_idx += 1
        # Wrap the camera buffers without copying.
        left = np.array(camera.get_for_py(0), copy=False)
        right = np.array(camera.get_for_py(1), copy=False)

        heat_l, heat_r = runner(left, right)
        # One (left, right) argmax pair per joint channel.
        joints = np.asarray([
            (argmax_2d(heat_l[:, :, j]), argmax_2d(heat_r[:, :, j]))
            for j in range(14)
        ])
        np.save('pts{}.npy'.format(frame_idx), joints)
        cv2.imwrite("frame{}.png".format(frame_idx), left)
        if frame_idx == 10:
            break
Example 12
Project: nelpy   Author: nelpy   File: homography.py    (license) View Source Project 6 votes vote down vote up
def pick_corrs(images, n_pts_to_pick=4):
    """Open one window per image and collect n_pts_to_pick clicked points
    from each via corr_picker_callback.

    Returns a list of per-image point lists, or None if the user quits
    with 'q' (Enter/Return finishes normally).
    """
    # Per-image mutable state shared with the mouse callback:
    # [points, ?, ?, ?, ?, image, window title, points to pick].
    data = []
    for idx, image in enumerate(images):
        data.append([[], 0, False, False, False, image,
                     "Image %d" % idx, n_pts_to_pick])

    for entry in data:
        title = entry[6]
        cv2.namedWindow(title)
        cv2.setMouseCallback(title, corr_picker_callback, entry)
        cv2.startWindowThread()
        cv2.imshow(title, entry[5])

    key = None
    while key not in ('\n', '\r', 'q'):
        code = cv2.waitKey(33)
        key = chr(code & 255) if code >= 0 else None

    cv2.destroyAllWindows()

    return None if key == 'q' else [entry[0] for entry in data]
Example 13
Project: AutomatorX   Author: xiaoyaojjian   File: test_android.py    (license) View Source Project 6 votes vote down vote up
def test_minicap():
    """Live-preview the Android screen at half size; 'd' dumps a PNG,
    Ctrl-C exits."""
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            # // keeps the target size integral — cv2.resize rejects float
            # dimensions, so w/2 broke this on Python 3.
            img = cv2.resize(d._screen, (w // 2, h // 2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100:  # 'd' for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
Example 14
Project: AutonomousParking   Author: jovanduy   File: manual_park.py    (license) View Source Project 6 votes vote down vote up
def __init__(self):
        """ Initialize the parking spot recognizer: subscribe to the camera
        and velocity topics and set up image buffers and HSV thresholds. """
        
        #Subscribe to topics of interest
        rospy.Subscriber("/camera/image_raw", Image, self.process_image)
        rospy.Subscriber('/cmd_vel', Twist, self.process_twist)

        # Initialize video images
        cv2.namedWindow('video_window')        
        self.cv_image = None # the latest image from the camera
        # Working buffers filled by the processing callbacks — presumably
        # destination/arc/warped views of the frame; TODO confirm.
        self.dst =  np.zeros(IMG_SHAPE, np.uint8)
        self.arc_image = np.zeros(IMG_SHAPE, np.uint8)
        self.transformed = np.zeros(IMG_SHAPE, np.uint8)
        
        # Declare instance variables
        self.bridge = CvBridge() # used to convert ROS messages to OpenCV
        self.hsv_lb = np.array([0, 70, 60]) # hsv lower bound to filter for parking lines
        self.hsv_ub = np.array([30, 255, 140]) # hsv upper bound
        self.vel = None
        self.omega = None
        self.color = (0,0,255) 
Example 15
Project: tbotnav   Author: patilnabhi   File: train_faces2.py    (license) View Source Project 6 votes vote down vote up
def img_callback(self, image):
    """ROS image callback: convert the frame, run face-capture processing,
    and shut the node down once 100*cp_rate samples have been captured.

    Modernized from Python-2-only syntax (`except X, e`, `print e`).
    """
    try:
        inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
    except CvBridgeError as e:
        print(e)
        # Without a frame, the lines below would hit an unbound `inImg`
        # (the original fell through into a NameError).
        return

    inImgarr = np.array(inImg)
    self.outImg = self.process_image(inImgarr)

    cv2.imshow('Capture Face', self.outImg)
    cv2.waitKey(3)

    if self.count == 100*self.cp_rate:
        rospy.loginfo("Data Captured!")
        rospy.loginfo("Training Data...")
        self.fisher_train_data()
        rospy.signal_shutdown('done')
Example 16
Project: tbotnav   Author: patilnabhi   File: get_hand_gestures.py    (license) View Source Project 6 votes vote down vote up
def __init__(self):
        """Set up the hand_gestures ROS node: subscribes to the ASUS depth
        stream and publishes the recognized finger count on 'num_fingers'."""
        self.node_name = "hand_gestures"
        rospy.init_node(self.node_name)

        # Release windows/resources when the node shuts down.
        rospy.on_shutdown(self.cleanup)

        # self.cv_window_name = self.node_name
        # cv2.namedWindow("Depth Image", 1)
        # cv2.moveWindow("Depth Image", 20, 350)

        self.bridge = CvBridge()  # converts ROS image messages to OpenCV images
        self.numFingers = RecognizeNumFingers() 

        self.depth_sub = rospy.Subscriber("/asus/depth/image_raw", Image, self.depth_callback)
        # latch=True: late subscribers still receive the last published count.
        self.num_pub = rospy.Publisher('num_fingers', Int32, queue_size=10, latch=True)       
        # self.img_pub = rospy.Publisher('hand_img', Image, queue_size=10)
        rospy.loginfo("Waiting for image topics...") 
Example 17
Project: tbotnav   Author: patilnabhi   File: train_faces.py    (license) View Source Project 6 votes vote down vote up
def img_callback(self, image):
    """ROS image callback: convert the frame, run face-capture processing,
    and shut the node down once 100*cp_rate samples have been captured.

    Modernized from Python-2-only syntax (`except X, e`, `print e`).
    """
    try:
        inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
    except CvBridgeError as e:
        print(e)
        # Without a frame, the lines below would hit an unbound `inImg`
        # (the original fell through into a NameError).
        return

    inImgarr = np.array(inImg)
    self.outImg = self.process_image(inImgarr)

    cv2.imshow('Capture Face', self.outImg)
    cv2.waitKey(3)

    if self.count == 100*self.cp_rate:
        rospy.loginfo("Data Captured!")
        rospy.loginfo("Training Data...")
        self.fisher_train_data()
        rospy.signal_shutdown('done')
Example 18
Project: fathom   Author: rdadolf   File: emulator.py    (license) View Source Project 6 votes vote down vote up
def __init__(self, rom_name, vis, frameskip=1, windowname='preview'):
    """Wrap the Arcade Learning Environment around one ROM.

    Parameters: rom_name — ROM filename appended to ROM_PATH; vis — when
    truthy, open a resizable preview window; frameskip — ALE frame_skip
    setting; windowname — preview window title.

    The original mixed a Python-2 print statement with print() calls;
    normalized to print() throughout.
    """
    self.ale = ALEInterface()
    self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode")
    self.ale.setInt("random_seed", 123)
    self.ale.setInt("frame_skip", frameskip)
    romfile = str(ROM_PATH) + str(rom_name)
    if not os.path.exists(romfile):
        # Warn but fall through — loadROM will fail loudly on a bad path.
        print('No ROM file found at "' + romfile + '".\nAdjust ROM_PATH or double-check the file exists.')
    self.ale.loadROM(romfile)
    self.legal_actions = self.ale.getMinimalActionSet()
    self.action_map = dict()
    self.windowname = windowname
    # Map each legal ALE action to its index in the minimal action set.
    for i in range(len(self.legal_actions)):
        self.action_map[self.legal_actions[i]] = i

    self.screen_width, self.screen_height = self.ale.getScreenDims()
    print("width/height: " + str(self.screen_width) + "/" + str(self.screen_height))
    self.vis = vis
    if vis:
        cv2.startWindowThread()
        cv2.namedWindow(self.windowname, flags=cv2.WINDOW_AUTOSIZE)  # permit manual resizing
Example 19
Project: pynephoscope   Author: neXyon   File: frame_difference.py    (license) View Source Project 6 votes vote down vote up
def __init__(self, files):
    """Build the frame-difference viewer over *files*.

    Requires at least two files.  Creates the Image/Difference/Threshold
    windows, an image-selector trackbar (over len(files)-1 pairs) and a
    threshold trackbar, then renders the first pair.
    """
    if len(files) < 2:
        raise Exception('Need at least two files to compare.')

    self.image_window = 'Image'
    self.threshold_window = 'Threshold'
    self.difference_window = 'Difference'
    self.files = files
    self.tb_threshold = 'Threshold'
    self.tb_image = 'Image'
    self.current_image = 0

    self.image1 = None
    self.image2 = None
    self.difference = None
    self.threshold = 25
    self.gray = None

    # Fixed-size windows, created in the same order as before.
    for name in (self.image_window, self.difference_window, self.threshold_window):
        cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE)
    cv2.createTrackbar(self.tb_image, self.difference_window, 0, len(self.files) - 2, self.selectImage)
    cv2.createTrackbar(self.tb_threshold, self.threshold_window, self.threshold, 255, self.renderThreshold)
    self.render()
Example 20
Project: curly-fortnight   Author: sManohar201   File: track.py    (license) View Source Project 6 votes vote down vote up
def __init__(self):
        """Set up the tracking node: tuning trackbars, an image subscriber,
        and a Twist publisher loop that runs until rospy shutdown.

        NOTE(review): the publish loop below blocks inside __init__, so
        construction never returns while the node is running.
        """
        self.bridge = cv_bridge.CvBridge()
        cv2.namedWindow("input", 1)
        # Tuning parameters — presumably read back in image_callback via
        # cv2.getTrackbarPos; TODO confirm.
        cv2.createTrackbar('param1', 'input', 10, 300, nothing)
        cv2.createTrackbar('param2', 'input', 15, 300, nothing)
        cv2.namedWindow("processed", 1)
        self.image_sb = rospy.Subscriber('/usb_cam/image_raw', Image, self.image_callback)

        self.motion = Twist()

        rate = rospy.Rate(20)

        # publish to cmd_vel of the jackal
        pub = rospy.Publisher("/jackal_velocity_controller/cmd_vel", Twist, queue_size=10)

        while not rospy.is_shutdown():
            # publish Twist
            pub.publish(self.motion)
            rate.sleep() 
Example 21
Project: NGImageProcessor   Author: artzers   File: BasePixelTransfer.py    (license) View Source Project 6 votes vote down vote up
def HistDemo(self):
    """Histogram demo: equalize lena.jpg, regulate the equalized image back
    toward the original's color histogram, and display all three images.
    Blocks until a key is pressed.
    """
    img = cv2.imread("lena.jpg")
    eqimg = self.HistEQ(img)

    # Match the equalized image back to the source histogram.
    reghist = self.ImhistColor(img, 255)
    dImg, regMap = self.RegulateHist(eqimg, reghist)

    # Same window/show ordering as the hand-written sequence.
    for title, picture in (("lena", img), ("eq", eqimg), ("reg", dImg)):
        cv2.namedWindow(title)
        cv2.imshow(title, picture)
    cv2.waitKey(0)
Example 22
Project: NGImageProcessor   Author: artzers   File: BaseImageRestore.py    (license) View Source Project 6 votes vote down vote up
def WienerFilterDemo(self, img):
        """Wiener deconvolution demo: degrade *img* with transfer function H
        plus Gaussian noise, restore with a Wiener-style filter, and show the
        original, degraded, and restored images.  Blocks until a key press.
        """
        img = np.int16(img)
        # Additive Gaussian noise, mean 0 / sigma 10, one draw per pixel.
        noise = self.noiseGenerator.GuassNoise(img, 0, 10, np.int32(img.size))
        nImg = img + noise  # NOTE(review): unused below — confirm intent
        # Power spectra: Sn of the noise, Sf of the clean image.
        fn = np.fft.fftshift(np.fft.fft2(noise))
        Sn = np.abs(fn) ** 2.0
        ffImg = np.fft.fftshift(np.fft.fft2(img))
        Sf = np.abs(ffImg) ** 2.0
        H = self.GenerateHDemo(img.shape)  # degradation transfer function
        fImg = np.fft.fftshift(np.fft.fft2(img))
        # Degraded observation g = (h * f) + noise, back in the spatial domain.
        gImg = np.fft.ifft2(np.fft.ifftshift(fImg * H))
        gImg += noise
        fgImg = np.fft.fftshift(np.fft.fft2(gImg))
        # NOTE(review): the textbook Wiener inverse is
        # conj(H) / (|H|^2 + Sn/Sf); this expression differs — confirm intent.
        wH = (H * (np.abs(H) ** 2.0 + Sn / Sf)) / np.abs(H) ** 2.0
        H1 = self.IdeaLowHInverse(wH, 500)  # low-pass limit on the inverse filter
        ggImg = np.fft.ifft2(np.fft.ifftshift(fgImg * H1))
        cv2.namedWindow("orig")
        cv2.imshow("orig", np.uint8(img))
        cv2.namedWindow("g")
        cv2.imshow("g", np.uint8(gImg))
        cv2.namedWindow("Wiener filter restore")
        cv2.imshow("Wiener filter restore", np.uint8(ggImg))
        cv2.waitKey(0) 
Example 23
Project: NGImageProcessor   Author: artzers   File: BaseFFTProcessor.py    (license) View Source Project 6 votes vote down vote up
def FFTDemo(self):
    """FFT round-trip demo on a grayscale lena.jpg: show the (shifted) FFT
    and the inverse transform.  Blocks until a key is pressed.

    Uses range() instead of the Python-2-only xrange().
    """
    img = cv2.imread("lena.jpg")
    baseTransfer = BasePixelTransfer.BasePixelTransfer()
    img = baseTransfer.RGB2Gray(img)
    fimg = np.float32(img.copy())

    fftimg = np.fft.fftshift(np.fft.fft2(fimg))
    ifft = np.real(np.fft.ifft2(fftimg))
    # Multiply by (-1)^(i+j) to undo the spatial shift implied by fftshift.
    for i in range(0, ifft.shape[0]):
        for j in range(0, ifft.shape[1]):
            ifft[i, j] *= (-1) ** (i + j)
    cv2.namedWindow("lena")
    cv2.imshow("lena", img)
    cv2.namedWindow("fft")
    cv2.imshow("fft", np.real(fftimg))
    cv2.imshow("ifft", np.uint8(ifft))
    cv2.waitKey(0)
Example 24
Project: NGImageProcessor   Author: artzers   File: BaseFFTProcessor.py    (license) View Source Project 6 votes vote down vote up
def LaplaceFFTDemo(self):
    """Frequency-domain Laplacian demo: scale each FFT coefficient by
    -c*((u-256)^2 + (v-256)^2) (the Laplacian transfer function for a
    512x512 image centered at (256, 256)), then show the filtered spectrum
    and its inverse transform.  Blocks until a key is pressed.

    Uses range() instead of the Python-2-only xrange(); dropped the unused
    sz/center locals and stale commented-out code.
    """
    fftimg = self.PrepareFFT().copy()
    for i in range(0, 512):
        for j in range(0, 512):
            fftimg[i, j] *= - 0.00001* (np.float64(i - 256) ** 2.0 + np.float64(j - 256) ** 2.0)
    ifft = self.GetIFFT(fftimg)
    cv2.namedWindow("fft")
    cv2.imshow("fft", np.real(fftimg))
    cv2.namedWindow("ifft")
    cv2.imshow("ifft", ifft)
    cv2.waitKey(0)
Example 25
Project: NGImageProcessor   Author: artzers   File: ImageNoiseGenerator.py    (license) View Source Project 6 votes vote down vote up
def GuassNoiseDemo(self, img, miu, theta, noiseNum):
    """Show *img*, a Gaussian noise field (mean *miu*, sigma *theta*,
    *noiseNum* samples), and their sum side by side.  Blocks on a key press.
    """
    sz = img.shape
    noise_field = self.GuassNoise(img, miu, theta, noiseNum)
    noisy = img + noise_field

    # Create all windows first, then show — same call order as before.
    cv2.namedWindow("lena")
    cv2.namedWindow("gauss noise")
    cv2.namedWindow("dst")
    cv2.imshow("lena", img)
    cv2.imshow("gauss noise", np.uint8(noise_field))
    cv2.imshow("dst", np.uint8(noisy))
    cv2.waitKey(0)
Example 26
Project: ssd_tensorflow   Author: seann999   File: trainer.py    (license) View Source Project 6 votes vote down vote up
def evaluate_images():
    """Run the SSD model over single-image COCO test batches forever,
    drawing predicted boxes/confidences in the "outputs" window.
    """
    ssd = SSD()

    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    loader = coco.Loader(False)
    test_batches = loader.create_batches(1, shuffle=True)
    global i2name
    i2name = loader.i2name

    while True:
        # next() works on both Py2 and Py3 (was the Py2-only .next()).
        batch = next(test_batches)
        imgs, anns = loader.preprocess_batch(batch)
        pred_labels_f, pred_locs_f, step = ssd.sess.run(
            [ssd.pred_labels, ssd.pred_locs, ssd.global_step],
            feed_dict={ssd.imgs_ph: imgs, ssd.bn: False})
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0])
        # wait=0: block on each image until a key is pressed.
        draw_outputs(imgs[0], boxes_, confidences_, wait=0)
Example 27
Project: ssd_tensorflow   Author: seann999   File: trainer.py    (license) View Source Project 6 votes vote down vote up
def show_webcam(address):
    """Stream frames from the webcam at *address* through the SSD detector,
    drawing boxes/confidences (carried across frames for smoothing) in the
    "outputs" window.
    """
    global i2name

    cam = webcam.WebcamStream(address)
    cam.start_stream_threads()

    ssd = SSD()

    i2name = pickle.load(open("i2name.p", "rb"))

    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)

    boxes_, confidences_ = None, None

    while True:
        sample = cam.image
        resized_img = skimage.transform.resize(sample, (image_size, image_size))

        pred_labels_f, pred_locs_f = ssd.sess.run(
            [ssd.pred_labels, ssd.pred_locs],
            feed_dict={ssd.imgs_ph: [resized_img], ssd.bn: False})

        # Previous boxes/confidences are fed back in for temporal smoothing.
        boxes_, confidences_ = matcher.format_output(
            pred_labels_f[0], pred_locs_f[0], boxes_, confidences_)

        # Map boxes from network input scale back onto the raw frame.
        resize_boxes(resized_img, sample, boxes_)
        draw_outputs(np.asarray(sample) / 255.0, boxes_, confidences_, wait=10)
Example 28
Project: Face-Recognition-for-Mobile-Robot   Author: gagolucasm   File: libs.py    (license) View Source Project 6 votes vote down vote up
def gui():
    """Render a 6x4 gallery of face thumbnails ("faces/caraN.JPEG") with
    captions into the 'GUI' window (non-blocking; the caller is expected to
    pump cv2.waitKey).
    """
    size=100
    img = np.zeros((1000,700,3), np.uint8)
    cv2.namedWindow('GUI')
    xmar=ymar=50  # outer margins / cell spacing
    for i in range(6):
        for j in range(4):
            # NOTE(review): index i+j+1 repeats faces across rows (row 0
            # col 1 and row 1 col 0 both load cara2) — i*4+j+1 may have
            # been intended; confirm.
            img1 = cv2.imread("faces/cara"+str(i+j+1)+".JPEG")
            img1=resize(img1,width = size,height=size)
            # Paste at the cell; fall back to the thumbnail's real extent
            # when resize did not yield an exact 100x100 image.
            if (img1.shape[0] == 100 and img1.shape[1] == 100):
                img[ymar:ymar+size, xmar+(j*(size+xmar)):xmar+(j*(size+xmar)+size)] = img1
            else:
                img[ymar:ymar+img1.shape[0], xmar+(j*(size+xmar)):xmar+(j*(size+xmar)+img1.shape[1])] = img1
        ymar+=150  # advance to the next row of cells
    cv2.putText(img, "Presiona Q para salir", (5, 25),cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
    cv2.putText(img, "TFG Lucas Gago", (500, 925),cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
    cv2.putText(img, "Version 3", (500, 950),cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255))
    cv2.imshow('GUI',img) 
Example 29
Project: atari-human-checkpoint-replay   Author: ionelhosu   File: emulator.py    (license) View Source Project 6 votes vote down vote up
def __init__(self, rom_name, vis, windowname='preview'):
    """Wrap the Arcade Learning Environment around one ROM.

    Parameters: rom_name — ROM filename under 'roms/'; vis — when truthy,
    open a preview window on a dedicated window thread; windowname —
    preview window title.
    """
    self.ale = ALEInterface()
    self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode")
    self.ale.setInt("random_seed", 123)
    self.ale.setInt("frame_skip", 4)
    self.ale.loadROM('roms/' + rom_name)

    self.legal_actions = self.ale.getMinimalActionSet()
    self.windowname = windowname
    # Map each legal ALE action to its index in the minimal action set.
    self.action_map = dict()
    for idx, action in enumerate(self.legal_actions):
        self.action_map[action] = idx
    self.init_frame_number = 0

    self.screen_width, self.screen_height = self.ale.getScreenDims()
    print("width/height: " + str(self.screen_width) + "/" + str(self.screen_height))
    self.vis = vis
    if vis:
        cv2.startWindowThread()
        cv2.namedWindow(self.windowname)
Example 30
Project: CameraTablet   Author: dmvlasenk   File: save_template_file.py    (license) View Source Project 6 votes vote down vote up
def setFingerTemplate(big_image, name_template_file):
    """Interactively crop a finger template out of *big_image*.

    The module-level mouse callback `save_corners` records two clicked
    corners into the globals corners_x/corners_y and draws feedback into
    the shared `add_frame` overlay.  Press 's' (with two corners chosen)
    to write the crop to *name_template_file*, or Esc to abort.
    """
    global add_frame
    name_window = 'big image'
    cv2.namedWindow(name_window)
    cv2.setMouseCallback(name_window,save_corners)
    # Overlay buffer the mouse callback draws the selection rectangle into.
    add_frame = np.zeros(big_image.shape, np.uint8) 
    while(True):
        # Composite the overlay onto the image for display each frame.
        frame_with_rect = cv2.add(add_frame, big_image)
        cv2.imshow(name_window,frame_with_rect)
        cur_key = cv2.waitKey(1)
        if cur_key == 27:
            break
        if cur_key == ord('s') and (len(corners_x) == 2):
            # Crop rows by the clicked y range and columns by the x range.
            template_img = big_image[corners_y[0]:corners_y[1], corners_x[0]:corners_x[1]]
            cv2.imwrite(name_template_file,template_img)
            break
    cv2.destroyAllWindows() 
Example 31
Project: CameraTablet   Author: dmvlasenk   File: setCameraPropertiesScript.py    (license) View Source Project 6 votes vote down vote up
def setCameraProperties():
    """Open the default camera's driver settings dialog and preview the
    (180°-flipped) feed until Esc is pressed.
    """
    name_window = 'Press esc after finish'
    cv2.namedWindow(name_window)
    cap = cv2.VideoCapture(0)
    # Pops up the camera driver's property/settings dialog.
    cap.set(cv2.CAP_PROP_SETTINGS, 0)
    ret, frame_from = cap.read()  # warm-up read

    while cap.isOpened():
        ret, frame_from = cap.read()
        # Flip both axes (rotate 180°) — camera is mounted upside down,
        # presumably; confirm against the hardware setup.
        frame = cv2.flip(frame_from, -1)
        if ret:
            cv2.imshow(name_window, frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break
        else:
            break
    # Release everything once the job is finished.
    cap.release()
    cv2.destroyAllWindows()
Example 32
Project: CameraTablet   Author: dmvlasenk   File: saveTemplateFileScript.py    (license) View Source Project 6 votes vote down vote up
def getFrameFromCamera():
    """Let the user pick a single frame from the default camera.

    Shows a live preview flipped on both axes; pressing 's' selects the
    current frame, Esc aborts.

    Returns:
        The selected BGR frame (numpy array), or ``[]`` if aborted or the
        capture failed.
    """
    name_window = 'camera window'
    cv2.namedWindow(name_window)
    cap = cv2.VideoCapture(0)
    # Prime the capture with one throwaway frame.
    ret, frame_from = cap.read()
    output = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # Bug fix: the original called cv2.flip(frame, -1) before
            # checking `ret`, crashing on a failed read instead of exiting.
            break
        frame = cv2.flip(frame, -1)
        cv2.imshow(name_window, frame)
        cur_key = cv2.waitKey(1)
        if cur_key == 27:  # Esc: abort without selecting
            break
        if cur_key == ord('s'):  # keep this frame
            output = frame
            break
    # Release everything if job is finished
    cap.release()
    cv2.destroyAllWindows()
    return output
Example 33
Project: SmartCap   Author: TusharChugh   File: camera_image.py    (license) View Source Project 6 votes vote down vote up
def run_main():
    """Warm up the camera, then save one image per second forever.

    Relies on module-level names ``camera_port``, ``ramp_frames``,
    ``get_image`` and ``take_save_image`` (defined elsewhere in the file).
    Python 2 syntax (``xrange``, ``except Exception, e``, print
    statements).  Runs until externally interrupted.
    """
    # First initialize the camera capture object with the cv2.VideoCapture class.
    camera = cv2.VideoCapture(camera_port)
    
    #To create a new window for display
    #Disabled it for now as we can't see the image on the cap
    #cv2.namedWindow(display, cv2.WINDOW_NORMAL)	

    # Discard the first few frame to adjust the camera light levels
    for i in xrange(ramp_frames):
        temp = get_image(camera)

    #Now take the image and save it after every 1 second
    while(1):
        try:
            time.sleep(1)
            take_save_image(camera)
        except Exception, e:
            # Best-effort loop: log the error and keep capturing.
            print "Exception occured \n"
            print e
            pass 
Example 34
Project: vae-style-transfer   Author: sunsided   File: extract_tiles.py    (license) View Source Project 6 votes vote down vote up
def load_images(queue: PriorityQueue,
                source: int,
                file_path: str,
                target_width: int,
                target_height: int,
                display_progress: bool = False):
    """Recursively load every ``*.jpg`` under *file_path* into *queue*.

    Each image is resized to (target_width, target_height) and enqueued as
    ``(random_priority, (image, source))``; the random priority shuffles
    consumption order across producers.  When *display_progress* is true,
    each tile is previewed and Esc stops the scan early.
    """
    window = 'image'
    if display_progress:
        cv2.namedWindow(window)

    pattern = path.join(file_path, '**', '*.jpg')
    for image_file in iglob(pattern, recursive=True):
        img = cv2.imread(image_file)
        img = cv2.resize(img, (target_width, target_height),
                         interpolation=cv2.INTER_AREA)
        queue.put((random(), (img, source)))

        if display_progress:
            cv2.imshow(window, img)
            if (cv2.waitKey(33) & 0xff) == 27:  # Esc
                break

    if display_progress:
        cv2.destroyWindow(window)
Example 35
Project: CVMazeRunner   Author: M-Niedoba   File: setup.py    (license) View Source Project 6 votes vote down vote up
def setupWindow():
    filename = getUserSelectedImage()
    imageProcessor = ImageProcessor(cv2.imread(filename,0))
    colourImage = cv2.imread(filename,1)
    image = imageProcessor.getThresholdedImage(False)
    granularity = imageProcessor.get_granularity(image, 100)
    print("Granularity: {0}".format(granularity))
    start_x,start_y,end_x,end_y = get_start_points(image)
    image = imageProcessor.encloseMaze(image)
    mazerunner = MazeSolver.MazeSolver(image,granularity)
    solution = mazerunner.solveMaze(start_x,start_y,end_x,end_y)
    if(not solution):
        cv2.imshow(MAZE_NAME,image)
    else:
        solvedImage = draw_solution(solution, colourImage)
        solvedImage = imageProcessor.mark_point((end_x,end_y),3,(255,0,0),solvedImage)
        solvedImage = imageProcessor.mark_point((start_x,start_y),3,(255,0,0),solvedImage)
        window = cv2.namedWindow("Solved Image", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Solved Image", 900,900)
        cv2.moveWindow("Solved Image",100,100)
        cv2.imshow("Solved Image",solvedImage)
    print "Press any key to exit"
    cv2.waitKey(0)
    cv2.destroyAllWindows 
Example 36
Project: virtual-dressing-room   Author: akash0x53   File: Tshirt.py    (license) View Source Project 6 votes vote down vote up
def __init__(self):
        """Allocate working buffers for colour detection on 800x600 frames.

        The ``b/g/r`` and ``lb/lg/lr`` attributes presumably hold upper and
        lower per-channel thresholds set later by the (commented-out)
        trackbar UI -- confirm against the rest of the class.
        """
        # Normalized colour frame and single-channel destination buffer.
        self.norm_rgb=np.zeros((600,800,3),np.uint8)
        self.dst=np.zeros((600,800),np.uint8)
        # Per-channel values (upper bounds, presumably -- see docstring).
        self.b=0
        self.g=0
        self.r=0
        
        # Per-channel lower-bound values.
        self.lb=0
        self.lg=0
        self.lr=0
        
        # Binary mask buffer.
        self.m=np.zeros((600,800),np.uint8)
        # Original trackbar-based calibration UI, kept for reference:
        #self.win=cv2.namedWindow("detect")
        #self.dst=cv.CreateImage((800,600),8,1)
        
        #cv2.createTrackbar("blue", "detect",0,255,self.change_b)
        #cv2.createTrackbar("green","detect",0,255,self.change_g)
        #cv2.createTrackbar("red","detect",0,255,self.change_r)
        
        #cv2.createTrackbar("low_blue", "detect",0,255,self.change_lb)
        #cv2.createTrackbar("low_green","detect",0,255,self.change_lg)
        #cv2.createTrackbar("low_red","detect",0,255,self.change_lr) 
Example 37
Project: virtual-dressing-room   Author: akash0x53   File: color_replace.py    (license) View Source Project 6 votes vote down vote up
def __init__(self):
        """Allocate 800x600 frame/mask buffers and seed the hue from the
        module-level ``color`` global."""
        global color
        # Colour frame, scratch canvas, and single-channel mask.
        self.rgb = np.zeros((600, 800, 3), np.uint8)
        self.scratch = np.zeros((600, 800, 3), np.uint8)
        self.mask = np.zeros((600, 800), np.uint8)
        self.hue_val = color

        # Original trackbar-based hue calibration, kept for reference:
        #cv2.namedWindow("hue")
        #cv2.createTrackbar("hue", "hue",self.hue_val,255,self.change) 
Example 38
Project: moVi   Author: netsecIITK   File: frame.py    (Apache License 2.0) View Source Project 5 votes vote down vote up
def __init__(self, name):
        """Create the OpenCV window up front and remember its title."""
        cv2.namedWindow(name)
        self.frame_name = name
Example 39
Project: pybot   Author: spillai   File: imshow_utils.py    (license) View Source Project 5 votes vote down vote up
def trackbar_create(label, win_name, v, maxv, scale=1.0):
    """Register a trackbar in the module-level ``trackbars`` table and
    attach it to *win_name* with range [0, *maxv*] and initial value *v*.

    Raises:
        RuntimeError: if *label* was already registered.
    """
    global trackbars
    if label in trackbars:
        raise RuntimeError('Duplicate key. %s already created' % label)
    trackbars[label] = {'label': label, 'win_name': win_name,
                        'value': v, 'scale': scale}
    cv2.namedWindow(win_name)
    cv2.createTrackbar(label, win_name, v, maxv, trackbar_update)
Example 40
Project: pybot   Author: spillai   File: imshow_utils.py    (license) View Source Project 5 votes vote down vote up
def mouse_event_create(win_name, cb): 
    """Ensure window *win_name* exists, then route its mouse events to *cb*."""
    cv2.namedWindow(win_name)
    cv2.setMouseCallback(win_name, cb) 
Example 41
Project: opencv-gui-helper-tool   Author: maunesh   File: guiutils.py    (MIT License) View Source Project 5 votes vote down vote up
def __init__(self, image, filter_size=1, threshold1=0, threshold2=0):
        """Interactive edge-detection parameter tuner.

        Opens an 'edges' window with trackbars for two thresholds and a
        smoothing filter size (presumably feeding Canny -- ``_render`` is
        defined elsewhere in this class; confirm).  Every trackbar change
        re-renders.  Blocks in ``cv2.waitKey(0)`` until a key is pressed,
        then closes 'edges' and 'smoothed' (the latter presumably created
        by ``_render``).  Python 2 print statement in the body.
        """
        self.image = image
        self._filter_size = filter_size
        self._threshold1 = threshold1
        self._threshold2 = threshold2

        def onchangeThreshold1(pos):
            self._threshold1 = pos
            self._render()

        def onchangeThreshold2(pos):
            self._threshold2 = pos
            self._render()

        def onchangeFilterSize(pos):
            # Force an odd filter size: even positions are bumped up by
            # one, odd positions are left unchanged.
            self._filter_size = pos
            self._filter_size += (self._filter_size + 1) % 2
            self._render()

        cv2.namedWindow('edges')

        cv2.createTrackbar('threshold1', 'edges', self._threshold1, 255, onchangeThreshold1)
        cv2.createTrackbar('threshold2', 'edges', self._threshold2, 255, onchangeThreshold2)
        cv2.createTrackbar('filter_size', 'edges', self._filter_size, 20, onchangeFilterSize)

        self._render()

        print "Adjust the parameters as desired.  Hit any key to close."

        cv2.waitKey(0)

        cv2.destroyWindow('edges')
        cv2.destroyWindow('smoothed') 
Example 42
Project: CE264-Computer_Vision   Author: RobinCPC   File: gesture_hci.py    (license) View Source Project 5 votes vote down vote up
def __init__(self, video_src):
        """Set up the gesture-based HCI controller.

        Opens the capture device *video_src*, creates the main display
        window, and initializes skin-detection bounds, a background
        subtractor, ROI-tracking state, a gesture-command queue, and
        template images for single-finger detection.

        NOTE(review): ``cv2.BackgroundSubtractorMOG2(...)`` is the OpenCV
        2.4-era constructor; newer cv2 releases spell it
        ``createBackgroundSubtractorMOG2`` -- confirm the pinned version.
        """
        self.cam = cv2.VideoCapture(video_src)
        ret, self.frame = self.cam.read()
        cv2.namedWindow('gesture_hci')
        
        # set channel range of skin detection 
        self.mask_lower_yrb = np.array([44, 131, 80])       # [54, 131, 110]
        self.mask_upper_yrb = np.array([163, 157, 155])     # [163, 157, 135]
        # create trackbar for skin calibration
        self.calib_switch = False

        # create background subtractor 
        self.fgbg = cv2.BackgroundSubtractorMOG2(history=120, varThreshold=50, bShadowDetection=True)

        # define dynamic ROI area
        self.ROIx, self.ROIy = 200, 200
        self.track_switch = False
        # record previous positions of the centroid of ROI
        self.preCX = None
        self.preCY = None

        # A queue to record last couple gesture command
        self.last_cmds = FixedQueue()
        
        # prepare some data for detecting single-finger gesture  
        self.fin1 = cv2.imread('./test_data/index1.jpg')
        self.fin2 = cv2.imread('./test_data/index2.jpg')
        self.fin3 = cv2.imread('./test_data/index3.jpg')

        # switch to turn on mouse input control
        self.cmd_switch = False
        
        # count loop (frame), for debugging
        self.n_frame = 0


# On-line Calibration for skin detection (bug, not stable) 
Example 43
Project: camera_calibration_frontend   Author: groundmelon   File: tarfile_calibration.py    (license) View Source Project 5 votes vote down vote up
def display(win_name, img):
    """Show *img* in a resizable window and block until a key is pressed.

    Esc or 'q' additionally requests a ROS shutdown.  ``waitkey`` is a
    module-level helper defined elsewhere in this file.
    """
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.imshow(win_name, numpy.asarray(img[:, :]))
    key = -1
    while key == -1:
        key = waitkey()
    cv2.destroyWindow(win_name)
    if key in (27, ord('q')):
        rospy.signal_shutdown('Quit')
Example 44
Project: camera_calibration_frontend   Author: groundmelon   File: kalibr_camera_focus.py    (license) View Source Project 5 votes vote down vote up
def __init__(self, topic):
        """Subscribe to the ROS image *topic* and open a preview window for it."""
        self.topic = topic
        self.windowNameOrig = "Camera: {0}".format(self.topic)
        # NOTE(review): magic window flag 2 is not a named cv2 constant in
        # current builds -- confirm the intended window mode.
        cv2.namedWindow(self.windowNameOrig, 2)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber(self.topic, Image, self.callback) 
Example 45
Project: PyIntroduction   Author: tody411   File: display_image.py    (license) View Source Project 5 votes vote down vote up
def cvShowImageColor(image_file):
    """Load *image_file* in colour (BGR) and display it until a key is pressed."""
    bgr = cv2.imread(image_file)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', bgr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Grayscale image display with OpenCV
Example 46
Project: PyIntroduction   Author: tody411   File: display_image.py    (license) View Source Project 5 votes vote down vote up
def cvShowImageGray(image_file):
    """Load *image_file* as grayscale and display it until a key is pressed."""
    gray = cv2.imread(image_file, 0)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Image display with Matplotlib
Example 47
Project: PyIntroduction   Author: tody411   File: drawing.py    (license) View Source Project 5 votes vote down vote up
def drawingDemo():
    """Draw one of each primitive onto a blank canvas and display it.

    Uses the module-level wrappers (``drawLine``, ``drawCircle``, ...)
    and ``font_types`` defined elsewhere in this file.
    """
    canvas = emptyImage()

    # A line of thickness 2.
    drawLine(canvas, (10, 10), (200, 200), (0, 0, 255), 2)

    # A thickness of -1 draws the shape filled.
    drawCircle(canvas, (300, 100), 80, (0, 255, 0), -1)

    # A filled rectangle with a border drawn on top of it.
    drawRectangle(canvas, (10, 210), (210, 350), (100, 100, 0), -1)
    drawRectangle(canvas, (10, 210), (210, 350), (255, 0, 0), 3)

    # A filled ellipse.
    drawElipse(canvas, (450, 100), (30, 80), 0, 0, 360, (0, 100, 100), -1)

    # A closed polyline.
    polygon = np.array([[(250, 240), (270, 280), (350, 320), (500, 300), (450, 230), (350, 210)]], dtype=np.int32)
    drawPolylines(canvas, polygon, True, (255, 100, 100), 5)

    # Text.
    drawText(canvas, 'OpenCV', (20, 450), font_types[0], 4, (200, 200, 200), 2)

    cv2.namedWindow('DrawingDemo', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('DrawingDemo', canvas)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example 48
Project: lan-ichat   Author: Forec   File: vchat.py    (license) View Source Project 5 votes vote down vote up
def run(self):
        """Receive, decode and display the remote video stream.

        Retries the connection every 3 s until it succeeds, waits for a
        one-byte "S" authentication ack, then loops: read a struct-packed
        length prefix, accumulate that many bytes, zlib-decompress,
        unpickle the frame and show it.  Esc closes the viewer.

        NOTE(review): struct format "L" uses the platform's *native* size
        (4 or 8 bytes), so sender and receiver must match; "!L" would be
        portable, but changing it here alone would break the existing peer.
        NOTE(review): pickle.loads on network data is unsafe if the peer
        is untrusted.
        """
        print ("VEDIO server starts...")
        while True:
            try:
                self.sock.connect(self.ADDR)
                break
            except:
                # Peer not up yet; retry after a short delay.
                time.sleep(3)
                continue
        print ("video server <-> remote server success connected...")
        check = "F"
        check = self.sock.recv(1)
        if check.decode("utf-8") != "S":
            return
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            # Accumulate until the fixed-size length prefix is complete.
            while len(data) < payload_size:
                data += self.sock.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            # Accumulate the full compressed frame payload.
            while len(data) < msg_size:
                data += self.sock.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            try:
                # Best-effort display; a malformed frame is simply skipped.
                cv2.imshow('Remote', frame)
            except:
                pass
            if cv2.waitKey(1) & 0xFF == 27:
                break 
Example 49
Project: lan-ichat   Author: Forec   File: vchat.py    (license) View Source Project 5 votes vote down vote up
def run(self):
        """Capture video, optionally preview it locally, and stream it out.

        Retries connecting every 3 s, waits for the one-byte "S" ack, then
        reads frames from a test video file, previews them while
        ``self.showme`` is set (Esc hides the preview), optionally scales
        by ``self.fx`` when ``self.level > 0``, and sends each frame
        pickled + zlib-compressed behind a native-size "L" length prefix.
        Skips ``self.interval`` frames between sends to throttle the rate.

        NOTE(review): the "L" struct size must match the receiving side's
        platform -- see the matching receive loop in this file.
        """
        print ("VEDIO client starts...")
        while True:
            try:
                self.sock.connect(self.ADDR)
                break
            except:
                # Server not up yet; retry after a short delay.
                time.sleep(3)
                continue
        print ("video client <-> remote server success connected...")
        check = "F"
        check = self.sock.recv(1)
        if check.decode("utf-8") != "S":
            return
        print ("receive authend")
        #self.cap = cv2.VideoCapture(0)
        self.cap = cv2.VideoCapture("test.mp4")
        if self.showme:
            cv2.namedWindow('You', cv2.WINDOW_NORMAL)
        print ("remote VEDIO client connected...")
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if self.showme:
                cv2.imshow('You', frame)
                if cv2.waitKey(1) & 0xFF == 27:
                    # Esc turns off the local preview only; streaming continues.
                    self.showme = False
                    cv2.destroyWindow('You')
            if self.level > 0:
                frame = cv2.resize(frame, (0,0), fx=self.fx, fy=self.fx)
            data = pickle.dumps(frame)
            zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
            try:
                self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
                print("video send ", len(zdata))
            except:
                # Connection lost; stop streaming.
                break
            # Drop frames between sends to throttle the outgoing rate.
            for i in range(self.interval):
                self.cap.read() 
Example 50
Project: lan-ichat   Author: Forec   File: vchat.py    (license) View Source Project 5 votes vote down vote up
def run(self):
        while True:
            try:
                self.sock.connect(self.ADDR)
                break
            except:
                time.sleep(3)
                continue
        if self.showme:
            cv2.namedWindow('You', cv2.WINDOW_NORMAL)
        print("VEDIO client connected...")
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if self.showme:
                cv2.imshow('You', frame)
                if cv2.waitKey(1) & 0xFF == 27:
                    self.showme = False
                    cv2.destroyWindow('You')
            sframe = cv2.resize(frame, (0,0), fx=self.fx, fy=self.fx)
            data = pickle.dumps(sframe)
            zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
            try:
                self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
            except:
                break
            for i in range(self.interval):
                self.cap.read()