Python cv2.startWindowThread() Examples

The following are 20 code examples of cv2.startWindowThread(), collected from open-source projects; the source file and project for each example are noted above it. cv2.startWindowThread() tells HighGUI to service window events on a separate thread, so windows can repaint and close properly even when waitKey() is not called regularly (the effect depends on the GUI backend; on some backends the call is a no-op). You may also want to check out the other available functions and classes of the cv2 module.
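A minimal sketch of the pattern most of these examples follow: start the window thread, create the window, show an image, and pump one extra waitKey() after destroyAllWindows() so the close event is processed. The file name and window title here are illustrative.

import cv2

cv2.startWindowThread()
cv2.namedWindow("preview")

img = cv2.imread("example.jpg")  # illustrative path
if img is not None:
    cv2.imshow("preview", img)
    cv2.waitKey(0)  # block until a key is pressed

cv2.destroyAllWindows()
cv2.waitKey(1)  # let HighGUI process the window-close event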
Example #1
Source File: homography.py    From nelpy with MIT License
def pick_corrs(images, n_pts_to_pick=4):
    # Per-image state handed to the mouse callback:
    # [picked points, click count, three picker flags, image, window name, points to pick]
    data = [[[], 0, False, False, False, image, "Image %d" % i, n_pts_to_pick]
            for i, image in enumerate(images)]

    for d in data:
        win_name = d[6]
        cv2.namedWindow(win_name)
        # corr_picker_callback is defined elsewhere in homography.py
        cv2.setMouseCallback(win_name, corr_picker_callback, d)
        cv2.startWindowThread()
        cv2.imshow(win_name, d[5])

    # Poll until Enter/Return (accept) or 'q' (abort) is pressed.
    key = None
    while key != '\n' and key != '\r' and key != 'q':
        key = cv2.waitKey(33)
        key = chr(key & 255) if key >= 0 else None

    cv2.destroyAllWindows()

    if key == 'q':
        return None
    else:
        return [d[0] for d in data] 
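The snippet relies on corr_picker_callback, which is defined elsewhere in homography.py and not shown here. A hypothetical minimal callback with the same calling convention (OpenCV passes the per-image state list as the user-data argument) might look like this sketch; it is not the project's actual implementation.

def corr_picker_callback(event, x, y, flags, d):
    # d is one per-image state list from pick_corrs; d[0] collects the
    # picked points and d[7] is the number of points to pick.
    if event == cv2.EVENT_LBUTTONDOWN and len(d[0]) < d[7]:
        d[0].append((x, y))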
Example #2
Source File: preview.py    From rtsp with MIT License
def preview_stream(stream):
    """ Display stream in an OpenCV window until "q" key is pressed """
    # Together with the later waitKey() calls, this helps close the video window cleanly
    _cv2.startWindowThread()
    
    for frame in stream.frame_generator():
        if frame is not None:
            _cv2.imshow('Video', frame)
            _cv2.moveWindow('Video', 5, 5)
        else:
            break
        key = _cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    _cv2.waitKey(1)
    _cv2.destroyAllWindows()
    _cv2.waitKey(1) 
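preview_stream expects an object exposing a frame_generator() method. A minimal stand-in built on cv2.VideoCapture could look like the sketch below; the CaptureStream class and the default source index are illustrative assumptions, not part of the rtsp package.

import cv2 as _cv2

class CaptureStream:
    """Illustrative stand-in for the rtsp stream object used above."""
    def __init__(self, src=0):
        self._cap = _cv2.VideoCapture(src)  # 0 = default webcam

    def frame_generator(self):
        # Yield frames until the capture fails, then yield None so the
        # preview loop above can break out cleanly.
        while True:
            ok, frame = self._cap.read()
            yield frame if ok else None

# usage: preview_stream(CaptureStream())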
Example #3
Source File: emulator.py    From fathom with Apache License 2.0
def __init__(self, rom_name, vis, frameskip=1, windowname='preview'):
    self.ale = ALEInterface()
    self.max_frames_per_episode = self.ale.getInt("max_num_frames_per_episode");
    self.ale.setInt("random_seed",123)
    self.ale.setInt("frame_skip",frameskip)
    romfile = str(ROM_PATH)+str(rom_name)
    if not os.path.exists(romfile):
      print('No ROM file found at "' + romfile + '".\nAdjust ROM_PATH or double-check the file exists.')
    self.ale.loadROM(romfile)
    self.legal_actions = self.ale.getMinimalActionSet()
    self.action_map = dict()
    self.windowname = windowname
    for i in range(len(self.legal_actions)):
      self.action_map[self.legal_actions[i]] = i

    # print(self.legal_actions)
    self.screen_width,self.screen_height = self.ale.getScreenDims()
    print("width/height: " +str(self.screen_width) + "/" + str(self.screen_height))
    self.vis = vis
    if vis:
      cv2.startWindowThread()
      cv2.namedWindow(self.windowname, flags=cv2.WINDOW_AUTOSIZE)  # size the window to the image; manual resizing is disabled 
Example #4
Source File: sensor.py    From pypot with GNU General Public License v3.0
def run(self):
        cv2.startWindowThread()
        while True:
            img = numpy.zeros((480, 640, 3))
            skeleton = kinect.tracked_skeleton
            if skeleton:
                for user, skel in skeleton.items():
                    for joint_name in skel.joints:
                        x, y = getattr(skel, joint_name).pixel_coordinate
                        pt = (int(x), int(y))
                        cv2.circle(img, pt, 5, (255, 255, 255), thickness=-1)
                kinect.remove_all_users()
            cv2.imshow('Skeleton', img)
            cv2.waitKey(50)

        # NOTE: the loop above never exits, so this cleanup is unreachable as written.
        self.sub_skel.close()
        self.context.term() 
Example #5
Source File: gen_sentence_with_emoticons.py    From Real-Time-Facial-Expression-Recognition-with-DeepLearning with MIT License
def main():
    '''
    Arguments to be set:
        showCam : determine if show the camera preview screen.
    '''
    print("Enter main() function")
    
    capture = getCameraStreaming()

    cv2.startWindowThread()
    cv2.namedWindow(windowsName, cv2.WINDOW_NORMAL)
    cv2.setWindowProperty(windowsName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    
    while True:
        recContent = speechRecognition()
        if recContent is not None:
            emotion = showScreenAndDectect(capture)
            if emotion == "Angry":
                emoji = " >:O"
            elif emotion == "Fear":
                emoji = " :-S"
            elif emotion == "Happy":
                emoji = " :-D"
            elif emotion == "Sad":
                emoji = " :'("
            elif emotion == "Surprise":
                emoji = " :-O"
            else:
                emoji = " "
            print("Output result: " + recContent + emoji) 
Example #6
Source File: webcam_detection.py    From Real-Time-Facial-Expression-Recognition-with-DeepLearning with MIT License
def main():
    '''
    Arguments to be set:
        showCam : determine if show the camera preview screen.
    '''
    print("Enter main() function")
    
    if args.testImage is not None:
        img = cv2.imread(args.testImage)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, FACE_SHAPE)
        # NOTE: 'result' should come from running the classifier on img;
        # the prediction call is missing from this snippet.
        print(class_label[result[0]])
        sys.exit(0)

    showCam = 1

    capture = getCameraStreaming()

    if showCam:
        cv2.startWindowThread()
        cv2.namedWindow(windowsName, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(windowsName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    
    showScreenAndDectect(capture) 
Example #7
Source File: main.py    From Stereo-Pose-Machines with GNU General Public License v2.0
def dump_2dcoor():
    camera = libcpm.Camera()
    camera.setup()
    runner = get_parallel_runner('../data/cpm.npy')
    cv2.namedWindow('color')
    cv2.startWindowThread()
    cnt = 0
    while True:
        cnt += 1
        m1 = camera.get_for_py(0)
        m1 = np.array(m1, copy=False)
        m2 = camera.get_for_py(1)
        m2 = np.array(m2, copy=False)

        o1, o2 = runner(m1, m2)
        pts = []
        for k in range(14):
            pts.append((argmax_2d(o1[:,:,k]),
                argmax_2d(o2[:,:,k])))
        pts = np.asarray(pts)
        np.save('pts{}.npy'.format(cnt), pts)
        cv2.imwrite("frame{}.png".format(cnt), m1);
        if cnt == 10:
            break 
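dump_2dcoor calls an argmax_2d helper that is not shown in this snippet. A hypothetical implementation that returns the (row, col) position of a heatmap's maximum could be:

import numpy as np

def argmax_2d(heatmap):
    # unravel the flat argmax back into 2-D (row, col) coordinates
    return np.unravel_index(np.argmax(heatmap), heatmap.shape)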
Example #8
Source File: main.py    From Stereo-Pose-Machines with GNU General Public License v2.0
def stereo_cpm_viewer():
    camera = libcpm.Camera()
    camera.setup()
    runner = get_parallel_runner('../data/cpm.npy')
    cv2.namedWindow('color')
    cv2.startWindowThread()
    cnt = 0
    while True:
        cnt += 1
        m1 = camera.get_for_py(0)
        m1 = np.array(m1, copy=False)
        m2 = camera.get_for_py(1)
        m2 = np.array(m2, copy=False)

        m1s = cv2.resize(m1, (368,368))
        m2s = cv2.resize(m2, (368,368))

        o1, o2 = runner(m1s, m2s)

        #buf = dumps([m1, m2, o1, o2])
        #f = open('recording/{:03d}.npy'.format(cnt), 'w')
        #f.write(buf)
        #f.close()

        c1 = colorize(m1, o1[:,:,:-1].sum(axis=2))
        c2 = colorize(m2, o2[:,:,:-1].sum(axis=2))
        viz = np.concatenate((c1, c2), axis=1)
        cv2.imshow('color', viz / 255.0) 
Example #9
Source File: Utils_Image.py    From Tensorflow_Object_Tracking_Video with MIT License
def resizeImage(file_path):
    # Resize, crop, and pad an image to the 640x480 pixel size
    if file_path != -1:
        if check_image_with_pil(file_path):
            image = Image.open(file_path)
            image.thumbnail(size, Image.ANTIALIAS)
            image_size = image.size

            padding_0 = max( (size[0] - image_size[0]) / 2, 0 )
            padding_1 = max( (size[1] - image_size[1]) / 2, 0 )
            cv2.namedWindow('Original Image')
            cv2.namedWindow('Resized Image')
            cv2.startWindowThread()
            orig_img = cv2.imread(file_path, 0)
            cv2.imshow('Original Image',orig_img)
            cv2.waitKey(2)

            if padding_0 == 0 and padding_1 == 0:
                image.save(file_path, img_save_type)
            else:
                thumb = image.crop( (0, 0, size[0], size[1]) )
                thumb = ImageChops.offset(thumb, int(padding_0), int(padding_1))
                thumb.save(file_path)

            resized_img = cv2.imread(file_path, 0)
            cv2.imshow('Resized Image',resized_img)
    else:
        cv2.destroyAllWindows()
        cv2.waitKey(2) 
Example #10
Source File: display_video.py    From detectron2-pipeline with MIT License
def __init__(self, src, window_name=None, org=None):
        self.src = src
        self.window_name = window_name if window_name else src

        cv2.startWindowThread()
        cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE)
        if org:
            # Set the window position
            x, y = org
            cv2.moveWindow(self.window_name, x, y)

        super().__init__() 
Example #11
Source File: test_scripts.py    From deepface with MIT License
def test_ssd():
    face_detector = FaceDetectorSSDMobilenetV2()
    image = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "samples/blackpink/blackpink4.jpg")
    print("image path is: " + image)
    test_image = cv2.imread(image, cv2.IMREAD_COLOR)
    faces = face_detector.detect(test_image)

    for face in faces:
        cv2.rectangle(test_image, (int(face.x), int(face.y)),
                      (int(face.x + face.w), int(face.y + face.h)), (0, 255, 0), 3)

    window_name = "image"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.startWindowThread()

    cv2.imshow(window_name, test_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cv2.waitKey(1)
    print("done showing face annotated image!")

    for face in faces:
        print(face.face_landmark)

    print("done") 
Example #12
Source File: test_scripts.py    From deepface with MIT License
def test_ssd_webcam():
    cap = cv2.VideoCapture(0)
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    out = cv2.VideoWriter('ssd_output.mp4', fourcc, 60.0, (640, 480))

    face_detector = FaceDetectorSSDMobilenetV2()

    # Create the window once, outside the capture loop.
    window_name = "image"
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.startWindowThread()

    while True:
        ret, frame = cap.read()

        test_image = frame
        faces = face_detector.detect(test_image)

        for face in faces:
            cv2.rectangle(test_image, (int(face.x), int(face.y)),
                          (int(face.x + face.w), int(face.y + face.h)), (0, 255, 0), 3)

        out.write(test_image)
        cv2.imshow(window_name, test_image)

        if cv2.waitKey(5) & 0xFF == ord('q'):
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows() 
Example #13
Source File: get_augmentation.py    From AUNets with MIT License
def imshow(image):
    import cv2 as cv
    if isinstance(image, str):
        image = cv.imread(image)
    cv.startWindowThread()
    cv.namedWindow('Image', cv.WINDOW_NORMAL)
    cv.imshow('Image', image) 
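Because this helper relies on cv.startWindowThread() rather than a blocking waitKey(), a caller typically pumps events itself. An illustrative call (the image path is hypothetical):

import cv2 as cv

imshow("sample_face.jpg")  # hypothetical path
cv.waitKey(0)
cv.destroyAllWindows()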
Example #14
Source File: OF_resizeBP4D.py    From AUNets with MIT License
def imshow(image, name=0):
    import cv2 as cv
    if isinstance(image, str):
        image = cv.imread(image)
    cv.startWindowThread()
    cv.namedWindow(str(name), cv.WINDOW_NORMAL)
    cv.imshow(str(name), image) 
Example #15
Source File: display_video.py    From image-processing-pipeline with MIT License
def __init__(self, src, window_name=None, org=None):
        self.src = src
        self.window_name = window_name if window_name else src

        cv2.startWindowThread()
        cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE)
        if org:
            # Set the window position
            x, y = org
            cv2.moveWindow(self.window_name, x, y)

        super(DisplayVideo, self).__init__() 
Example #16
Source File: mylib.py    From anomaly-event-detection with MIT License
def dispOpticalFlow(Image, Flow, Divisor, name):
    """
    Display an image with a visualisation of the flow over the top.
    A divisor controls the density of the quiver plot.

    Arguments:
        Image:      Image on which to display flow lines
        Flow:       Flow vectors x and y
        Divisor:    Spacing between the arrow nodes
        name:       Name of the window
    """
    PictureShape = np.shape(Image)
    # determine the number of quiver points
    Imax = int(PictureShape[0] / Divisor)
    Jmax = int(PictureShape[1] / Divisor)
    # create a blank mask on which the lines will be drawn
    mask = np.zeros_like(Image)
    panel = np.zeros_like(Image)

    for i in range(1, Imax):
        for j in range(1, Jmax):
            X1 = i * Divisor
            Y1 = j * Divisor

            X2 = int(X1 + Flow[X1, Y1, 1])
            Y2 = int(Y1 + Flow[X1, Y1, 0])
            X2 = np.clip(X2, 0, PictureShape[0])
            Y2 = np.clip(Y2, 0, PictureShape[1])
            # add each line to the mask
            mask = cv2.arrowedLine(mask, (Y1, X1), (Y2, X2), [255, 255, 255], 1)
            # To show only the arrows:
            # cv2.namedWindow("Panel", 0)
            # panel = panel + mask
            # cv2.imshow("Panel", panel)

    # superpose the lines onto the image and display it
    img = cv2.add(Image, mask)
    cv2.startWindowThread()
    cv2.namedWindow(name, 0)
    cv2.imshow(name, img)

    return [] 
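A hedged usage sketch for dispOpticalFlow: dense flow from cv2.calcOpticalFlowFarneback (an HxWx2 array of per-pixel displacement vectors) drawn over the second frame. The file names are illustrative, and the Farneback parameters are the commonly used values from the OpenCV documentation.

import cv2
import numpy as np

prev = cv2.imread("frame0.png", cv2.IMREAD_GRAYSCALE)  # illustrative paths
curr = cv2.imread("frame1.png", cv2.IMREAD_GRAYSCALE)

# Dense optical flow: returns an HxWx2 array of (dx, dy) vectors.
flow = cv2.calcOpticalFlowFarneback(prev, curr, None,
                                    0.5, 3, 15, 3, 5, 1.2, 0)

dispOpticalFlow(cv2.cvtColor(curr, cv2.COLOR_GRAY2BGR), flow, 16, "flow")
cv2.waitKey(0)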
Example #17
Source File: emulator.py    From tensorflow-rl with Apache License 2.0
def __init__(self, rom_path, rom_name, visualize, actor_id, rseed, single_life_episodes = False):
        
        self.ale = ALEInterface()

        self.ale.setInt("random_seed", rseed * (actor_id +1))

        # For fuller control on explicit action repeat (>= ALE 0.5.0) 
        self.ale.setFloat("repeat_action_probability", 0.0)
        
        # Disable frame_skip and color_averaging
        # See: http://is.gd/tYzVpj
        self.ale.setInt("frame_skip", 1)
        self.ale.setBool("color_averaging", False)
        self.ale.loadROM(rom_path + "/" + rom_name + ".bin")
        self.legal_actions = self.ale.getMinimalActionSet()        
        self.screen_width,self.screen_height = self.ale.getScreenDims()
        #self.ale.setBool('display_screen', True)
        
        # Processed historical frames that will be fed into the network
        # (i.e., four 84x84 images)
        self.screen_images_processed = np.zeros((IMG_SIZE_X, IMG_SIZE_Y, 
            NR_IMAGES)) 
        self.rgb_screen = np.zeros((self.screen_height,self.screen_width, 3), dtype=np.uint8)
        self.gray_screen = np.zeros((self.screen_height,self.screen_width,1), dtype=np.uint8)

        self.frame_pool = np.empty((2, self.screen_height, self.screen_width))
        self.current = 0
        self.lives = self.ale.lives()

        self.visualize = visualize
        self.visualize_processed = False
        self.windowname = rom_name + ' ' + str(actor_id)
        if self.visualize:
            logger.debug("Opening emulator window...")
            #from skimage import io
            #io.use_plugin('qt')
            cv2.startWindowThread()
            cv2.namedWindow(self.windowname)
            logger.debug("Emulator window opened")
            
        if self.visualize_processed:
            logger.debug("Opening processed frame window...")
            cv2.startWindowThread()
            logger.debug("Processed frame window opened")
            cv2.namedWindow(self.windowname + "_processed")
            
        self.single_life_episodes = single_life_episodes 
Example #18
Source File: visualizer_seq.py    From MBMD with MIT License
def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories,
             checkpoint_dir, eval_dir, image_root):
    """Evaluation function for detection models.
    
    Args:
    create_input_dict_fn: a function to create a tensor input dictionary.
    create_model_fn: a function that creates a DetectionModel.
    eval_config: an eval_pb2.EvalConfig protobuf.
    categories: a list of category dictionaries. Each dict in the list should
                have an integer 'id' field and string 'name' field.
    checkpoint_dir: directory to load the checkpoints to evaluate from.
    eval_dir: directory to write evaluation metrics summary to.
    image_root: root directory of the images to evaluate on.
    """

    model = create_model_fn()
    tensor_dict = _extract_prediction_tensors(
        model=model,
        create_input_dict_fn=create_input_dict_fn,
        image_root=image_root,
        ignore_groundtruth=eval_config.ignore_groundtruth)

    def _display_batch(tensor_dict, sess):
        res_tensor = sess.run(tensor_dict)
        original_images = ((res_tensor['original_image'] + 1)/2*255).astype(np.uint8)
        gt_boxes = res_tensor['groundtruth_boxes']
        detection_box = res_tensor['detection_boxes'][0]
        cv2.namedWindow('1')
        cv2.rectangle(original_images[0,0], (gt_boxes[0][1], gt_boxes[0][0]),
                      (gt_boxes[0][3], gt_boxes[0][2]), [255,0,0], 2)
        cv2.imshow('1', original_images[0,0,:,:,-1::-1])

        cv2.namedWindow('2')
        cv2.rectangle(original_images[0, 1], (gt_boxes[1][1], gt_boxes[1][0]),
                      (gt_boxes[1][3], gt_boxes[1][2]), [255, 0, 0], 2)
        cv2.rectangle(original_images[0, 1], (detection_box[1], detection_box[0]),
                      (detection_box[3], detection_box[2]), [0, 255, 0], 2)
        cv2.imshow('2', original_images[0, 1, :, :, -1::-1])
        print("Detection Score %f"%(res_tensor['detection_scores'][0]))



    variables_to_restore = tf.global_variables()
    global_step = slim.get_or_create_global_step()
    variables_to_restore.append(global_step)
    if eval_config.use_moving_averages:
        variable_averages = tf.train.ExponentialMovingAverage(0.0)
        variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    def _restore_latest_checkpoint(sess):
        latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
        saver.restore(sess, latest_checkpoint)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    _restore_latest_checkpoint(sess)

    cv2.startWindowThread()
    for i in range(5000):
        _display_batch(tensor_dict, sess) 
Example #19
Source File: visualizer.py    From MBMD with MIT License
def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories,
             checkpoint_dir, eval_dir, image_root):
    """Evaluation function for detection models.
    
    Args:
    create_input_dict_fn: a function to create a tensor input dictionary.
    create_model_fn: a function that creates a DetectionModel.
    eval_config: an eval_pb2.EvalConfig protobuf.
    categories: a list of category dictionaries. Each dict in the list should
                have an integer 'id' field and string 'name' field.
    checkpoint_dir: directory to load the checkpoints to evaluate from.
    eval_dir: directory to write evaluation metrics summary to.
    image_root: root directory of the images to evaluate on.
    """

    model = create_model_fn()
    tensor_dict = _extract_prediction_tensors(
        model=model,
        create_input_dict_fn=create_input_dict_fn,
        image_root=image_root,
        ignore_groundtruth=eval_config.ignore_groundtruth)

    def _display_batch(tensor_dict, sess):
        res_tensor = sess.run(tensor_dict)
        original_images = ((res_tensor['original_image'] + 1)/2*255).astype(np.uint8)
        gt_boxes = res_tensor['groundtruth_boxes']
        detection_box = res_tensor['detection_boxes'][0]
        cv2.namedWindow('1')
        cv2.rectangle(original_images[0,0], (gt_boxes[0][1], gt_boxes[0][0]),
                      (gt_boxes[0][3], gt_boxes[0][2]), [255,0,0], 2)
        cv2.imshow('1', original_images[0,0,:,:,-1::-1])

        cv2.namedWindow('2')
        cv2.rectangle(original_images[0, 1], (gt_boxes[1][1], gt_boxes[1][0]),
                      (gt_boxes[1][3], gt_boxes[1][2]), [255, 0, 0], 2)
        cv2.rectangle(original_images[0, 1], (detection_box[1], detection_box[0]),
                      (detection_box[3], detection_box[2]), [0, 255, 0], 2)
        cv2.imshow('2', original_images[0, 1, :, :, -1::-1])
        print("Detection Score %f"%(res_tensor['detection_scores'][0]))



    variables_to_restore = tf.global_variables()
    global_step = slim.get_or_create_global_step()
    variables_to_restore.append(global_step)
    if eval_config.use_moving_averages:
        variable_averages = tf.train.ExponentialMovingAverage(0.0)
        variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    def _restore_latest_checkpoint(sess):
        latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
        saver.restore(sess, latest_checkpoint)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    _restore_latest_checkpoint(sess)

    cv2.startWindowThread()
    for i in range(5000):
        _display_batch(tensor_dict, sess) 
Example #20
Source File: emulator.py    From async-deep-rl with Apache License 2.0
def __init__(self, rom_path, rom_name, visualize, actor_id, rseed, single_life_episodes = False):
        
        self.ale = ALEInterface()

        self.ale.setInt("random_seed", rseed * (actor_id +1))

        # For fuller control on explicit action repeat (>= ALE 0.5.0) 
        self.ale.setFloat("repeat_action_probability", 0.0)
        
        # Disable frame_skip and color_averaging
        # See: http://is.gd/tYzVpj
        self.ale.setInt("frame_skip", 1)
        self.ale.setBool("color_averaging", False)
        self.ale.loadROM(rom_path + "/" + rom_name + ".bin")
        self.legal_actions = self.ale.getMinimalActionSet()        
        self.screen_width,self.screen_height = self.ale.getScreenDims()
        #self.ale.setBool('display_screen', True)
        
        # Processed historical frames that will be fed into the network
        # (i.e., four 84x84 images)
        self.screen_images_processed = np.zeros((IMG_SIZE_X, IMG_SIZE_Y, 
            NR_IMAGES)) 
        self.rgb_screen = np.zeros((self.screen_height,self.screen_width, 3), dtype=np.uint8)
        self.gray_screen = np.zeros((self.screen_height,self.screen_width,1), dtype=np.uint8)

        self.frame_pool = np.empty((2, self.screen_height, self.screen_width))
        self.current = 0
        self.lives = self.ale.lives()

        self.visualize = visualize
        self.visualize_processed = False
        self.windowname = rom_name + ' ' + str(actor_id)
        if self.visualize:
            logger.debug("Opening emulator window...")
            #from skimage import io
            #io.use_plugin('qt')
            cv2.startWindowThread()
            cv2.namedWindow(self.windowname)
            logger.debug("Emulator window opened")
            
        if self.visualize_processed:
            logger.debug("Opening processed frame window...")
            cv2.startWindowThread()
            logger.debug("Processed frame window opened")
            cv2.namedWindow(self.windowname + "_processed")
            
        self.single_life_episodes = single_life_episodes