Python cv2.VideoCapture() Examples

The following are 29 code examples of cv2.VideoCapture(), each taken from an open-source project; the source file, project, and license are listed above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
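For orientation, the canonical read loop that most of the examples below build on looks like this sketch (illustrative only; device index 0 is an assumption and may differ on your machine):

import cv2

cap = cv2.VideoCapture(0)  # an integer selects a camera; a filename or URL opens a stream
if not cap.isOpened():
    raise RuntimeError('Could not open video source')
while True:
    ret, frame = cap.read()  # ret is False when no frame could be read
    if not ret:
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()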
Example #1
Source File: cv2Iterator.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def __init__(self, capture=cv2.VideoCapture(0), frame_resize=None):
        # Note: a default argument is evaluated once, at definition time, so
        # camera 0 is opened as soon as this code is loaded.
        self._capture = capture
        self._frame_resize = None
        if frame_resize:
            if isinstance(frame_resize, (tuple, list)) and (len(frame_resize) == 2):
                self._frame_resize = tuple(map(int, frame_resize))
                self._frame_shape = (1, 3, self._frame_resize[0], self._frame_resize[1])
            elif isinstance(frame_resize, float):
                width = int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)*frame_resize)
                height = int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT)*frame_resize)
                self._frame_shape = (1, 3, width, height)
                self._frame_resize = (width, height)
            else:
                assert False, ("frame_resize should be a tuple of (x,y) pixels "
                               "or a float setting the scaling factor")
        else:
            self._frame_shape = (1, 3,
                int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT))) 
Example #2
Source File: datasets.py    From pruning_yolov3 with GNU General Public License v3.0
def __init__(self, pipe=0, img_size=416, half=False):
        self.img_size = img_size
        self.half = half  # half precision fp16 images

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer

        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size 
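The commented-out pipelines above require an OpenCV build with GStreamer support. A quick way to check your build (not part of the original example) is to inspect the build information string:

import cv2

# Look for a line reading 'GStreamer: YES' in the output
print(cv2.getBuildInformation())

# When GStreamer is available, the backend can be forced explicitly:
# cap = cv2.VideoCapture(gstreamer_pipeline_string, cv2.CAP_GSTREAMER)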
Example #3
Source File: tkinter_functions.py    From simba with GNU Lesser General Public License v3.0
def clahe(filename):
    os.chdir(os.path.dirname(filename))
    print('Applying CLAHE, this might take awhile...')

    currentVideo = os.path.basename(filename)
    fileName, fileEnding = os.path.splitext(currentVideo)  # robust to filenames containing dots
    saveName = str('CLAHE_') + str(fileName) + str('.avi')
    cap = cv2.VideoCapture(currentVideo)
    imageWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    imageHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(saveName, fourcc, fps, (imageWidth, imageHeight), 0)
    claheFilter = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16))  # create the filter once, reuse per frame
    try:
        while True:
            ret, image = cap.read()
            if ret:
                im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                claheCorrectedFrame = claheFilter.apply(im)
                out.write(claheCorrectedFrame)
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
            else:
                print(str('Completed video ') + str(saveName))
                break
    except Exception as e:
        print('CLAHE not applied: {}'.format(e))
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    return saveName 
Example #4
Source File: chapter2.py    From OpenCV-Computer-Vision-Projects-with-Python with MIT License
def main():
    device = cv2.CAP_OPENNI
    capture = cv2.VideoCapture(device)
    if not capture.isOpened():
        capture.open(device)

    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    app = wx.App()
    frame = MyFrame(None, -1, 'chapter2.py', capture)
    frame.Show(True)
#   self.SetTopWindow(frame)
    app.MainLoop()

    # When everything done, release the capture
    capture.release()
    cv2.destroyAllWindows() 
Example #5
Source File: videocapturethreading.py    From video-capture-async with Apache License 2.0
def _run(self, n_frames=500, width=1280, height=720, with_threading=False):
        if with_threading:
            cap = VideoCaptureTreading(0)
        else:
            cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        if with_threading:
            cap.start()
        t0 = time.time()
        i = 0
        while i < n_frames:
            _, frame = cap.read()
            cv2.imshow('Frame', frame)
            cv2.waitKey(1) & 0xFF
            i += 1
        print('[i] Frames per second: {:.2f}, with_threading={}'.format(n_frames / (time.time() - t0), with_threading))
        if with_threading:
            cap.stop()
        cv2.destroyAllWindows() 
Example #6
Source File: Main.py    From bjtu_BinocularCameraRecord with MIT License
def initfrom(self):
        global update1
        update1 = 0
        global update2
        update2 = 0
        self.maxcap = 0
        for i in range(10):  # probe camera indices 0-9
            cap = cv2.VideoCapture(i)
            if cap.isOpened():
                self.maxcap += 1
            cap.release()
        self.selecamera1.clear()
        self.selecamera2.clear()

        self.selecamera1.addItems([str(i) for i in range(self.maxcap)])
        self.selecamera2.addItems([str(i) for i in range(self.maxcap)]) 
Example #7
Source File: dlc_change_yamlfile.py    From simba with GNU Lesser General Public License v3.0
def add_single_video_yaml(yamlfile,videofile):
    yamlPath = yamlfile
    cap = cv2.VideoCapture(videofile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    currCropLinePath = '0, {}, 0, {}'.format(width, height)  # crop string covering the full frame
    with open(yamlPath) as f:
        read_yaml = yaml.load(f, Loader=yaml.FullLoader)

    read_yaml["video_sets"].update({videofile: {'crop': currCropLinePath}})

    with open(yamlPath, 'w') as outfile:
        yaml.dump(read_yaml, outfile, default_flow_style=False) 
Example #8
Source File: Main.py    From bjtu_BinocularCameraRecord with MIT License
def loop2(self,text,w=1280,h=720):
        cap = cv2.VideoCapture(int(text))
        cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
        global capnum2
        capnum2 = int(text)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
        global update2
        update2 = 1
        global shotmark2

        while (update2 == 1):
            ret, frame = cap.read()
            if not ret:  # guard against a failed read before using the frame
                break
            if shotmark2 == 1:
                fn = self.lineEdit.text()
                name = "photo/2_"+fn + "video.jpg"
                if os.path.exists(name):
                    name = "photo/2_" + fn + "video"+str(int(time.time()))+".jpg"
                cv2.imwrite(name, frame)
                shotmark2 = 0
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.original2_image.updateImage(frame)
        # cap.release()
        cv_img_rgb = np.zeros((700,700,3))
        self.original2_image.updateImage(cv_img_rgb) 
Example #9
Source File: tkinter_functions.py    From simba with GNU Lesser General Public License v3.0
def extractspecificframe(filename,startframe1,endframe1):

    cap = cv2.VideoCapture(filename)
    amount_of_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    pathDir = str(filename[:-4]+'\\frames')
    if not os.path.exists(pathDir):
        os.makedirs(pathDir)

    frames_OI = list(range(int(startframe1),int(endframe1)+1))
    #frames_OI.extend(range(7000,7200))
    #frames_OI.extend(range(9200,9350))

    for i in frames_OI:
        currentFrame = i
        cap.set(cv2.CAP_PROP_POS_FRAMES, currentFrame)
        ret, frame = cap.read()
        if not ret:
            print('Could not read frame ' + str(currentFrame))
            continue
        fileName = str(currentFrame) + str('.png')
        filePath = os.path.join(pathDir, fileName)
        cv2.imwrite(filePath, frame)
Example #10
Source File: train_featurizer.py    From HardRLWithYoutube with MIT License
def generate_dataset(videos_path, framerate, width, height):
    """Converts videos from specified path to ndarrays of shape [numberOfVideos, -1, width, height, 1]

    Args:
        videos_path: Inside the 'videos/' directory, the name of the subdirectory for videos.
        framerate: The desired framerate of the dataset.
        width: The width we will resize the videos to.
        height: The height we will resize the videos to.

    Returns:
        The dataset with the new size and framerate, and converted to monochromatic.

    """
    dataset = []
    video_index = 0
    for playlist in os.listdir('videos/' + videos_path):
        for video_name in os.listdir('videos/{}/{}'.format(videos_path, playlist)):
            dataset.append([])
            print('Video: {}'.format(video_name))
            video = cv2.VideoCapture('videos/{}/{}/{}'.format(videos_path, playlist, video_name))
            while video.isOpened():
                success, frame = video.read()
                if success:
                    frame = preprocess_image(frame, width, height)
                    dataset[video_index].append(frame)

                    frame_index = video.get(cv2.CAP_PROP_POS_FRAMES)
                    video_framerate = video.get(cv2.CAP_PROP_FPS)
                    video.set(cv2.CAP_PROP_POS_FRAMES, frame_index + video_framerate // framerate)
                    last_frame_index = video.get(cv2.CAP_PROP_FRAME_COUNT)
                    if frame_index >= last_frame_index:
                        # Video is over
                        break
                else:
                    break
            video.release()
            dataset[video_index] = np.reshape(dataset[video_index], (-1, width, height, 1))
            video_index += 1
    return dataset 
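A hypothetical invocation, assuming a videos/demo/ directory containing playlist subdirectories (the path and target sizes are illustrative, not taken from the original project):

dataset = generate_dataset('demo', framerate=5, width=84, height=84)
print('videos:', len(dataset))
print('frames in first video:', dataset[0].shape)  # (num_frames, 84, 84, 1)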
Example #11
Source File: test.py    From yolo_tensorflow with MIT License
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
    parser.add_argument('--weight_dir', default='weights', type=str)
    parser.add_argument('--data_dir', default="data", type=str)
    parser.add_argument('--gpu', default='', type=str)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    yolo = YOLONet(False)
    weight_file = os.path.join(args.data_dir, args.weight_dir, args.weights)
    detector = Detector(yolo, weight_file)

    # detect from camera
    # cap = cv2.VideoCapture(-1)
    # detector.camera_detector(cap)

    # detect from image file
    imname = 'test/person.jpg'
    detector.image_detector(imname) 
Example #12
Source File: camera_opencv.py    From object-detection with MIT License
def CaptureContinous(self, detector):
    cap = cv2.VideoCapture(0)
    _, image = cap.read()
    cap.release()
    output = detector.prediction(image)
    df = detector.filter_prediction(output, image)
    if len(df) > 0:
        if (df['class_name']
                .str
                .contains('person|bird|cat|wine glass|cup|sandwich')
                .any()):
            day = datetime.now().strftime("%Y%m%d")
            directory = os.path.join(IMAGE_FOLDER, 'webcam', day)
            if not os.path.exists(directory):
                os.makedirs(directory)
            image = detector.draw_boxes(image, df)
            classes = df['class_name'].unique().tolist()
            hour = datetime.now().strftime("%H%M%S")
            filename_output = os.path.join(
                    directory, "{}_{}_.jpg".format(hour, "-".join(classes))
                    )
            cv2.imwrite(filename_output, image) 
Example #13
Source File: tkinter_functions.py    From simba with GNU Lesser General Public License v3.0
def extract_allframescommand(filename):
    if filename:

        pathDir = str(filename[:-4])
        if not os.path.exists(pathDir):
            os.makedirs(pathDir)

        picFname = '%d.png'

        saveDirFilenames = os.path.join(pathDir, picFname)
        print(saveDirFilenames)

        fname = str(filename)
        cap = cv2.VideoCapture(fname)
        fps = cap.get(cv2.CAP_PROP_FPS)
        amount_of_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        print('The number of frames in this video = ',amount_of_frames)
        print('Extracting frames... (Might take awhile)')
        command = str('ffmpeg -i ' +'"'+ str(fname)+'"' + ' ' + '-q:v 1' + ' ' + '-start_number 0' + ' '+'"'+ str(saveDirFilenames)+'"')
        print(command)
        subprocess.call(command, shell=True)
        print('All frames are extracted!')
    else:
        print('Please select a video to convert') 
Example #14
Source File: fatigue.py    From Traffic-Signs-and-Object-Detection with GNU General Public License v3.0
def counter(func):
    @wraps(func)
    def tmp(*args, **kwargs):
        tmp.count += 1
        global lastsave
        if time.time() - lastsave > 3:
            # time.time() is in seconds, so this resets the count after 3 seconds
            lastsave = time.time()
            tmp.count = 0
        return func(*args, **kwargs)
    tmp.count = 0
    return tmp




#cap = cv2.VideoCapture(0) 
Example #15
Source File: camera_support.py    From pynvr with BSD 3-Clause "New" or "Revised" License
def _initCamera(self, callSleep = True):
        """
        Initializes the camera. If a connection can't be established, writes an error message to the
        log file and sleeps for some interval.

        :return: the capture object when the camera opens successfully, otherwise None
        """
        self.cap = cv.VideoCapture(self.camConnectionString)

        if self.cap is None:
            self.setError("can't connect to camera")
            if callSleep:
                time.sleep(5)
            return None

        if not self.cap.isOpened():  # did we get a connection at all ?
            self.setError("can't connect to camera")
            if callSleep:
                time.sleep(5)

            return None

        return self.cap 
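A sketch of how such an initializer might be driven (hypothetical caller code; camera stands in for an instance of the surrounding class, which is not shown in this excerpt):

# Hypothetical reconnect loop around _initCamera()
cap = None
while cap is None:
    cap = camera._initCamera(callSleep=True)  # sleeps 5 s internally on failure
ret, frame = cap.read()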
Example #16
Source File: get_hand_images.py    From Emojinator with MIT License
def main():
    total_pics = 1000
    cap = cv2.VideoCapture(0)
    x, y, w, h = 300, 50, 350, 350

    pic_no = 0
    flag_start_capturing = False
    frames = 0

    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        #frame = cv2.resize(frame, (image_x, image_y))
        cv2.imwrite("hand_images/" + str(pic_no) + ".jpg", frame)
        cv2.imshow("Capturing gesture", frame)
        pic_no += 1
        if pic_no == total_pics:
            break 
Example #17
Source File: webcam_demo.py    From mmdetection with Apache License 2.0
def main():
    args = parse_args()

    device = torch.device(args.device)

    model = init_detector(args.config, args.checkpoint, device=device)

    camera = cv2.VideoCapture(args.camera_id)

    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        ret_val, img = camera.read()
        result = inference_detector(model, img)

        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break

        model.show_result(
            img, result, score_thr=args.score_thr, wait_time=1, show=True) 
Example #18
Source File: CameraCapture.py    From Custom-vision-service-iot-edge-raspberry-pi with MIT License
def __enter__(self):
        if self.isWebcam:
            #The VideoStream class always gives us the latest frame from the webcam. It uses another thread to read the frames.
            self.vs = VideoStream(int(self.videoPath)).start()
            time.sleep(1.0)  # needed to load at least one frame into the VideoStream class
            #self.capture = cv2.VideoCapture(int(self.videoPath))
        else:
            #In the case of a video file, we want to analyze all the frames of the video thus are not using VideoStream class
            self.capture = cv2.VideoCapture(self.videoPath)
        return self 
Example #19
Source File: detect.py    From yolo2-pytorch with GNU Lesser General Public License v3.0
def create_cap(self):
        try:
            cap = int(self.args.input)
        except ValueError:
            cap = os.path.expanduser(os.path.expandvars(self.args.input))
            assert os.path.exists(cap)
        return cv2.VideoCapture(cap) 
Example #20
Source File: camera_demo.py    From R2CNN_Faster-RCNN_Tensorflow with MIT License
def testCamera():
    cap = cv2.VideoCapture(0)
    while True:
        # get a frame
        ret, frame = cap.read()
        if not ret:
            break
        # show a frame
        cv2.imshow("capture", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows() 
Example #21
Source File: demo.py    From pytorch-0.4-yolov3 with MIT License
def demo(cfgfile, weightfile):
    m = Darknet(cfgfile)
    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if m.num_classes == 20:
        namesfile = 'data/voc.names'
    elif m.num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/names'
    print("{} is used for classification".format(namesfile))
    class_names = load_class_names(namesfile)
 
    use_cuda = True
    if use_cuda:
        m.cuda()

    cap = cv2.VideoCapture(1)
    if not cap.isOpened():
        print("Unable to open camera")
        exit(-1)

    while True:
        res, img = cap.read()
        if res:
            sized = cv2.resize(img, (m.width, m.height))
            bboxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
            print('------')
            draw_img = plot_boxes_cv2(img, bboxes, None, class_names)
            cv2.imshow(cfgfile, draw_img)
            cv2.waitKey(1)
        else:
            print("Unable to read image")
            exit(-1)

############################################ 
Example #22
Source File: VideoStream.py    From Custom-vision-service-iot-edge-raspberry-pi with MIT License
def __init__(self, path, queueSize=3):
        self.stream = cv2.VideoCapture(path)
        self.stopped = False
        self.Q = Queue(maxsize=queueSize) 
Example #23
Source File: functions.py    From 3D-HourGlass-Network with MIT License
def playVideoFromAVI(s):
    cap = cv2.VideoCapture(s)

    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    # Read until video is completed
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            # Display the resulting frame
            cv2.imshow('Frame', frame)

            # Press Q on keyboard to exit
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # Break the loop
        else:
            break

    # When everything done, release the video capture object
    cap.release()

    # Closes all the frames
    cv2.destroyAllWindows()
Example #24
Source File: import_videos_csv_project_ini.py    From simba with GNU Lesser General Public License v3.0
def extract_frames_ini(directory):
    filesFound = []

    def execute(command):
        print(command)
        subprocess.call(command, shell=True, stdout=subprocess.PIPE)

    ########### FIND FILES ###########
    for i in os.listdir(directory):
        # if i.__contains__(".mp4"):
        filesFound.append(i)


    for i in filesFound:
        pathDir1 = str(i[:-4])
        pathDir0 = str(os.path.dirname(directory)) + '\\frames\\input'
        pathDir = str(pathDir0) + '\\' + pathDir1

        if not os.path.exists(pathDir):
            os.makedirs(pathDir)
            picFname = '%d.png'

            saveDirFilenames = os.path.join(pathDir, picFname)

            fname = str(directory)+'\\' + str(i)
            cap = cv2.VideoCapture(fname)
            fps = cap.get(cv2.CAP_PROP_FPS)
            amount_of_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
            print('The number of frames in this video = ',amount_of_frames)
            print('Extracting frames... (Might take awhile)')
            command = str('ffmpeg -i ' + '"' + str(fname) + '"' + ' ' + '-q:v 1' + ' ' + '-start_number 0' + ' ' + '"' + str(saveDirFilenames) + '"')  # quote paths so spaces survive the shell
            print(command)
            subprocess.call(command, shell=True)
            print('Frames were extracted for',os.path.basename(pathDir))


        else:
            print(os.path.basename(pathDir),'existed, no action taken, frames should be in there')

    print('All frames were extracted.') 
Example #25
Source File: YOLOtest.py    From Traffic_sign_detection_YOLO with MIT License
def __init__(self,video=False):
        self.config = json.load(open('../config3.json'))
        self.video=video
        print(self.config)
        self.options = self.config['yoloConfig']
        self.tfnet = TFNet(self.options)
        self.predictThresh = 0.05
        self.getAnnotations()
        print(self.anotations_list)
        if self.video:
            # self.cap = cv2.VideoCapture(0)
            self.cap = cv2.VideoCapture('../../WPI_vdo.mov')
            self.out = cv2.VideoWriter('output.avi', -1, 20.0, (640, 480))  # fourcc=-1 asks the OS to choose a codec (dialog on Windows)
Example #26
Source File: camera.py    From web-document-scanner with MIT License
def __init__(self):
        # Open a camera (device index 2; adjust to match your machine)
        self.cap = cv2.VideoCapture(2)
      
        # Initialize video recording environment
        self.is_record = False
        self.out = None
        self.transformed_frame = None

        self.scanner = Scanner()
        self.cached_frame = None 
Example #27
Source File: tkinter_functions.py    From simba with GNU Lesser General Public License v3.0
def batch_extract_allframes(dir):
    curdir = os.listdir(dir)
    vid=[]
    for i in curdir:
        if i.endswith(('.avi', '.mp4', '.mov', '.flv')):  # note the dot: 'flv' alone would match any name ending in those letters
            vid.append(i)

    for index,i in enumerate(vid):
        vid[index]=os.path.join(dir,i)

    for i in vid:
        filename= i
        pathDir = str(filename[:-4])
        if not os.path.exists(pathDir):
            os.makedirs(pathDir)

        picFname = '%d.png'

        saveDirFilenames = os.path.join(pathDir, picFname)
        print(saveDirFilenames)

        fname = str(filename)
        cap = cv2.VideoCapture(fname)
        fps = cap.get(cv2.CAP_PROP_FPS)
        amount_of_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        print('The number of frames in this video = ', amount_of_frames)
        print('Extracting frames... (Might take awhile)')
        command = str(
            'ffmpeg -i '+'"' + str(fname)+'"' + ' ' + '-q:v 1' + ' ' + '-start_number 0' + ' ' +'"'+ str(saveDirFilenames)+'"')
        print(command)
        subprocess.call(command, shell=True)
        print('All frames are extracted!') 
Example #28
Source File: cvm1.py    From Traffic-Signs-and-Object-Detection with GNU General Public License v3.0
def setup_camera(self):
        self.capture = cv2.VideoCapture(0)
        # cv2.cv.CV_CAP_PROP_* constants are OpenCV 2.x; OpenCV 3+ uses cv2.CAP_PROP_FRAME_WIDTH/HEIGHT
        self.capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.video_width)
        self.capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.video_height)

        self.timer = QTimer()
        self.timer.timeout.connect(self.display_camera_stream)
        self.timer.start(50) 
Example #29
Source File: functions.py    From Action-Recognition with MIT License
def playVideoFromAVI(s):
    cap = cv2.VideoCapture(s)

    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")

    # Read until video is completed
    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            # Display the resulting frame
            cv2.imshow('Frame', frame)

            # Press Q on keyboard to exit
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        # Break the loop
        else:
            break

    # When everything done, release the video capture object
    cap.release()

    # Closes all the frames
    cv2.destroyAllWindows()