Python cv2.VideoCapture() Examples

The following are 30 code examples showing how to use cv2.VideoCapture(), extracted from open-source projects. The originating project, author, file, and license are listed above each example.


You may also want to check out all available functions and classes of the cv2 module.
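
Before the examples, a minimal sketch of the usage pattern most of them build on: open a source, poll read() in a loop, and release the capture when done. The device index 0 is an assumption; a file path or stream URL works the same way.

import cv2

cap = cv2.VideoCapture(0)  # 0 = default camera; a file path or URL also works
if not cap.isOpened():
    raise RuntimeError('Could not open video source')

while True:
    ret, frame = cap.read()
    if not ret:  # read() returns False when no frame is available
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()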

Example 1
Project: pruning_yolov3   Author: zbyuan   File: datasets.py    License: GNU General Public License v3.0    9 votes
def __init__(self, pipe=0, img_size=416, half=False):
        self.img_size = img_size
        self.half = half  # half precision fp16 images

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer

        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size 
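
The commented-out pipe strings above show the kinds of sources VideoCapture accepts besides a device index. A hedged sketch (not part of the project) of opening one of them, with the address and credentials as placeholders:

import cv2

pipe = 'rtsp://username:password@192.168.1.64/1'  # placeholder address/credentials
cap = cv2.VideoCapture(pipe)
if not cap.isOpened():
    raise RuntimeError('Failed to open stream: %s' % pipe)
ret, frame = cap.read()  # grab a single frame to verify the stream
cap.release()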
Example 2
def __init__(self, capture=cv2.VideoCapture(0), frame_resize=None):
        # note: the default capture object is created once, at function definition time
        self._capture = capture
        self._frame_resize = None
        if frame_resize:
            if isinstance(frame_resize, (tuple, list)) and (len(frame_resize) == 2):
                self._frame_resize = tuple(map(int, frame_resize))
                self._frame_shape = (1, 3, self._frame_resize[0], self._frame_resize[1])
            elif isinstance(frame_resize, float):
                width = int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)*frame_resize)
                height = int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT)*frame_resize)
                self._frame_shape = (1, 3, width, height)
                self._frame_resize = (width, height)
            else:
                assert False, ("frame_resize should be a tuple of (x,y) pixels "
                               "or a float setting the scaling factor")
        else:
            self._frame_shape = (1, 3,
                int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT))) 
Example 3
Project: OpenCV-Computer-Vision-Projects-with-Python   Author: PacktPublishing   File: chapter2.py    License: MIT License    7 votes
def main():
    device = cv2.CAP_OPENNI
    capture = cv2.VideoCapture(device)
    if not(capture.isOpened()):
        capture.open(device)

    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    app = wx.App()
    frame = MyFrame(None, -1, 'chapter2.py', capture)
    frame.Show(True)
#   self.SetTopWindow(frame)
    app.MainLoop()

    # When everything done, release the capture
    capture.release()
    cv2.destroyAllWindows() 
Example 4
Project: mmdetection   Author: open-mmlab   File: webcam_demo.py    License: Apache License 2.0    6 votes
def main():
    args = parse_args()

    device = torch.device(args.device)

    model = init_detector(args.config, args.checkpoint, device=device)

    camera = cv2.VideoCapture(args.camera_id)

    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        ret_val, img = camera.read()
        if not ret_val:  # stop if the camera stops delivering frames
            break
        result = inference_detector(model, img)

        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break

        model.show_result(
            img, result, score_thr=args.score_thr, wait_time=1, show=True) 
Example 5
Project: The-chat-room   Author: 11ze   File: vachat.py    License: MIT License    6 votes
def __init__(self, ip, port, showme, level, version):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.ADDR = (ip, port)
        self.showme = showme
        if int(level) < 3:
            self.interval = int(level)
        else:
            self.interval = 3
        self.fx = 1 / (self.interval + 1)
        if self.fx < 0.3:
            self.fx = 0.3
        if version == 4:
            self.sock = socket(AF_INET, SOCK_STREAM)
        else:
            self.sock = socket(AF_INET6, SOCK_STREAM)
        self.cap = cv2.VideoCapture(0)
        print("VEDIO client starts...") 
Example 6
Project: pynvr   Author: JFF-Bohdan   File: camera_support.py    License: BSD 3-Clause "New" or "Revised" License    6 votes
def _initCamera(self, callSleep = True):
        """
        Initializes camera. If can't establish connection will write error message to log file and sleep for some
        interval.

        :return: True when camera successfully open, otherwise False
        """
        self.cap = cv.VideoCapture(self.camConnectionString)

        if self.cap is None:
            self.setError("can't connect to camera")
            if callSleep:
                time.sleep(5)
            return None

        if not self.cap.isOpened():  # did we get a connection at all ?
            self.setError("can't connect to camera")
            if callSleep:
                time.sleep(5)

            return None

        return self.cap 
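
A sketch of how the retry-until-open pattern described in the docstring might be driven from outside the class; open_with_retry and its parameters are illustrative names, not part of pynvr:

import time
import cv2 as cv

def open_with_retry(conn_string, retries=5, delay=5):
    # Try to open the source a few times, sleeping between attempts.
    for _ in range(retries):
        cap = cv.VideoCapture(conn_string)
        if cap.isOpened():
            return cap
        cap.release()
        time.sleep(delay)
    return None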
Example 7
Project: bjtu_BinocularCameraRecord   Author: anonymouslycn   File: Main.py    License: MIT License    6 votes
def initfrom(self):
        global update1
        update1 = 0
        global update2
        update2 = 0
        self.maxcap = 0
        testmax = 10
        for i in range(testmax):
            cap = cv2.VideoCapture(i)
            if(cap.isOpened()):
                self.maxcap+=1
            cap.release()
        self.selecamera1.clear()
        self.selecamera2.clear()

        self.selecamera1.addItems([str(i) for i in range(self.maxcap)])
        self.selecamera2.addItems([str(i) for i in range(self.maxcap)]) 
Example 8
Project: bjtu_BinocularCameraRecord   Author: anonymouslycn   File: Main.py    License: MIT License    6 votes
def loop2(self, text, w=1280, h=720):
        cap = cv2.VideoCapture(int(text))
        cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
        global capnum2
        capnum2 = int(text)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
        global update2
        update2 = 1
        global shotmark2

        while (update2 == 1):
            ret, frame = cap.read()
            if not ret:  # guard: read() can fail and return frame=None
                break
            if shotmark2 == 1:
                fn = self.lineEdit.text()
                name = "photo/2_"+fn + "video.jpg"
                if os.path.exists(name):
                    name = "photo/2_" + fn + "video"+str(int(time.time()))+".jpg"
                cv2.imwrite(name, frame)
                shotmark2 = 0
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.original2_image.updateImage(frame)
        # cap.release()
        cv_img_rgb = np.zeros((700,700,3))
        self.original2_image.updateImage(cv_img_rgb) 
Example 9
Project: object-detection   Author: cristianpb   File: camera_opencv.py    License: MIT License    6 votes
def CaptureContinous(self, detector):
    cap = cv2.VideoCapture(0)
    _, image = cap.read()
    cap.release()
    output = detector.prediction(image)
    df = detector.filter_prediction(output, image)
    if len(df) > 0:
        if (df['class_name']
                .str
                .contains('person|bird|cat|wine glass|cup|sandwich')
                .any()):
            day = datetime.now().strftime("%Y%m%d")
            directory = os.path.join(IMAGE_FOLDER, 'webcam', day)
            if not os.path.exists(directory):
                os.makedirs(directory)
            image = detector.draw_boxes(image, df)
            classes = df['class_name'].unique().tolist()
            hour = datetime.now().strftime("%H%M%S")
            filename_output = os.path.join(
                    directory, "{}_{}_.jpg".format(hour, "-".join(classes))
                    )
            cv2.imwrite(filename_output, image) 
Example 10
Project: Traffic-Signs-and-Object-Detection   Author: dark-archerx   File: fatigue.py    License: GNU General Public License v3.0    6 votes
def counter(func):
    @wraps(func)
    def tmp(*args, **kwargs):
        tmp.count += 1
        global lastsave
        if time.time() - lastsave > 3:
            # time.time() is in seconds; reset the count if more than 3 seconds have passed
            lastsave = time.time()
            tmp.count = 0
        return func(*args, **kwargs)
    tmp.count = 0
    return tmp




#cap = cv2.VideoCapture(0) 
Example 11
Project: simba   Author: sgoldenlab   File: tkinter_functions.py    License: GNU Lesser General Public License v3.0    6 votes
def extract_allframescommand(filename):
    if filename:

        pathDir = str(filename[:-4])
        if not os.path.exists(pathDir):
            os.makedirs(pathDir)

        picFname = '%d.png'

        saveDirFilenames = os.path.join(pathDir, picFname)
        print(saveDirFilenames)

        fname = str(filename)
        cap = cv2.VideoCapture(fname)
        fps = cap.get(cv2.CAP_PROP_FPS)
        amount_of_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        print('The number of frames in this video = ', amount_of_frames)
        print('Extracting frames... (might take a while)')
        command = str('ffmpeg -i ' +'"'+ str(fname)+'"' + ' ' + '-q:v 1' + ' ' + '-start_number 0' + ' '+'"'+ str(saveDirFilenames)+'"')
        print(command)
        subprocess.call(command, shell=True)
        print('All frames are extracted!')
    else:
        print('Please select a video to convert') 
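
The extraction itself is delegated to ffmpeg; for comparison, a minimal pure-cv2 sketch (not part of simba) that writes every frame using VideoCapture alone — slower, but with no external dependency:

import os
import cv2

def extract_all_frames_cv2(fname, out_dir):
    # Read frames sequentially and write each one as a numbered PNG.
    cap = cv2.VideoCapture(fname)
    idx = 0
    while True:
        ret, frame = cap.read()
        if not ret:  # end of video or read failure
            break
        cv2.imwrite(os.path.join(out_dir, '%d.png' % idx), frame)
        idx += 1
    cap.release()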
Example 12
Project: simba   Author: sgoldenlab   File: tkinter_functions.py    License: GNU Lesser General Public License v3.0    6 votes
def extractspecificframe(filename,startframe1,endframe1):

    cap = cv2.VideoCapture(filename)
    amount_of_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    pathDir = str(filename[:-4]+'\\frames')
    if not os.path.exists(pathDir):
        os.makedirs(pathDir)

    frames_OI = list(range(int(startframe1),int(endframe1)+1))
    #frames_OI.extend(range(7000,7200))
    #frames_OI.extend(range(9200,9350))

    for i in frames_OI:
        currentFrame = i
        cap.set(cv2.CAP_PROP_POS_FRAMES, currentFrame)
        ret, frame = cap.read()
        fileName = str(currentFrame) + str('.png')
        filePath = os.path.join(pathDir, fileName)
        cv2.imwrite(filePath,frame) 
Example 13
Project: simba   Author: sgoldenlab   File: dlc_change_yamlfile.py    License: GNU Lesser General Public License v3.0    6 votes
def add_single_video_yaml(yamlfile,videofile):
    yamlPath = yamlfile
    cap = cv2.VideoCapture(videofile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cropLine = [0, width, 0, height]
    cropLine = str(cropLine)
    currCropLinePath = cropLine.strip("[]")
    currCropLinePath = currCropLinePath.replace("'", "")
    with open(yamlPath) as f:
        read_yaml = yaml.load(f, Loader=yaml.FullLoader)

    read_yaml["video_sets"].update({videofile: {'crop': currCropLinePath}})

    with open(yamlPath, 'w') as outfile:
        yaml.dump(read_yaml, outfile, default_flow_style=False) 
Example 14
Project: yolo_tensorflow   Author: hizhangp   File: test.py    License: MIT License    6 votes
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
    parser.add_argument('--weight_dir', default='weights', type=str)
    parser.add_argument('--data_dir', default="data", type=str)
    parser.add_argument('--gpu', default='', type=str)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    yolo = YOLONet(False)
    weight_file = os.path.join(args.data_dir, args.weight_dir, args.weights)
    detector = Detector(yolo, weight_file)

    # detect from camera
    # cap = cv2.VideoCapture(-1)
    # detector.camera_detector(cap)

    # detect from image file
    imname = 'test/person.jpg'
    detector.image_detector(imname) 
Example 15
Project: Emojinator   Author: akshaybahadur21   File: get_hand_images.py    License: MIT License    6 votes
def main():
    total_pics = 1000
    cap = cv2.VideoCapture(0)
    x, y, w, h = 300, 50, 350, 350

    pic_no = 0
    flag_start_capturing = False
    frames = 0

    while True:
        ret, frame = cap.read()
        if not ret:  # guard against a failed read
            continue
        frame = cv2.flip(frame, 1)
        #frame = cv2.resize(frame, (image_x, image_y))
        cv2.imwrite("hand_images/" + str(pic_no) + ".jpg", frame)
        cv2.imshow("Capturing gesture", frame)
        pic_no += 1
        if pic_no == total_pics:
            break 
Example 16
Project: video-capture-async   Author: gilbertfrancois   File: videocapturethreading.py    License: Apache License 2.0    6 votes
def _run(self, n_frames=500, width=1280, height=720, with_threading=False):
        if with_threading:
            cap = VideoCaptureTreading(0)
        else:
            cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        if with_threading:
            cap.start()
        t0 = time.time()
        i = 0
        while i < n_frames:
            _, frame = cap.read()
            cv2.imshow('Frame', frame)
            cv2.waitKey(1) & 0xFF
            i += 1
        print('[i] Frames per second: {:.2f}, with_threading={}'.format(n_frames / (time.time() - t0), with_threading))
        if with_threading:
            cap.stop()
        cv2.destroyAllWindows() 
Example 17
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: objectDetectorYOLO.py    License: MIT License    5 votes
def __init__(self,video=False):
        self.config = json.load(open('../config3.json'))
        self.video=video
        print(self.config)
        self.options = self.config['yoloConfig']
        self.tfnet = TFNet(self.options)
        self.predictThresh = 0.4
        self.getAnnotations()
        if self.video:
            self.cap = cv2.VideoCapture(0) 
Example 18
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: YOLOtest.py    License: MIT License    5 votes
def __init__(self,video=False):
        self.config = json.load(open('../config3.json'))
        self.video=video
        print(self.config)
        self.options = self.config['yoloConfig']
        self.tfnet = TFNet(self.options)
        self.predictThresh = 0.05
        self.getAnnotations()
        print(self.anotations_list)
        if self.video:
            # self.cap = cv2.VideoCapture(0)
            self.cap = cv2.VideoCapture('../../WPI_vdo.mov')
            self.out = cv2.VideoWriter('output.avi',-1, 20.0, (640,480)) 
Example 19
Project: face-attendance-machine   Author: matiji66   File: app_utils.py    License: Apache License 2.0    5 votes
def __init__(self, src, width, height):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        (self.grabbed, self.frame) = self.stream.read()

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False 
Example 20
Project: pruning_yolov3   Author: zbyuan   File: datasets.py    License: GNU General Public License v3.0    5 votes
def new_video(self, path):
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) 
Example 21
Project: pruning_yolov3   Author: zbyuan   File: datasets.py    License: GNU General Public License v3.0    5 votes
def __init__(self, sources='streams.txt', img_size=416, half=False):
        self.mode = 'images'
        self.img_size = img_size
        self.half = half  # half precision fp16 images

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline 
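
The update method that each thread runs is not included in this excerpt. A plausible minimal version (a sketch under that assumption, not the project's actual code) would simply keep overwriting the latest frame for its stream:

    def update(self, index, cap):
        # Hypothetical reader loop: keep only the most recent frame for this stream.
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            self.imgs[index] = frame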
Example 22
Project: display_ocr   Author: arturaugusto   File: digital_display_ocr.py    License: GNU General Public License v2.0    5 votes
def take_picture(should_save=False, d_id=0):
  cam = cv2.VideoCapture(d_id)
  s, img = cam.read()
  if s:
    if should_save:
      cv2.imwrite('ocr.jpg',img)
    print "picture taken"
  return img 
Example 23
Project: OpenCV-Computer-Vision-Projects-with-Python   Author: PacktPublishing   File: chapter5.py    License: MIT License    5 votes
def main(video_file='soccer.avi', roi=((140, 100), (500, 600))):
    # open video file
    if path.isfile(video_file):
        video = cv2.VideoCapture(video_file)
    else:
        print('File "' + video_file + '" does not exist.')
        raise SystemExit

    # initialize tracker
    mot = MultipleObjectsTracker()

    while True:
        # grab next frame
        success, img = video.read()
        if success:
            if roi:
                # original video is too big: grab some meaningful ROI
                img = img[roi[0][0]:roi[1][0], roi[0][1]:roi[1][1]]

            # generate saliency map
            sal = Saliency(img, use_numpy_fft=False, gauss_kernel=(3, 3))

            cv2.imshow('original', img)
            cv2.imshow('saliency', sal.get_saliency_map())
            cv2.imshow('objects', sal.get_proto_objects_map(use_otsu=False))
            cv2.imshow('tracker', mot.advance_frame(img,
                       sal.get_proto_objects_map(use_otsu=False)))

            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
        else:
            break 
Example 24
Project: OpenCV-Computer-Vision-Projects-with-Python   Author: PacktPublishing   File: gui.py    License: MIT License    5 votes
def __init__(self, parent, id, title, capture, fps=10):
        """Class constructor

            This method initializes all necessary parameters and generates a
            basic GUI layout that can then be modified by
            self.init_custom_layout() and self.create_custom_layout().

            :param parent: A wx.Frame parent (often Null). If it is non-Null,
                the frame will be minimized when its parent is minimized and
                restored when it is restored.
            :param id: The window identifier. Value -1 indicates default value.
            :param title: The caption to be displayed on the frame's title bar.
            :param capture: A cv2.VideoCapture object to be used as camera
                feed.
            :param fps: frames per second at which to display camera feed
        """
        self.capture = capture
        self.fps = fps

        # determine window size and init wx.Frame
        success, frame = self._acquire_frame()
        if not success:
            print "Could not acquire frame from camera."
            raise SystemExit

        self.imgHeight, self.imgWidth = frame.shape[:2]
        self.bmp = wx.BitmapFromBuffer(self.imgWidth, self.imgHeight, frame)
        wx.Frame.__init__(self, parent, id, title,
                          size=(self.imgWidth, self.imgHeight))

        self._init_base_layout()
        self._create_base_layout() 
Example 25
Project: tools_python   Author: xingag   File: video_cut_cv2.py    License: Apache License 2.0    5 votes
def compound_bgm(video_path, bgm_path):
    """
    通过视频、BGM 合成一段视频
    :param video_path: 视频路径
    :param bgm_path: BGM路径
    :return:
    """
    # $ ffmpeg -i 2_003_014.mp4 -vn -y -acodec copy 3.aac
    # 1.提前temp.mp4这个视频的BGM,文件结果为:temp.aac
    # os.system('ffmpeg -i temp.mp4 -vn -y -acodec copy temp.aac')

    # 2.获取视频的长度
    cap = cv2.VideoCapture(video_path)
    # 帧率
    fps = cap.get(cv2.CAP_PROP_FPS)
    # 总帧数
    frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    # 总时长-秒,这里做取整操作 【浮点类型】
    time_count = math.floor(frame_count / fps)

    print('帧率:%f,总帧数:%d' % (fps, frame_count))
    print(time_count)

    # 3.截取音频
    # 为了简单,这里一般不会超过一分钟
    bgm_temp_path = get_temp_path(bgm_path, 'temp_new')
    os.system('ffmpeg -i %s -ss 00:00:00 -t 00:00:%d -acodec copy %s' % (bgm_path, time_count, bgm_temp_path))

    # 3.1 删除源音频并重命令当前文件
    os.remove(bgm_path)
    os.rename(bgm_temp_path, bgm_path)

    # 4.视频、音频合二为一
    video_temp_path = get_temp_path(video_path, 'temp')
    os.system('ffmpeg -i %s  -i %s  -vcodec copy -acodec copy %s' % (video_path, bgm_path, video_temp_path))
    os.remove(video_path)
    os.rename(video_temp_path, video_path)

    print('音视频合成完成~') 
Example 26
Project: tools_python   Author: xingag   File: video_cut_cv2.py    License: Apache License 2.0    5 votes
def add_water_mask(video_path, mask_word):
    """
    给视频增加水印
    :param video_part3: 视频源
    :param mask_word: 水印文字
    :return:
    """
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)

    # keep the frame rate unchanged
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    video_temp_path = get_temp_path(video_path, 'temp')
    video_writer = cv2.VideoWriter(video_temp_path, fourcc, fps, img_size)

    ret, frame = cap.read()

    while ret:
        # coordinates of the text in the image (note: the origin is the top-left corner)
        x, y = img_size[0] - 200, img_size[1] - 50

        cv2.putText(img=frame, text=mask_word,
                    org=(x, y), fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL,
                    fontScale=1, color=(255, 255, 255))

        video_writer.write(frame)
        ret, frame = cap.read()

    # delete the source file and rename the temporary one
    os.remove(video_path)
    os.rename(video_temp_path, video_path)

    print('Watermark added')
    video_writer.release()
    cap.release() 
Example 27
Project: HardRLWithYoutube   Author: MaxSobolMark   File: train_featurizer.py    License: MIT License    5 votes
def generate_dataset(videos_path, framerate, width, height):
    """Converts videos from specified path to ndarrays of shape [numberOfVideos, -1, width, height, 1]

    Args:
        videos_path: Inside the 'videos/' directory, the name of the subdirectory for videos.
        framerate: The desired framerate of the dataset.
        width: The width we will resize the videos to.
        height: The height we will resize the videos to.

    Returns:
        The dataset with the new size and framerate, and converted to monochromatic.

    """
    dataset = []
    video_index = 0
    for playlist in os.listdir('videos/' + videos_path):
        for video_name in os.listdir('videos/{}/{}'.format(videos_path, playlist)):
            dataset.append([])
            print('Video: {}'.format(video_name))
            video = cv2.VideoCapture('videos/{}/{}/{}'.format(videos_path, playlist, video_name))
            while video.isOpened():
                success, frame = video.read()
                if success:
                    frame = preprocess_image(frame, width, height)
                    dataset[video_index].append(frame)

                    frame_index = video.get(cv2.CAP_PROP_POS_FRAMES)
                    video_framerate = video.get(cv2.CAP_PROP_FPS)
                    video.set(cv2.CAP_PROP_POS_FRAMES, frame_index + video_framerate // framerate)
                    last_frame_index = video.get(cv2.CAP_PROP_FRAME_COUNT)
                    if frame_index >= last_frame_index:
                        # Video is over
                        break
                else:
                    break
            dataset[video_index] = np.reshape(dataset[video_index], (-1, width, height, 1))
            video_index += 1
    return dataset 
Example 28
Project: object_detector_app   Author: datitran   File: app_utils.py    License: MIT License    5 votes
def __init__(self, src, width, height):
		# initialize the video camera stream and read the first frame
		# from the stream
		self.stream = cv2.VideoCapture(src)
		self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
		self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
		(self.grabbed, self.frame) = self.stream.read()

		# initialize the variable used to indicate if the thread should
		# be stopped
		self.stopped = False 
Example 29
Project: EdgeRealtimeVideoAnalytics   Author: RedisGears   File: capture.py    License: Apache License 2.0    5 votes
def __init__(self, infile=0, fps=30.0):
        self.isFile = not str(infile).isdecimal()
        self.ts = time.time()
        self.infile = infile
        self.cam = cv2.VideoCapture(self.infile)
        if not self.isFile:
            self.cam.set(cv2.CAP_PROP_FPS, fps)
            self.fps = fps
            # TODO: some cameras don't respect the fps directive
            self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
            self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
        else:
            self.fps = self.cam.get(cv2.CAP_PROP_FPS)
            self.sma = SimpleMovingAverage(value=0.1, count=19) 
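
On the TODO above: one way to find out whether a camera honored the fps directive is to read the property back after setting it. A sketch, not part of the project; many backends report 0.0 when the fps is unknown:

import cv2

cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FPS, 30.0)
reported = cam.get(cv2.CAP_PROP_FPS)  # 0.0 if the backend does not report fps
if reported and abs(reported - 30.0) > 0.5:
    print('Camera ignored the fps directive; reports %.1f FPS' % reported)
cam.release()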
Example 30
Project: bjtu_BinocularCameraRecord   Author: anonymouslycn   File: Main.py    License: MIT License    5 votes
def loop1(self, text, w=1280, h=720):
        cap = cv2.VideoCapture(int(text))
        cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
        global capnum1
        capnum1 = int(text)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
        global update1
        update1 = 1
        global shotmark1

        while (update1 == 1):
            ret, frame = cap.read()
            if not ret:  # guard: read() can fail and return frame=None
                break
            if shotmark1 == 1:
                fn = self.lineEdit.text()
                name = "photo/1_"+fn + "video.jpg"
                if os.path.exists(name):
                    name = "photo/1_"+fn + "video"+str(int(time.time()))+".jpg"
                cv2.imwrite(name, frame)
                shotmark1 = 0
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.original1_image.updateImage(frame)
        # cap.release()
        cv_img_rgb = np.zeros((700,700,3))
        self.original1_image.updateImage(cv_img_rgb)