Python cv2.VideoCapture() Examples

The following code examples show how to use cv2.VideoCapture(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the examples you don't like, and you can also save this page to your account.
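For orientation, here is a minimal, self-contained sketch of the typical capture loop (not taken from any of the projects below; it assumes a local webcam at index 0):

import cv2

cap = cv2.VideoCapture(0)  # 0 = default webcam; a file path or URL also works
if not cap.isOpened():
    raise IOError("Could not open video source")
while True:
    ret, frame = cap.read()
    if not ret:  # read() returns False once the stream ends or a read fails
        break
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()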

Example 1
Project: CIKM2017   Author: MovieFIB   File: extract_feature.py
def load_videos(video_file):
    # print "load_videos"
    capture = cv2.VideoCapture(video_file)

    read_flag, frame = capture.read()
    vid_frames = []
    i = 1
    # print read_flag

    while (read_flag):
        # print i
        if i % 10 == 0:
            vid_frames.append(frame)
            #                print frame.shape
        read_flag, frame = capture.read()
        i += 1
    vid_frames = np.asarray(vid_frames, dtype='uint8')[:-1]
    # print 'vid shape'
    # print vid_frames.shape
    capture.release()
    print i
    return vid_frames 
Example 2
Project: Mini-Projects   Author: gaborvecsei   File: Capture_Img_To_Drive.py
def CaptureImage():
	imageName = 'DontCare.jpg' #Just a random string
	cap = cv2.VideoCapture(0)
	while(True):
	    # Capture frame-by-frame
	    ret, frame = cap.read()

	    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # To capture the image in monochrome
	    rgbImage = frame # Note: OpenCV actually delivers frames in BGR order

	    # Display the resulting frame
	    cv2.imshow('Webcam',rgbImage)
	    # Wait for the 'q' key press to capture the image
	    if cv2.waitKey(1) & 0xFF == ord('q'):
	        #Set the image name to the date it was captured
	        imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
	        #Save the image
	        cv2.imwrite(imageName, rgbImage)
	        break
	# When everything done, release the capture
	cap.release()
	cv2.destroyAllWindows()
	#Returns the captured image's name
	return imageName 
Example 3
Project: motorized_zoom_lens   Author: Kurokesu   File: main_5-50.py
def grab(cam, queue, width, height, fps):
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)

    while(running):
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        frame["img"] = img
        frame["1"] = config["1"]
        frame["2"] = config["2"]

        blur = get_blur(img, 0.05)
        frame["blur"] = blur

        if queue.qsize() < 10:
            queue.put(frame)
        else:
            # Queue is full; drop this frame and report the backlog
            print(queue.qsize())
Example 4
Project: Motion-Sensor   Author: Paco1994   File: recorder.py
def video (seconds, frameRate):
    cap = cv2.VideoCapture(0)
    if(not cap.isOpened()):
        return "error"

    # Define the codec and create VideoWriter object
    fourcc = cv2.cv.CV_FOURCC(*'XVID')
    name = "media/video/" + time.strftime("%d-%m-%Y_%X")+".avi"
    out = cv2.VideoWriter(name, fourcc, frameRate, (640,480))
    program_starts = time.time()
    result = subprocess.Popen(["ffprobe", name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    nFrames=0
    while(nFrames<seconds*frameRate):
        ret, frame = cap.read()
        if ret==True:
            out.write(frame)
            nFrames += 1
        else:
            break
    out.release()
    cap.release()
    return name 
Example 5
Project: Enchain   Author: Zhehua-Hu   File: videoSlice.py
def showVideoInfo(video_path):
    try:
        vhandle = cv2.VideoCapture(video_path)  # also handles videos with Chinese filenames
        fps = vhandle.get(cv2.CAP_PROP_FPS)
        count = vhandle.get(cv2.CAP_PROP_FRAME_COUNT)
        size = (int(vhandle.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(vhandle.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        ret, firstframe = vhandle.read()
        if ret:
            print("FPS: %.2f" % fps)
            print("COUNT: %.2f" % count)
            print("WIDTH: %d" % size[0])
            print("HEIGHT: %d" % size[1])
            return vhandle, fps, size, firstframe
        else:
            print("Video can not read!")
    except:
        "Error in showVideoInfo" 
Example 6
Project: FaceSwap   Author: Aravind-Suresh   File: main.py    (MIT License)
def videoize(func, args, src = 0, win_name = "Cam", delim_wait = 1, delim_key = 27):
    cap = cv2.VideoCapture(src)
    while(1):
        ret, frame = cap.read()
        # To speed up processing; Almost real-time on my PC
        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
        frame = cv2.flip(frame, 1)
        out = func(frame, args)
        if out is None:
            continue
        out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
        cv2.imshow(win_name, out)
        cv2.moveWindow(win_name, (s_w - out.shape[1])/2, (s_h - out.shape[0])/2)
        k = cv2.waitKey(delim_wait)

        if k == delim_key:
            cv2.destroyAllWindows()
            cap.release()
            return 
Example 7
Project: MultiObjectTracker   Author: alokwhitewolf   File: tracker.py
def get_fps(source, Videolength):
	cap = cv2.VideoCapture(source)
	frame_counter = 0
	print "Calculating Frames per second . . . "

	while (True):
		# Capture frame-by-frame

		ret, frame = cap.read()
		if not ret:
			break

		frame_counter += 1
		
	cap.release()
	cv2.destroyAllWindows()
	fps = float(frame_counter) / Videolength
	print "\nFPS is " +str(fps)+"\n"

	return fps

# Algorithm to check intersection of line segments:
# it iteratively tests intersection between a pair of points (the vehicle's last location)
# and pairs of points from another list (the pedestrian path)
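The intersection routine itself is not part of this excerpt. A standard orientation-based sketch of such a test (function names are illustrative, not from the project) looks like:

def ccw(a, b, c):
    # Cross-product sign: positive if a -> b -> c turns counter-clockwise
    return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])

def segments_intersect(p1, p2, q1, q2):
    # Proper intersection test via orientations (collinear edge cases ignored)
    d1 = ccw(q1, q2, p1)
    d2 = ccw(q1, q2, p2)
    d3 = ccw(p1, p2, q1)
    d4 = ccw(p1, p2, q2)
    return ((d1 > 0) != (d2 > 0)) and ((d3 > 0) != (d4 > 0))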
Example 8
Project: PyIntroduction   Author: tody411   File: video_capture.py
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)

    if not capture.isOpened():
        raise IOError("Failed to open camera capture")

    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)

    while True:
        ret, image = capture.read()

        if not ret:
            continue

        cv2.imshow("Capture", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()


# Webcam capture and display using Matplotlib
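The Matplotlib-based function this comment introduces was cut off in the excerpt. A minimal sketch of that approach (my assumption, not the project's code) could be:

import cv2
import matplotlib.pyplot as plt

def matplotCaptureFrame():
    # Grab a single frame from the default webcam
    capture = cv2.VideoCapture(0)
    ret, image = capture.read()
    capture.release()
    if not ret:
        raise IOError("Failed to read a frame")
    # OpenCV delivers BGR; Matplotlib expects RGB
    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show()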
Example 9
Project: Video-Classification-Action-Recognition   Author: qijiezhao   File: build_of.py
def dump_frames(vid_path):
    import cv2
    video = cv2.VideoCapture(vid_path)
    vid_name = vid_path.split('/')[-1].split('.')[0]
    out_full_path = os.path.join(out_path, vid_name)

    fcount = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    try:
        os.mkdir(out_full_path)
    except OSError:
        pass
    file_list = []
    for i in xrange(fcount):
        ret, frame = video.read()
        assert ret
        cv2.imwrite('{}/{:06d}.jpg'.format(out_full_path, i), frame)
        access_path = '{}/{:06d}.jpg'.format(vid_name, i)
        file_list.append(access_path)
    print '{} done'.format(vid_name)
    sys.stdout.flush()
    return file_list 
Example 10
Project: lan-ichat   Author: Forec   File: vchat.py
def __init__(self, ip, port, showme, level, version):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.ADDR = (ip, port)
        self.showme = showme
        if level == 0:
            self.interval = 0
        elif level == 1:
            self.interval = 1
        elif level == 2:
            self.interval = 2
        else:
            self.interval = 3
        self.fx = 1 / (self.interval + 1)
        if self.fx < 0.3:
            self.fx = 0.3
        if version == 4:
            self.sock = socket(AF_INET, SOCK_STREAM)
        else:
            self.sock = socket(AF_INET6, SOCK_STREAM)
        self.cap = cv2.VideoCapture(0)
        print("VEDIO client starts...") 
Example 11
Project: pycreate2   Author: MomsFriendlyRobotCompany   File: test.py
def write():
    os.remove(filename)
    cap = cv2.VideoCapture(0)
    db = shelve.open(filename)
    imgs = []
    data = range(100)

    for i in range(100):
        ret, frame = cap.read()

        if ret:
            # jpg = frame  # raw frames: ~29 MB
            # jpg = cv2.imencode('.jpg', frame)  # JPEG-encoded: much smaller (~1.9 MB)
            jpg = cv2.imencode('.jpg', frame)[1].tostring()  # no extra benefit from converting to a string (~1.9 MB)
            imgs.append(jpg)
            print('frame[{}] {}'.format(i, frame.shape))

        time.sleep(0.03)

    db['imgs'] = imgs
    db['data'] = data
    cap.release()
    db.close() 
Example 12
Project: 3DCNN   Author: bityangke   File: videoto3d.py
def video3d(self, filename, color=False, skip=True):
        cap = cv2.VideoCapture(filename)
        nframe = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        if skip:
            frames = [x * nframe / self.depth for x in range(self.depth)]
        else:
            frames = [x for x in range(self.depth)]
        framearray = []

        for i in range(self.depth):
            cap.set(cv2.CAP_PROP_POS_FRAMES, frames[i])
            ret, frame = cap.read()
            frame = cv2.resize(frame, (self.height, self.width))  # note: cv2.resize expects (width, height)
            if color:
                framearray.append(frame)
            else:
                framearray.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))

        cap.release()
        return np.array(framearray) 
Example 13
Project: Simple-stream-Kafka   Author: amwaleh   File: producer.py
def video_emitter(video):
    # Open the video
    video = cv2.VideoCapture(video)
    print(' emitting.....')

    # read the file
    while video.isOpened():
        # read the image in each frame
        success, image = video.read()

        # check if the file has read the end
        if not success:
            break

        # encode the frame as PNG
        ret, jpeg = cv2.imencode('.png', image)
        # Convert the image to bytes and send to kafka
        producer.send_messages(topic, jpeg.tobytes())
        # sleep 0.2 s to reduce CPU usage
        time.sleep(0.2)
    # clear the capture
    video.release()
    print('done emitting') 
Example 14
Project: yolo_tensorflow   Author: hizhangp   File: test.py    (MIT License)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
    parser.add_argument('--weight_dir', default='weights', type=str)
    parser.add_argument('--data_dir', default="data", type=str)
    parser.add_argument('--gpu', default='', type=str)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    yolo = YOLONet(False)
    weight_file = os.path.join(args.data_dir, args.weight_dir, args.weights)
    detector = Detector(yolo, weight_file)

    # detect from camera
    # cap = cv2.VideoCapture(-1)
    # detector.camera_detector(cap)

    # detect from image file
    imname = 'test/person.jpg'
    detector.image_detector(imname) 
Example 15
Project: MultiObjectTracker   Author: alokwhitewolf   File: main.py
def get_fps(source, Videolength):
	cap = cv2.VideoCapture("docs/video/traffic2")
	frame_counter = 0
	print "Calculating Frames per second . . . "

	while (True):
		# Capture frame-by-frame

		ret, frame = cap.read()
		if not ret:
			break

		frame_counter += 1

	cap.release()
	cv2.destroyAllWindows()
	fps = float(frame_counter) / Videolength
	print "\nFPS is " +str(fps)+"\n"

	return fps

# Algorithm to check intersection of line segments:
# it iteratively tests intersection between a pair of points (the vehicle's last location)
# and pairs of points from another list (the pedestrian path); a sketch of such a test appears after Example 7
Example 16
Project: temporal-segment-networks   Author: yjxiong   File: build_of.py
def dump_frames(vid_path):
    import cv2
    video = cv2.VideoCapture(vid_path)
    vid_name = vid_path.split('/')[-1].split('.')[0]
    out_full_path = os.path.join(out_path, vid_name)

    fcount = int(video.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    try:
        os.mkdir(out_full_path)
    except OSError:
        pass
    file_list = []
    for i in xrange(fcount):
        ret, frame = video.read()
        assert ret
        cv2.imwrite('{}/{:06d}.jpg'.format(out_full_path, i), frame)
        access_path = '{}/{:06d}.jpg'.format(vid_name, i)
        file_list.append(access_path)
    print '{} done'.format(vid_name)
    sys.stdout.flush()
    return file_list 
Example 17
Project: video_labeler   Author: hahnyuan   File: viewer.py
def __init__(self,labels,video_file,box_saver,border=30):
        """
        the GUI Labeler
        :param labels: the labels name string list
        :param video_file: the video file path
        :param border: the border of the center clip filed (white line around the video)
        :param save_dir: label result save path
        :param save_im: if write every cropped image to each label directory
        """
        self.cam = cv2.VideoCapture(video_file)
        self.video_stat = VideoStat(border)
        self.label_stat = LabelStat(labels)
        self.labels=labels
        self.box_saver=box_saver
        cv2.setMouseCallback("video", self.video_click)
        cv2.setMouseCallback("label", self.label_click)
        self.run() 
Example 18
Project: lan-ichat   Author: Forec   File: vchat.py
def __init__(self, ip, port, level, version):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.ADDR = (ip, port)
        if level <= 3:
            self.interval = level
        else:
            self.interval = 3
        self.fx = 1 / (self.interval + 1)
        if self.fx < 0.3:
            self.fx = 0.3
        if version == 4:
            self.sock = socket(AF_INET, SOCK_STREAM)
        else:
            self.sock = socket(AF_INET6, SOCK_STREAM)
        self.cap = cv2.VideoCapture(0) 
Example 19
Project: rekognition-video-utils   Author: awslabs   File: opencv_utils.py
def get_frames_every_x_sec(video, secs=1, fmt='opencv'):
    vidcap = cv2.VideoCapture(video)
    fps = get_frame_rate(vidcap)
    inc = int(fps * secs)
    length = int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    count = 0
    while vidcap.isOpened() and count <= length:
        # read every frame so the stream position actually advances
        success, image = vidcap.read()
        if not success:
            break
        if count % inc == 0:
            cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if fmt == 'PIL':
                im = Image.fromarray(cv2_im)
            #elif fmt == 'DISK':
                #cv2.imwrite(os.path.join(path_output_dir, '%d.png') % count, image)
            else:
                im = cv2_im
            yield count, im
        count += 1
    cv2.destroyAllWindows()
    vidcap.release()

# image region: img = img[c1:c1+25,r1:r1+25] # roi = gray[y1:y2, x1:x2] 
Example 20
Project: Controller-Hand   Author: ardamavi   File: camera.py
def main():
    # Get Model:
    model_file = open('Data/Model/model.json', 'r')
    model = model_file.read()
    model_file.close()
    model = model_from_json(model)
    model.load_weights("Data/Model/weights.h5")

    # Get camera:
    cap = cv2.VideoCapture(0)

    # Open game in browser:
    open_game(browser='chrome', url='http://apps.thecodepost.org/trex/trex.html')

    while 1:
        # Get image from camera:
        ret, img = cap.read()
        Y = predict(model, img)
        if Y == 0:
            release()
        elif Y == 1:
            press()
    cap.release() 
Example 21
Project: faceNet_RealTime   Author: jack55436001   File: facevalid_real_time.py
def main(args):

	saveFace = None
	cap = cv2.VideoCapture(0)
	face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
	while(True):
	    # Capture frame-by-frame
	    ret, frame = cap.read()
	    faces = face_cascade.detectMultiScale(frame, 1.3, 5)
	    if len(faces) > 0:
	    	saveFace = frame
	    	break
	    # Display the resulting frame
	    cv2.imshow('frame',frame)
	    if cv2.waitKey(1) & 0xFF == ord('q'):
	        break

	# When everything done, release the capture
	cap.release()
	cv2.destroyAllWindows()
	cv2.imwrite('C:/Users/USER/Desktop/facenet-RealTime/src/face_data/saveFace.jpg', saveFace)
	
	mypath = 'C:/Users/USER/Desktop/facenet-RealTime/src/face_data'
	onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
	myImage = []
	for file in onlyfiles:
		isImage = None
		file = mypath + '/' + file
		isImage = imghdr.what(file)
		if isImage != None:
			myImage.append(file)

	#begin facenet
	cp.main(args, myImage)
Example 22
Project: serbian-alpr   Author: golubaca   File: IPStream.py
def start(self):
        """
        Create stream object.
        :return: stream
        """

        if self.protocol is "image":
            image = cv2.imread(self.ip_address, 1)
            plate = self.analize_plate.proccess(
                cv2.imencode('.jpg', image)[1].tostring())
            if plate:
                print plate['results']
        else:
            stream = cv2.VideoCapture(self.url)

            self.proccess(stream)
            # return stream 
Example 23
Project: party-pi   Author: JustinShenk   File: play.py
def initialize_webcam(self):
        """ Initialize camera and screenwidth and screenheight.
        """
        self.raspberry = 'raspberrypi' in os.uname()
        if self.piCam:
            camera = self.setup_picamera()
            self.piCamera = camera
            return

        cam = cv2.VideoCapture(0)
        frame = None
        while frame is None:
            try:
                _, frame = cam.read()
                # Update class variables.
                self.screenheight, self.screenwidth = frame.shape[:2]
                cam.set(3, self.screenwidth)   # 3 = CAP_PROP_FRAME_WIDTH
                cam.set(4, self.screenheight)  # 4 = CAP_PROP_FRAME_HEIGHT
            except:
                pass
        self.cam = cam
        return 
Example 24
Project: motorized_zoom_lens   Author: Kurokesu   File: main_2.8-12.py
def grab(cam, queue, width, height, fps):
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)

    while(running):
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        frame["img"] = img
        frame["1"] = config["1"]
        frame["2"] = config["2"]

        blur = get_blur(img, 0.05)
        frame["blur"] = blur

        if queue.qsize() < 10:
            queue.put(frame)
        else:
            # Queue is full; drop this frame and report the backlog
            print(queue.qsize())
Example 25
Project: CanLauncher   Author: hazenhamather   File: imageProcTest.py
def main():
    # cap = cv2.VideoCapture(0)
    os.system("cd /dev")
    os.system("v4l2-ctl --set-fmt-video=width=1920,height=1080,pixelformat=1")
    os.system("cd ~/CanLauncher")

    os.system("config-pin -a P9_14 pwm")
    os.system("config-pin -a P9_21 pwm")
    os.system("config-pin -a P9_22 pwm")

    GPIO.setup(startButton, GPIO.IN)
    GPIO.setup(confirmButton, GPIO.IN)
    # GPIO.setup(launchButton, GPIO.IN)

    time.sleep(0.5)

    boom() 
Example 26
Project: SOLAMS   Author: aishmittal   File: register.py
def startCapture(self):
        global new_user_added
        if new_user_added:

            self.initDir()
            self.capturing = True
            self.capture = cv2.VideoCapture(camera_port)
            self.capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.video_size.width())
            self.capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.video_size.height())

            self.timer = QtCore.QTimer()
            self.timer.timeout.connect(self.display_video_stream)
            self.timer.start(30)

        else:
            self.messageLbl.setText('Warning: First create new user') 
Example 27
Project: meleedb-segment   Author: sashahashi   File: MatchParser.py
def spaced_frames(parser, start=None, end=None, interval=None, num_samples=None, fuzz=4):
    if (interval is None and num_samples is None) or None not in (interval, num_samples):
        raise ValueError('exactly one of (interval, num_samples) must be set')

    vc = cv2.VideoCapture(parser.stream)
    video_length = vc.get(7) / vc.get(5)  # CAP_PROP_FRAME_COUNT / CAP_PROP_FPS = duration in seconds
    if not start or start < 0:
        start = 0
    if not end or end > video_length:
        end = video_length

    total_time = end - start

    if not num_samples:
        num_samples = total_time // interval

    for time in np.linspace(start, end, num=num_samples):
        time += randint(-1 * fuzz, fuzz) / vc.get(5)
        time = min([max([0, time]), total_time])
        vc.set(0, int(time * 1000))  # 0 = CAP_PROP_POS_MSEC
        success, frame = vc.read()

        if success:
            yield (time, cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
    return 
Example 28
Project: c3d_ucf101_siamese_yilin   Author: fxing328   File: descriptor.py
def create_tensor(file1, mean_array):
    video_1 = cv2.VideoCapture(file1)
    # note: the frame count reported by OpenCV is not always accurate
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:, count, :, :] = np.swapaxes(cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
            count = count + 1
            print count
        else:
            break
    tensor = tensor_1[:, :count, :, :]
    return tensor
Example 29
Project: c3d_ucf101_siamese_yilin   Author: fxing328   File: mul_decriptor.py
def create_tensor(file1, mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:, count, :, :] = np.swapaxes(cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
            count = count + 1
            print count
        else:
            break
    return tensor_1
Example 30
Project: c3d_ucf101_siamese_yilin   Author: fxing328   File: pair_evaluation_ucf.py
def create_tensor(file1, mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:, count, :, :] = np.swapaxes(cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
            count = count + 1
            print count
        else:
            break
    tensor = tensor_1[:, :count, :, :]
    return tensor
Example 31
Project: c3d_ucf101_siamese_yilin   Author: fxing328   File: mul_decriptor1.py
def create_tensor(file1, mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:, count, :, :] = np.swapaxes(cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
            count = count + 1
            print count
        else:
            break
    return tensor_1
Example 32
Project: c3d_ucf101_siamese_yilin   Author: fxing328   File: descriptor_ucf.py
def create_tensor(file1, mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:, count, :, :] = np.swapaxes(cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
            count = count + 1
            print count
        else:
            break
    tensor = tensor_1[:, :count, :, :]
    return tensor
Example 33
Project: c3d_ucf101_siamese_yilin   Author: fxing328   File: pair_evaluation.py
def create_tensor(file1, mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:, count, :, :] = np.swapaxes(cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
            count = count + 1
            print count
        else:
            break
    tensor = tensor_1[:, :count, :, :]
    return tensor
Example 34
Project: single_shot_multibox_detector   Author: oarriaga   File: video_demo.py
def start_video(self, model):
        camera = cv2.VideoCapture(0)
        while True:
            frame = camera.read()[1]
            if frame is None:
                continue
            image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_array = cv2.resize(image_array, (300, 300))
            image_array = substract_mean(image_array)
            image_array = np.expand_dims(image_array, 0)
            predictions = model.predict(image_array)
            detections = detect(predictions, self.prior_boxes)
            plot_detections(detections, frame, 0.6,
                            self.arg_to_class, self.colors)
            cv2.imshow('webcam', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        camera.release()
        cv2.destroyAllWindows() 
Example 35
Project: robotics1project   Author: pchorak   File: AR_Camera.py
def initialize(self):
        # Initialize video capture
        self.cap = cv2.VideoCapture(self.ID)

        frameRate = 20.0
        frameWidth = 640
        frameHeight = 480

        if cv2.__version__[0] == "2":
            # Latest Stable Version (2.x)
            self.cap.set(cv2.cv.CV_CAP_PROP_FPS, frameRate)
            self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, frameWidth)
            self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, frameHeight)
        else:
            # version 3.1.0 (BETA)
            self.cap.set(cv2.CAP_PROP_FPS, frameRate)
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)

        self.thresh = 0.4
        self.thresh_img = np.zeros((frameHeight, frameWidth, 3), dtype=np.uint8) 
Example 36
Project: nnp   Author: dribnet   File: fitting.py
def do_key_press(symbol, modifiers):
    global cur_vector
    print("SO: {}".format(symbol))
    if(symbol == key.R):
        if theApp.use_camera:
            theApp.set_camera_recording(not theApp.camera_recording)
    if(symbol == key.T):
        theApp.show_camera = not theApp.show_camera
    elif(symbol == key.SPACE):
        print("SPACEBAR")
        snapshot(None);
    elif(symbol == key.ESCAPE):
        print("ESCAPE")
        cv2.destroyAllWindows()
        if theApp.use_camera:
            cv2.VideoCapture(0).release()
        sys.exit(0) 
Example 37
Project: FindYourCandy   Author: BrainPad   File: capture.py
def capture(self):
        capture = cv2.VideoCapture(self.device)
        capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)

        if not capture.isOpened():
            raise Exception('Failed to open camera capture.')

        for _ in range(0, 10):
            ret, img = capture.read()
            if not ret or self._blur_index(img) < self.blur_thres:
                time.sleep(0.5)
                continue
            capture.release()
            return img

        capture.release()
        raise Exception('Failed to capture image.') 
Example 38
Project: Face-recognition-test   Author: jiangwei1995910   File: Read.py
def start():
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()

        FaceArray=getFaceArray(frame)
        img2=frame
        for r in FaceArray :
            img2=cv2.rectangle(frame, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 3)
            img3 = frame[r[1]:r[3], r[0]:r[2]]  # crop the detected face region
            feature=Tools.get_feature(img3)
            name=readFace(feature)
            font=cv2.FONT_HERSHEY_SIMPLEX
            img2= cv2.putText(img2,name,(r[1],r[3]), font, 1,(255,255,255),2)

        cv2.imshow('frame',img2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break 
Example 39
Project: commercials_project   Author: BryceLuna   File: process_frames.py
def get_frames(file_str):
    '''
    string => None
    This function takes in the source of a video, samples from
    the video and writes those samples to a folder
    '''
    vid = cv2.VideoCapture(file_str)

    if vid.isOpened():
        frame_count = int(vid.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
        step_size = int(1/float(pct_frames))

        for count in xrange(0, frame_count, step_size):
            w_path = write_path(file_str, count)
            vid.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, count)
            ret, frame = vid.read()
            cv2.imwrite(w_path, frame)
        vid.release()
    else:
        print 'unable to open file: {}'.format(file_str) 
Example 40
Project: commercials_project   Author: BryceLuna   File: load_data_2.py
def get_frames(file_str):
        '''
        string => ndarray or None
        Takes the source of a video, seeks to the first sample
        position, and returns that frame.
        '''
        vid = cv2.VideoCapture(file_str)

        if vid.isOpened():
            frame_count = int(vid.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            step_size = int(1/float(pct_frames))

            for count in xrange(0,frame_count,step_size):
                w_path = write_path(file_str,count)
                vid.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,count)
                ret, frame = vid.read()
                count+=step_size
                return frame
            vid.release()
        else:
            print 'unable to open file: {}'.format(file_str) 
Example 41
Project: commercials_project   Author: BryceLuna   File: predict_frames.py
def read_video(self):

        vid = cv2.VideoCapture(self.video_path)
        
        if vid.isOpened():
        
            frame_count = int(vid.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
            self.predictions = np.zeros((frame_count,100,100,3))#need to know frame size
            for count in xrange(frame_count):
                ret,frame = vid.read() #probably don't want to get every frame
                processed_frame = self.process_frame(frame)
                self.predictions[count] = processed_frame
            vid.release()
        else:
            print 'unable to open file: {}'.format(self.video_path)


    # maybe this algorithm should be separated out, or the expected input detected from the model automatically
    # frames should probably be converted to float32 and divided by 255.
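A sketch of the normalization that comment suggests (hypothetical helper, not part of the project) might be:

import cv2
import numpy as np

def normalize_frame(frame, size=(100, 100)):
    # Resize to the model's expected input, cast to float32, scale pixels to [0, 1]
    resized = cv2.resize(frame, size)
    return resized.astype(np.float32) / 255.0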
Example 42
Project: MyoSEMG   Author: LuffyDai   File: Camera.py
def __init__(self, name, ui=myo_emg.Ui_MainWindow(), cap=capture.capture()):
        super(VideoThread, self).__init__()
        self.flag = True
        self.start_flag = False
        self.support_flag = True
        self.name = name
        self.cap = cap
        self.ui = ui
        self.out = None
        self.stop_signal.connect(self.stop_play)
        self.image_siganl.connect(self.saving_video)
        self.start_signal.connect(self.start_capture)
        self.cap.path_signal.connect(self.save_video)
        if self.name == "Video":
            self.videoLabel = ui.Video
            self.camera = cv2.VideoCapture("instruction.mp4")
            self.fps = self.camera.get(cv2.CAP_PROP_FPS)
        elif self.name == "Camera":
            self.videoLabel = ui.Camera
            self.camera = cv2.VideoCapture(camera_port) 
Example 43
Project: LogoDetectionInVideo   Author: nmemme   File: svm_video.py
def test(path):
	cap = cv2.VideoCapture(path)
	testing = []
	while True:
		ret, frame = cap.read()
		if not ret:
			break
		res = cv2.resize(frame, (250, 250))
		gray_image = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
		xarr = np.squeeze(np.array(gray_image).astype(np.float32))
		m, v = cv2.PCACompute(xarr)
		arr = np.array(v)
		flat_arr = arr.ravel()
		testing.append(flat_arr)
	cap.release()
	cv2.destroyAllWindows()
	logos=svm.predict(testing)
	uniqlogos=list(set(logos))
	for i in uniqlogos:
		print(i) 
Example 44
Project: self-supervision   Author: gustavla   File: video_avi_flow_saliency.py
def extract_optical_flow(fn, times, frames=8, scale_factor=1.0):
    cap = cv2.VideoCapture(fn)
    n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    outputs = []
    if n_frames < frames * 2:
        return outputs

    def resize(im):
        if scale_factor != 1.0:
            new_size = (int(im.shape[1] * scale_factor), int(im.shape[0] * scale_factor))
            return cv2.resize(im, new_size, interpolation=cv2.INTER_LINEAR)
        else:
            return im

    for t in times:
        cap.set(cv2.CAP_PROP_POS_FRAMES, min(t * n_frames, n_frames - 1 - frames))
        ret, frame0 = cap.read()
        im0 = resize(cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY))
        mags = []
        middle_frame = frame0
        for f in range(frames - 1):
            ret, frame1 = cap.read()
            if f == frames // 2:
                middle_frame = frame1
            im1 = resize(cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY))
            flow = cv2.calcOpticalFlowFarneback(im0, im1,
                        None, 0.5, 3, 15, 3, 5, 1.2, 0)
            mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            mags.append(mag)
            im0 = im1
        mag = np.sum(mags, 0)
        mag = mag.clip(min=0)
        norm_mag = (mag - mag.min()) / (mag.max() - mag.min() + 1e-5)
        x = middle_frame[..., ::-1].astype(np.float32) / 255
        outputs.append((x, norm_mag))
    return outputs
Example 45
Project: face-recognition   Author: pratush07   File: face_training.py
def face_train_video(train_path,subject,max_train,stream):
    cap = cv2.VideoCapture(stream)
    ret=True
    ctr = 0
    # minimum 10 frames/images per video 
    while(ctr < max_train):
        # read till end of frames
        ret, img = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  
        cv2.imshow("Recognizing Face", img)
        cv2.waitKey(10)
        cv2.imwrite(join(train_path, subject) + "." + str(ctr) + ".jpg", img)  # write the image to disk
        ctr = ctr + 1
    cap.release()
    cv2.destroyAllWindows()

# predict live feed 
Example 46
Project: trackingtermites   Author: dmrib   File: video.py
def __init__(self, video_path, out_path, video_shape, filters, write_capture_info, subtractor='MOG'):
        """Initializer.

        Args:
            video_path (str): path to video file.
            out_path (str): output video destination path.
            video_shape (tuple): default size for frame redimensioning.
            filters (list): list of filter names to apply to the video source.
            write_capture_info (bool): should write frame info when displaying.
            subtractor (str): name of background subtractor.
        Returns:
            None.
        """
        if video_path == '-1':
            video_path = int(video_path)
        self.source = cv2.VideoCapture(video_path)
        if not self.source.isOpened():
            print('Could not find video file.')
            sys.exit()

        if subtractor == 'MOG':
            self.subtractor = cv2.createBackgroundSubtractorMOG2()
        elif subtractor == 'GMG':
            self.subtractor = cv2.bgsegm.createBackgroundSubtractorGMG()

        self.current_frame = None
        self.playing = False
        self.video_shape = video_shape
        self.codec = cv2.VideoWriter_fourcc(*'XVID')
        self.out = cv2.VideoWriter('{}tracking-out.avi'.format(out_path),
                                   self.codec, 30.0, self.video_shape)
        self.filters = filters
        self.write_capture_info = write_capture_info
        self.start() 
Example 47
Project: pdc-project   Author: ealain   File: receiver.py
def receive():
    '''
    1. Locate screen
    2. Follow the variations of intensity in the screen
    '''
    sampling_period = 1/SAMPLING_FREQUENCY
    f = open(EXCHANGE_FILE_PATH, 'w')
    f.write('')
    x,y,w,h = screen_position()
    if((x,y,w,h) == (-1,-1,-1,-1)):
        print("Unable to detect screen")
        return
    cap = cv2.VideoCapture(0)
    values = []
    try:
        while(True):
            ret, frame = cap.read()
            sub_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)[y:y+h, x:x+w]
            values.append(str(np.mean(sub_frame)))
    except KeyboardInterrupt:
        pass
    f.write('\n'.join(values))
    f.close()

    decode() 
Example 48
Project: Facial-Recognition-Tool   Author: JeeveshN   File: face_recog.py
def recognize_video(face_recognizer):
	cap = cv2.VideoCapture(0)
	while True:
		if cap.grab():
			ref, image = cap.retrieve()
			image_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
			faces = FACE_CASCADE.detectMultiScale(image_grey, scaleFactor=1.16, minNeighbors=5, minSize=(25, 25), flags=0)
			for x, y, w, h in faces:
				sub_img = image_grey[y:y+h, x:x+w]
				img = image[y:y+h, x:x+w]
				nbr, conf = face_recognizer.predict(sub_img)
				cv2.rectangle(image, (x-5, y-5), (x+w+5, y+h+5), (255, 255, 0), 2)
				cv2.putText(image, Data_list[nbr], (x, y-10), FONT, 0.5, (255, 255, 0), 1)
			cv2.imshow("Faces Found", image)
		key = cv2.waitKey(1) & 0xFF  # poll the keyboard once per frame
		if key == ord('q') or key == ord('Q'):
			break
	Datafile["Data"] = Data_list
	Datafile.close()
	cap.release()
	cv2.destroyAllWindows()
Example 49
Project: AVSR-Deep-Speech   Author: pandeydivesh15   File: video_stream.py    (GNU General Public License v2.0)
def __init__(self, path, queue_size=128):
		self.stream = cv2.VideoCapture(path)
		self.exit = False

		self.queue = Queue(maxsize=queue_size) 
Example 50
Project: robik   Author: RecunchoMaker   File: scanner.py    (GNU General Public License v2.0)
def __init__(self, cubo, settings):
        """TODO: to be defined1. """
        self.camera_id = DEFAULT_CAMERA
        self.cap = cv2.VideoCapture(self.camera_id)
        self.settings = settings

        self.cubo = cubo
        self.lastmov = 0
        self.lastmovtam = 1
        self.status = ""

        self.reset()