Python moviepy.editor.VideoFileClip() Examples

The following are 30 code examples of moviepy.editor.VideoFileClip(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module moviepy.editor, or try the search function.
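As a quick orientation before the examples, here is a minimal sketch of the basic VideoFileClip workflow (file names are placeholders; this assumes the moviepy 1.x editor API used throughout these examples):

from moviepy.editor import VideoFileClip

clip = VideoFileClip("input.mp4")          # open the video (and its audio track)
print(clip.duration, clip.fps, clip.size)  # basic metadata
short = clip.subclip(0, 5)                 # first five seconds as a new clip
short.write_videofile("short.mp4")         # re-encode and save
clip.close()                               # release the underlying ffmpeg reader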
Example #1
Source File: __init__.py    From aenet with BSD 3-Clause "New" or "Revised" License
def write_wav(self, video_obj, target_wav_file):
        '''
        Writes the audio stream of a video as a wav file suitable as input to HTK

        Parameters
        ----------
        video_obj: a moviepy VideoFileClip

        target_wav_file: path to write the wav file to

        Returns
        -------
        None

        '''
        assert isinstance(video_obj, VideoFileClip), "video needs to be an instance of VideoFileClip"

        # Write audio stream of video to file in the desired format
        video_obj.audio.write_audiofile(target_wav_file, fps=16000,  # Set fps to 16k
                                        codec='pcm_s16le',
                                        ffmpeg_params=['-ac', '1'])  # Convert to mono 
Example #2
Source File: movies_utils.py    From mmvt with GNU General Public License v3.0
def movie_in_movie(movie1_fname, movie2_fname, output_fname, pos=('right', 'bottom'), movie2_ratio=(1/3, 1/3),
                   margin=6, margin_color=(255, 255, 255), audio=False, fps=24, codec='libx264'):
    from moviepy import editor
    movie1 = editor.VideoFileClip(movie1_fname, audio=audio)
    w, h = movie1.size

    # movie2 is downsized, given a colored margin, and
    # placed at `pos` (bottom-right corner by default)
    movie2 = (editor.VideoFileClip(movie2_fname, audio=False).
             resize((w * movie2_ratio[0], h * movie2_ratio[1])).  # scaled by movie2_ratio (one third by default)
             margin(margin, color=margin_color).  # colored margin (white by default)
             margin(bottom=20, right=20, top=20, opacity=0).  # transparent
             set_pos(pos))

    final = editor.CompositeVideoClip([movie1, movie2])
    final.write_videofile(output_fname, fps=fps, codec=codec) 
Example #3
Source File: sum.py    From vidsum with GNU General Public License v3.0
def create_summary(filename, regions):
    """ Join segments

    Args:
        filename(str): filename
        regions(list): list of (start, end) time pairs in seconds
    Returns:
        VideoFileClip: joined subclips in segment

    """
    subclips = []
    input_video = VideoFileClip(filename)
    for (start, end) in regions:
        subclip = input_video.subclip(start, end)
        subclips.append(subclip)
    return concatenate_videoclips(subclips)
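A hypothetical usage of create_summary (file names and times are placeholders), assuming regions holds (start, end) pairs in seconds:

summary = create_summary("lecture.mp4", [(0, 10), (60, 75), (300, 320)])
summary.write_videofile("lecture_summary.mp4")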
Example #4
Source File: movies_utils.py    From mmvt with GNU General Public License v3.0
def add_text_to_movie(movie_fol, movie_name, out_movie_name, subs, fontsize=50, txt_color='red', font='Xolonium-Bold',
                      subs_delim=' ', bg_color=None):
    # Requires ImageMagick to be installed
    # For CentOS 6: https://www.vultr.com/docs/install-imagemagick-on-centos-6
    # For CentOS 7: http://helostore.com/blog/install-imagemagick-on-centos-7
    from moviepy import editor

    def annotate(clip, txt, txt_color=txt_color, fontsize=fontsize):
        """ Writes a text at the bottom of the clip. """
        # For this code to work, ImageMagick's policy.xml must be edited:
        #   identify -list policy
        #   sudo gedit /etc/ImageMagick/policy.xml &
        # Comment out the TEXT and LABEL policy lines
        txtclip = editor.TextClip(txt, fontsize=fontsize, color=txt_color)  # font=font
        # txtclip = txtclip.on_color((clip.w, txtclip.h + 6), color=(0, 0, 255), pos=(6, 'center'))
        cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 'bottom'))])
        return cvc.set_duration(clip.duration)

    if isinstance(subs, str):
        subs = import_subs(movie_fol, subs, subs_delim)
    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    annotated_clips = [annotate(video.subclip(from_t, to_t), txt) for (from_t, to_t), txt in subs]
    final_clip = editor.concatenate_videoclips(annotated_clips)
    final_clip.write_videofile(op.join(movie_fol, out_movie_name)) 
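A hypothetical call to add_text_to_movie (paths and captions are placeholders); besides a subtitle-file name, subs may be passed directly as a list of ((from_t, to_t), text) pairs, matching how the function iterates over it:

subs = [((0, 4), 'Hello'), ((4, 9), 'World')]
add_text_to_movie('/path/to/movies', 'raw.mp4', 'annotated.mp4', subs, fontsize=40)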
Example #5
Source File: file_transfer.py    From mautrix-telegram with GNU Affero General Public License v3.0
def _read_video_thumbnail(data: bytes, video_ext: str = "mp4", frame_ext: str = "png",
                          max_size: Tuple[int, int] = (1024, 720)) -> Tuple[bytes, int, int]:
    with tempfile.NamedTemporaryFile(prefix="mxtg_video_", suffix=f".{video_ext}") as file:
        # We don't have any way to read the video from memory, so save it to disk.
        file.write(data)

        # Read temp file and get frame
        frame = VideoFileClip(file.name).get_frame(0)

    # Convert to png and save to BytesIO
    image = Image.fromarray(frame).convert("RGBA")

    thumbnail_file = BytesIO()
    if max_size:
        image.thumbnail(max_size, Image.ANTIALIAS)  # use Image.LANCZOS on Pillow >= 10, where ANTIALIAS was removed
    image.save(thumbnail_file, frame_ext)

    w, h = image.size
    return thumbnail_file.getvalue(), w, h 
Example #6
Source File: data_generator.py    From Multimodal-Emotion-Recognition with BSD 3-Clause "New" or "Revised" License
def get_samples(subject_id):
    arousal_label_path = root_dir / 'Ratings_affective_behaviour_CCC_centred/arousal/{}.csv'.format(subject_id)
    valence_label_path = root_dir / 'Ratings_affective_behaviour_CCC_centred/valence/{}.csv'.format(subject_id)

    clip = VideoFileClip(str(root_dir / "Video_recordings_MP4/{}.mp4".format(subject_id)))

    subsampled_audio = clip.audio.set_fps(16000)

    audio_frames = []
    for i in range(1, 7501):
        time = 0.04 * i

        audio = np.array(list(subsampled_audio.subclip(time - 0.04, time).iter_frames()))
        audio = audio.mean(1)[:640]

        audio_frames.append(audio.astype(np.float32))

    arousal = np.loadtxt(str(arousal_label_path), delimiter=',')[:, 1][1:]
    valence = np.loadtxt(str(valence_label_path), delimiter=',')[:, 1][1:]

    return audio_frames, np.dstack([arousal, valence])[0].astype(np.float32) 
Example #7
Source File: img_utils.py    From tools_python with Apache License 2.0
def one_pic_to_video(image_path, output_video_path, fps, time):
    """
    一张图片合成视频
    one_pic_to_video('./../source/1.jpeg', './../source/output.mp4', 25, 10)
    :param path: 图片文件路径
    :param output_video_path:合成视频的路径
    :param fps:帧率
    :param time:时长
    :return:
    """

    image_clip = ImageClip(image_path)
    img_width, img_height = image_clip.w, image_clip.h

    # total number of frames
    frame_num = int(fps * time)

    img_size = (int(img_width), int(img_height))

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

    video = cv2.VideoWriter(output_video_path, fourcc, fps, img_size)

    for index in range(frame_num):
        frame = cv2.imread(image_path)
        # resize directly to the target size
        frame_suitable = cv2.resize(frame, (img_size[0], img_size[1]), interpolation=cv2.INTER_CUBIC)

        # write the same frame into the video, frame_num times in total
        video.write(frame_suitable)

    # release resources
    video.release()

    return VideoFileClip(output_video_path) 
Example #8
Source File: videogrep.py    From videogrep with MIT License
def create_supercut(composition, outputfile, padding):
    """Concatenate video clips together and output finished video file to the
    output directory.
    """
    print("[+] Creating clips.")
    demo_supercut(composition, padding)

    # add padding when necessary
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding

    # put all clips together:
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]

    print("[+] Concatenating clips.")
    final_clip = concatenate(cut_clips)

    print("[+] Writing ouput file.")
    final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac') 
Example #9
Source File: movies_utils.py    From mmvt with GNU General Public License v3.0
def create_animated_gif(movie_fol, movie_name, out_movie_name, fps=None):
    from moviepy import editor
    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    video.write_gif(op.join(movie_fol, out_movie_name), fps=fps)  # program='ImageMagick', opt='OptimizeTransparency'
Example #10
Source File: video.py    From Stone-Soup with MIT License
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        end_time_sec = self.end_time.total_seconds() if self.end_time is not None else None
        self.clip = mpy.VideoFileClip(str(self.path)) \
            .subclip(self.start_time.total_seconds(), end_time_sec) 
Example #11
Source File: movies_utils.py    From mmvt with GNU General Public License v3.0
def crop_movie(fol, movie_name, out_movie_name, crop_ys=(), crop_xs=(), **kwargs):
    # Example: crop_ys = (60, 1170)
    from moviepy import editor
    video = editor.VideoFileClip(op.join(fol, movie_name))
    # Reassign `video` so that x- and y-crops compose, and the function
    # still works when neither (or only one) crop range is given
    if len(crop_xs) > 0:
        video = video.crop(x1=crop_xs[0], x2=crop_xs[1])
    if len(crop_ys) > 0:
        video = video.crop(y1=crop_ys[0], y2=crop_ys[1])
    video.write_videofile(op.join(fol, out_movie_name))
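A hypothetical call (paths are placeholders) that keeps only the rows between y=60 and y=1170 while leaving the width untouched, matching the example in the comment:

crop_movie('/path/to/movies', 'raw.mp4', 'cropped.mp4', crop_ys=(60, 1170))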
Example #12
Source File: movies_utils.py    From mmvt with GNU General Public License v3.0
def cut_movie(movie_fol, movie_name, out_movie_name, subclips_times):
    from moviepy import editor
    # e.g. subclips_times = [(3, 4), (6, 17), (38, 42)]
    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    subclips = []
    for from_t, to_t in subclips_times:
        clip = video.subclip(from_t, to_t)
        subclips.append(clip)
    final_clip = editor.concatenate_videoclips(subclips)
    final_clip.write_videofile(op.join(movie_fol, out_movie_name)) 
Example #13
Source File: gen_video.py    From Advanced_Lane_Lines with MIT License
def get_image(video, dst, frame_list):
	'''
	get image from the video
	frame_list = [1, 3, 5, 7, 9]
	'''
	clip = VideoFileClip(video)

	for t in frame_list:
		imgpath = os.path.join(dst, '{}.jpg'.format(t))
		clip.save_frame(imgpath, t) 
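A hypothetical call (paths are placeholders) that saves the frames at t = 1, 3, 5, 7 and 9 seconds, since save_frame takes the time in seconds:

get_image('project_video.mp4', './frames', [1, 3, 5, 7, 9])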
Example #14
Source File: ssd_objection_recog.py    From deep_learning with MIT License
def process_video(input_path, output_path):
    clip = VideoFileClip(input_path)
    result = clip.fl_image(process_image)
    # the original `%time` prefix is an IPython/Jupyter magic, not valid plain Python
    result.write_videofile(output_path, audio=False)
Example #15
Source File: crabrave.py    From Trusty-cogs with MIT License
def make_crab(self, t: str, u_id: int) -> bool:
        """Non blocking crab rave video generation from DankMemer bot

        https://github.com/DankMemer/meme-server/blob/master/endpoints/crab.py
        """
        fp = str(cog_data_path(self) / f"Verdana.ttf")
        clip = VideoFileClip(str(cog_data_path(self)) + "/template.mp4")
        # clip.volume(0.5)
        text = TextClip(t[0], fontsize=48, color="white", font=fp)
        text2 = (
            TextClip("____________________", fontsize=48, color="white", font=fp)
            .set_position(("center", 210))
            .set_duration(15.4)
        )
        text = text.set_position(("center", 200)).set_duration(15.4)
        text3 = (
            TextClip(t[1], fontsize=48, color="white", font=fp)
            .set_position(("center", 270))
            .set_duration(15.4)
        )

        video = CompositeVideoClip(
            [clip, text.crossfadein(1), text2.crossfadein(1), text3.crossfadein(1)]
        ).set_duration(15.4)
        video = video.volumex(0.1)
        video.write_videofile(
            str(cog_data_path(self)) + f"/{u_id}crabrave.mp4",
            threads=1,
            preset="superfast",
            verbose=False,
            logger=None,
            temp_audiofile=str(cog_data_path(self) / f"{u_id}crabraveaudio.mp3")
            # ffmpeg_params=["-filter:a", "volume=0.5"]
        )
        clip.close()
        video.close()
        return True 
Example #16
Source File: utils.py    From magic-the-gifening with MIT License
def create_mtg_gif(name, id, border):
    if border == 'm':   # Modern (post-8th Ed)
        card_upper_corner = (19, 38)
        gif_width = 202 - card_upper_corner[0]
        gif_height = 172 - card_upper_corner[1]
    elif border == 'c':   # Current (post-Magic 2015)
        card_upper_corner = (17, 34)
        gif_width = 204 - card_upper_corner[0]
        gif_height = 173 - card_upper_corner[1]
    else:   # Old (pre-8th Ed)
        card_upper_corner = (25, 30)
        gif_width = 196 - card_upper_corner[0]
        gif_height = 168 - card_upper_corner[1]

    mtg_card = Image.open(BytesIO(requests.get(get_mtg_image(id)).content))
    mtg_card = ImageClip(np.asarray(mtg_card)).resize((222, 310))

    get_giphy_gif(name)
    giphy_gif = (VideoFileClip('giphy_gif.mp4',
                               target_resolution=(gif_height, gif_width))
                 .set_pos(card_upper_corner)

                 )

    if giphy_gif.duration < 2:
        giphy_gif = giphy_gif.fx(loop, n=1+int(2 // giphy_gif.duration))

    mtg_gif = CompositeVideoClip([mtg_card, giphy_gif])
    mtg_gif = mtg_gif.set_start(0).set_duration(giphy_gif.duration)
    # mtg_gif.write_gif("mtg_gif.gif")
    mtg_gif.write_videofile("mtg_gif.mp4", codec='libx264',
                            bitrate=str(np.power(10, 7)), verbose=False,
                            progress_bar=False,
                            audio=False, ffmpeg_params=['-pix_fmt', 'yuv420p']) 
Example #17
Source File: run.py    From starthinker with Apache License 2.0
def edit_video(video):
  clips = [mp.VideoFileClip(video['file_or_url'])]

  for effect in video['effects']:
    clips.extend(get_effects(clips[0], effect))
 
  video = mp.CompositeVideoClip(clips)

  return video 
Example #18
Source File: youtube.py    From smd with MIT License
def convertVideoToMusic(self, uri):
        fullpath = os.getcwd() + f'/cache/{uri}/'
        try:
            if not os.path.exists(fullpath):
                os.makedirs(fullpath)
        except OSError:
            logging.error("Youtube: os.makedirs(fullpath) failed")

        # Wrap the actual conversion, so the except/return -1 path can fire
        try:
            clip = mp.VideoFileClip(f'cache/{uri}/{uri}.mp4').subclip()
            clip.audio.write_audiofile(f'cache/{uri}/{uri}.mp3', bitrate='3000k', progress_bar=True)
            return 0
        except Exception:
            logging.error("Youtube.convertVideoToMusic")
            return -1
Example #19
Source File: tensorboardmonitor.py    From midlevel-reps with MIT License
def _close_video_recorder(self):
        self.video_recorder.close()
        if self.video_recorder.functional:
            path, metadata_path = self.video_recorder.path, self.video_recorder.metadata_path
            self.videos.append((path, metadata_path))

            # Not guaranteed to be working
            clip = VideoFileClip(path)
            clip_frames = np.array([f for f in clip.iter_frames(dtype=np.uint8)])
            clip_frames = np.rollaxis(clip_frames, -1, 1)
            clip_frames = clip_frames[:, ::-1, ...]
#             assert clip_frames.shape == 2, "shape is {}".format(clip_frames.shape)
            self.writer.add_video(self.visdom_env, clip_frames[np.newaxis, ...], global_step=self.episode_id, fps=clip.fps) 
Example #20
Source File: InstagramAPI.py    From Osintgram with GNU General Public License v3.0
def configureVideo(self, upload_id, video, thumbnail, caption=''):
        clip = VideoFileClip(video)
        self.uploadPhoto(photo=thumbnail, caption=caption, upload_id=upload_id)
        data = json.dumps({
            'upload_id': upload_id,
            'source_type': 3,
            'poster_frame_index': 0,
            'length': 0.00,
            'audio_muted': False,
            'filter_type': 0,
            'video_result': 'deprecated',
            'clips': {
                'length': clip.duration,
                'source_type': '3',
                'camera_position': 'back',
            },
            'extra': {
                'source_width': clip.size[0],
                'source_height': clip.size[1],
            },
            'device': self.DEVICE_SETTINTS,
            '_csrftoken': self.token,
            '_uuid': self.uuid,
            '_uid': self.username_id,
            'caption': caption,
        })
        return self.SendRequest('media/configure/?video=1', self.generateSignature(data)) 
Example #21
Source File: utils.py    From thingscoop with MIT License
def create_compilation(filename, index):
    dims = get_video_dimensions(filename)
    subclips = []
    video_file = VideoFileClip(filename)
    for label in sorted(index.keys()):
        label_img_filename = create_title_frame(label_as_title(label), dims)
        label_clip = ImageClip(label_img_filename, duration=2)
        os.remove(label_img_filename)
        subclips.append(label_clip)
        for region in index[label]:
            subclip = video_file.subclip(*region)
            subclips.append(subclip)
    if not subclips: return None
    return concatenate_videoclips(subclips) 
Example #22
Source File: video_utils.py    From videograph with GNU General Public License v3.0
def video_uniform_sample_n_frames_and_save(n_samples, video_path, frames_path, image_name_format, resize_type, verbose=False):
    if resize_type is not None:
        assert resize_type in ['resize', 'resize_crop', 'resize_crop_scaled']

    resize_function = None
    if resize_type == 'resize':
        resize_function = image_utils.resize_frame
    elif resize_type == 'resize_crop':
        resize_function = image_utils.resize_crop
    elif resize_type == 'resize_crop_scaled':
        resize_function = image_utils.resize_crop_scaled

    cap = moviepyeditor.VideoFileClip(video_path)
    fps = cap.fps
    duration = cap.duration
    step = duration / n_samples

    for i in range(n_samples):
        num = i + 1
        if verbose:
            print(' ... reading frame %d/%d' % (num, n_samples))

        time_sec = i * step
        frame = cap.get_frame(time_sec)

        if resize_type is not None:
            # resize frame to fit in the array, it's going to be used by caffe anyway
            frame = resize_function(frame)

        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        image_name = image_name_format % (num,)
        frame_path = os.path.join(frames_path, image_name)
        cv2.imwrite(frame_path, frame)

    # very important, or we'd have memory leakage
    cap.reader.close()
    cap.close()
    del cap.reader
    del cap

    return fps, n_samples, duration 
Example #23
Source File: utils.py    From thingscoop with MIT License
def create_supercut(regions):
    subclips = []
    filenames = set(filename for filename, _ in regions)  # Python 3: tuple-unpacking lambdas are not allowed
    video_files = {filename: VideoFileClip(filename) for filename in filenames}
    for filename, region in regions:
        subclip = video_files[filename].subclip(*region)
        subclips.append(subclip)
    if not subclips: return None
    return concatenate_videoclips(subclips) 
Example #24
Source File: remove_dup_file_type.py    From mmvt with GNU General Public License v3.0
def combine_movies(fol, final_movie_name, fps=60, movie_type='mp4'):
    from moviepy.editor import VideoFileClip, concatenate_videoclips

    parts = [VideoFileClip(op.join(fol, '{}_{}.{}'.format(movie_name, fps, movie_type))) for movie_name in [
        'meg_helmet_with_brain', 'eeg_with_brain', 'meg', 'connections', 'electrodes']]
    final_movie = concatenate_videoclips(parts, method='chain')
    final_movie.write_videofile(op.join(fol, '{}.{}'.format(final_movie_name, movie_type)), fps=fps, threads=1) 
Example #25
Source File: video_frame_remover.py    From video_frame_remover with MIT License
def remove_frame(in_fname, out_fname, n_sample_frames=100):
    sample_frames = get_frames(in_fname, n_sample_frames)
    input_frame = get_median_frame(sample_frames)
    res = get_frame_box_coords(input_frame)
    if res is None:
        print("No border was detected in {}".format(in_fname))
        return None
    else:
        x, y, w, h = res
    clip = VideoFileClip(in_fname)
    crop_clip = crop(clip, x1=x, y1=y, x2=x + w, y2=y + h)
    crop_clip.write_videofile(out_fname) 
Example #26
Source File: test.py    From seg-mentor with MIT License
def segment_movie(fcnfunc, checkpoint, video_file_in, pixels=None):
    from PIL import Image
    from moviepy.editor import VideoFileClip

    image_ph = tf.placeholder(tf.int32)  # , shape = smth w. pixels (doesn't work..:)
    image_t3d, predictions = prepare_graph(fcnfunc, image_ph, pixels)

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        tf.train.Saver().restore(sess, checkpoint)
        #ext = '.mp4'
        ext = video_file_in[video_file_in.rfind('.'):]  # rfind: take the last dot, i.e. the real extension
        video_file_out = video_file_in.replace(ext, '_segmented'+ext)
        video_file_out = video_file_out.replace('.avi', '.mp4')
        # print(video_file_out)

        input_clip = VideoFileClip(video_file_in)

        mask_alpha = round(0.3*255)
        colors = np.random.random([21, 3])
        colors -= np.min(colors, axis=1)[:, np.newaxis]
        colors /= np.max(colors, axis=1)[:, np.newaxis]
        colors *= 255
        colors = np.concatenate(( np.round(colors), mask_alpha*np.ones((21,1))), axis=1)
        background_class = 0 # TODO what about other cases?
        colors[background_class][3] = 0

        def process_frame(image_in):
            scaled_image, inferred_pixel_labels = sess.run([image_t3d, predictions], {image_ph: image_in})
            seg_mask = np.take(colors, inferred_pixel_labels, axis=0)
            # print seg_mask.shape, type(seg_mask)
            image_in_walpha = np.concatenate((scaled_image, (255-mask_alpha)*np.ones(scaled_image.shape[:2]+(1,))), axis=2)
            # print inferred_pixel_labels.shape, seg_mask.shape, image_in_walpha.shape
            # print np.min(rescaled_image), np.max(rescaled_image)
            composite = Image.alpha_composite(Image.fromarray(np.uint8(image_in_walpha)),
                                              Image.fromarray(np.uint8(seg_mask)))
            composite_backscaled = composite.resize(image_in.shape[1::-1], Image.LANCZOS)
            return np.array(composite_backscaled)[:,:,:3]

        annotated_clip = input_clip.fl_image(process_frame)
        annotated_clip.write_videofile(video_file_out, audio=False) 
Example #27
Source File: inference.py    From celeb-detection-oss with Mozilla Public License 2.0
def process_gif(path):
    gif = mov_editor.VideoFileClip(path)
    selected_frames = evenly_spaced_sampling(list(gif.iter_frames()), gif_frames)
    face_images_by_frames = face_detector.perform_bulk(selected_frames, range(len(selected_frames)))
    face_images = []
    for frame_faces in face_images_by_frames.values():
        face_images.extend([preprocess_image(image, image_size) for image, _ in frame_faces])
    return face_recognizer.perform(face_images) 
Example #28
Source File: app.py    From celeb-detection-oss with Mozilla Public License 2.0
def load_gif(path, url):
    try:
        gif = mov_editor.VideoFileClip(url)
        gif_hash = hashlib.md5(gif.filename.encode('utf-8')).hexdigest()
        base_name = f'{gif_hash}.gif'
        filename = os.path.join(path, base_name)
        gif.write_gif(filename)
        app.logger.info(f'Saved {url} to {filename}')
        return base_name, gif
    except ACCEPTABLE_ERRORS as ex:
        app.logger.warn(f'Cannot download {url} error: {ex}') 
Example #29
Source File: test.py    From Object_Detection_Tracking with Apache License 2.0
def speed_test_moviepy(video_dir, video_list=VIDEO_LIST):
    from moviepy.editor import VideoFileClip
    for video_name in video_list:
        print('\t', video_name, flush=True)
        bar = ProgressBar().start()
        clip = VideoFileClip(osp.join(video_dir, video_name))
        for i in bar(range(int(clip.duration * clip.fps))):
            clip.get_frame(i / clip.fps) 
Example #30
Source File: videogrep.py    From videogrep with MIT License
def split_clips(composition, outputfile):
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]

    basename, ext = os.path.splitext(outputfile)
    print("[+] Writing ouput files.")
    for i, clip in enumerate(cut_clips):
        clipfilename = basename + '_' + str(i).zfill(5) + ext
        clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')