Python librosa.frames_to_time() Examples

The following are 9 code examples showing how to use librosa.frames_to_time(). They are extracted from open source projects; the originating project, author, file, and license are noted above each example.

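Before the examples, a minimal sketch of what frames_to_time() computes: it maps frame indices to timestamps in seconds as time = frames * hop_length / sr. The snippet below uses librosa's defaults (sr=22050, hop_length=512); the values in the comment follow from that formula.

import librosa
import numpy as np

# frames_to_time maps frame indices to seconds: time = frames * hop_length / sr
frames = np.arange(5)
times = librosa.frames_to_time(frames, sr=22050, hop_length=512)
print(times)  # [0.         0.02321995 0.04643991 0.06965986 0.09287982]

# time_to_frames performs the inverse mapping (with flooring)
frames_back = librosa.time_to_frames(times, sr=22050, hop_length=512)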

Example 1
Project: amen   Author: algorithmic-music-exploration   File: audio.py    License: BSD 2-Clause "Simplified" License
def _get_beats(self):
        """
        Gets beats using librosa's beat tracker.
        """
        _, beat_frames = librosa.beat.beat_track(
            y=self.analysis_samples, sr=self.analysis_sample_rate, trim=False
        )

        # pad beat times to full duration
        f_max = librosa.time_to_frames(self.duration, sr=self.analysis_sample_rate)
        beat_frames = librosa.util.fix_frames(beat_frames, x_min=0, x_max=f_max)

        # convert frames to times
        beat_times = librosa.frames_to_time(beat_frames, sr=self.analysis_sample_rate)

        # make the list of (start, duration) tuples that TimingList expects
        starts_durs = [(s, t - s) for (s, t) in zip(beat_times, beat_times[1:])]

        return starts_durs 
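
A quick illustration of the padding step in the example above, with made-up frame values: librosa.util.fix_frames clips the beat frames to [x_min, x_max] and, with the default pad=True, inserts the two endpoints so that the (start, duration) tuples cover the whole track.

import numpy as np
import librosa

beat_frames = np.array([12, 55, 98])  # made-up beat frames
print(librosa.util.fix_frames(beat_frames, x_min=0, x_max=120))
# [  0  12  55  98 120]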
Example 2
Project: amen   Author: algorithmic-music-exploration   File: audio.py    License: BSD 2-Clause "Simplified" License
def _get_segments(self):
        """
        Gets Echo Nest style segments using librosa's onset detection and backtracking.
        """

        onset_frames = librosa.onset.onset_detect(
            y=self.analysis_samples, sr=self.analysis_sample_rate, backtrack=True
        )
        segment_times = librosa.frames_to_time(
            onset_frames, sr=self.analysis_sample_rate
        )

        # make the list of (start, duration) tuples that TimingList expects
        starts_durs = [(s, t - s) for (s, t) in zip(segment_times, segment_times[1:])]

        return starts_durs 
Example 3
Project: amen   Author: algorithmic-music-exploration   File: audio.py    License: BSD 2-Clause "Simplified" License
def _convert_to_dataframe(cls, feature_data, columns):
        """
        Take raw librosa feature data, convert to a pandas dataframe.

        Parameters
        ----------
        feature_data: numpy array
            an N by T array, where N is the number of features and T is the number of time frames

        columns: list of str
            a list of column names of length N, matching the N dimension of feature_data

        Returns
        -------
        pandas.DataFrame
        """
        feature_data = feature_data.transpose()
        frame_numbers = np.arange(len(feature_data))
        indexes = librosa.frames_to_time(frame_numbers)
        indexes = pd.to_timedelta(indexes, unit='s')
        data = pd.DataFrame(data=feature_data, index=indexes, columns=columns)
        return data 
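
As a usage sketch for the conversion above (the feature choice, the synthetic tone, and the column names are just for illustration), an MFCC matrix from librosa can be turned into a time-indexed DataFrame the same way:

import librosa
import numpy as np
import pandas as pd

sr = 22050
y = librosa.tone(440, sr=sr, duration=2.0)          # synthetic test tone

mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=4)   # shape (4, T)
frames = np.arange(mfcc.shape[1])
index = pd.to_timedelta(librosa.frames_to_time(frames, sr=sr), unit='s')
df = pd.DataFrame(mfcc.T, index=index, columns=['mfcc_%d' % i for i in range(4)])
print(df.head())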
Example 4
Project: rcaudio   Author: mhy12345   File: beat_analyzer.py    License: MIT License
def run(self):
        while self.recorder.start_time is None:
            time.sleep(1)
        self.current_b = time.time()
        self.start_time = self.recorder.start_time
        while self.running.isSet():
            if len(self.audio_data) < 4 * self.sr:
                time.sleep(.5)
                self.logger.debug("The data is not enough...")
                continue
            start_samples = len(self.audio_data) - self.rec_size if len(self.audio_data) > self.rec_size else 0
            data = np.array(self.audio_data[start_samples:]).astype(np.float32)
            start_time = start_samples / self.sr
            tempo, beat_frames = librosa.beat.beat_track(y=data, sr=self.sr)
            # pass sr explicitly: frames_to_time defaults to sr=22050 otherwise
            beat_times = librosa.frames_to_time(beat_frames, sr=self.sr) + start_time + self.start_time

            if len(beat_times) < 5:
                self.logger.debug("The beats count <%d> is not enough..."%len(beat_times))
                continue
            
            self.expected_k, self.expected_b = np.polyfit(range(len(beat_times)), beat_times, 1)
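
The final line fits beat index against beat time with np.polyfit; the slope expected_k is the beat period in seconds. As a rough sketch with synthetic, evenly spaced beats, the tempo in BPM falls straight out of that fit:

import numpy as np

beat_times = np.array([0.5, 1.0, 1.5, 2.0, 2.5])  # synthetic beats, 0.5 s apart
k, b = np.polyfit(range(len(beat_times)), beat_times, 1)
print(60.0 / k)  # 120.0 BPM: the slope k is the beat period in seconds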
Example 5
def __init__(self, loadedAudio):
        self.wav = loadedAudio[0]
        self.samplefreq = loadedAudio[1]
        #If imported as 16-bit, convert to floating 32-bit ranging from -1 to 1
        if (self.wav.dtype == 'int16'):
            self.wav = self.wav/(2.0**15)
        self.channels = 1  #Assumes mono, if stereo then 2 (found by self.wav.shape[1])
        self.sample_points = self.wav.shape[0]
        self.audio_length_seconds = self.sample_points/self.samplefreq
        self.time_array_seconds = np.arange(0, self.sample_points, 1)/self.samplefreq
        #Compute tempo (BPM) and beat frames in a single pass of the beat tracker
        self.tempo_bpm, self.beat_frames = librosa.beat.beat_track(y=self.wav, sr=self.samplefreq)
        #Transform beat array into seconds (these are the times when the beat hits)
        self.beat_times = librosa.frames_to_time(self.beat_frames, sr=self.samplefreq)
        #Get the rolloff frequency - the frequency below which 90% of the spectral energy lies, like a low pass filter cutoff
        self.rolloff_freq = np.mean(librosa.feature.spectral_rolloff(y=self.wav, sr=self.samplefreq, hop_length=512, roll_percent=0.9)) 
Example 6
Project: msaf   Author: urinieto   File: base.py    License: MIT License
def estimate_beats(self):
        """Estimates the beats using librosa.

        Returns
        -------
        times: np.array
            Times of estimated beats in seconds.
        frames: np.array
            Frame indices of estimated beats.
        """
        # Compute harmonic-percussive source separation if needed
        if self._audio_percussive is None:
            self._audio_harmonic, self._audio_percussive = self.compute_HPSS()

        # Compute beats
        tempo, frames = librosa.beat.beat_track(
            y=self._audio_percussive, sr=self.sr,
            hop_length=self.hop_length)

        # To times
        times = librosa.frames_to_time(frames, sr=self.sr,
                                       hop_length=self.hop_length)

        # TODO: Is this really necessary?
        if len(times) > 0 and times[0] == 0:
            times = times[1:]
            frames = frames[1:]

        return times, frames 
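
compute_HPSS above is internal to msaf; with plain librosa, a roughly equivalent sketch (the 'audio.wav' path is a placeholder) isolates the percussive component before beat tracking:

import librosa

y, sr = librosa.load('audio.wav')       # placeholder path
y_perc = librosa.effects.percussive(y)  # percussive component via HPSS
tempo, frames = librosa.beat.beat_track(y=y_perc, sr=sr, hop_length=512)
times = librosa.frames_to_time(frames, sr=sr, hop_length=512)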
Example 7
Project: msaf   Author: urinieto   File: base.py    License: MIT License
def _compute_framesync_times(self):
        """Computes the framesync times based on the framesync features."""
        self._framesync_times = librosa.core.frames_to_time(
            np.arange(self._framesync_features.shape[0]), sr=self.sr,
            hop_length=self.hop_length) 
Example 8
Project: delta   Author: didi   File: speech_cls_task.py    License: Apache License 2.0
def get_duration(self, filename, sr):  #pylint: disable=invalid-name
    ''' time in second '''
    if filename.endswith('.npy'):
      nframe = np.load(filename).shape[0]
      # self._winstep is the hop size in seconds, so hop_length is in samples
      return librosa.frames_to_time(
          nframe, hop_length=self._winstep * sr, sr=sr)

    if filename.endswith('.wav'):
      return librosa.get_duration(filename=filename)

    raise ValueError("filename suffix not .npy or .wav: {}".format(
        os.path.splitext(filename)[-1])) 
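
frames_to_time() also accepts a plain scalar, which is what makes the .npy branch above work. For example, 100 frames at a 10 ms hop (160 samples at 16 kHz) span exactly one second:

import librosa

print(librosa.frames_to_time(100, sr=16000, hop_length=160))  # 1.0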
Example 9
Project: jams   Author: marl   File: example_beat.py    License: ISC License
def beat_track(infile, outfile):

    # Load the audio file
    y, sr = librosa.load(infile)

    # Compute the track duration
    track_duration = librosa.get_duration(y=y, sr=sr)

    # Extract tempo and beat estimates
    tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)

    # Convert beat frames to time
    beat_times = librosa.frames_to_time(beat_frames, sr=sr)

    # Construct a new JAMS object and annotation records
    jam = jams.JAMS()

    # Store the track duration
    jam.file_metadata.duration = track_duration

    beat_a = jams.Annotation(namespace='beat')
    beat_a.annotation_metadata = jams.AnnotationMetadata(data_source='librosa beat tracker')

    # Add beat timings to the annotation record.
    # The beat namespace does not require value or confidence fields,
    # so we can leave those blank.
    for t in beat_times:
        beat_a.append(time=t, duration=0.0)

    # Store the new annotation in the jam
    jam.annotations.append(beat_a)

    # Add tempo estimation to the annotation.
    tempo_a = jams.Annotation(namespace='tempo', time=0, duration=track_duration)
    tempo_a.annotation_metadata = jams.AnnotationMetadata(data_source='librosa tempo estimator')

    # The tempo estimate is global, so it should start at time=0 and cover the full
    # track duration.
    # If we had a likelihood score on the estimation, it could be stored in
    # `confidence`.  Since we have no competing estimates, we'll set it to 1.0.
    tempo_a.append(time=0.0,
                   duration=track_duration,
                   value=tempo,
                   confidence=1.0)

    # Store the new annotation in the jam
    jam.annotations.append(tempo_a)

    # Save to disk
    jam.save(outfile)
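
A minimal invocation of the script above might look like this (both file paths are placeholders):

beat_track('input.wav', 'output.jams')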