Python numpy.Array() Examples

The following are 12 code examples showing how numpy.Array is used. These examples are extracted from open source projects; the Project, Author, and File lines above each example identify the original source. Note that NumPy itself provides numpy.array (lowercase) and the numpy.ndarray type; `np.Array` appears in these examples mostly as a docstring or type-comment annotation for NumPy arrays.

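Before the examples, a minimal sketch of the constructor these annotations refer to; in NumPy itself the spelling is numpy.array (lowercase), and the values below are illustrative:

import numpy as np

# np.array infers the dtype from the nested list
a = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])
print(a.shape)  # (2, 3)
print(a.dtype)  # float64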

Example 1
Project: AiGEM_TeamHeidelberg2017   Author: igemsoftware2017   File: helpers.py    License: MIT License
def _variable_on_cpu(name, shape, initializer, trainable):
    """Helper function to get a variable stored on cpu.

    Args:
      name: A `str` holding the name of the variable.
      shape: An `Array` defining the shape of the Variable. For example: [2,1,3].
      initializer: The `tf.Initializer` to use to initialize the variable.
      trainable: A `bool` stating whether the variable is trainable or not.

    Returns:
      A `tf.Variable` on CPU.
    """
    with tf.device('/cpu:0'): #TODO will this work?
        dtype = tf.float32
        var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype, trainable=trainable)
    #dtf.add_to_collection('CPU', var)
    return var 
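
A hedged usage sketch (assuming TensorFlow 1.x, where tf.get_variable and tf.device are available; the variable name and shape below are illustrative):

import tensorflow as tf

w = _variable_on_cpu(name='weights', shape=[2, 1, 3],
                     initializer=tf.zeros_initializer(), trainable=True)
print(w)  # <tf.Variable 'weights:0' shape=(2, 1, 3) dtype=float32>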
Example 2
Project: exbert   Author: bhoov   File: token_processing.py    License: Apache License 2.0
def process_hidden_tensors(t):
    """Embeddings are returned from the BERT model in a non-ideal embedding shape:
        - unnecessary batch dimension
        - undesired second sentence "[SEP]".
    
    Drop the unnecessary information and just return what we need for the first sentence
    """
    # Drop unnecessary batch dim and second sent
    t = t.squeeze(0)[:-1]

    # Drop second sentence sep ??
    t = t[1:-1]

    # Convert to numpy
    return t.data.numpy()


# np.Array -> np.Array 
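
A hedged usage sketch (assuming PyTorch; the tensor shape mimics a BERT hidden-state output with batch 1, 8 tokens, hidden size 768):

import torch

t = torch.randn(1, 8, 768)        # (batch, tokens, hidden)
out = process_hidden_tensors(t)   # drops batch dim and special tokens
print(out.shape)                  # (5, 768) -- a plain numpy.ndarray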
Example 3
Project: vasppy   Author: bjmorgan   File: kpoints.py    License: MIT License
def __init__( self, title, subdivisions, grid_centering='G', shift=np.array( [ 0., 0., 0. ] ) ):
        """
        Initialise an AutoKPoints object

        Args:
            title (Str): The first line of the file, treated as a comment by VASP.
            subdivisions (np.Array( Int, Int, Int )): Numbers of subdivisions along each reciprocal lattice vector.
            grid_centering (Str, optional): Specify gamma-centered (G) or the original Monkhorst-Pack scheme (MP). Default is 'G'.
            shift (np.Array( Float, Float, Float ), optional): Optional shift of the mesh (s_1, s_2, s_3). Default is [ 0., 0., 0. ].

        Returns:
            None
        """
        accepted_grid_centerings = [ 'G', 'MP' ]
        if grid_centering not in accepted_grid_centerings:
            raise ValueError( "grid_centering must be one of {}".format( accepted_grid_centerings ) )
        self.title = title
        self.grid_centering = grid_centering
        self.subdivisions = subdivisions
        self.shift = shift 
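
A hedged usage sketch (assuming the enclosing AutoKPoints class from vasppy; the grid values below are illustrative):

import numpy as np

kp = AutoKPoints('Automatic mesh', np.array([4, 4, 4]),
                 grid_centering='MP', shift=np.array([0.0, 0.0, 0.5]))
print(kp.grid_centering)  # 'MP'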
Example 4
Project: fritz-models   Author: fritzlabs   File: tfrecord_helpers.py    License: MIT License
def update_mask(record, mask_array):
    """Update mask in tensorflow example.

    Args:
        record (tf.train.Example): Record to update
        mask_array (numpy.Array): HxW array of class values.

    Returns: Updated tf.train.Example.
    """
    def norm2bytes(value):
        return value.encode() if isinstance(value, str) and six.PY3 else value

    mask_data = get_png_string(mask_array)
    feature = record.features.feature['image/segmentation/class/encoded']
    feature.bytes_list.value.pop()
    feature.bytes_list.value.append(norm2bytes(mask_data))
    return record 
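
A hedged usage sketch (assuming TensorFlow; note that update_mask also relies on six and on the project's own get_png_string helper, which encodes the mask array as a PNG byte string; the record contents below are placeholders):

import numpy as np
import tensorflow as tf

record = tf.train.Example()
feature = record.features.feature['image/segmentation/class/encoded']
feature.bytes_list.value.append(b'old-mask-bytes')  # placeholder existing mask

new_mask = np.zeros((256, 256), dtype=np.uint8)     # HxW class values
record = update_mask(record, new_mask)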
Example 5
Project: tenpy   Author: tenpy   File: a_mps.py    License: GNU General Public License v3.0
def split_truncate_theta(theta, chi_max, eps):
    """Split and truncate a two-site wave function in mixed canonical form.

    Split a two-site wave function as follows::
          vL --(theta)-- vR     =>    vL --(A)--diag(S)--(B)-- vR
                |   |                       |             |
                i   j                       i             j

    Afterwards, truncate in the new leg (labeled ``vC``).

    Parameters
    ----------
    theta : np.Array[ndim=4]
        Two-site wave function in mixed canonical form, with legs ``vL, i, j, vR``.
    chi_max : int
        Maximum number of singular values to keep.
    eps : float
        Discard any singular values smaller than ``eps``.

    Returns
    -------
    A : np.Array[ndim=3]
        Left-canonical matrix on site i, with legs ``vL, i, vC``
    S : np.Array[ndim=1]
        Singular/Schmidt values.
    B : np.Array[ndim=3]
        Right-canonical matrix on site j, with legs ``vC, j, vR``
    """
    chivL, dL, dR, chivR = theta.shape
    theta = np.reshape(theta, [chivL * dL, dR * chivR])
    X, Y, Z = svd(theta, full_matrices=False)
    # truncate
    chivC = min(chi_max, np.sum(Y > eps))
    piv = np.argsort(Y)[::-1][:chivC]  # keep the largest `chivC` singular values
    X, Y, Z = X[:, piv], Y[piv], Z[piv, :]
    # renormalize
    S = Y / np.linalg.norm(Y)  # == Y/sqrt(sum(Y**2))
    # split legs of X and Z
    A = np.reshape(X, [chivL, dL, chivC])
    B = np.reshape(Z, [chivC, dR, chivR])
    return A, S, B 
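
A hedged usage sketch (assuming ``svd`` here is scipy.linalg.svd, as in the surrounding tenpy toy code; the bond dimensions are made up):

import numpy as np
from scipy.linalg import svd

theta = np.random.rand(3, 2, 2, 3)     # legs vL, i, j, vR
theta /= np.linalg.norm(theta)         # normalize the two-site wave function
A, S, B = split_truncate_theta(theta, chi_max=4, eps=1e-12)
print(A.shape, S.shape, B.shape)       # (3, 2, 4) (4,) (4, 2, 3)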
Example 6
Project: How_to_generate_music_in_tensorflow_LIVE   Author: llSourcell   File: batchbuilder.py    License: Apache License 2.0
def reconstruct_batch(self, output, batch_id, chosen_labels=None):
        """ Create the song associated with the network output
        Args:
            output (list[np.Array]): The output of the network (size batch_size*output_dim)
            batch_id (int): The batch that we must reconstruct
            chosen_labels (list[np.Array[batch_size, int]]): the sampled class at each timestep (useful to reconstruct the generated song)
        Return:
            Song: The reconstructed song
        """
        raise NotImplementedError('Abstract class') 
Example 7
Project: How_to_generate_music_in_tensorflow_LIVE   Author: llSourcell   File: batchbuilder.py    License: Apache License 2.0
def reconstruct_batch(self, output, batch_id, chosen_labels=None):
        """ Create the song associated with the network output
        Args:
            output (list[np.Array]): The output of the network (size batch_size*output_dim)
            batch_id (int): The batch id
            chosen_labels (list[np.Array[batch_size, int]]): the sampled class at each timestep (useful to reconstruct the generated song)
        Return:
            Song: The reconstructed song
        """
        assert Relative.HAS_EMPTY

        processed_song = Relative.RelativeSong()
        processed_song.first_note = music.Note()
        processed_song.first_note.note = 56  # TODO: Define what should be the first note
        print('Reconstruct')
        for i, note in enumerate(output):
            relative = Relative.RelativeNote()
            # If we sampled the output, recover which class was selected
            if not chosen_labels or i == len(chosen_labels):  # If chosen_labels, the last generated note has not been sampled
                chosen_label = int(np.argmax(note[batch_id, :]))  # Cast np.int64 to int to avoid compatibility issues with mido
            else:
                chosen_label = int(chosen_labels[i][batch_id])
            print(chosen_label, end=' ')  # TODO: Add a text output connector
            if chosen_label == 0:  # <next> token
                relative.pitch_class = None
                #relative.scale = # Note used
                #relative.prev_tick =
            else:
                relative.pitch_class = chosen_label-1
                #relative.scale =
                #relative.prev_tick =
            processed_song.notes.append(relative)
        print()
        return self.reconstruct_song(processed_song) 
Example 8
Project: AiGEM_TeamHeidelberg2017   Author: igemsoftware2017   File: helpers.py    License: MIT License
def update(self, sigmoid_logits, true_labels):
        """Update the ROC tracker, with the predictions on one batch made during validation.

        Args:
          sigmoid_logits: A 2D `np.Array` holding the sigmoid logits for the validation batch.
          true_labels: A 2D `np.Array` holding the true labels for the validation batch.
        """
        # Threshold the logits: we consider a class "predicted" if its sigmoid
        # activation is higher than 0.5 (predicted labels).
        batch_predicted_labels = np.greater(sigmoid_logits, 0.5)
        batch_predicted_labels = batch_predicted_labels.astype(float)

        batch_pred_pos = np.sum(batch_predicted_labels, axis=0)  # sum up along the batch dim, keep the channels
        batch_actual_pos = np.sum(true_labels, axis=0)  # sum up along the batch dim, keep the channels
        # calculate the true positives:
        batch_true_pos = np.sum(np.multiply(batch_predicted_labels, true_labels), axis=0)

        # and update the counts
        self.pred_positives_sum += batch_pred_pos #what the model said
        self.actual_positives_sum += batch_actual_pos #what the labels say
        self.true_positive_sum += batch_true_pos # where labels and model predictions>0.5 match

        assert len(self.true_positive_sum) == self._opts._nclasses

        # add the predictions to the roc_score tracker
        self.roc_score.append(sigmoid_logits)
        self.roc_labels.append(true_labels) 
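
A hedged, standalone sketch of the thresholding and counting logic (the tracker instance itself needs the surrounding class; the batch values below are made up):

import numpy as np

sigmoid_logits = np.array([[0.9, 0.2], [0.6, 0.7]])  # 2 samples, 2 classes
true_labels = np.array([[1.0, 0.0], [0.0, 1.0]])

predicted = np.greater(sigmoid_logits, 0.5).astype(float)
pred_pos = np.sum(predicted, axis=0)                # what the model said
actual_pos = np.sum(true_labels, axis=0)            # what the labels say
true_pos = np.sum(predicted * true_labels, axis=0)  # where both agree
print(pred_pos, actual_pos, true_pos)               # [2. 1.] [1. 1.] [1. 1.]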
Example 9
Project: AiGEM_TeamHeidelberg2017   Author: igemsoftware2017   File: helpers.py    License: MIT License
def softmax(X, theta = 1.0, axis = None):
    """Compute the softmax of each element along an axis of X.

    Args:
      X: An `ND-Array`, probably of floats.
      theta: float parameter, used as a multiplier
        prior to exponentiation. Default = 1.0 (optional).
      axis: axis to compute values along. Default is the
        first non-singleton axis (optional).

    Returns:
      An `Array` of the same shape as X. The result will sum to 1 along the specified axis.
    """
    # make X at least 2d
    y = np.atleast_2d(X)
    # find axis
    if axis is None:
        axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
    # multiply y against the theta parameter,
    y = y * float(theta)
    # subtract the max for numerical stability
    y = y - np.expand_dims(np.max(y, axis = axis), axis)
    # exponentiate y
    y = np.exp(y)
    # take the sum along the specified axis
    ax_sum = np.expand_dims(np.sum(y, axis = axis), axis)
    # finally: divide elementwise
    p = y / ax_sum
    # flatten if X was 1D
    if len(X.shape) == 1:
        p = p.flatten()
    return p 
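
A hedged usage sketch (the values below are illustrative):

import numpy as np

X = np.array([[1.0, 2.0, 3.0],
              [1.0, 1.0, 1.0]])
p = softmax(X, theta=1.0, axis=1)
print(p.sum(axis=1))  # [1. 1.]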
Example 10
Project: exbert   Author: bhoov   File: token_processing.py    License: Apache License 2.0
def normalize(a):
    """Divide each head by its norm"""
    norms = np.linalg.norm(a, axis=-1, keepdims=True)
    return a / norms


# np.Array:<a,b,c,d> -> np.Array<a,b,c*d> 
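
A hedged usage sketch (the shape mimics per-head attention vectors; the values are random):

import numpy as np

a = np.random.rand(2, 3, 4)
out = normalize(a)
print(np.linalg.norm(out, axis=-1))  # every entry ~1.0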
Example 11
Project: exbert   Author: bhoov   File: index_wrapper.py    License: Apache License 2.0
def __init__(self, folder, pattern=FAISS_LAYER_PATTERN):
        super().__init__(folder, pattern)

        self.head_mask = partial(create_mask, self.head_size, self.nheads)

    # Int -> [Int] -> np.Array -> Int -> (np.Array(),  ) 
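
The interesting pattern here is functools.partial, which pre-binds the first arguments of create_mask. A standalone sketch of the same idea (this create_mask is hypothetical, not exbert's):

from functools import partial
import numpy as np

def create_mask(head_size, n_heads, selected_heads):
    # Hypothetical: 1.0 over the slots of selected heads, 0.0 elsewhere
    mask = np.zeros(head_size * n_heads)
    for h in selected_heads:
        mask[h * head_size:(h + 1) * head_size] = 1.0
    return mask

head_mask = partial(create_mask, 64, 12)  # pre-bind head_size and nheads
print(head_mask([0, 5]).shape)            # (768,)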
Example 12
Project: TTS   Author: mozilla   File: synthesis.py    License: Mozilla Public License 2.0
def apply_griffin_lim(inputs, input_lens, CONFIG, ap):
    '''Apply Griffin-Lim to each sample, iterating through the first dimension.
    Args:
        inputs (Tensor or np.Array): Features to be converted by GL. First dimension is the batch size.
        input_lens (Tensor or np.Array): 1D array of sample lengths.
        CONFIG (Dict): TTS config.
        ap (AudioProcessor): TTS audio processor.
    '''
    wavs = []
    for idx, spec in enumerate(inputs):
        wav_len = (input_lens[idx] * ap.hop_length) - ap.hop_length  # inverse librosa padding
        wav = inv_spectrogram(spec, ap, CONFIG)
        # assert len(wav) == wav_len, f" [!] wav length: {len(wav)} vs expected: {wav_len}"
        wavs.append(wav[:wav_len])
    return wavs 
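
A hedged note on the length arithmetic: for a spectrogram with n frames and hop length h, the code keeps (n - 1) * h samples, trimming librosa's padding. A minimal check (values illustrative):

n_frames, hop_length = 200, 256
wav_len = (n_frames * hop_length) - hop_length
print(wav_len)  # 50944, i.e. (200 - 1) * 256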