Python numpy.Array() Examples

The following are 15 code examples that reference numpy.Array. Note that the numpy module itself has no attribute named Array: the canonical constructor is the lowercase numpy.array(), and the concrete type it returns is numpy.ndarray. In the examples below, numpy.Array (or np.Array) appears almost exclusively as an informal type name in docstrings and comments rather than as an actual call. You can go to the original project or source file by following the links above each example, or check out all available functions/classes of the numpy module.
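For reference, the correct lowercase spelling behaves as follows:

import numpy as np

a = np.array([1.0, 2.0, 3.0])    # canonical constructor (lowercase)
print(type(a))                   # <class 'numpy.ndarray'>
# np.Array([1.0, 2.0, 3.0])     # AttributeError: module 'numpy' has no attribute 'Array'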
Example #1
Source File: tfrecord_helpers.py    From fritz-models with MIT License
def update_mask(record, mask_array):
    """Update mask in tensorflow example.

    Args:
        record (tf.train.Example): Record to update
        mask_array (numpy.Array): HxW array of class values.

    Returns: Updated tf.train.Example.
    """
    def norm2bytes(value):
        return value.encode() if isinstance(value, str) and six.PY3 else value

    mask_data = get_png_string(mask_array)
    feature = record.features.feature['image/segmentation/class/encoded']
    feature.bytes_list.value.pop()
    feature.bytes_list.value.append(norm2bytes(mask_data))
    return record 
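A minimal usage sketch; get_png_string comes from the same fritz-models module, and serialized_record is a hypothetical bytes value read from a TFRecord file:

import numpy as np
import tensorflow as tf

record = tf.train.Example()
record.ParseFromString(serialized_record)        # serialized_record: bytes read from a TFRecord
new_mask = np.zeros((256, 256), dtype=np.uint8)  # HxW array of class ids
record = update_mask(record, new_mask)           # swaps the stored PNG-encoded mask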
Example #2
Source File: helpers.py    From AiGEM_TeamHeidelberg2017 with MIT License
def _variable_on_cpu(name, shape, initializer, trainable):
    """Helper function to get a variable stored on cpu.

    Args:
      name: A `str` holding the name of the variable.
      shape: An `Array` defining the shape of the Variable. For example: [2,1,3].
      initializer: The `tf.Initializer` to use to initialize the variable.
      trainable: A `bool` stating whether the variable is trainable.

    Returns:
      A `tf.Variable` on CPU.
    """
    with tf.device('/cpu:0'): #TODO will this work?
        dtype = tf.float32
        var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype, trainable=trainable)
    #dtf.add_to_collection('CPU', var)
    return var 
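A hedged usage sketch, assuming the TensorFlow 1.x graph API (tf.get_variable is not available in TensorFlow 2.x eager mode):

import tensorflow as tf

# Create a trainable 2x1x3 weight tensor pinned to host memory.
weights = _variable_on_cpu(
    name='weights',
    shape=[2, 1, 3],
    initializer=tf.truncated_normal_initializer(stddev=0.1),
    trainable=True)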
Example #3
Source File: token_processing.py    From exbert with Apache License 2.0
def process_hidden_tensors(t):
    """Embeddings are returned from the BERT model in a non-ideal embedding shape:
        - unnecessary batch dimension
        - Undesired second sentence "[SEP]".
    
    Drop the unnecessary information and just return what we need for the first sentence
    """
    # Drop the unnecessary batch dimension and the second-sentence token
    t = t.squeeze(0)[:-1]

    # Drop the leading [CLS] and trailing [SEP]
    t = t[1:-1]

    # Convert to numpy
    return t.data.numpy()


# np.Array -> np.Array 
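A small shape check for illustration (assumes PyTorch; the sequence length of 10 is arbitrary):

import torch

t = torch.randn(1, 10, 768)      # fake BERT output: batch of 1, 10 tokens, hidden size 768
out = process_hidden_tensors(t)
print(out.shape)                 # (7, 768): batch dim squeezed, trailing token dropped, then [CLS]/[SEP] stripped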
Example #4
Source File: kpoints.py    From vasppy with MIT License
def __init__( self, title, subdivisions, grid_centering='G', shift=np.array( [ 0., 0., 0. ] ) ):
        """
        Initialise an AutoKPoints object

        Args:
            title (Str): The first line of the file, treated as a comment by VASP.
            subdivisions (np.Array( Int, Int, Int )): Numbers of subdivisions along each reciprocal lattice vector.
            grid_centering (Str, optional): Specify gamma-centered (G) or the original Monkhorst-Pack scheme (MP). Default is 'G'.
            shift (np.Array( Float, Float, Float ), optional): Optional shift of the mesh (s_1, s_2, s_3). Default is ( [ 0., 0., 0. ] ).

        Returns:
            None
        """
        accepted_grid_centerings = [ 'G', 'MP' ]
        if grid_centering not in accepted_grid_centerings:
            raise ValueError("grid_centering must be one of {}".format(accepted_grid_centerings))
        self.title = title
        self.grid_centering = grid_centering
        self.subdivisions = subdivisions
        self.shift = shift 
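A plausible instantiation (the class name AutoKPoints is taken from the docstring; the values are illustrative):

import numpy as np

# Gamma-centred 4x4x4 mesh with no shift.
kpts = AutoKPoints('Automatic mesh', np.array([4, 4, 4]), grid_centering='G')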
Example #5
Source File: synthesis.py    From TTS with Mozilla Public License 2.0
def apply_griffin_lim(inputs, input_lens, CONFIG, ap):
    '''Apply Griffin-Lim to each sample, iterating through the first dimension.
    Args:
        inputs (Tensor or np.Array): Features to be converted by GL. First dimension is the batch size.
        input_lens (Tensor or np.Array): 1D array of sample lengths.
        CONFIG (Dict): TTS config.
        ap (AudioProcessor): TTS audio processor.
    '''
    wavs = []
    for idx, spec in enumerate(inputs):
        wav_len = (input_lens[idx] * ap.hop_length) - ap.hop_length  # inverse librosa padding
        wav = inv_spectrogram(spec, ap, CONFIG)
        # assert len(wav) == wav_len, f" [!] wav length: {len(wav)} vs expected: {wav_len}"
        wavs.append(wav[:wav_len])
    return wavs 
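The trimming arithmetic compensates for librosa's centered padding: an n-frame spectrogram inverts to roughly n * hop_length samples, of which one hop's worth is padding. Illustrative numbers only:

n_frames, hop_length = 200, 256
wav_len = (n_frames * hop_length) - hop_length   # 50944 samples kept after trimming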
Example #6
Source File: batchbuilder.py    From How_to_generate_music_in_tensorflow_LIVE with Apache License 2.0
def reconstruct_batch(self, output, batch_id, chosen_labels=None):
        """ Create the song associated with the network output
        Args:
            output (list[np.Array]): The output of the network (size batch_size*output_dim)
            batch_id (int): The batch that we must reconstruct
            chosen_labels (list[np.Array[batch_size, int]]): the sampled class at each timestep (useful to reconstruct the generated song)
        Return:
            Song: The reconstructed song
        """
        raise NotImplementedError('Abstract class') 
Example #7
Source File: state.py    From NetworkAttackSimulator with MIT License
def __init__(self, network_tensor, host_num_map):
        """
        Parameters
        ----------
        network_tensor : np.Array
            the tensor representation of the network state
        host_num_map : dict
            mapping from host address to host number (this is used
            to map host address to host row in the network tensor)
        """
        self.tensor = network_tensor
        self.host_num_map = host_num_map 
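A minimal construction sketch; the enclosing class name (here State) and the address format are assumptions based on this snippet:

import numpy as np

tensor = np.zeros((3, 5), dtype=np.float32)    # one row of features per host
host_map = {(1, 0): 0, (1, 1): 1, (2, 0): 2}   # host address -> row in the tensor
state = State(tensor, host_map)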
Example #8
Source File: utils.py    From xarrayutils with MIT License
def lag_and_combine(ds, lags, dim="time"):
    """Creates lagged versions of the input object,
    combined along new `lag` dimension.
    NOTE: Lagging produces missing values at boundary. Use `.fillna(...)`
    to avoid problems with e.g. xr_linregress.

    Parameters
    ----------
    ds : {xr.DataArray, xr.Dataset}
        Input object
    lags : np.Array
        Lags to be computed and combined. Values denote number of timesteps.
        Negative lag indicates a shift backwards (to the left of the axis).
    dim : str
        dimension of `ds` to be lagged

    Returns
    -------
    {xr.DataArray, xr.Dataset}
        Lagged version of `ds` with additional dimension `lag`

    """

    datasets = []
    for ll in lags:
        datasets.append(ds.shift(**{dim: ll}))
    return xr.concat(datasets, dim=concat_dim_da(lags, "lag"))
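A hedged usage sketch (concat_dim_da is an internal xarrayutils helper, so this assumes the module is imported whole):

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(10.0), dims=['time'])
lagged = lag_and_combine(da, np.array([-1, 0, 1]))
print(lagged.dims)   # ('lag', 'time'); values shifted past the boundary become NaN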


Example #9
Source File: py_policy.py    From agents with Apache License 2.0
def time_step_spec(self) -> ts.TimeStep:
    """Describes the `TimeStep` np.Arrays expected by `action(time_step)`.

    Returns:
      A `TimeStep` namedtuple with `ArraySpec` objects instead of np.Array,
      which describe the shape, dtype and name of each array expected by
      `action()`.
    """
    return self._time_step_spec 
Example #10
Source File: py_policy.py    From agents with Apache License 2.0
def action_spec(self) -> types.NestedArraySpec:
    """Describes the ArraySpecs of the np.Array returned by `action()`.

    `action` can be a single np.Array, or a nested dict, list or tuple of
    np.Array.

    Returns:
      A single BoundedArraySpec, or a nested dict, list or tuple of
      `BoundedArraySpec` objects, which describe the shape and
      dtype of each np.Array returned by `action()`.
    """
    return self._action_spec 
Example #11
Source File: py_policy.py    From agents with Apache License 2.0
def policy_state_spec(self) -> types.NestedArraySpec:
    """Describes the arrays expected by functions with `policy_state` as input.

    Returns:
      A single BoundedArraySpec, or a nested dict, list or tuple of
      `BoundedArraySpec` objects, which describe the shape and
      dtype of each np.Array expected as the `policy_state`.
    """
    return self._policy_state_spec 
Example #12
Source File: py_policy.py    From agents with Apache License 2.0
def info_spec(self) -> types.NestedArraySpec:
    """Describes the Arrays emitted as info by `action()`.

    Returns:
      A nest of ArraySpec which describe the shape and dtype of each Array
      emitted as `info` by `action()`.
    """
    return self._info_spec 
Example #13
Source File: py_policy.py    From agents with Apache License 2.0
def policy_step_spec(self) -> policy_step.PolicyStep:
    """Describes the output of `action()`.

    Returns:
      A nest of ArraySpec which describe the shape and dtype of each Array
      emitted by `action()`.
    """
    return self._policy_step_spec 
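Each of the spec properties above returns a nest of ArraySpec objects rather than concrete arrays. For orientation, a hedged sketch of what such a spec looks like in TF-Agents:

import numpy as np
from tf_agents.specs import array_spec

# A bounded action spec: one int32 scalar in [0, 3].
spec = array_spec.BoundedArraySpec(shape=(), dtype=np.int32, minimum=0, maximum=3, name='action')
print(spec)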
Example #14
Source File: batchbuilder.py    From MusicGenerator with Apache License 2.0
def reconstruct_batch(self, output, batch_id, chosen_labels=None):
        """ Create the song associated with the network output
        Args:
            output (list[np.Array]): The output of the network (size batch_size*output_dim)
            batch_id (int): The batch id
            chosen_labels (list[np.Array[batch_size, int]]): the sampled class at each timestep (useful to reconstruct the generated song)
        Return:
            Song: The reconstructed song
        """
        assert Relative.HAS_EMPTY

        processed_song = Relative.RelativeSong()
        processed_song.first_note = music.Note()
        processed_song.first_note.note = 56  # TODO: Define what should be the first note
        print('Reconstruct')
        for i, note in enumerate(output):
            relative = Relative.RelativeNote()
            # If the output was sampled, retrieve which label was actually selected
            if not chosen_labels or i == len(chosen_labels):  # If chosen_labels, the last generated note has not been sampled
                chosen_label = int(np.argmax(note[batch_id, :]))  # Cast np.int64 to int to avoid compatibility issues with mido
            else:
                chosen_label = int(chosen_labels[i][batch_id])
            print(chosen_label, end=' ')  # TODO: Add a text output connector
            if chosen_label == 0:  # <next> token
                relative.pitch_class = None
                #relative.scale = # Note used
                #relative.prev_tick =
            else:
                relative.pitch_class = chosen_label-1
                #relative.scale =
                #relative.prev_tick =
            processed_song.notes.append(relative)
        print()
        return self.reconstruct_song(processed_song) 
Example #15
Source File: abic.py    From geoist with MIT License
def forward(self, model_density=None):
        ''' Calculate the gravity field from model_density.
        Args:
            model_density (np.Array): densities of each model cell, reshaped
                from (nz,ny,nx) to (nz*ny*nx).
        '''
        if model_density is None:
            model_density = self._model_density
        else:
            model_density = model_density.ravel()
        self.obs_data = self.kernel_op.gtoep.matvec(model_density)
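The operator kernel_op.gtoep.matvec applies the sensitivity kernel without materializing it. Conceptually the contract is the dense product below (a hedged sketch with made-up sizes; the real code exploits Toeplitz structure):

import numpy as np

nz, ny, nx = 2, 4, 4
G = np.random.rand(10, nz * ny * nx)   # stand-in dense sensitivity kernel, one row per observation
rho = np.ones((nz, ny, nx))            # model cell densities
obs_data = G @ rho.ravel()             # flatten (nz,ny,nx) -> (nz*ny*nx), then matvec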