Python tensorflow.shape() Examples

The following are 25 code examples of tensorflow.shape(), drawn from open-source projects. The originating project, source file, and license are noted above each example.
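A quick note on the function itself before the examples: tf.shape(x) returns the dynamic shape as a 1-D int32 tensor evaluated at run time, so it works even when some dimensions are unknown while the graph is built, whereas the static x.shape attribute is a tf.TensorShape that may contain None entries. A minimal TF1-style sketch:

import numpy as np
import tensorflow as tf

# Batch dimension unknown at graph-construction time.
x = tf.placeholder(tf.float32, shape=(None, 128))

print(x.shape)          # (?, 128) -- static shape, batch size unknown
dynamic = tf.shape(x)   # 1-D int32 tensor, evaluated at run time

with tf.Session() as sess:
    print(sess.run(dynamic, {x: np.zeros((32, 128))}))  # [ 32 128]

Many of the examples below follow exactly this pattern: tf.shape() is used wherever a dimension (usually the batch or time axis) is only known once real data is fed in.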
Example #1
Source File: loss.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def fprop(self, x, y, **kwargs):
        # Pair the clean input with an adversarial version when an attack is
        # configured; either way, x becomes a tuple of inputs.
        if self.attack is not None:
            x = x, self.attack(x)
        else:
            x = x,

        # Apply label smoothing in place.
        # Catching RuntimeError: Variable -= value not supported by tf.eager.
        try:
            y -= self.smoothing * (y - 1. / tf.cast(y.shape[-1], tf.float32))
        except RuntimeError:
            y.assign_sub(self.smoothing * (y - 1. / tf.cast(y.shape[-1],
                                                            tf.float32)))

        # Sum the cross-entropy losses over all inputs in the tuple.
        logits = [self.model.get_logits(x, **kwargs) for x in x]
        loss = sum(
            softmax_cross_entropy_with_logits(labels=y,
                                              logits=logit)
            for logit in logits)
        warnings.warn("LossCrossEntropy is deprecated, switch to "
                      "CrossEntropy. LossCrossEntropy may be removed on "
                      "or after 2019-03-06.")
        return loss 
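For context, the smoothing step above pulls each one-hot label toward the uniform distribution before the loss is computed. A minimal standalone sketch, with a hypothetical smoothing factor of 0.1 and three classes:

import tensorflow as tf

y = tf.constant([[0., 1., 0.]])   # one-hot label over 3 classes
smoothing = 0.1                   # hypothetical value
num_classes = tf.cast(y.shape[-1], tf.float32)
y_smooth = y - smoothing * (y - 1. / num_classes)
# -> [[0.0333, 0.9333, 0.0333]]: probability mass moves from the hot
#    class toward the other classes.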
Example #2
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def print_layers(self, title=None, hide_layers_with_no_params=False):
        if title is None: title = self.name
        print()
        print('%-28s%-12s%-24s%-24s' % (title, 'Params', 'OutputShape', 'WeightShape'))
        print('%-28s%-12s%-24s%-24s' % (('---',) * 4))

        total_params = 0
        for layer_name, layer_output, layer_trainables in self.list_layers():
            weights = [var for var in layer_trainables if var.name.endswith('/weight:0')]
            num_params = sum(np.prod(shape_to_list(var.shape)) for var in layer_trainables)
            total_params += num_params
            if hide_layers_with_no_params and num_params == 0:
                continue

            print('%-28s%-12s%-24s%-24s' % (
                layer_name,
                num_params if num_params else '-',
                layer_output.shape,
                weights[0].shape if len(weights) == 1 else '-'))

        print('%-28s%-12s%-24s%-24s' % (('---',) * 4))
        print('%-28s%-12s%-24s%-24s' % ('Total', total_params, '', ''))
        print()

    # Construct summary ops to include histograms of all trainable parameters in TensorBoard. 
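print_layers depends on a shape_to_list helper that is not shown in this snippet. A plausible sketch (an assumption, not the repository's exact code): it unwraps a TF1 tf.TensorShape into plain Python values so np.prod can consume it.

def shape_to_list(shape):
    # In TF1, iterating a tf.TensorShape yields tf.Dimension objects;
    # .value unwraps each one to an int (or None for unknown dimensions).
    return [dim.value for dim in shape]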
Example #3
Source File: dump_tfrecord.py    From cwavegan with MIT License
def _mapper(example_proto):
  features = {
      'samples': tf.FixedLenSequenceFeature([1], tf.float32, allow_missing=True),
      'label': tf.FixedLenSequenceFeature([], tf.string, allow_missing=True)
  }
  example = tf.parse_single_example(example_proto, features)

  wav = example['samples'][:, 0]

  # Truncate to at most 16384 samples, then zero-pad up to exactly 16384.
  wav = wav[:16384]
  wav_len = tf.shape(wav)[0]
  wav = tf.pad(wav, [[0, 16384 - wav_len]])

  label = tf.reduce_join(example['label'], 0)

  return wav, label 
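A hypothetical usage sketch for _mapper, assuming a TFRecord file of WaveGAN-style examples (the file name is illustrative); every element then comes out as a fixed-length (16384,) waveform plus its label:

import tensorflow as tf

dataset = tf.data.TFRecordDataset('audio.tfrecord')
dataset = dataset.map(_mapper)
wav, label = dataset.make_one_shot_iterator().get_next()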
Example #4
Source File: sequence.py    From icme2019 with MIT License
def call(self, x):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])

        # Frequency terms 1 / 10000^(2j / d) for each encoding dimension j.
        position_j = 1. / \
            K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
        position_j = K.expand_dims(position_j, 0)

        # Position indices 0..T-1, derived from the runtime sequence length.
        position_i = tf.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        outputs = K.concatenate(
            [K.cos(position_ij), K.sin(position_ij)], 2)

        if self.mode == 'sum':
            if self.scale:
                # Scale the encoding by sqrt(d), as in the Transformer paper.
                outputs = outputs * self.size ** 0.5
            return x + outputs
        elif self.mode == 'concat':
            return K.concatenate([outputs, x], 2)
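Both code paths above implement the sinusoidal positional encoding from "Attention Is All You Need": position_ij holds the angles pos / 10000^(2j/d), and concatenating their cosines and sines gives an encoding that is either added to the input (mode 'sum', optionally scaled by sqrt(d)) or concatenated to it (mode 'concat'). The tf.cumsum over a ones tensor is what derives the position indices 0..T-1 from the runtime sequence length.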
Example #5
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims == 0:    # scalar: count as one value
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:  # vector: sum of entries, number of entries
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:                     # higher rank: sum of entries, product of dims
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    # Reset the [numerator, denominator] pair to zero if the value is not finite.
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2)) # [numerator, denominator]
    update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op

#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call. 
Example #6
Source File: sequence.py    From icme2019 with MIT License
def build(self, input_shape):
        if not self.supports_masking:
            if not isinstance(input_shape, list) or len(input_shape) != 3:
                raise ValueError('An `AttentionSequencePoolingLayer` layer should be called '
                                 'on a list of 3 inputs')

            if len(input_shape[0]) != 3 or len(input_shape[1]) != 3 or len(input_shape[2]) != 2:
                raise ValueError('Unexpected input dimensions: the 3 tensors have ranks '
                                 '%d, %d and %d, but ranks 3, 3 and 2 were expected' % (
                                     len(input_shape[0]), len(input_shape[1]), len(input_shape[2])))

            if input_shape[0][-1] != input_shape[1][-1] or input_shape[0][1] != 1 or input_shape[2][1] != 1:
                raise ValueError('An `AttentionSequencePoolingLayer` layer requires 3 inputs '
                                 'with shapes (None, 1, embedding_size), (None, T, embedding_size) '
                                 'and (None, 1). Got shapes: %s, %s and %s' % tuple(input_shape))
        else:
            pass
        super(AttentionSequencePoolingLayer, self).build(
            input_shape)  # Be sure to call this somewhere! 
Example #7
Source File: utils_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def model_argmax(sess, x, predictions, samples, feed=None):
    """
    Helper function that computes the current class prediction
    :param sess: TF session
    :param x: the input placeholder
    :param predictions: the model's symbolic output
    :param samples: numpy array with input samples (dims must match x)
    :param feed: An optional dictionary that is appended to the feeding
             dictionary before the session runs. Can be used to feed
             the learning phase of a Keras model for instance.
    :return: the argmax output of predictions, i.e. the current predicted class
    """
    feed_dict = {x: samples}
    if feed is not None:
        feed_dict.update(feed)
    probabilities = sess.run(predictions, feed_dict)

    if samples.shape[0] == 1:
        return np.argmax(probabilities)
    else:
        return np.argmax(probabilities, axis=1) 
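A hedged usage sketch (the names sess, x and preds, and the input shape, are assumptions about the surrounding TF1 code):

# samples = np.zeros((4, 28, 28, 1), dtype=np.float32)
# classes = model_argmax(sess, x, preds, samples)  # array of 4 class ids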
Example #8
Source File: tensor.py    From spleeter with MIT License
def pad_and_reshape(instr_spec, frame_length, F):
    """
    :param instr_spec:
    :param frame_length:
    :param F:
    :returns:
    """
    spec_shape = tf.shape(instr_spec)
    extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1]))
    n_extra_row = frame_length // 2 + 1 - F
    extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])
    extended_spec = tf.concat([instr_spec, extension], axis=2)
    old_shape = tf.shape(extended_spec)
    new_shape = tf.concat([
        [old_shape[0] * old_shape[1]],
        old_shape[2:]],
        axis=0)
    processed_instr_spec = tf.reshape(extended_spec, new_shape)
    return processed_instr_spec 
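As a concrete walk-through with spleeter's usual settings (frame_length of 4096 and F of 1024, stated here as assumptions): tf.shape reads the dynamic (batch, time, 1024, channels) shape, n_extra_row = 4096 // 2 + 1 - 1024 = 1025 rows of zeros are tiled onto the frequency axis, and the final reshape merges the first two dimensions, giving (batch * time, 2049, channels).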
Example #9
Source File: tensor.py    From spleeter with MIT License
def check_tensor_shape(tensor_tf, target_shape):
    """ Return a Tensorflow boolean graph that indicates whether
    sample[features_key] has the specified target shape. Only check
    not None entries of target_shape.

    :param tensor_tf: Tensor to check shape for.
    :param target_shape: Target shape to compare tensor to.
    :returns: True if shape is valid, False otherwise (as TF boolean).
    """
    result = tf.constant(True)
    for i, target_length in enumerate(target_shape):
        if target_length:
            result = tf.logical_and(
                result,
                tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
    return result 
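A small usage sketch: only the non-None entries of target_shape are compared against the runtime shape.

import tensorflow as tf

waveform = tf.zeros((1000, 2))                       # stereo signal
is_stereo = check_tensor_shape(waveform, (None, 2))  # -> True (as a tf bool)
is_mono = check_tensor_shape(waveform, (None, 1))    # -> False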
Example #10
Source File: __init__.py    From spleeter with MIT License
def _inverse_stft(self, stft_t, time_crop=None):
        """ Inverse and reshape the given STFT

        :param stft_t: input STFT
        :returns: inverse STFT (waveform)
        """
        inversed = inverse_stft(
            tf.transpose(stft_t, perm=[2, 0, 1]),
            self._frame_length,
            self._frame_step,
            window_fn=lambda frame_length, dtype: (
                hann_window(frame_length, periodic=True, dtype=dtype))
        ) * self.WINDOW_COMPENSATION_FACTOR
        reshaped = tf.transpose(inversed)
        if time_crop is None:
            time_crop = tf.shape(self._features['waveform'])[0]
        return reshaped[:time_crop, :] 
Example #11
Source File: griffin_lim.py    From Griffin_lim with MIT License
def _inv_preemphasis(x):
    # Invert preemphasis (y[n] = x[n] - a * x[n-1]) by accumulating
    # x[n] = sum_k a**k * y[n-k] with a graph-mode while-loop; O(N^2) in
    # the signal length, but expressible purely in TensorFlow ops.
    N = tf.shape(x)[0]
    i = tf.constant(0)
    W = tf.zeros(shape=tf.shape(x), dtype=tf.float32)

    def condition(i, y):
        return tf.less(i, N)

    def body(i, y):
        tmp = tf.slice(x, [0], [i + 1])
        tmp = tf.concat([tf.zeros([N - i - 1]), tmp], -1)
        y = hparams.preemphasis * y + tmp
        i = tf.add(i, 1)
        return [i, y]

    final = tf.while_loop(condition, body, [i, W])

    y = final[1]

    return y 
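For reference, the same inverse filter (x[n] = y[n] + a * x[n-1], undoing preemphasis y[n] = x[n] - a * x[n-1]) can be written in one line on the NumPy side; a hedged sketch, where the default coefficient 0.97 is an assumption standing in for hparams.preemphasis:

import numpy as np
from scipy.signal import lfilter

def inv_preemphasis_np(y, coef=0.97):
    # IIR filter 1 / (1 - coef * z^-1), the exact inverse of preemphasis.
    return lfilter([1.0], [1.0, -coef], y)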
Example #12
Source File: griffin_lim.py    From Griffin_lim with MIT License
def _db_to_amp(x):
    # Convert decibels to linear amplitude: 10 ** (x / 20).
    return tf.pow(tf.ones(tf.shape(x)) * 10.0, x * 0.05)
Example #13
Source File: __init__.py    From spleeter with MIT License
def _build_mwf_output_waveform(self):
        """ Perform separation with multichannel Wiener Filtering using Norbert.
        Note: multichannel Wiener Filtering is not implemented in TensorFlow
        and thus may be quite slow.

        :returns: dictionary of separated waveforms (key: instrument name,
            value: estimated waveform of the instrument)
        """
        import norbert  # pylint: disable=import-error
        output_dict = self.model_outputs
        x = self.stft_feature
        v = tf.stack(
            [
                pad_and_reshape(
                    output_dict[f'{instrument}_spectrogram'],
                    self._frame_length,
                    self._F)[:tf.shape(x)[0], ...]
                for instrument in self._instruments
            ],
            axis=3)
        input_args = [v, x]
        stft_function = tf.py_function(
            lambda v, x: norbert.wiener(v.numpy(), x.numpy()),
            input_args,
            tf.complex64)
        return {
            instrument: self._inverse_stft(stft_function[:, :, :, k])
            for k, instrument in enumerate(self._instruments)
        } 
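Note that tf.py_function executes the wrapped lambda eagerly at run time, which is why .numpy() is available on its tensor arguments; the NumPy array returned by norbert.wiener is converted back into a tf.complex64 tensor, which is then sliced per instrument along the last axis and inverted to a waveform.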
Example #14
Source File: tensor.py    From spleeter with MIT License
def pad_and_partition(tensor, segment_len):
    """ Pad and partition a tensor into segment of len segment_len
    along the first dimension. The tensor is padded with 0 in order
    to ensure that the first dimension is a multiple of segment_len.

    Tensor must be of known fixed rank

    :Example:

    >>> tensor = [[1, 2, 3], [4, 5, 6]]
    >>> segment_len = 2
    >>> pad_and_partition(tensor, segment_len)
    [[[1, 2], [4, 5]], [[3, 0], [6, 0]]]

    :param tensor:
    :param segment_len:
    :returns:
    """
    tensor_size = tf.math.floormod(tf.shape(tensor)[0], segment_len)
    pad_size = tf.math.floormod(segment_len - tensor_size, segment_len)
    padded = tf.pad(
        tensor,
        [[0, pad_size]] + [[0, 0]] * (len(tensor.shape)-1))
    split = (tf.shape(padded)[0] + segment_len - 1) // segment_len
    return tf.reshape(
        padded,
        tf.concat(
            [[split, segment_len], tf.shape(padded)[1:]],
            axis=0)) 
Example #15
Source File: picklable_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, shape):
        # Flatten: output width is the product of all non-batch dimensions.
        self.input_shape = shape
        output_width = 1
        for factor in shape[1:]:
            output_width *= factor
        self.output_width = output_width
        self.output_shape = [None, output_width]
Example #16
Source File: tensor.py    From spleeter with MIT License
def sync_apply(tensor_dict, func, concat_axis=1):
    """ Return a function that applies synchronously the provided func on the
    provided dictionnary of tensor. This means that func is applied to the
    concatenation of the tensors in tensor_dict. This is useful for performing
    random operation that needs the same drawn value on multiple tensor, such
    as a random time-crop on both input data and label (the same crop should be
    applied to both input data and label, so random crop cannot be applied
    separately on each of them).

    IMPORTANT NOTE: all tensor are assumed to be the same shape.

    Params:
        - tensor_dict: dictionary (key: strings, values: tf.tensor)
        a dictionary of tensor.
        - func: function
        function to be applied to the concatenation of the tensors in
        tensor_dict
        - concat_axis: int
        The axis on which to perform the concatenation.

        Returns:
        processed tensors dictionary with the same name (keys) as input
        tensor_dict.
    """
    if concat_axis not in {0, 1}:
        raise NotImplementedError(
            'Function only implemented for concat_axis equal to 0 or 1')
    tensor_list = list(tensor_dict.values())
    concat_tensor = tf.concat(tensor_list, concat_axis)
    processed_concat_tensor = func(concat_tensor)
    tensor_shape = tf.shape(list(tensor_dict.values())[0])
    D = tensor_shape[concat_axis]
    if concat_axis == 0:
        return {
            name: processed_concat_tensor[index * D:(index + 1) * D, :, :]
            for index, name in enumerate(tensor_dict)
        }
    return {
        name: processed_concat_tensor[:, index * D:(index + 1) * D, :]
        for index, name in enumerate(tensor_dict)
    } 
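A hedged usage sketch: draw one random time-crop and apply it identically to an input and its label. The tensor names and shapes are assumptions, and the crop length of 50 frames is arbitrary; because the crop is drawn once on the concatenated tensor, both outputs receive the exact same window.

import tensorflow as tf

mix = tf.ones((100, 512, 2))      # (time, frequency, channels)
vocals = tf.ones((100, 512, 2))

cropped = sync_apply(
    {'mix': mix, 'vocals': vocals},
    lambda t: tf.random_crop(t, [50, tf.shape(t)[1], tf.shape(t)[2]]))
# cropped['mix'] and cropped['vocals'] both have shape (50, 512, 2).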
Example #17
Source File: __init__.py    From spleeter with MIT License
def get_input_dict_placeholders(self):
        features = {
            self.stft_input_name: placeholder(tf.complex64,
                                              shape=(None, self.params["frame_length"]//2+1,
                                                     self.params['n_channels']),
                                              name=self.stft_input_name),
            'audio_id': placeholder(tf.string, name="audio_id")}
        return features 
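The frame_length // 2 + 1 in the placeholder shape is the number of non-negative-frequency bins an STFT produces for real-valued input; it matches the frequency dimension that pad_and_reshape (Example #8) pads its spectrograms up to.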
Example #18
Source File: picklable_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, shape):
        # Shape-preserving layer: the output shape equals the input shape.
        self.input_shape = shape
        self.output_shape = shape
Example #19
Source File: picklable_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, shape):
        self.input_shape = tuple(shape)
        self.in_filter = shape[-1]
        self.gn1 = GroupNorm(name=self.name + "_gn1")
        self.gn1.set_input_shape(shape)
        strides = (self.stride, self.stride)
        self.conv1 = Conv2D(self.out_filter, (3, 3), strides, "SAME",
                            name=self.name + "_conv1", init_mode="inv_sqrt")
        self.conv1.set_input_shape(shape)
        self.gn2 = GroupNorm(name=self.name + "_gn2")
        self.gn2.set_input_shape(self.conv1.get_output_shape())
        self.conv2 = Conv2D(self.out_filter, (3, 3), (1, 1), "SAME",
                            name=self.name + "_conv2", init_mode="inv_sqrt")
        self.conv2.set_input_shape(self.conv1.get_output_shape())
        self.output_shape = self.conv2.get_output_shape() 
Example #20
Source File: loss.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def fprop(self, x, y, **kwargs):
        mix = tf.distributions.Beta(self.beta, self.beta)
        mix = mix.sample([tf.shape(x)[0]] + [1] * (len(x.shape) - 1))
        xm = x + mix * (x[::-1] - x)
        ym = y + mix * (y[::-1] - y)
        logits = self.model.get_logits(xm, **kwargs)
        loss = softmax_cross_entropy_with_logits(labels=ym, logits=logits)
        warnings.warn("LossMixUp is deprecated, switch to "
                      "MixUp. LossFeaturePairing may be removed "
                      "on or after 2019-03-06.")
        return loss 
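This is the mixup construction of Zhang et al. ("mixup: Beyond Empirical Risk Minimization"): with lambda drawn from Beta(beta, beta), each example is interpolated with the batch in reversed order, x_m = x + lambda * (x[::-1] - x) = (1 - lambda) * x + lambda * x[::-1], and the labels are mixed with the same lambda. Using tf.shape(x)[0] to size the lambda sample lets the same graph serve any batch size.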
Example #21
Source File: loss.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def fprop(self, x, y, **kwargs):
        mix = tf.distributions.Beta(self.beta, self.beta)
        mix = mix.sample([tf.shape(x)[0]] + [1] * (len(x.shape) - 1))
        xm = x + mix * (x[::-1] - x)
        ym = y + mix * (y[::-1] - y)
        logits = self.model.get_logits(xm, **kwargs)
        loss = tf.reduce_mean(softmax_cross_entropy_with_logits(labels=ym,
                                                                logits=logits))
        return loss 
Example #22
Source File: attacks_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def attack(self, imgs, targets):
        """
        Perform the EAD attack on the given instance for the given targets.

        If self.targeted is true, then the targets represent the target labels.
        If self.targeted is false, then the targets are the original class labels.
        """

        batch_size = self.batch_size
        r = []
        for i in range(0, len(imgs) // batch_size):
            _logger.debug(
                ("Running EAD attack on instance " + "{} of {}").format(
                    i * batch_size, len(imgs)))
            r.extend(
                self.attack_batch(
                    imgs[i * batch_size:(i + 1) * batch_size],
                    targets[i * batch_size:(i + 1) * batch_size]))
        if len(imgs) % batch_size != 0:
            last_elements = len(imgs) - (len(imgs) % batch_size)
            _logger.debug(
                ("Running EAD attack on instance " + "{} of {}").format(
                    last_elements, len(imgs)))
            temp_imgs = np.zeros((batch_size, ) + imgs.shape[1:])
            temp_targets = np.zeros((batch_size, ) + targets.shape[1:])
            temp_imgs[:(len(imgs) % batch_size)] = imgs[last_elements:]
            temp_targets[:(len(imgs) % batch_size)] = targets[last_elements:]
            temp_data = self.attack_batch(temp_imgs, temp_targets)
            r.extend(temp_data[:(len(imgs) % batch_size)])
        return np.array(r) 
Example #23
Source File: attacks_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def vatm(model,
         x,
         logits,
         eps,
         num_iterations=1,
         xi=1e-6,
         clip_min=None,
         clip_max=None,
         scope=None):
    """
    Tensorflow implementation of the perturbation method used for virtual
    adversarial training: https://arxiv.org/abs/1507.00677
    :param model: the model which returns the network unnormalized logits
    :param x: the input placeholder
    :param logits: the model's unnormalized output tensor (the input to
                   the softmax layer)
    :param eps: the epsilon (input variation parameter)
    :param num_iterations: the number of iterations
    :param xi: the finite difference parameter
    :param clip_min: optional parameter that can be used to set a minimum
                    value for components of the example returned
    :param clip_max: optional parameter that can be used to set a maximum
                    value for components of the example returned
    :param scope: optional name scope for the created operations
    :return: a tensor for the adversarial example
    """
    with tf.name_scope(scope, "virtual_adversarial_perturbation"):
        d = tf.random_normal(tf.shape(x), dtype=tf_dtype)
        for i in range(num_iterations):
            d = xi * utils_tf.l2_batch_normalize(d)
            logits_d = model.get_logits(x + d)
            kl = utils_tf.kl_with_logits(logits, logits_d)
            Hd = tf.gradients(kl, d)[0]
            d = tf.stop_gradient(Hd)
        d = eps * utils_tf.l2_batch_normalize(d)
        adv_x = x + d
        if (clip_min is not None) and (clip_max is not None):
            adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
        return adv_x 
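The loop above is the finite-difference power iteration from the virtual adversarial training paper: starting from random noise d, each iteration normalizes d to a small radius xi, measures the KL divergence between the logits at x and at x + d, and replaces d with the gradient of that divergence, so d converges toward the direction the model's predictions are most sensitive to; the final perturbation is then rescaled to length eps.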
Example #24
Source File: attacks.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def attack(self, x, y):
        """
        This method creates a symbolic graph that given an input image,
        first randomly perturbs the image. The
        perturbation is bounded to an epsilon ball. Then multiple steps of
        gradient descent is performed to increase the probability of a target
        label or decrease the probability of the ground-truth label.

        :param x: A tensor with the input image.
        """
        import tensorflow as tf
        from cleverhans.utils_tf import clip_eta

        if self.rand_init:
            eta = tf.random_uniform(
                tf.shape(x), -self.eps, self.eps, dtype=self.tf_dtype)
            eta = clip_eta(eta, self.ord, self.eps)
        else:
            eta = tf.zeros_like(x)

        def cond(i, _):
            return tf.less(i, self.nb_iter)

        def body(i, e):
            new_eta = self.attack_single_step(x, e, y)
            return i + 1, new_eta

        _, eta = tf.while_loop(cond, body, [tf.zeros([]), eta], back_prop=True)

        adv_x = x + eta
        if self.clip_min is not None and self.clip_max is not None:
            adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)

        return adv_x 
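This is the projected-gradient-descent (PGD) pattern of Madry et al.: optionally start from a uniformly random point inside the epsilon ball (projected back by clip_eta), run nb_iter single-step attacks inside a tf.while_loop, then clip the result into the valid input range. tf.shape(x) sizes the random initialization so the graph works for any batch size.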
Example #25
Source File: tf_logits.py    From Black-Box-Audio with MIT License
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentiable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. The network sees a window of 19 frames at a time (the current frame
    # plus 9 frames of context on each side), so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i + 19*26]
                         for i in range(0, features.shape[1] - 19*26 + 1, 26)], 1)
    features = tf.reshape(features, [batch_size, -1, 19*26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0,1,2])
    features = (features-mean)/(var**.5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0]*10)

    return logits