Python tensorflow.div() Examples

The following are 30 code examples of tensorflow.div(), collected from open-source projects; the source file, project, and license for each example are noted above it. Note that tf.div is deprecated in TensorFlow 1.x in favor of tf.math.divide (also exposed as tf.divide) and was removed in TensorFlow 2.x.
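
Because of that removal, code written against the 1.x API needs a small shim to run on newer installs. A minimal sketch (assuming only a stock TensorFlow install), mirroring the hasattr check used in Example #3 below:

import tensorflow as tf

# Pick whichever division op the installed TensorFlow provides.
divide = tf.div if hasattr(tf, 'div') else tf.math.divide

# Caution: tf.div used Python 2 division semantics, so integer inputs
# yield integer (floored) division, while tf.math.divide follows Python 3
# semantics and returns floats; use tf.math.floordiv when you need
# integer division on TF 2.x.
x = tf.constant([10, 20, 30])
y = tf.constant(7)
result = divide(x, y)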
Example #1
Source File: vgslspecs_test.py    From DOTA_models with Apache License 2.0
def ExpectScaledSize(self, spec, target_shape, factor=1):
    """Tests that the output of the graph of the given spec has target_shape."""
    with tf.Graph().as_default():
      with self.test_session() as sess:
        self.SetupInputs()
        # Only the placeholders are given at construction time.
        vgsl = vgslspecs.VGSLSpecs(self.ph_widths, self.ph_heights, True)
        outputs = vgsl.Build(self.ph_image, spec)
        # Compute the expected output widths and heights from the given scale factor.
        target_widths = tf.div(self.in_widths, factor).eval()
        target_heights = tf.div(self.in_heights, factor).eval()
        # Run with the 'real' data.
        tf.global_variables_initializer().run()
        res_image, res_widths, res_heights = sess.run(
            [outputs, vgsl.GetLengths(2), vgsl.GetLengths(1)],
            feed_dict={self.ph_image: self.in_image,
                       self.ph_widths: self.in_widths,
                       self.ph_heights: self.in_heights})
        self.assertEqual(tuple(res_image.shape), target_shape)
        if target_shape[1] > 1:
          self.assertEqual(tuple(res_heights), tuple(target_heights))
        if target_shape[2] > 1:
          self.assertEqual(tuple(res_widths), tuple(target_widths)) 
Example #2
Source File: model.py    From deeping-flow with MIT License
def multi_view_att(ori_memory, att_w, dec_hidden, *args):
    bsz, max_len, rnn_hsz = args

    dec_hidden = att_w(dec_hidden)  # b*f

    ori_memory_t = tf.transpose(ori_memory, perm=[2, 0, 1])  # f*b*t
    flatten_om = tf.layers.flatten(ori_memory_t)

    beta_is = tf.exp(tf.tanh(tf.matmul(dec_hidden, flatten_om)))  # b*(b*t)
    beta_is = tf.reshape(beta_is, [bsz, bsz, max_len])
    beta_is = tf.transpose(beta_is, perm=[2, 0, 1])  # t*b*b

    beta_i_sum = tf.reduce_sum(beta_is, axis=0, keepdims=True)
    beta_i_sum = tf.tile(beta_i_sum, [max_len, 1, 1])
    beta_is = tf.div(beta_is, beta_i_sum)

    ori_memory_t = tf.transpose(ori_memory, perm=[1, 0, 2])
    return tf.reduce_sum(tf.matmul(beta_is, ori_memory_t), axis=0) 
Example #3
Source File: pruning_schedule.py    From model-optimization with Apache License 2.0
def __call__(self, step):
    # TODO(tf-mot): consider switch to divide for 1.XX also.
    if hasattr(tf, 'div'):
      divide = tf.div
    else:
      divide = tf.math.divide

    # TODO(pulkitb): Replace function with tf.polynomial_decay
    with tf.name_scope('polynomial_decay_pruning_schedule'):
      p = tf.math.minimum(
          1.0,
          tf.math.maximum(
              0.0,
              divide(
                  tf.dtypes.cast(step - self.begin_step, tf.float32),
                  self.end_step - self.begin_step)))
      sparsity = tf.math.add(
          tf.math.multiply(self.initial_sparsity - self.final_sparsity,
                           tf.math.pow(1 - p, self.power)),
          self.final_sparsity,
          name='sparsity')

    return (self._should_prune_in_step(step, self.begin_step, self.end_step,
                                       self.frequency),
            sparsity) 
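
For reference, the schedule above computes polynomial sparsity decay: with p clipped to [0, 1] as (step - begin_step) / (end_step - begin_step), the sparsity is (initial_sparsity - final_sparsity) * (1 - p)**power + final_sparsity. A quick NumPy sketch of the same arithmetic (the function name and default values are illustrative):

import numpy as np

def polynomial_sparsity(step, begin_step=0, end_step=100,
                        initial_sparsity=0.0, final_sparsity=0.9, power=3.0):
    # Fraction of the pruning window elapsed, clipped to [0, 1].
    p = np.clip((step - begin_step) / (end_step - begin_step), 0.0, 1.0)
    return (initial_sparsity - final_sparsity) * (1.0 - p) ** power + final_sparsity

print(polynomial_sparsity(0))    # 0.0
print(polynomial_sparsity(50))   # 0.7875 = 0.9 - 0.9 * 0.5**3
print(polynomial_sparsity(100))  # 0.9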
Example #4
Source File: attention.py    From deeping-flow with MIT License
def intra_decoder_atten(w_dec, dec_hidden, dec_out):
    """
    Args:
        w_dec: w_dec_atten, size - [dec_hsz*dec_hsz]
        dec_hidden: decode hidden/time, size - [bsz*dec_hsz]
        dec_out: decode out, size - [bsz*time*dec_hsz]

    Return:
        dec_c_t: doc vector, size - [bsz*dec_hsz]
    """
    pre_hiddens = tf.transpose(dec_out, perm=[1, 0, 2])
    times = tf.shape(dec_out)[1]

    # formulation 6
    d_tts = tf.exp(tf.multiply(tf.tile(tf.expand_dims(
        tf.matmul(dec_hidden, w_dec), 0), [times, 1, 1]), pre_hiddens))

    # formulation 7
    norm_d_tt = tf.tile(tf.reduce_sum(d_tts, 0, keep_dims=True), [times, 1, 1])
    alpha_dec_tts = tf.div(d_tts, norm_d_tt)

    # formulation 8
    dec_c_t = tf.reduce_sum(tf.multiply(alpha_dec_tts, pre_hiddens), 0)

    return dec_c_t 
Example #5
Source File: adamirror.py    From HyperGAN with MIT License
def _apply_dense(self, grad, var):
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    if var.dtype.base_dtype == tf.float16:
        eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
    else:
        eps = 1e-8

    v = self.get_slot(var, "v")
    v_t = v.assign(beta2_t * v + (1. - beta2_t) * tf.square(grad))
    m = self.get_slot(var, "m")
    m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad)
    v_t_hat = tf.div(v_t, 1. - beta2_t)
    m_t_hat = tf.div(m_t, 1. - beta1_t)

    g_t = tf.div(m_t, tf.sqrt(v_t) + eps)
    g_t_1 = self.get_slot(var, "g")
    g_t = g_t_1.assign(g_t)

    var_update = state_ops.assign_sub(var, 2. * lr_t * g_t - lr_t * g_t_1)  # Adam would be lr_t * g_t
    return control_flow_ops.group(*[var_update, m_t, v_t, g_t]) 
Example #6
Source File: lenet_preprocessing.py    From tf-pose with Apache License 2.0
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  # Note: resize_image_with_crop_or_pad expects (target_height, target_width);
  # the upstream code passed width first, which only works for square outputs.
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image 
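
The last two ops map uint8 pixel values from [0, 255] into [-1, 1). A quick check of the arithmetic:

import numpy as np

pixels = np.array([0., 64., 128., 255.])
print((pixels - 128.0) / 128.0)  # [-1.  -0.5  0.   0.9921875]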
Example #7
Source File: model_ops.py    From PADME with MIT License
def softmax_N(tensor, name=None):
  """Apply softmax across last dimension of a tensor.

  Args:
    tensor: Input tensor.
    name: Name for this op. If None, defaults to 'softmax_N'.

  Returns:
    A tensor with softmax-normalized values on the last dimension.
  """
  with tf.name_scope(name, 'softmax_N', [tensor]):
    exp_tensor = tf.exp(tensor)
    reduction_indices = [tensor.get_shape().ndims - 1]
    return tf.div(exp_tensor,
                  tf.reduce_sum(
                      exp_tensor, axis=reduction_indices, keep_dims=True)) 
Example #8
Source File: lenet_preprocessing.py    From STORK with MIT License

The code is identical to Example #6: the same TF-Slim lenet_preprocessing.py is vendored by several of these projects, and the height/width note there applies here as well.
Example #9
Source File: util.py    From tensorflow_end2end_speech_recognition with MIT License
def normalize_score(log_probs, sequence_lengths, length_penalty_weight):
    """Normalizes scores for beam search hypotheses by the length.
    Args:
        log_probs: The log probabilities with shape
            `[beam_width, vocab_size]`.
        sequence_lengths: The sequence length of all hypotheses, a tensor
            of shape `[beam_width, vocab_size]`.
        length_penalty_weight: A float value, a scalar that weights the length
            penalty. Disabled with 0.0.
    Returns:
        score: The scores normalized by the length_penalty
    """
    # Calculate the length penalty
    length_penalty = tf.div(
        (5. + tf.to_float(sequence_lengths))**length_penalty_weight,
        (5. + 1.)**length_penalty_weight)
    # NOTE: See details in https://arxiv.org/abs/1609.08144.

    # Normalize log probabilities by the length penalty.
    # A weight of 0 disables the penalty (it is then identically 1).
    if length_penalty_weight is None or length_penalty_weight == 0:
        score = log_probs
    else:
        score = log_probs / length_penalty

    return score 
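
The penalty is the GNMT length normalization from the paper cited above: lp(Y) = (5 + |Y|)**alpha / (5 + 1)**alpha. A quick check of the formula (values illustrative):

def length_penalty(sequence_length, alpha):
    # GNMT length penalty: ((5 + |Y|) / 6) ** alpha.
    return ((5.0 + sequence_length) ** alpha) / ((5.0 + 1.0) ** alpha)

print(length_penalty(10, 0.0))  # 1.0 -- alpha = 0 disables the penalty
print(length_penalty(10, 0.6))  # ~1.73 -- longer hypotheses are favored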
Example #10
Source File: utils.py    From ghostnet with Apache License 2.0
def drop_path(inputs, keep_prob, is_training=True, scope=None):
    """Drops out a whole example hiddenstate with the specified probability.
    """
    with tf.name_scope(scope, 'drop_path', [inputs]):
        net = inputs
        if is_training:
            batch_size = tf.shape(net)[0]
            noise_shape = [batch_size, 1, 1, 1]
            random_tensor = keep_prob
            random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
            binary_tensor = tf.floor(random_tensor)
            net = tf.div(net, keep_prob) * binary_tensor
        return net

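Example #10 (like the drop_path variants in Examples #15 and #25 below) uses the inverted-dropout trick at the whole-example level: floor(keep_prob + U[0, 1)) produces a 0/1 mask that is 1 with probability keep_prob, and dividing the kept activations by keep_prob preserves their expected value. A NumPy sketch of the same mask construction (shapes illustrative):

import numpy as np

keep_prob = 0.8
batch_size = 4

# floor(keep_prob + U[0, 1)) is 1 with probability keep_prob, else 0.
random_tensor = keep_prob + np.random.uniform(size=(batch_size, 1, 1, 1))
binary_tensor = np.floor(random_tensor)

x = np.ones((batch_size, 8, 8, 3), dtype=np.float32)
out = (x / keep_prob) * binary_tensor  # E[out] == E[x]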
Example #11
Source File: lenet_preprocessing.py    From DOTA_models with Apache License 2.0

The code is identical to Example #6.
Example #12
Source File: loss_utils.py    From BERT with Apache License 2.0
def contrastive_loss(label, feat1, feat2, margin=1.0):

	distance = tf.sqrt(1e-20 + tf.reduce_sum(tf.pow(feat1 - feat2, 2), 1, keep_dims=True))
	# distance_norm = tf.add(tf.sqrt(tf.reduce_sum(tf.square(feat1), 1, keep_dims=True)), tf.sqrt(tf.reduce_sum(tf.square(feat2), 1, keep_dims=True)))
	# distance = tf.div(distance, tf.stop_gradient(distance_norm+1e-10))
	distance = tf.reshape(distance, [-1], name="distance")

	input_shape_list = bert_utils.get_shape_list(feat1, expected_rank=[2])
	batch_size = input_shape_list[0]

	y = tf.cast(label, tf.float32)
	# for similar pairs (y = 1), a smaller distance is better
	tmp = y * tf.pow(distance, 2)
	# when distance is larger than margin, then ignore gradient
	tmp2 = (1 - y) * tf.pow(tf.maximum((margin - distance), 0.0), 2)
	per_example_loss = (tmp + tmp2) / 2
	return per_example_loss, distance 
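
This is the standard contrastive loss (Hadsell et al., 2006): L = (y * d**2 + (1 - y) * max(margin - d, 0)**2) / 2. A quick NumPy check of the two branches (values illustrative):

import numpy as np

def contrastive_loss_np(y, distance, margin=1.0):
    # Similar pairs (y = 1) are penalized by squared distance; dissimilar
    # pairs (y = 0) are penalized only inside the margin.
    similar = y * distance ** 2
    dissimilar = (1 - y) * np.maximum(margin - distance, 0.0) ** 2
    return (similar + dissimilar) / 2

print(contrastive_loss_np(1, 0.3))  # 0.045: similar pair, small distance
print(contrastive_loss_np(0, 0.3))  # 0.245: dissimilar pair inside margin
print(contrastive_loss_np(0, 1.5))  # 0.0: dissimilar pair beyond margin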
Example #13
Source File: lenet_preprocessing.py    From ctw-baseline with MIT License

The code is identical to Example #6.
Example #14
Source File: common_layers.py    From BERT with Apache License 2.0
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
  """Pool elements across the last dimension.

  Useful to convert a list of vectors into a single vector so as
  to get a representation of a set.

  Args:
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    pooling_type: the pooling type to use, MAX or AVR
    mask: A tensor of shape [batch_size, sequence_length] containing a
      mask for the inputs with 1's for existing elements, and 0's elsewhere.

  Returns:
    A tensor of shape [batch_size, input_dims] containing the sequences of
    transformed vectors.
  """
  with tf.name_scope("global_pool", values=[inputs]):
    if mask is not None:
      mask = tf.expand_dims(mask, axis=2)
      inputs = tf.multiply(inputs, mask)

    if pooling_type == "MAX":
      # A tf.pool can be used here, but reduce is cleaner
      output = tf.reduce_max(inputs, axis=1)
    elif pooling_type == "AVR":
      if mask is not None:
        # Some elements are dummy (padding), so we can't simply take the mean.
        output = tf.reduce_sum(inputs, axis=1)
        num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
        output = tf.div(output, tf.maximum(num_elems, 1))
      else:
        output = tf.reduce_mean(inputs, axis=1)

  return output 
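
The AVR branch is masked average pooling: padded positions are zeroed by the mask, and the sum is divided by the count of real elements rather than the full sequence length. A NumPy sketch (shapes illustrative):

import numpy as np

inputs = np.array([[[1., 1.], [3., 3.], [9., 9.]]])  # [batch=1, length=3, dims=2]
mask = np.array([[1., 1., 0.]])[..., None]           # last position is padding

masked_sum = (inputs * mask).sum(axis=1)
num_elems = np.maximum(mask.sum(axis=1), 1.0)        # avoid division by zero
print(masked_sum / num_elems)  # [[2. 2.]]: mean over the two real positions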
Example #15
Source File: image_ops.py    From D-VAE with MIT License
def drop_path(x, keep_prob):
  """Drops out a whole example hiddenstate with the specified probability."""

  batch_size = tf.shape(x)[0]
  noise_shape = [batch_size, 1, 1, 1]
  random_tensor = keep_prob
  random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
  binary_tensor = tf.floor(random_tensor)
  x = tf.div(x, keep_prob) * binary_tensor

  return x 
Example #16
Source File: dhn.py    From DeepHash with MIT License
def cross_entropy(u, label_u, alpha=0.5, normed=False):

    label_ip = tf.cast(
        tf.matmul(label_u, tf.transpose(label_u)), tf.float32)
    s = tf.clip_by_value(label_ip, 0.0, 1.0)

    # compute balance param
    # s_t \in {-1, 1}
    s_t = tf.multiply(tf.add(s, tf.constant(-0.5)), tf.constant(2.0))
    sum_1 = tf.reduce_sum(s)
    sum_all = tf.reduce_sum(tf.abs(s_t))
    balance_param = tf.add(tf.abs(tf.add(s, tf.constant(-1.0))),
                           tf.multiply(tf.div(sum_all, sum_1), s))

    if normed:
        # ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
        ip_1 = tf.matmul(u, tf.transpose(u))

        def reduce_shaper(t):
            return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])

        mod_1 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(u)),
                                  reduce_shaper(tf.square(u)),
                                  transpose_b=True))
        ip = tf.div(ip_1, mod_1)
    else:
        ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
    ones = tf.ones([tf.shape(u)[0], tf.shape(u)[0]])
    return tf.reduce_mean(
        tf.multiply(tf.log(ones + tf.exp(alpha * ip)) - s * alpha * ip,
                    balance_param)) 
Example #17
Source File: adv_utils.py    From generative_adversary with GNU General Public License v3.0
def feature_squeeze(images, dataset='cifar'):
    # color depth reduction
    if dataset == 'cifar':
        npp = 2 ** 5
    elif dataset == 'mnist':
        npp = 2 ** 3
    else:
        raise ValueError('unsupported dataset: %s' % dataset)

    npp_int = npp - 1
    images = images / 255.
    x_int = tf.rint(tf.multiply(images, npp_int))
    x_float = tf.div(x_int, npp_int)
    return median_filtering_2x2(x_float, dataset=dataset) 
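
The color-depth reduction comes from feature squeezing: each pixel in [0, 1] is snapped to the nearest of 2**bits representable values via rint(x * (2**bits - 1)) / (2**bits - 1). A NumPy sketch (values illustrative):

import numpy as np

def reduce_color_depth(x, bits):
    # x in [0, 1]; quantize to 2**bits levels.
    levels = 2 ** bits - 1
    return np.rint(x * levels) / levels

x = np.array([0.0, 0.10, 0.49, 0.51, 1.0])
print(reduce_color_depth(x, 3))  # approx. [0.  0.143  0.429  0.571  1.]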
Example #18
Source File: loopprocessing.py    From How_to_generate_music_in_tensorflow_LIVE with Apache License 2.0
def __call__(self, prev_output):
        """ Use TODO formula
        Args:
            prev_output (tf.Tensor): the output to which the transformation is applied
        Return:
            tf.Ops: the processing operator
        """
        # prev_output size: [batch_size, nb_labels]
        nb_labels = prev_output.get_shape().as_list()[-1]

        if False:  # TODO: Add option to control argmax
            #label_draws = tf.argmax(prev_output, 1)
            label_draws = tf.multinomial(tf.log(prev_output), 1)  # Draw 1 sample from the distribution
            label_draws = tf.squeeze(label_draws, [1])
            self.chosen_labels.append(label_draws)
            next_input = tf.one_hot(label_draws, nb_labels)
            return next_input
        # Could use the Gumbel-Max trick to sample from a softmax distribution?

        soft_values = tf.exp(tf.div(prev_output, self.temperature))  # Pi = exp(pi/t)
        # soft_values size: [batch_size, nb_labels]

        normalisation_coeff = tf.expand_dims(tf.reduce_sum(soft_values, 1), -1)
        # normalisation_coeff size: [batch_size, 1]
        probs = tf.div(soft_values, normalisation_coeff + 1e-8)  # = Pi / sum(Pk)
        # probs size: [batch_size, nb_labels]
        label_draws = tf.multinomial(tf.log(probs), 1)  # Draw 1 sample from the log-probability distribution
        # label_draws size: [batch_size, 1]
        label_draws = tf.squeeze(label_draws, [1])
        # label_draws size: [batch_size,]
        self.chosen_labels.append(label_draws)
        next_input = tf.one_hot(label_draws, nb_labels)  # Reencode the next input vector
        # next_input size: [batch_size, nb_labels]
        return next_input 
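
The sampling above applies the standard temperature softmax, P_i = exp(p_i / T) / sum_k exp(p_k / T): temperatures above 1 flatten the distribution, temperatures below 1 sharpen it. A NumPy sketch (values illustrative):

import numpy as np

def temperature_softmax(values, temperature):
    scaled = np.exp(values / temperature)
    return scaled / scaled.sum()

values = np.array([2.0, 1.0, 0.1])
print(temperature_softmax(values, 1.0))   # [0.659 0.242 0.099]
print(temperature_softmax(values, 10.0))  # nearly uniform
print(temperature_softmax(values, 0.1))   # nearly one-hot on index 0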
Example #19
Source File: model.py    From rgn with MIT License
def _weights(config, masks, curriculum_step=None):
    """ Returns dRMSD weights that mask meaningless (missing or longer than 
        sequence residues) pairwise distances and incorporate the state of 
        the curriculum to differentially weigh pairwise distances based on 
        their proximity. """

    if config['atoms'] == 'c_alpha':
        if config['mode'] != 'loss':
            # no loss-based curriculum, create fixed weighting matrix that weighs all distances equally. 
            # minus one factor is there because we ignore self-distances.
            flat_curriculum_weights = np.ones(config['num_steps'] - config['num_edge_residues'] - 1, dtype='float32')

        elif config['mode'] == 'loss' and curriculum_step is not None:
            # create appropriate weights based on curriculum parameters and current step.
            flat_curriculum_weights = curriculum_weights(base=curriculum_step, 
                                                         slope=config['slope'], 
                                                         max_seq_length=config['num_steps'] - config['num_edge_residues'])
        else:
            raise RuntimeError('Curriculum step tensor not supplied.')

        # weighting matrix for entire batch that accounts for curriculum weighting.
        unnormalized_weights = weighting_matrix(flat_curriculum_weights, name='unnormalized_weights')
                               # [NUM_STEPS - NUM_EDGE_RESIDUES, NUM_STEPS - NUM_EDGE_RESIDUES]

        # create final weights by multiplying with masks and normalizing.
        mask_length = tf.shape(masks)[0]
        unnormalized_masked_weights = masks * unnormalized_weights[:mask_length, :mask_length, tf.newaxis]
        masked_weights = tf.div(unnormalized_masked_weights, 
                                tf.reduce_sum(unnormalized_masked_weights, axis=[0, 1]), 
                                name='weights')

        return masked_weights, flat_curriculum_weights

    else:
        raise NotImplementedError('Model does not currently support anything other than C alpha atoms for the loss function.') 
Example #20
Source File: common_layers.py    From BERT with Apache License 2.0
def shakeshake(xs, equal_grad=False):
  """Multi-argument shake-shake, currently approximated by sums of 2."""
  if len(xs) == 1:
    return xs[0]
  div = (len(xs) + 1) // 2
  arg1 = shakeshake(xs[:div], equal_grad=equal_grad)
  arg2 = shakeshake(xs[div:], equal_grad=equal_grad)
  if equal_grad:
    return shakeshake2_eqgrad(arg1, arg2)
  return shakeshake2(arg1, arg2) 
Example #21
Source File: siamese_network_semantic.py    From deep-siamese-text-similarity with MIT License
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size, trainableEmbeddings):

        # Placeholders for input, output and dropout
        self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
        self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
        self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0, name="l2_loss")
          
        # Embedding layer
        with tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.constant(0.0, shape=[vocab_size, embedding_size]),
                trainable=trainableEmbeddings,name="W")
            self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        print(self.embedded_words1)
        # Run each side through the stacked RNN and compute the normalized distance
        with tf.name_scope("output"):
            self.out1 = self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
            self.out2 = self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
            self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
            self.distance = tf.div(self.distance,
                                   tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                          tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
            self.distance = tf.reshape(self.distance, [-1], name="distance")
        with tf.name_scope("loss"):
            self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)
        #### Accuracy computation is outside of this class.
        with tf.name_scope("accuracy"):
            self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
            correct_predictions = tf.equal(self.temp_sim, self.input_y)
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") 
Example #22
Source File: input_fn.py    From imitation-learning with MIT License
def read_fn(tf_example):
        """Given a tf_example dict, separates into feature_dict and target_dict"""
        flat_img = tf_example[ilc.FEATKEY_IMG]
        img = convert_image_tf(flat_img)
        img = tf.cast(img, tf.float32)
        img = tf.squeeze(img)
        img = tf.div(img, 255.0)

        feats = {
            ilc.FEATKEY_IMG: img,
            ilc.TGT_SPEED: tf_example[ilc.TGT_SPEED],
        }
        tgts = {key: tf_example[key] for key in ilc.TGT_KEYS}

        return feats, tgts 
Example #23
Source File: ops.py    From TripleGAN-Tensorflow with MIT License
def instance_norm(x, is_training, scope):
    with tf.variable_scope(scope):
        epsilon = 1e-5
        mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
        scale = tf.get_variable('scale', [x.get_shape()[-1]],
                                initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
        offset = tf.get_variable('offset', [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
        out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset

        return out 
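
Instance normalization standardizes each example over its spatial axes, per channel, before applying the learned scale and offset. A NumPy sketch of the same computation (the function name is illustrative):

import numpy as np

def instance_norm_np(x, scale, offset, epsilon=1e-5):
    # x: [batch, height, width, channels]; moments over (H, W) match
    # tf.nn.moments(x, [1, 2], keep_dims=True).
    mean = x.mean(axis=(1, 2), keepdims=True)
    var = x.var(axis=(1, 2), keepdims=True)
    return scale * (x - mean) / np.sqrt(var + epsilon) + offset

x = np.random.randn(2, 8, 8, 3).astype(np.float32)
y = instance_norm_np(x, scale=1.0, offset=0.0)
print(np.abs(y.mean(axis=(1, 2))).max())  # ~0: normalized per example/channel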
Example #24
Source File: ops.py    From tensorflow-image-wavenet with MIT License
def batch_to_time(value, dilation, name=None):
    with tf.name_scope('batch_to_time'):
        shape = tf.shape(value)
        prepared = tf.reshape(value, [dilation, -1, shape[2]])
        transposed = tf.transpose(prepared, perm=[1, 0, 2])
        return tf.reshape(transposed,
                          [tf.div(shape[0], dilation), -1, shape[2]]) 
Example #25
Source File: nasnet_utils.py    From tf-pose with Apache License 2.0
def drop_path(net, keep_prob, is_training=True):
  """Drops out a whole example hiddenstate with the specified probability."""
  if is_training:
    batch_size = tf.shape(net)[0]
    noise_shape = [batch_size, 1, 1, 1]
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
    binary_tensor = tf.floor(random_tensor)
    net = tf.div(net, keep_prob) * binary_tensor
  return net 
Example #26
Source File: impute.py    From aboleth with Apache License 2.0
def _impute_columns(self, X_2D_zero):
        """Generate a vector of means from X batches."""
        # Sum the real values in each column
        col_tot = tf.reduce_sum(X_2D_zero, 0)

        # Divide column totals by the number of non-NaN values
        num_values_col = tf.reduce_sum(self.real_val_mask, 0)
        num_values_col = tf.maximum(num_values_col,
                                    tf.ones(tf.shape(num_values_col)))
        col_nan_means = tf.div(col_tot, num_values_col)
        return col_nan_means 
Example #27
Source File: sarcos.py    From aboleth with Apache License 2.0
def r2_metric(labels, predictions):
    SST, update_op1 = tf.metrics.mean_squared_error(
        labels, tf.reduce_mean(labels, axis=0))
    SSE, update_op2 = tf.metrics.mean_squared_error(labels, predictions)
    return tf.subtract(1.0, tf.div(SSE, SST)), tf.group(update_op1, update_op2) 
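
This is the coefficient of determination, R**2 = 1 - SSE / SST, with SST the mean squared error of the constant mean predictor. A quick NumPy check of the formula outside the tf.metrics machinery:

import numpy as np

def r2_score(labels, predictions):
    sse = np.mean((labels - predictions) ** 2)
    sst = np.mean((labels - labels.mean()) ** 2)
    return 1.0 - sse / sst

labels = np.array([1.0, 2.0, 3.0, 4.0])
print(r2_score(labels, labels))                     # 1.0: perfect fit
print(r2_score(labels, np.full(4, labels.mean())))  # 0.0: mean predictor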
Example #28
Source File: imagenet_preprocessing.py    From tensornets with MIT License
def _mean_image_subtraction(image, means, num_channels, stds=None):
  """Subtracts the given means from each image channel.

  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)

  Note that the rank of `image` must be known.

  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.
    num_channels: number of color channels in the image that will be distorted.
    stds: optionally, a C-vector of standard deviations to divide each channel
      by after mean subtraction.

  Returns:
    the centered image.

  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')

  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')

  # We have a 1-D tensor of means; convert to 3-D.
  means = tf.reshape(means, (1, 1, num_channels))
  images = image - means

  if stds is not None:
    if len(stds) != num_channels:
      raise ValueError('len(stds) must match the number of channels')

    # We have a 1-D tensor of stds; convert to 3-D.
    stds = tf.reshape(stds, (1, 1, num_channels))
    images = tf.div(images, stds)

  return images 
Example #29
Source File: modules.py    From parallel-wavenet-vocoder with MIT License
def causal_conv(value, filter_, dilation, name='causal_conv'):
    def time_to_batch(value, dilation):
        shape = tf.shape(value)
        pad_elements = dilation - 1 - (shape[1] + dilation - 1) % dilation
        padded = tf.pad(value, [[0, 0], [0, pad_elements], [0, 0]])
        reshaped = tf.reshape(padded, [-1, dilation, shape[2]])
        transposed = tf.transpose(reshaped, perm=[1, 0, 2])
        return tf.reshape(transposed, [shape[0] * dilation, -1, shape[2]])

    def batch_to_time(value, dilation):
        shape = tf.shape(value)
        prepared = tf.reshape(value, [dilation, -1, shape[2]])
        transposed = tf.transpose(prepared, perm=[1, 0, 2])
        return tf.reshape(transposed,
                          [tf.div(shape[0], dilation), -1, shape[2]])

    with tf.variable_scope(name):
        filter_width = tf.shape(filter_)[0]
        if dilation > 1:
            transformed = time_to_batch(value, dilation)
            # pad on the left, because tf.nn.conv1d does not support left-side padding with padding='SAME'
            padded = tf.pad(transformed, [[0, 0], [filter_width - 1, 0], [0, 0]])
            conv = tf.nn.conv1d(padded, filter_, stride=1, padding='VALID')
            restored = batch_to_time(conv, dilation)

            # Remove excess elements at the end caused by padding in time_to_batch.
            result = tf.slice(restored,
                              [0, 0, 0],
                              [-1, tf.shape(value)[1], -1])
        else:
            padded = tf.pad(value, [[0, 0], [filter_width - 1, 0], [0, 0]])
            result = tf.nn.conv1d(padded, filter_, stride=1, padding='VALID')
        return result 
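
time_to_batch interleaves time steps across the batch axis so that a dilated convolution can be computed as an ordinary stride-1 convolution, and batch_to_time inverts the reshuffle. A NumPy sketch of the shape bookkeeping (shapes illustrative):

import numpy as np

def time_to_batch_np(value, dilation):
    # Interleave time steps across the batch axis; a dilated conv over
    # `value` becomes a stride-1 conv over the result.
    batch, time, channels = value.shape
    pad = dilation - 1 - (time + dilation - 1) % dilation
    padded = np.pad(value, [(0, 0), (0, pad), (0, 0)])
    reshaped = padded.reshape(-1, dilation, channels)
    transposed = reshaped.transpose(1, 0, 2)
    return transposed.reshape(batch * dilation, -1, channels)

x = np.zeros((3, 9, 5))              # [batch, time, channels]
print(time_to_batch_np(x, 2).shape)  # (6, 5, 5): time padded to 10, then halved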
Example #30
Source File: model_deploy.py    From ctw-baseline with MIT License
def _gather_clone_loss(clone, num_clones, regularization_losses):
  """Gather the loss for a single clone.

  Args:
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.

  Returns:
    A tensor for the total loss for the clone.  Can be None.
  """
  # The return value.
  sum_loss = None
  # Individual components of the loss that will need summaries.
  clone_loss = None
  regularization_loss = None
  # Compute and aggregate losses on the clone device.
  with tf.device(clone.device):
    all_losses = []
    clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
    if clone_losses:
      clone_loss = tf.add_n(clone_losses, name='clone_loss')
      if num_clones > 1:
        clone_loss = tf.div(clone_loss, 1.0 * num_clones,
                            name='scaled_clone_loss')
      all_losses.append(clone_loss)
    if regularization_losses:
      regularization_loss = tf.add_n(regularization_losses,
                                     name='regularization_loss')
      all_losses.append(regularization_loss)
    if all_losses:
      sum_loss = tf.add_n(all_losses)
  # Add the summaries out of the clone device block.
  if clone_loss is not None:
    tf.summary.scalar(clone.scope + '/clone_loss', clone_loss)
  if regularization_loss is not None:
    tf.summary.scalar('regularization_loss', regularization_loss)
  return sum_loss