Python tensorflow.compat.v1.square() Examples

The following are 30 code examples of tensorflow.compat.v1.square(), drawn from open-source projects. The originating project and source file are noted above each example. You may also want to look at the other available functions and classes of the tensorflow.compat.v1 module.
Example #1
Source File: expert_utils.py    From tensor2tensor with Apache License 2.0
def cv_squared(x):
  """The squared coefficient of variation of a sample.

  Useful as a loss to encourage a positive distribution to be more uniform.
  Epsilons added for numerical stability.
  Returns 0 for an empty Tensor.

  Args:
    x: a `Tensor`.

  Returns:
    a `Scalar`.
  """
  epsilon = 1e-10
  float_size = tf.to_float(tf.size(x)) + epsilon
  mean = tf.reduce_sum(x) / float_size
  variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size
  return variance / (tf.square(mean) + epsilon) 
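
A minimal usage sketch (not part of expert_utils.py; the load values below are illustrative) showing how cv_squared could be evaluated under the tf.compat.v1 graph/session API:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # use the v1 graph/session semantics assumed above

# Hypothetical per-expert load counts; a perfectly uniform vector gives 0.
gate_loads = tf.constant([4.0, 1.0, 7.0, 0.0])
loss = cv_squared(gate_loads)

with tf.Session() as sess:
  print(sess.run(loss))  # larger values indicate a less uniform distribution
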
Example #2
Source File: learner.py    From meta-dataset with Apache License 2.0
def proto_maml_fc_bias(self, prototypes, zero_pad_to_max_way=False):
    """Computes the Prototypical MAML fc layer's bias.

    Args:
      prototypes: Tensor of shape [num_classes, embedding_size]
      zero_pad_to_max_way: Whether to zero-pad to the maximum number of ways.

    Returns:
      fc_bias: Tensor of shape [num_classes] or [self.logit_dim]
        when zero_pad_to_max_way is True.
    """
    fc_bias = -tf.square(tf.norm(prototypes, axis=1))
    if zero_pad_to_max_way:
      paddings = [[0, self.logit_dim - tf.shape(fc_bias)[0]]]
      fc_bias = tf.pad(fc_bias, paddings, 'CONSTANT', constant_values=0)
    return fc_bias 
Example #3
Source File: losses.py    From models with Apache License 2.0
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets.
      weights: A float tensor of shape [batch_size, num_anchors].

    Returns:
      loss: A float tensor of shape [batch_size, num_anchors] representing
        the value of the loss function.
    """
    weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims(
        weights, 2)
    square_diff = 0.5 * tf.square(weighted_diff)
    return tf.reduce_sum(square_diff, 2) 
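
As a standalone sanity check of the math above (a hedged sketch, not part of losses.py; the toy tensors are made up), the weighted L2 term can be reproduced with plain tf.compat.v1 ops:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

preds = tf.constant([[[1.0, 2.0], [0.5, 0.5]]])    # [batch=1, anchors=2, code=2]
targets = tf.constant([[[1.5, 2.0], [0.0, 1.0]]])
weights = tf.constant([[1.0, 0.0]])                # second anchor is masked out

weighted_diff = (preds - targets) * tf.expand_dims(weights, 2)
loss = tf.reduce_sum(0.5 * tf.square(weighted_diff), 2)  # shape [batch, anchors]

with tf.Session() as sess:
  print(sess.run(loss))  # [[0.125, 0.0]]
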
Example #4
Source File: cost.py    From super-resolution-videos with The Unlicense
def normalized_mean_square_error(output, target):
    """Return the TensorFlow expression of normalized mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1,2,3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1,2,3]))
        nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse 
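
A brief usage sketch (assuming the function above and v1 graph mode; the tensors are illustrative): for 2D inputs the result is the batch mean of ||output - target|| / ||target||.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

output = tf.constant([[1.0, 2.0], [3.0, 4.0]])
target = tf.constant([[1.0, 0.0], [3.0, 4.0]])
nmse = normalized_mean_square_error(output, target)

with tf.Session() as sess:
  print(sess.run(nmse))  # (2/1 + 0/5) / 2 = 1.0
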
Example #5
Source File: op_evaluator_test.py    From tensorboard with Apache License 2.0
def setUp(self):
        super(PersistentOpEvaluatorTest, self).setUp()

        patch = tf.test.mock.patch(
            "tensorflow.compat.v1.Session", wraps=tf.Session
        )
        patch.start()
        self.addCleanup(patch.stop)

        class Squarer(op_evaluator.PersistentOpEvaluator):
            def __init__(self):
                super(Squarer, self).__init__()
                self._input = None
                self._squarer = None

            def initialize_graph(self):
                self._input = tf.placeholder(tf.int32)
                self._squarer = tf.square(self._input)

            def run(self, xs):  # pylint: disable=arguments-differ
                return self._squarer.eval(feed_dict={self._input: xs})

        self._square = Squarer() 
Example #6
Source File: tf_mittens.py    From mittens with Apache License 2.0
def _get_cost_function(self):
        """Compute the cost of the Mittens objective function.

        If self.mittens = 0, this is the same as the cost of GloVe.
        """
        self.weights = tf.placeholder(
            tf.float32, shape=[self.n_words, self.n_words])
        self.log_coincidence = tf.placeholder(
            tf.float32, shape=[self.n_words, self.n_words])
        self.diffs = tf.subtract(self.model, self.log_coincidence)
        cost = tf.reduce_sum(
            0.5 * tf.multiply(self.weights, tf.square(self.diffs)))
        if self.mittens > 0:
            self.mittens = tf.constant(self.mittens, tf.float32)
            cost += self.mittens * tf.reduce_sum(
                tf.multiply(
                    self.has_embedding,
                    self._tf_squared_euclidean(
                        tf.add(self.W, self.C),
                        self.original_embedding)))
        tf.summary.scalar("cost", cost)
        return cost 
Example #7
Source File: image_vae.py    From magenta with Apache License 2.0
def bottleneck(self, x):
    z_size = self.hparams.bottleneck_bits
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope('bottleneck', reuse=tf.AUTO_REUSE):
      mu = x[..., :self.hparams.bottleneck_bits]
      if self.hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return mu, 0.0  # No sampling or kl loss on eval.
      log_sigma = x[..., self.hparams.bottleneck_bits:]
      epsilon = tf.random_normal(x_shape[:-1] + [z_size])
      z = mu + tf.exp(log_sigma / 2) * epsilon
      kl = 0.5 * tf.reduce_mean(
          tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
      # This is the 'free bits' trick mentioned in Kingma et al. (2016)
      free_bits = self.hparams.free_bits
      kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss * self.hparams.kl_beta 
Example #8
Source File: grouping_regularizers.py    From morph-net with Apache License 2.0
def lazy_square(tensor):
  """Computes the square of a tensor in a lazy way.

  This function is lazy in the following sense: if the input was produced as
    tensor = tf.sqrt(input)
  it returns input directly (rather than building tf.square(tensor)).

  Args:
    tensor: A `Tensor` of floats to compute the square of.

  Returns:
    The square of the input tensor.
  """
  if tensor.op.type == 'Sqrt':
    return tensor.op.inputs[0]
  else:
    return tf.square(tensor) 
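
A small sketch (not from grouping_regularizers.py) showing the short-circuit: when the input tensor was produced by a Sqrt op, lazy_square hands back that op's own input instead of adding a Square op to the graph.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.placeholder(tf.float32, shape=[3])
y = tf.sqrt(x)

print(lazy_square(y).name == x.name)  # True: the Sqrt input is returned as-is
print(lazy_square(x).op.type)         # 'Square': falls back to tf.square
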
Example #9
Source File: flop_regularizer_test.py    From morph-net with Apache License 2.0
def test_group_lasso_conv3d(self):
    shape = [3, 3, 3]
    video = tf.zeros([2, 3, 3, 3, 1])
    net = slim.conv3d(
        video,
        5,
        shape,
        padding='VALID',
        weights_initializer=tf.glorot_normal_initializer(),
        scope='vconv1')
    conv3d_op = tf.get_default_graph().get_operation_by_name('vconv1/Conv3D')
    conv3d_weights = conv3d_op.inputs[1]

    threshold = 0.09
    flop_reg = flop_regularizer.GroupLassoFlopsRegularizer([net.op],
                                                           threshold=threshold)
    norm = tf.sqrt(tf.reduce_mean(tf.square(conv3d_weights), [0, 1, 2, 3]))
    alive = tf.reduce_sum(tf.cast(norm > threshold, tf.float32))
    with self.session():
      flop_coeff = 2 * shape[0] * shape[1] * shape[2]
      tf.compat.v1.global_variables_initializer().run()
      self.assertAllClose(flop_reg.get_cost(), flop_coeff * alive)
      self.assertAllClose(flop_reg.get_regularization_term(),
                          flop_coeff * tf.reduce_sum(norm)) 
Example #10
Source File: discretization.py    From tensor2tensor with Apache License 2.0
def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

  Returns:
    Sampled latent z, KL loss, mu and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma 
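
Since tf.expm1(log_sigma) equals exp(log_sigma) - 1, this KL term is the same closed form used in Example #7, just with the -1 constant folded into expm1. A quick standalone check (the constants are arbitrary):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

log_sigma, mu = tf.constant(0.3), tf.constant(0.7)
a = tf.expm1(log_sigma) + tf.square(mu) - log_sigma
b = tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma

with tf.Session() as sess:
  print(sess.run(tf.abs(a - b)) < 1e-6)  # True
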
Example #11
Source File: autoencoders.py    From tensor2tensor with Apache License 2.0
def bottleneck(self, x):
    hparams = self.hparams
    z_size = hparams.bottleneck_bits
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope("vae"):
      mu = tf.layers.dense(x, z_size, name="mu")
      if hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return mu, 0.0  # No sampling or kl loss on eval.
      log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
      epsilon = tf.random_normal(x_shape[:-1] + [z_size])
      z = mu + tf.exp(log_sigma / 2) * epsilon
      kl = 0.5 * tf.reduce_mean(
          tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
      free_bits = z_size // 4
      kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss * hparams.kl_beta 
Example #12
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_l2_normalize(ishape, eps, axis):
    """ testing l2 normalize (uses max, sum, square, sqrt frontend operators)"""

    inp_array = np.random.uniform(size=ishape).astype(np.float32)

    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
        nn.l2_normalize(in1,
                        axis=axis,
                        epsilon=eps,
                        name=None,
                        dim=None)

        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'l2_normalize:0') 
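
For reference, a hedged sketch (not part of the TVM test suite) of the decomposition the docstring mentions: tf.nn.l2_normalize with a given epsilon is equivalent to scaling by the reciprocal square root of the clipped sum of squares.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.placeholder(tf.float32, shape=[2, 3])
manual = x * tf.rsqrt(
    tf.maximum(tf.reduce_sum(tf.square(x), axis=1, keepdims=True), 1e-12))
builtin = tf.nn.l2_normalize(x, axis=1, epsilon=1e-12)

with tf.Session() as sess:
    data = np.random.uniform(size=(2, 3)).astype(np.float32)
    a, b = sess.run([manual, builtin], feed_dict={x: data})
    print(np.allclose(a, b, atol=1e-6))  # True
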
Example #13
Source File: model_utils.py    From language with Apache License 2.0
def variable_summaries(var, scope=""):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope(scope):
    with tf.name_scope("summaries"):
      mean = tf.reduce_mean(var)
      tf.summary.scalar("mean", mean)
      with tf.name_scope("stddev"):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
      tf.summary.scalar("stddev", stddev)
      tf.summary.scalar("max", tf.reduce_max(var))
      tf.summary.scalar("min", tf.reduce_min(var))
      tf.summary.histogram("histogram", var) 
Example #14
Source File: yellowfin.py    From tensor2tensor with Apache License 2.0
def _grad_variance(self):
    """Estimate of gradient Variance.

    Returns:
      C_t ops.
    """
    grad_var_ops = []
    tensor_to_avg = []
    for t, g in zip(self._vars, self._grad):
      if isinstance(g, tf.IndexedSlices):
        tensor_to_avg.append(
            tf.reshape(tf.unsorted_segment_sum(g.values,
                                               g.indices,
                                               g.dense_shape[0]),
                       shape=t.get_shape()))
      else:
        tensor_to_avg.append(g)
    avg_op = self._moving_averager.apply(tensor_to_avg)
    grad_var_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
      self._grad_avg = [self._moving_averager.average(val)
                        for val in tensor_to_avg]
      self._grad_avg_squared = [tf.square(val) for val in self._grad_avg]

    # Compute Variance
    self._grad_var = tf.maximum(
        tf.constant(1e-6, dtype=self._grad_norm_squared_avg.dtype),
        self._grad_norm_squared_avg
        - tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared]))
    if self._sparsity_debias:
      self._grad_var *= self._sparsity_avg
    return grad_var_ops  # C_t 
Example #15
Source File: value_ops.py    From trfl with Apache License 2.0
def td_learning(v_tm1, r_t, pcont_t, v_t, name="TDLearning"):
  """Implements the TD(0)-learning loss as a TensorFlow op.

  The TD loss is `0.5` times the squared difference between `v_tm1` and
  the target `r_t + pcont_t * v_t`.

  See "Learning to Predict by the Methods of Temporal Differences" by Sutton.
  (https://link.springer.com/article/10.1023/A:1022633531479).

  Args:
    v_tm1: Tensor holding values at previous timestep, shape `[B]`.
    r_t: Tensor holding rewards, shape `[B]`.
    pcont_t: Tensor holding pcontinue values, shape `[B]`.
    v_t: Tensor holding values at current timestep, shape `[B]`.
    name: name to prefix ops created by this function.

  Returns:
    A namedtuple with fields:

    * `loss`: a tensor containing the batch of losses, shape `[B]`.
    * `extra`: a namedtuple with fields:
        * `target`: batch of target values for `v_tm1`, shape `[B]`.
        * `td_error`: batch of temporal difference errors, shape `[B]`.
  """
  # Rank and compatibility checks.
  base_ops.wrap_rank_shape_assert([[v_tm1, v_t, r_t, pcont_t]], [1], name)

  # TD(0)-learning op.
  with tf.name_scope(name, values=[v_tm1, r_t, pcont_t, v_t]):

    # Build target.
    target = tf.stop_gradient(r_t + pcont_t * v_t)

    # Temporal difference error and loss.
    # Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
    td_error = target - v_tm1
    loss = 0.5 * tf.square(td_error)
    return base_ops.LossOutput(loss, TDExtra(target, td_error)) 
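
A minimal usage sketch (assuming the trfl package is installed and exposes td_learning at the top level; the numbers are illustrative): with a batch of two transitions the per-element loss is 0.5 * (r_t + pcont_t * v_t - v_tm1)^2.

import tensorflow.compat.v1 as tf
import trfl

tf.disable_v2_behavior()

v_tm1 = tf.constant([1.0, 2.0])
r_t = tf.constant([0.0, 1.0])
pcont_t = tf.constant([0.9, 0.0])
v_t = tf.constant([2.0, 5.0])

loss, extra = trfl.td_learning(v_tm1, r_t, pcont_t, v_t)

with tf.Session() as sess:
  print(sess.run(loss))  # [0.32, 0.5]
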
Example #16
Source File: value_ops.py    From trfl with Apache License 2.0
def qv_max(v_tm1, r_t, pcont_t, q_t, name="QVMAX"):
  """Implements the QVMAX learning loss as a TensorFlow op.

  The QVMAX loss is `0.5` times the squared difference between `v_tm1` and
  the target `r_t + pcont_t * max q_t`, where `q_t` is separately learned
  through QV learning (c.f. `action_value_ops.qv_learning`).

  See "The QV Family Compared to Other Reinforcement Learning Algorithms" by
  Wiering and van Hasselt (2009).
  (http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.713.1931)

  Args:
    v_tm1: Tensor holding values at previous timestep, shape `[B]`.
    r_t: Tensor holding rewards, shape `[B]`.
    pcont_t: Tensor holding pcontinue values, shape `[B]`.
    q_t: Tensor of action values at current timestep, shape `[B, num_actions]`.
    name: name to prefix ops created by this function.

  Returns:
    A namedtuple with fields:

    * `loss`: a tensor containing the batch of losses, shape `[B]`.
    * `extra`: a namedtuple with fields:
        * `target`: batch of target values for `v_tm1`, shape `[B]`.
        * `td_error`: batch of temporal difference errors, shape `[B]`.
  """
  # Rank and compatibility checks.
  base_ops.wrap_rank_shape_assert([[v_tm1, r_t, pcont_t], [q_t]], [1, 2], name)

  # The QVMAX op.
  with tf.name_scope(name, values=[v_tm1, r_t, pcont_t, q_t]):

    # Build target.
    target = tf.stop_gradient(r_t + pcont_t * tf.reduce_max(q_t, axis=1))

    # Temporal difference error and loss.
    # Loss is MSE scaled by 0.5, so the gradient is equal to the TD error.
    td_error = target - v_tm1
    loss = 0.5 * tf.square(td_error)
    return base_ops.LossOutput(loss, TDExtra(target, td_error)) 
Example #17
Source File: retrain.py    From AudioNet with MIT License
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var) 
Example #18
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_l2_pool2d(input_shape, ksize, strides, padding, data_format, fused_func_name=None):
    x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1

    with tf.Graph().as_default():
        in_data = tf.placeholder(
            dtype=tf.float32, name="input", shape=input_shape)
        out = tf.sqrt(tf.nn.avg_pool(
            tf.square(in_data), ksize=ksize, strides=strides,
            padding=padding, data_format=data_format))
        out = with_fused_activation_function(out, fused_func_name)

        compare_tflite_with_tvm(x, 'input', [in_data], [out]) 
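
Core TensorFlow has no dedicated L2-pooling op, which is presumably why the test assembles it from square, avg_pool and sqrt. A standalone sketch of the same construction (shapes and pooling parameters here are made up; NHWC layout assumed):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.placeholder(tf.float32, shape=[1, 4, 4, 1], name="x")
l2_pool = tf.sqrt(tf.nn.avg_pool(
    tf.square(x), ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID"))

with tf.Session() as sess:
    data = np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1)
    print(sess.run(l2_pool, feed_dict={x: data}).shape)  # (1, 2, 2, 1)
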
Example #19
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_square(data):
    """ One iteration of square """
    return _test_unary_elemwise(math_ops.square, data)

#######################################################################
# Elu
# --- 
Example #20
Source File: discretization.py    From tensor2tensor with Apache License 2.0
def vq_nearest_neighbor(x, means,
                        soft_em=False, num_samples=10, temperature=None):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = common_layers.shape_list(means)[0]
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if soft_em:
    x_means_idx = tf.multinomial(-dist, num_samples=num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=common_layers.shape_list(means)[0])
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    if temperature is None:
      x_means_idx = tf.argmax(-dist, axis=-1)
    else:
      x_means_idx = tf.multinomial(- dist / temperature, 1)
      x_means_idx = tf.squeeze(x_means_idx, axis=-1)
    if (common_layers.should_generate_summaries() and
        not common_layers.is_xla_compiled()):
      tf.summary.histogram("means_idx", tf.reshape(x_means_idx, [-1]))
    x_means_hot = tf.one_hot(x_means_idx, bottleneck_size)
  x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size])
  x_means = tf.matmul(x_means_hot_flat, means)
  e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, e_loss, dist 
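
The distance matrix above uses the expansion ||x - m||^2 = ||x||^2 + ||m||^2 - 2 x.m rather than materializing all pairwise differences. A small sketch (not part of discretization.py) checking that expansion against a direct broadcast computation:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.constant(np.random.randn(4, 8).astype(np.float32))       # 4 vectors
means = tf.constant(np.random.randn(16, 8).astype(np.float32))  # 16 code vectors

x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * tf.matmul(x, means, transpose_b=True)

direct = tf.reduce_sum(
    tf.square(tf.expand_dims(x, 1) - tf.expand_dims(means, 0)), axis=-1)

with tf.Session() as sess:
  d1, d2 = sess.run([dist, direct])
  print(np.allclose(d1, d2, atol=1e-4))  # True
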
Example #21
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_forward_square():
    """test operator Square """
    np_data = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
        tf.square(in_data, name="square")
        compare_tf_with_tvm([np_data], ['in_data:0'], 'square:0') 
Example #22
Source File: test_control_flow.py    From incubator-tvm with Apache License 2.0
def test_multiple_cond_vars():
    graph = tf.Graph()
    with graph.as_default():
        x1 = tf.constant(7)
        x2 = tf.constant(12)
        z = tf.constant(20)
        r = tf.cond(tf.less(tf.add(x1, x2), 10),
                    lambda: tf.add(10, 2), lambda: tf.square(5))

        with tf.Session() as sess:
            tf_out = sess.run(r)

    check_equal(graph, tf_out) 
Example #23
Source File: test_control_flow.py    From incubator-tvm with Apache License 2.0
def test_vanilla_loop_bound():
    graph = tf.Graph()
    with graph.as_default():
        dshape = (2, 10)
        dtype = "float32"
        dname = "data"
        np_data = np.random.uniform(size=dshape).astype(dtype)
        data = tf.placeholder(shape=dshape, dtype=dtype, name=dname)
        x = tf.slice(data, [1, 4], [1, 4])
        outer = x + 5.0
        def body(x, y):
            res = tf.cond(tf.less(y, 10), lambda: tf.add(
                10.0, 20.0), lambda: tf.square(10.0))
            z = tf.constant(7)
            res = tf.cond(tf.less(z, 10), lambda: res * 5, lambda: res + 10)
            return tf.multiply(res, x * outer), y + 1

        y = tf.constant(0)
        def condition(x, y):
            return tf.less(y, 20)

        r = tf.while_loop(condition, body, loop_vars=[x, y])
        with tf.Session() as sess:
            tf_out = sess.run(r, feed_dict={"%s:0" % dname: np_data})

    check_equal(graph, tf_out, {dname: np_data}) 
Example #24
Source File: test_control_flow.py    From incubator-tvm with Apache License 2.0
def test_nested_loop_bound():
    graph = tf.Graph()
    with graph.as_default():
        dshape = (2, 10)
        dtype = "float32"
        dname = "data"
        np_data = np.random.uniform(size=dshape).astype(dtype)
        data = tf.placeholder(shape=dshape, dtype=dtype, name=dname)
        x = tf.slice(data, [1, 4], [1, 4])
        outer = x + 5.0
        def body(x, y):
            res = tf.cond(tf.less(y, 10), lambda: tf.add(
                10.0, 20.0), lambda: tf.square(10.0))
            def nested_body(nx, ny):
                return nx + 1, res + 2.0
            def nested_cond(nx, ny):
                return tf.less(nx, 15)
            nx = tf.constant(0)
            ny = tf.constant(0.0)
            nested_res = tf.while_loop(nested_cond, nested_body, loop_vars=[nx, ny])
            res = res + nested_res[1]
            z = tf.constant(7)
            res = tf.cond(tf.less(z, 10), lambda: res * 5, lambda: res + 10)
            return tf.multiply(res, x * outer), y + 1

        y = tf.constant(0)
        def condition(x, y):
            return tf.less(y, 20)

        r = tf.while_loop(condition, body, loop_vars=[x, y])
        with tf.Session() as sess:
            tf_out = sess.run(r, feed_dict={"%s:0" % dname: np_data})

    check_equal(graph, tf_out, {dname: np_data}) 
Example #25
Source File: learner.py    From meta-dataset with Apache License 2.0
def compute_logits(self, support_embeddings, query_embeddings,
                     onehot_support_labels):
    """Computes the negative distances of each query point to each prototype."""

    # [num test images, 1, embedding size].
    query_embeddings = tf.expand_dims(query_embeddings, 1)

    prototypes = compute_prototypes(support_embeddings, onehot_support_labels)

    # [1, num_classes, embedding_size].
    prototypes = tf.expand_dims(prototypes, 0)

    # Squared euclidean distances between each test embedding / prototype pair.
    distances = tf.reduce_sum(tf.square(query_embeddings - prototypes), 2)
    return -distances 
Example #26
Source File: dyneval.py    From lamb with Apache License 2.0
def _make_add_squared_grads(self):
    assignments = []
    for sum_squared_grads, grads in zip(self._sum_squared_grads, self._grads):
      assignments.append(sum_squared_grads.assign_add(tf.square(grads)))
    return tf.group(assignments + [self._num_squared_grads.assign_add(1)]) 
Example #27
Source File: transformer_memory.py    From tensor2tensor with Apache License 2.0
def _norm(self, x):
    """Compute the safe norm."""
    return tf.sqrt(tf.reduce_sum(tf.square(x), keepdims=True, axis=-1) + 1e-7) 
Example #28
Source File: models.py    From graphics with Apache License 2.0
def _compute_sample_loss(self, gt, output):
    """Compute point sampling loss."""
    sample_loss = tf.square(gt - output)
    if self._sample_bbx > 0:
      loss_bbx = sample_loss[:, :self._sample_bbx]
      loss_bbx = tf.reduce_mean(loss_bbx)
    else:
      loss_bbx = 0.
    if self._sample_surf > 0:
      loss_surf = sample_loss[:, self._sample_bbx:]
      loss_surf = tf.reduce_mean(loss_surf)
    else:
      loss_surf = 0.
    sample_loss = loss_bbx + 0.1 * loss_surf
    return sample_loss 
Example #29
Source File: models.py    From graphics with Apache License 2.0
def _compute_bbx_loss(self, trans, pts, gt):
    """Compute bounding box loss."""
    oo = 1e5
    inside = tf.expand_dims(tf.cast(gt > 0.5, tf.float32), axis=1)
    trans = tf.expand_dims(trans, axis=2)
    pts = tf.expand_dims(pts, axis=1)
    distances = tf.reduce_sum(tf.square(trans - pts), axis=-1, keepdims=True)
    distances = inside * distances + (1 - inside) * oo
    min_dis = tf.reduce_min(distances, axis=2)
    return tf.reduce_mean(min_dis) 
Example #30
Source File: models.py    From graphics with Apache License 2.0
def _compute_overlap_loss(self, overlap):
    """Compute overlap loss."""
    return tf.reduce_mean(tf.square(tf.nn.relu(overlap - 2.)))