Python tensorflow.norm() Examples

The following are 27 code examples of tensorflow.norm(), drawn from open-source projects; the original project, source file, and license are noted above each example.
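Before the project examples, a minimal sketch (assuming TensorFlow 2.x with eager execution) of what tf.norm computes for the common ord/axis combinations:

import numpy as np
import tensorflow as tf

x = tf.constant([[3.0, 4.0], [6.0, 8.0]])

# Default: Euclidean (Frobenius) norm over every entry.
print(tf.norm(x).numpy())                       # sqrt(9 + 16 + 36 + 64) ~= 11.18

# Per-row norms; keepdims=True would preserve the rank for broadcasting.
print(tf.norm(x, axis=-1).numpy())              # [ 5. 10.]
print(tf.norm(x, ord=1, axis=-1).numpy())       # [ 7. 14.]
print(tf.norm(x, ord=np.inf, axis=-1).numpy())  # [4. 8.]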
Example #1
Source File: pgd_whitebox.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
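The attack above needs a live TensorFlow session for the gradient; the update and projection steps are plain NumPy. A self-contained sketch with a random stand-in gradient (epsilon, a, and the shapes are illustrative, not the repo's values):

import numpy as np

epsilon, a = 0.3, 0.01
x_nat = np.random.uniform(0, 1, size=(4, 8))      # stand-in image batch
x = x_nat + np.random.uniform(-epsilon, epsilon, x_nat.shape)

grad = np.random.randn(*x.shape)                  # stand-in for sess.run(self.grad, ...)
x += a * np.sign(grad)                            # signed gradient step
x = np.clip(x, x_nat - epsilon, x_nat + epsilon)  # project back into the l_infinity ball
x = np.clip(x, 0, 1)                              # keep a valid pixel range

assert np.all(np.abs(x - x_nat) <= epsilon + 1e-12)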
Example #2
Source File: common_layers.py    From BERT with Apache License 2.0
def group_norm(x, filters=None, num_groups=8, epsilon=1e-5):
  """Group normalization as in https://arxiv.org/abs/1803.08494."""
  x_shape = shape_list(x)
  if filters is None:
    filters = x_shape[-1]
  assert len(x_shape) == 4
  assert filters % num_groups == 0
  # Prepare variables.
  scale = tf.get_variable(
      "group_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "group_norm_bias", [filters], initializer=tf.zeros_initializer())
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  # Reshape and compute group norm.
  x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups])
  # Calculate mean and variance on heights, width, channels (not groups).
  mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return tf.reshape(norm_x, x_shape) * scale + bias 
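This version leans on tensor2tensor helpers (shape_list, cast_like) and TF1 variable scopes. A minimal eager re-implementation sketch of the same computation, assuming TF 2.x and fixed scale/bias for brevity:

import tensorflow as tf

def group_norm_eager(x, num_groups=8, epsilon=1e-5):
  n, h, w, c = x.shape
  scale, bias = tf.ones([c]), tf.zeros([c])
  x = tf.reshape(x, [n, h, w, num_groups, c // num_groups])
  # Statistics over height, width, and channels within each group.
  mean, variance = tf.nn.moments(x, axes=[1, 2, 4], keepdims=True)
  norm_x = (x - mean) * tf.math.rsqrt(variance + epsilon)
  return tf.reshape(norm_x, [n, h, w, c]) * scale + bias

print(group_norm_eager(tf.random.normal([2, 16, 16, 32])).shape)  # (2, 16, 16, 32)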
Example #3
Source File: common_layers.py    From BERT with Apache License 2.0
def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None):
  """Layer norm raw computation."""

  # Save these before they get converted to tensors by the casting below
  params = (scale, bias)

  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(
      tf.squared_difference(x, mean), axis=[-1], keepdims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)

  output = norm_x * scale + bias
  return output 
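A quick eager sketch of the same normalization (TF 2.x, cast_like dropped, epsilon inlined), using tf.norm to confirm that each standardized vector comes out with L2 norm close to sqrt(d):

import tensorflow as tf

x = tf.random.normal([2, 5, 16])
mean = tf.reduce_mean(x, axis=-1, keepdims=True)
variance = tf.reduce_mean(tf.math.squared_difference(x, mean), axis=-1, keepdims=True)
norm_x = (x - mean) * tf.math.rsqrt(variance + 1e-6)

# Zero mean and ~unit variance per vector, so each norm is ~sqrt(16) = 4.
print(tf.reduce_mean(tf.norm(norm_x, axis=-1)).numpy())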
Example #4
Source File: model_handler.py    From nucleus7 with Mozilla Public License 2.0
def _add_grads_and_vars_to_summaries(model_results: ModelResults):
        if model_results.grads_and_vars is not None:
            for grad, var in model_results.grads_and_vars:
                grad_name = ('gradient/' + var.name).replace(':', '_')
                model_utils.add_histogram_summary(grad_name, grad)
                grad_norm = tf.norm(grad)
                grad_norm_name = "gradient_l2_norms/scalar_" + grad_name
                model_utils.add_summary_by_name(grad_norm_name, grad_norm)
            all_grads = list(zip(*model_results.grads_and_vars))[0]
            global_grad_norm = tf.global_norm(all_grads)
            global_norm_name = "_".join(["scalar", "global_gradient_l2_norm"])
            model_utils.add_summary_by_name(global_norm_name, global_grad_norm)

        if model_results.regularization_grads_and_vars is not None:
            for grad, var in model_results.regularization_grads_and_vars:
                grad_name = ('reg_gradient/' + var.name).replace(':', '_')
                model_utils.add_histogram_summary(grad_name, grad) 
Example #5
Source File: loss_ops.py    From TensorflowFramework with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def wgan_loss(x, gz, discriminator, beta=10.0):
  """Improved Wasserstein GAN loss.

  Args:
    x: Batch of real samples.
    gz: Batch of generated samples.
    discriminator: Discriminator function.
    beta: Regularizer factor.
  Returns:
    d_loss: Discriminator loss.
    g_loss: Generator loss.
  """
  dx = discriminator(x)
  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    dgz = discriminator(gz)
  batch_size = tf.shape(x)[0]
  alpha = tf.random_uniform([batch_size])
  xhat = x * alpha + gz * (1 - alpha)
  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    dxhat = discriminator(xhat)
  gnorm = tf.norm(tf.gradients(dxhat, xhat)[0])
  d_loss = -tf.reduce_mean(dx - dgz - beta * tf.square(gnorm - 1))
  g_loss = -tf.reduce_mean(dgz)
  return d_loss, g_loss 
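Note that tf.norm without an axis collapses the gradients of the whole batch into a single scalar here. The per-sample penalty from the WGAN-GP paper instead norms each flattened example along axis 1; a hedged sketch with illustrative shapes:

import tensorflow as tf

grads = tf.random.normal([16, 32, 32, 3])  # stand-in for tf.gradients(dxhat, xhat)[0]
per_sample = tf.norm(tf.reshape(grads, [16, -1]), axis=1)  # one norm per example
penalty = tf.reduce_mean(tf.square(per_sample - 1.0))
print(penalty.numpy())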
Example #6
Source File: utils_test.py    From graphics with Apache License 2.0
def test_unflatten_batch_to_2d_random(self, sizes, max_rows, num_features):
    """Test unflattening with random inputs."""
    max_rows = np.max(sizes) if max_rows is None else max_rows
    output_shape = np.concatenate(
        (np.shape(sizes), (max_rows,), (num_features,)))
    total_rows = np.sum(sizes)
    data = 0.1 + np.random.uniform(size=(total_rows, num_features))

    unflattened = utils.unflatten_2d_to_batch(data, sizes, max_rows)
    flattened = tf.reshape(unflattened, (-1, num_features))
    nonzero_rows = tf.compat.v1.where(tf.norm(tensor=flattened, axis=-1))
    flattened_unpadded = tf.gather(
        params=flattened, indices=tf.squeeze(input=nonzero_rows, axis=-1))

    self.assertAllEqual(tf.shape(input=unflattened), output_shape)
    self.assertAllEqual(flattened_unpadded, data) 
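The padded-row filter in this test reduces to keeping rows whose norm is nonzero. The same trick, with the boolean mask made explicit:

import tensorflow as tf

flattened = tf.constant([[1.0, 2.0], [0.0, 0.0], [3.0, 4.0]])
nonzero_rows = tf.where(tf.norm(flattened, axis=-1) > 0)  # indices of unpadded rows
print(tf.squeeze(nonzero_rows, axis=-1).numpy())          # [0 2]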
Example #7
Source File: agent.py    From ppo-lstm-parallel with MIT License
def get_train_op(self, loss, clip_factor, clip, step):
        import tensorflow as tf
        optimizer = tf.train.AdamOptimizer(learning_rate=step)
        gradients, variables = zip(*optimizer.compute_gradients(loss))
        filtered_grads = []
        filtered_vars = []
        for i in range(len(gradients)):
            if gradients[i] is not None:
                filtered_grads.append(gradients[i])
                filtered_vars.append(variables[i])
        gradients = filtered_grads
        variables = filtered_vars
        if clip:
            gradients, _ = tf.clip_by_global_norm(gradients, clip_factor)
        grad_norm = tf.reduce_sum([tf.norm(grad) for grad in gradients])
        train_op = optimizer.apply_gradients(zip(gradients, variables))
        return optimizer, train_op, grad_norm 
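One subtlety: the grad_norm reported here is a sum of per-tensor norms, which is not the same quantity as the global norm used by clip_by_global_norm (the square root of the summed squares). A quick sketch of the difference:

import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([5.0, 12.0])]
print(tf.reduce_sum([tf.norm(g) for g in grads]).numpy())  # 5 + 13 = 18.0
print(tf.linalg.global_norm(grads).numpy())                # sqrt(5**2 + 13**2) ~= 13.93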
Example #8
Source File: quaternion_test.py    From graphics with Apache License 2.0
def test_normalized_random_uniform_initializer_is_normalized(self):
    """Tests normalized_random_uniform_initializer outputs are normalized."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()

    variable = tf.compat.v1.get_variable(
        "test_variable",
        shape=tensor_shape + [4],
        dtype=tf.float32,
        initializer=quaternion.normalized_random_uniform_initializer(),
        use_resource=False)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    value = self.evaluate(variable)
    norms = np.linalg.norm(value, axis=-1)
    ones = np.ones(tensor_shape)

    self.assertAllClose(norms, ones, rtol=1e-3) 
Example #9
Source File: layers.py    From face_landmark_dnn with MIT License
def LandmarkImageLayer(Landmarks):
    
    def draw_landmarks(L):
        def draw_landmarks_helper(Point):
            intLandmark = tf.to_int32(Point)
            locations = Offsets + intLandmark
            dxdy = Point - tf.to_float(intLandmark)
            offsetsSubPix = tf.to_float(Offsets) - dxdy
            vals = 1 / (1 + tf.norm(offsetsSubPix, axis=2))
            img = tf.scatter_nd(locations, vals, shape=(IMGSIZE, IMGSIZE))
            return img
        Landmark = tf.reverse(tf.reshape(L, [-1,2]), [-1])
        # Landmark = tf.reshape(L, (-1, 2))
        Landmark = tf.clip_by_value(Landmark, HalfSize, IMGSIZE - 1 - HalfSize)
        # Ret = 1 / (tf.norm(tf.map_fn(DoIn,Landmarks),axis = 3) + 1)
        Ret = tf.map_fn(draw_landmarks_helper, Landmark)
        Ret = tf.reshape(tf.reduce_max(Ret, axis=0), [IMGSIZE, IMGSIZE, 1])
        return Ret
    return tf.map_fn(draw_landmarks, Landmarks) 
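Offsets, IMGSIZE, and HalfSize are free variables here; the drawing trick itself is the inverse-distance kernel 1 / (1 + ||offset||), which is 1 on the landmark and decays with distance. A self-contained sketch of just that kernel on a hypothetical 5x5 offset grid:

import tensorflow as tf

half = 2
ys, xs = tf.meshgrid(tf.range(-half, half + 1), tf.range(-half, half + 1), indexing="ij")
offsets = tf.cast(tf.stack([ys, xs], axis=-1), tf.float32)  # (5, 5, 2) pixel offsets
vals = 1.0 / (1.0 + tf.norm(offsets, axis=-1))              # peaks at the center pixel
print(vals.numpy().round(2))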
Example #10
Source File: pgd_cw_whitebox.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def perturb(self, x_nat, y, sess):
    """Given a set of examples (x_nat, y), returns a set of adversarial
       examples within epsilon of x_nat in l_infinity norm."""
    if self.rand:
      x = x_nat + np.random.uniform(-self.epsilon, self.epsilon, x_nat.shape)
    else:
      x = np.copy(x_nat)

    for i in range(self.k):
      grad = sess.run(self.grad, feed_dict={self.model.x_input: x,
                                            self.model.y_input: y})

      x += self.a * np.sign(grad)

      x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
      x = np.clip(x, 0, 1) # ensure valid pixel range

    return x 
Example #11
Source File: reformer_utils.py    From BERT with Apache License 2.0
def __call__(self, inputs):
        inputs = self.norm(inputs)
        return self.fn(inputs) 
Example #12
Source File: quaternion.py    From graphics with Apache License 2.0
def is_normalized(quaternion, atol=1e-3, name=None):
  """Determines if quaternion is normalized quaternion or not.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    quaternion:  A tensor of shape `[A1, ..., An, 4]`, where the last dimension
      represents a quaternion.
    atol: The absolute tolerance parameter.
    name: A name for this op that defaults to "quaternion_is_normalized".

  Returns:
    A tensor of type `bool` and shape `[A1, ..., An, 1]`, where False indicates
    that the quaternion is not normalized.

  Raises:
    ValueError: If the shape of `quaternion` is not supported.
  """
  with tf.compat.v1.name_scope(name, "quaternion_is_normalized", [quaternion]):
    quaternion = tf.convert_to_tensor(value=quaternion)

    shape.check_static(
        tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))

    norms = tf.norm(tensor=quaternion, axis=-1, keepdims=True)
    return tf.compat.v1.where(
        tf.abs(norms - 1.) < atol, tf.ones_like(norms, dtype=bool),
        tf.zeros_like(norms, dtype=bool)) 
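A usage sketch, assuming tensorflow_graphics is installed and TF 2.x eager mode:

import tensorflow as tf
from tensorflow_graphics.geometry.transformation import quaternion

q = tf.constant([[0., 0., 0., 1.],    # unit quaternion
                 [0., 0., 0., 2.]])   # norm 2, not normalized
print(quaternion.is_normalized(q).numpy())  # [[True], [False]], shape (2, 1)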
Example #13
Source File: quaternion_test.py    From graphics with Apache License 2.0
def test_normalized_random_uniform_is_normalized(self):
    """Tests that the normalized_random_uniform gives normalized quaternions."""
    tensor_size = np.random.randint(3)
    tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()

    tensor = quaternion.normalized_random_uniform(tensor_shape)
    norms = tf.norm(tensor=tensor, axis=-1)
    ones = np.ones(tensor_shape)

    self.assertAllClose(norms, ones, rtol=1e-3) 
Example #14
Source File: axis_angle.py    From graphics with Apache License 2.0
def is_normalized(axis, angle, atol=1e-3, name=None):
  """Determines if the axis-angle is normalized or not.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    axis: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
      represents a normalized axis.
    angle: A tensor of shape `[A1, ..., An, 1]` where the last dimension
      represents an angle.
    atol: The absolute tolerance parameter.
    name: A name for this op that defaults to "axis_angle_is_normalized".

  Returns:
    A tensor of shape `[A1, ..., An, 1]`, where False indicates that the axis is
    not normalized.
  """
  with tf.compat.v1.name_scope(name, "axis_angle_is_normalized", [axis, angle]):
    axis = tf.convert_to_tensor(value=axis)
    angle = tf.convert_to_tensor(value=angle)

    shape.check_static(tensor=axis, tensor_name="axis", has_dim_equals=(-1, 3))
    shape.check_static(
        tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
    shape.compare_batch_dimensions(
        tensors=(axis, angle),
        tensor_names=("axis", "angle"),
        last_axes=-2,
        broadcast_compatible=True)

    norms = tf.norm(tensor=axis, axis=-1, keepdims=True)
    return tf.abs(norms - 1.) < atol 
Example #15
Source File: quaternion.py    From graphics with Apache License 2.0
def normalize(quaternion, eps=1e-12, name=None):
  """Normalizes a quaternion.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    quaternion:  A tensor of shape `[A1, ..., An, 4]`, where the last dimension
      represents a quaternion.
    eps: A lower bound value for the norm that defaults to 1e-12.
    name: A name for this op that defaults to "quaternion_normalize".

  Returns:
    A N-D tensor of shape `[?, ..., ?, 1]` where the quaternion elements have
    been normalized.

  Raises:
    ValueError: If the shape of `quaternion` is not supported.
  """
  with tf.compat.v1.name_scope(name, "quaternion_normalize", [quaternion]):
    quaternion = tf.convert_to_tensor(value=quaternion)

    shape.check_static(
        tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))

    return tf.math.l2_normalize(quaternion, axis=-1, epsilon=eps) 
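Under the hood this is tf.math.l2_normalize, which away from zero matches dividing by tf.norm directly; a small equivalence sketch:

import tensorflow as tf

q = tf.constant([[1.0, 2.0, 2.0, 4.0]])  # norm 5
via_helper = tf.math.l2_normalize(q, axis=-1, epsilon=1e-12)
by_hand = q / tf.norm(q, axis=-1, keepdims=True)
print(tf.reduce_max(tf.abs(via_helper - by_hand)).numpy())  # ~0.0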
Example #16
Source File: axis_angle_test.py    From graphics with Apache License 2.0
def test_from_euler_jacobian_random(self):
    """Test the Jacobian of the from_euler function.

    Note:
      Preset angles are not tested as the gradient of tf.norm is NaN at 0.
    """
    x_init = test_helpers.generate_random_test_euler_angles()

    self.assert_jacobian_is_finite_fn(lambda x: axis_angle.from_euler(x)[0],
                                      [x_init])
    self.assert_jacobian_is_finite_fn(lambda x: axis_angle.from_euler(x)[1],
                                      [x_init]) 
Example #17
Source File: utils.py    From CVPR2019-DeepTreeLearningForZeroShotFaceAntispoofing with MIT License
def call(self, x, mask, training):
        norm_v = self.v / (tf.norm(self.v) + 1e-8)
        norm_v_t = tf.transpose(norm_v, [1, 0])
        num_of_visit = tf.reduce_sum(mask)

        if training and num_of_visit > 1:
            # use only the visiting samples
            index = tf.where(tf.greater(mask[:, 0], tf.constant(0.)))
            index_not = tf.where(tf.equal(mask[:, 0], tf.constant(0.)))
            x_sub = tf.gather_nd(x, index) - tf.stop_gradient(self.mu)
            x_not = tf.gather_nd(x, index_not)
            x_sub_t = tf.transpose(x_sub, [1, 0])

            # compute the covariance matrix, eigenvalue, and the trace
            covar = tf.matmul(x_sub_t, x_sub) / num_of_visit
            eigenvalue = tf.reshape(tf.matmul(tf.matmul(norm_v, covar), norm_v_t), [])
            trace = tf.linalg.trace(covar)
            # compute the route loss
            # print(tf.exp(-self.alpha * eigenvalue), self.beta * trace)
            route_loss = tf.exp(-self.alpha * eigenvalue) + self.beta * trace
            uniq_loss = -tf.reduce_mean(tf.square(tf.matmul(x_sub, norm_v_t))) + \
                         tf.reduce_mean(tf.square(tf.matmul(x_not, norm_v_t)))
            # compute mean and response for this batch
            self.mu_of_visit = tf.reduce_mean(x_sub, axis=0, keepdims=True)
            self.eigenvalue = eigenvalue
            self.trace = trace
            x -= tf.stop_gradient(self.mu_of_visit)
            route_value = tf.matmul(x, norm_v_t)
        else:
            self.mu_of_visit = self.mu
            self.eigenvalue = 0.
            self.trace = 0.
            x -= self.mu
            route_value = tf.matmul(x, norm_v_t)
            route_loss = 0.
            uniq_loss = 0.

        return route_value, route_loss, uniq_loss 
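The "eigenvalue" computed above is really the Rayleigh quotient v * Sigma * v^T: the variance of the visiting samples along the normalized routing direction v, which can never exceed the trace. A NumPy sketch of that identity with hypothetical shapes:

import numpy as np

x_sub = np.random.randn(100, 8)
x_sub -= x_sub.mean(axis=0)                  # centered, as the layer does via mu
covar = x_sub.T @ x_sub / len(x_sub)

v = np.random.randn(1, 8)
norm_v = v / (np.linalg.norm(v) + 1e-8)
rayleigh = float(norm_v @ covar @ norm_v.T)  # variance of the batch along v

assert 0.0 <= rayleigh <= np.trace(covar) + 1e-9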
Example #18
Source File: common_layers.py    From BERT with Apache License 2.0
def layer_norm_vars(filters):
  """Create Variables for layer norm."""
  scale = tf.get_variable(
      "layer_norm_scale", [filters], initializer=tf.ones_initializer())
  bias = tf.get_variable(
      "layer_norm_bias", [filters], initializer=tf.zeros_initializer())
  return scale, bias 
Example #19
Source File: vqa_self_attention.py    From BERT with Apache License 2.0
def mlp(feature, hparams, name="mlp"):
  """Multi layer perceptron with dropout and relu activation."""
  with tf.variable_scope(name, "mlp", values=[feature]):
    num_mlp_layers = hparams.num_mlp_layers
    mlp_size = hparams.mlp_size
    for _ in range(num_mlp_layers):
      feature = common_layers.dense(feature, mlp_size, activation=None)
      utils.collect_named_outputs("norms", "mlp_feature",
                                  tf.norm(feature, axis=-1))
      feature = common_layers.layer_norm(feature)
      feature = tf.nn.relu(feature)
      feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout)
    return feature 
Example #20
Source File: layers.py    From face_landmark_dnn with MIT License
def TransformParamsLayer(SrcShapes, DstShape):
    '''
    SrcShapes: [N, (N_LANDMARK x 2)]
    DstShape: [N_LANDMARK x 2,]
    return: [N, 6]
    '''
    # import pdb; pdb.set_trace()
    def bestFit(src, dst):
        # import pdb; pdb.set_trace()
        source = tf.reshape(src, (-1, 2))
        destination = tf.reshape(dst, (-1, 2))

        destMean = tf.reduce_mean(destination, axis=0)
        srcMean = tf.reduce_mean(source, axis=0)

        srcCenter = source - srcMean
        dstCenter = destination - destMean

        srcVec = tf.reshape(srcCenter, (-1,))
        destVec = tf.reshape(dstCenter, (-1,))
        norm = (tf.norm(srcVec)**2)

        a = tf.tensordot(srcVec, destVec, 1) / norm
        b = 0

        srcX = tf.reshape(srcVec, (-1,2))[:,0]
        srcY = tf.reshape(srcVec, (-1,2))[:,1]
        destX = tf.reshape(destVec, (-1,2))[:,0]
        destY = tf.reshape(destVec, (-1,2))[:,1]

        b = tf.reduce_sum(tf.multiply(srcX, destY) - tf.multiply(srcY, destX))
        b = b / norm

        A = tf.reshape(tf.stack([a, b, -b, a]), (2,2))
        srcMean = tf.tensordot(srcMean, A, 1)

        return tf.concat((tf.reshape(A, (-1,)), destMean - srcMean), 0)

    return tf.map_fn(lambda s: bestFit(s, DstShape), SrcShapes) 
Example #21
Source File: reformer_utils.py    From BERT with Apache License 2.0
def __init__(self, norm_class, emb, fn):
        super(WithNorm, self).__init__()
        self.emb = emb
        if isinstance(norm_class, ScaleNorm):
            self.norm = norm_class(emb)
        else:
            self.norm = norm_class()

        self.fn = fn 
Example #22
Source File: reformer_utils.py    From BERT with Apache License 2.0
def __call__(self, inputs):
        n = tf.maximum(tf.norm(inputs, axis=-1, keepdims=True), self.eps)  # floor the norm at eps
        return inputs / n * self.g 
Example #23
Source File: reformer_utils.py    From BERT with Apache License 2.0
def make_unit_length(x, epsilon=1e-6):
    norm = tf.norm(x,  ord=2, axis=-1, keepdims=True)
    return tf.truediv(x, norm + epsilon) 
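A quick check of the helper above, assuming TF 2.x eager mode:

import tensorflow as tf

x = tf.random.normal([4, 10])
unit = make_unit_length(x)
print(tf.norm(unit, axis=-1).numpy())  # all ~1.0, up to the epsilon smoothing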
Example #24
Source File: asserts.py    From graphics with Apache License 2.0
def assert_normalized(vector, order='euclidean', axis=-1, eps=None, name=None):
  """Checks whether vector/quaternion is normalized in its last dimension.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    vector: A tensor of shape `[A1, ..., M, ..., An]`, where the axis of M
      contains the vectors.
    order: Order of the norm passed to tf.norm.
    axis: The axis containing the vectors.
    eps: A `float` describing the tolerance used to determine if the norm is
      equal to `1.0`.
    name: A name for this op. Defaults to 'assert_normalized'.

  Raises:
    InvalidArgumentError: If the norm of `vector` is not `1.0`.

  Returns:
    The input vector, with dependence on the assertion operator in the graph.
  """
  if not FLAGS[tfg_flags.TFG_ADD_ASSERTS_TO_GRAPH].value:
    return vector

  with tf.compat.v1.name_scope(name, 'assert_normalized', [vector]):
    vector = tf.convert_to_tensor(value=vector)
    if eps is None:
      eps = select_eps_for_division(vector.dtype)
    eps = tf.convert_to_tensor(value=eps, dtype=vector.dtype)

    norm = tf.norm(tensor=vector, ord=order, axis=axis)
    one = tf.constant(1.0, dtype=norm.dtype)
    with tf.control_dependencies(
        [tf.compat.v1.assert_near(norm, one, atol=eps)]):
      return tf.identity(vector) 
Example #25
Source File: token_discriminator_relgan.py    From BERT with Apache License 2.0
def gradient_penalty(x_real_onehot, x_fake_onehot_appr, config):
	"""compute the gradiet penalty for the WGAN-GP loss"""
	alpha = tf.random_uniform(shape=[config['batch_size'], 1, 1], minval=0., maxval=1.)
	interpolated = alpha * x_real_onehot + (1. - alpha) * x_fake_onehot_appr

	logit = discriminator(x_onehot=interpolated)

	grad = tf.gradients(logit, interpolated)[0]  # gradient of D(interpolated)
	grad_norm = tf.norm(tf.layers.flatten(grad), axis=1)  # l2 norm

	GP = config['reg_param'] * tf.reduce_mean(tf.square(grad_norm - 1.))

	return GP 
Example #26
Source File: global_lp_pool.py    From onnx-tensorflow with Apache License 2.0
def _common(cls, node, **kwargs):
    x = kwargs["tensor_dict"][node.inputs[0]]
    p = node.attrs.get("p", 2.)
    dims = list(range(len(x.shape)))
    dim_window = dims[2:]
    if len(dim_window) > 1 and p == 2:
      p = "euclidean"
    return [tf.norm(x, ord=p, axis=dim_window, keepdims=True)] 
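In effect, GlobalLpPool collapses all spatial positions to a single norm per channel. A sketch on an NCHW tensor (shapes hypothetical, TF 2.x):

import tensorflow as tf

x = tf.random.normal([1, 3, 8, 8])  # NCHW, as ONNX lays it out
pooled = tf.norm(x, ord="euclidean", axis=(2, 3), keepdims=True)
print(pooled.shape)  # (1, 3, 1, 1): one L2 norm per channel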
Example #27
Source File: smpl_tf.py    From SMPL with MIT License
def rodrigues(r):
  """
  Rodrigues' rotation formula that turns axis-angle tensor into rotation
  matrix in a batch-ed manner.

  Parameter:
  ----------
  r: Axis-angle rotation tensor of shape [batch_size, 1, 3].

  Return:
  -------
  Rotation matrix of shape [batch_size, 3, 3].

  """
  theta = tf.norm(r + tf.random_normal(r.shape, 0, 1e-8, dtype=tf.float64), axis=(1, 2), keepdims=True)
  # avoid divide by zero
  r_hat = r / theta
  cos = tf.cos(theta)
  z_stick = tf.zeros(theta.get_shape().as_list()[0], dtype=tf.float64)
  m = tf.stack(
    (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
     -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), axis=1)
  m = tf.reshape(m, (-1, 3, 3))
  i_cube = tf.expand_dims(tf.eye(3, dtype=tf.float64), axis=0) + tf.zeros(
    (theta.get_shape().as_list()[0], 3, 3), dtype=tf.float64)
  A = tf.transpose(r_hat, (0, 2, 1))
  B = r_hat
  dot = tf.matmul(A, B)
  R = cos * i_cube + (1 - cos) * dot + tf.sin(theta) * m
  return R
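A usage sketch: a rotation of pi/2 about the z-axis should give the standard 90-degree rotation matrix. This assumes the rodrigues() above, with tf.random_normal swapped for tf.random.normal if running under TF 2.x eager execution:

import numpy as np
import tensorflow as tf

r = tf.constant([[[0.0, 0.0, np.pi / 2]]], dtype=tf.float64)  # pi/2 about z
R = rodrigues(r)
print(np.round(R.numpy(), 3))
# [[[ 0. -1.  0.]
#   [ 1.  0.  0.]
#   [ 0.  0.  1.]]]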