Python tensorflow.sqrt() Examples

The following are 29 code examples of tensorflow.sqrt(), drawn from a range of open-source projects. The source file, project, and license are noted above each example.
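Before the project examples, a minimal sketch of the call itself: tf.sqrt computes the element-wise square root of a tensor. The snippet below assumes TensorFlow 2.x eager execution; most of the examples that follow use the older 1.x graph API, but the tf.sqrt call is the same in both.

import tensorflow as tf

x = tf.constant([1.0, 4.0, 9.0])
y = tf.sqrt(x)          # element-wise square root
print(y.numpy())        # -> [1. 2. 3.]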
Example #1
Source File: normalize.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def _std(self):
    """Computes the current estimate of the standard deviation.

    Note that the standard deviation is not defined until at least two samples
    have been seen.

    Returns:
      Tensor holding the current estimate of the standard deviation.
    """
    variance = tf.cond(
        self._count > 1,
        lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
        lambda: tf.ones_like(self._var_sum) * float('nan'))
    # The epsilon corrects for small negative variance values caused by
    # the algorithm. It was empirically chosen to work with all environments
    # tested.
    return tf.sqrt(variance + 1e-4) 
Example #2
Source File: yellowfin.py    From fine-lm with MIT License
def _dist_to_opt(self):
    """Distance to optimum.

    Returns:
      D_t ops
    """
    dist_to_opt_ops = []
    # Running average of the norm of gradient
    self._grad_norm = tf.sqrt(self._grad_norm_squared)
    avg_op = self._moving_averager.apply([self._grad_norm,])
    dist_to_opt_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
      self._grad_norm_avg = self._moving_averager.average(self._grad_norm)
      # Single iteration distance estimation, note here
      # self._grad_norm_avg is per variable
      self._d_t = self._grad_norm_avg / self._grad_norm_squared_avg
    # Running average of distance
    avg_op = self._moving_averager.apply([self._d_t])
    dist_to_opt_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
      self._dist_to_opt_avg = tf.identity(
          self._moving_averager.average(self._d_t))
      if self._sparsity_debias:
        self._dist_to_opt_avg /= tf.sqrt(self._sparsity_avg)
    return dist_to_opt_ops  # D_t 
Example #3
Source File: xception.py    From fine-lm with MIT License
def xception_exit(inputs):
  """Xception exit flow."""
  with tf.variable_scope("xception_exit"):
    x = inputs
    x_shape = x.get_shape().as_list()
    if x_shape[1] is None or x_shape[2] is None:
      length_float = tf.to_float(tf.shape(x)[1])
      length_float *= tf.to_float(tf.shape(x)[2])
      spatial_dim_float = tf.sqrt(length_float)
      spatial_dim = tf.to_int32(spatial_dim_float)
      x_depth = x_shape[3]
      x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
    elif x_shape[1] != x_shape[2]:
      spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2])))
      if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]:
        raise ValueError("Assumed inputs were square-able but they were "
                         "not. Shape: %s" % x_shape)
      x_depth = x_shape[3]
      x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])

    x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME")
    return tf.nn.relu(x) 
Example #4
Source File: picklable_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, input_shape):
        batch_size, dim = input_shape
        self.input_shape = [batch_size, dim]
        self.output_shape = [batch_size, self.num_hid]
        if self.init_mode == "norm":
            init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
            init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
                                                       keep_dims=True))
            init = init * self.init_scale
        elif self.init_mode == "uniform_unit_scaling":
            scale = np.sqrt(3. / dim)
            init = tf.random_uniform([dim, self.num_hid], dtype=tf.float32,
                                     minval=-scale, maxval=scale)
        else:
            raise ValueError(self.init_mode)
        self.W = PV(init)
        if self.use_bias:
            self.b = PV((np.zeros((self.num_hid,))
                         + self.init_b).astype('float32')) 
Example #5
Source File: embedding_matrix.py    From post--memorization-in-rnns with MIT License
def embedding_matrix(vocab_size: int, dim: int,
                     name: str=None):
    with tf.name_scope(None, 'embedding-matrix'):
        # compute initialization parameters
        shape = (vocab_size - 1, dim)
        scale = tf.sqrt(1 / shape[0])

        # get or initialize embedding matrix
        w = tf.get_variable(
            name, shape,
            dtype=tf.float32,
            initializer=tf.random_uniform_initializer(
                minval=-scale, maxval=scale
            ),
            trainable=True
        )

        # 1st row should be zero and not be updated by backprop because of
        # zero padding.
        emb = tf.concat([
            tf.zeros((1, dim), dtype=tf.float32),
            w
        ], 0)

        return emb 
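A hedged usage sketch for the function above (assumes a TensorFlow 1.x graph context, since embedding_matrix relies on tf.get_variable and the two-argument tf.name_scope; the ids and the variable name are illustrative only):

import tensorflow as tf

ids = tf.constant([[0, 3, 5]])                        # 0 is the padding id
emb = embedding_matrix(vocab_size=10, dim=4, name='tokens')
vectors = tf.nn.embedding_lookup(emb, ids)            # shape [1, 3, 4]
# The row looked up for id 0 is the constant all-zero padding row.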
Example #6
Source File: model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, input_shape):
        batch_size, rows, cols, input_channels = input_shape
        kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                                   self.output_channels)
        assert len(kernel_shape) == 4
        assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
        init = tf.random_normal(kernel_shape, dtype=tf.float32)
        init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
                                                   axis=(0, 1, 2)))
        self.kernels = tf.Variable(init)
        self.b = tf.Variable(
            np.zeros((self.output_channels,)).astype('float32'))
        input_shape = list(input_shape)
        input_shape[0] = 1
        dummy_batch = tf.zeros(input_shape)
        dummy_output = self.fprop(dummy_batch)
        output_shape = [int(e) for e in dummy_output.get_shape()]
        output_shape[0] = batch_size
        self.output_shape = tuple(output_shape) 
Example #7
Source File: net.py    From progressive_growing_of_GANs with MIT License
def conv2d(self, input_, n_filters, k_size, padding='same'):
        if not self.cfg.weight_scale:
            return tf.layers.conv2d(input_, n_filters, k_size, padding=padding)

        n_feats_in = input_.get_shape().as_list()[-1]
        fan_in = k_size * k_size * n_feats_in
        c = tf.constant(np.sqrt(2. / fan_in), dtype=tf.float32)
        kernel_init = tf.random_normal_initializer(stddev=1.)
        w_shape = [k_size, k_size, n_feats_in, n_filters]
        w = tf.get_variable('kernel', shape=w_shape, initializer=kernel_init)
        w = c * w
        strides = [1, 1, 1, 1]
        net = tf.nn.conv2d(input_, w, strides, padding=padding.upper())
        b = tf.get_variable('bias', [n_filters],
                            initializer=tf.constant_initializer(0.))
        net = tf.nn.bias_add(net, b)
        return net 
Example #8
Source File: pcr_model.py    From pointnet-registration-framework with MIT License
def get_loss_b(self,predicted_transformation,batch_size,template_pointclouds_pl,source_pointclouds_pl):	
		with tf.variable_scope('loss') as LossEvaluation:
			predicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])
			predicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])

			# with tf.variable_scope('quat_normalization') as norm:
			norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)
			norm_predicted_quat = tf.sqrt(norm_predicted_quat)
			norm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))
			const = tf.constant(0.0000001,shape=(batch_size,1),dtype=tf.float32)
			norm_predicted_quat = tf.add(norm_predicted_quat,const)
			predicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)
	
			transformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat, predicted_position)

			# Use 1024 Points to find loss.
			#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
			loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
			# loss = 0
		return loss 
Example #9
Source File: ipcr_model.py    From pointnet-registration-framework with MIT License
def get_loss(predicted_transformation, batch_size, template_pointclouds_pl, source_pointclouds_pl):
	with tf.variable_scope('loss') as LossEvaluation:
		predicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])
		predicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])

		# with tf.variable_scope('quat_normalization') as norm:
		norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)
		norm_predicted_quat = tf.sqrt(norm_predicted_quat)
		norm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))
		const = tf.constant(0.0000001,shape=(batch_size,1),dtype=tf.float32)
		norm_predicted_quat = tf.add(norm_predicted_quat,const)
		predicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)

		transformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat,predicted_position)

		#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
		loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
	return loss 
Example #10
Source File: networks.py    From disentangling_conditional_gans with MIT License
def minibatch_stddev_layer(x, group_size=4):
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0])     # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape                                             # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]])   # [GMCHW] Split minibatch into M groups of size G.
        y = tf.cast(y, tf.float32)                              # [GMCHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keep_dims=True)           # [GMCHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)                # [MCHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                   # [MCHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[1,2,3], keep_dims=True)      # [M111]  Take average over fmaps and pixels.
        y = tf.cast(y, x.dtype)                                 # [M111]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])             # [N1HW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                        # [NCHW]  Append as new fmap.

#----------------------------------------------------------------------------
# Generator network used in the paper. 
Example #11
Source File: yellowfin.py    From fine-lm with MIT License
def _get_cubic_root(self):
    """Get the cubic root."""
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use the Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1] ).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    assert_array = [
        tf.Assert(
            tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
            [self._dist_to_opt_avg,]),
        tf.Assert(
            tf.logical_not(tf.is_nan(self._h_min)),
            [self._h_min,]),
        tf.Assert(
            tf.logical_not(tf.is_nan(self._grad_var)),
            [self._grad_var,]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
            [self._dist_to_opt_avg,]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._h_min)),
            [self._h_min,]),
        tf.Assert(
            tf.logical_not(tf.is_inf(self._grad_var)),
            [self._grad_var,])
    ]
    with tf.control_dependencies(assert_array):
      p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
      w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
      w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
      y = w - p / 3.0 / w
      x = y + 1
    return x 
Example #12
Source File: ddpg.py    From HardRLWithYoutube with MIT License
def reduce_std(x, axis=None, keepdims=False):
    return tf.sqrt(reduce_var(x, axis=axis, keepdims=keepdims)) 
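reduce_std relies on a reduce_var helper defined alongside it in the same file. A minimal sketch of what such a helper typically looks like (hedged; the project's exact implementation may differ, e.g. in the keyword spelling used by older TensorFlow versions):

import tensorflow as tf

def reduce_var(x, axis=None, keepdims=False):
    """Variance as the mean of squared deviations from the mean."""
    m = tf.reduce_mean(x, axis=axis, keepdims=True)
    devs_squared = tf.square(x - m)
    return tf.reduce_mean(devs_squared, axis=axis, keepdims=keepdims)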
Example #13
Source File: yellowfin.py    From fine-lm with MIT License
def _get_lr_tensor(self):
    """Get lr minimizing the surrogate.

    Returns:
      The lr_t.
    """
    lr = (1.0 - tf.sqrt(self._mu))**2 / self._h_min
    return lr 
Example #14
Source File: yellowfin.py    From fine-lm with MIT License
def _get_mu_tensor(self):
    """Get the min mu which minimize the surrogate.

    Returns:
      The mu_t.
    """
    root = self._get_cubic_root()
    dr = self._h_max / self._h_min
    mu = tf.maximum(
        root**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
    return mu 
Example #15
Source File: learning_rate.py    From fine-lm with MIT License
def _legacy_sqrt_decay(step):
  """Decay like 1 / sqrt(step), multiplied by 500 to normalize."""
  return 500.0 / tf.sqrt(tf.maximum(step, 1.0)) 
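For intuition, a quick eager-mode check of the schedule above (the step values are arbitrary and chosen only for illustration):

import tensorflow as tf

steps = tf.constant([1.0, 100.0, 10000.0])
print((500.0 / tf.sqrt(tf.maximum(steps, 1.0))).numpy())   # -> [500.  50.   5.]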
Example #16
Source File: net.py    From progressive_growing_of_GANs with MIT License
def pixelwise_norm(self, a):
        return a / tf.sqrt(tf.reduce_mean(a * a, axis=3, keep_dims=True) + 1e-8) 
Example #17
Source File: mpi_running_mean_std.py    From HardRLWithYoutube with MIT License
def __init__(self, epsilon=1e-2, shape=()):

        self._sum = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(0.0),
            name="runningsum", trainable=False)
        self._sumsq = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(epsilon),
            name="runningsumsq", trainable=False)
        self._count = tf.get_variable(
            dtype=tf.float64,
            shape=(),
            initializer=tf.constant_initializer(epsilon),
            name="count", trainable=False)
        self.shape = shape

        self.mean = tf.to_float(self._sum / self._count)
        self.std = tf.sqrt( tf.maximum( tf.to_float(self._sumsq / self._count) - tf.square(self.mean) , 1e-2 ))

        newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
        newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
        newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
        self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
            updates=[tf.assign_add(self._sum, newsum),
                     tf.assign_add(self._sumsq, newsumsq),
                     tf.assign_add(self._count, newcount)]) 
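The std above follows the identity Var[x] = E[x^2] - (E[x])^2, with the variance clipped at 1e-2 before the square root for numerical stability. A minimal eager-mode check of that identity, independent of the MPI update machinery in this class:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0, 4.0])
mean = tf.reduce_mean(x)
std = tf.sqrt(tf.maximum(tf.reduce_mean(tf.square(x)) - tf.square(mean), 1e-2))
print(std.numpy())   # ~1.118, the population standard deviation of [1, 2, 3, 4]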
Example #18
Source File: utils.py    From HardRLWithYoutube with MIT License
def avg_norm(t):
    return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1))) 
Example #19
Source File: net.py    From progressive_growing_of_GANs with MIT License
def add_minibatch_stddev_feat(self, input_):
        _, h, w, _ = input_.get_shape().as_list()
        new_feat_shape = [self.cfg.batch_size, h, w, 1]

        mean, var = tf.nn.moments(input_, axes=[0], keep_dims=True)
        stddev = tf.sqrt(tf.reduce_mean(var, keep_dims=True))
        new_feat = tf.tile(stddev, multiples=new_feat_shape)
        return tf.concat([input_, new_feat], axis=3) 
Example #20
Source File: square_box_coder.py    From object_detector_app with MIT License
def _decode(self, rel_codes, anchors):
    """Decodes relative codes to boxes.

    Args:
      rel_codes: a tensor representing N anchor-encoded boxes.
      anchors: BoxList of anchors.

    Returns:
      boxes: BoxList holding N bounding boxes.
    """
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
    la = tf.sqrt(ha * wa)

    ty, tx, tl = tf.unstack(tf.transpose(rel_codes))
    if self._scale_factors:
      ty /= self._scale_factors[0]
      tx /= self._scale_factors[1]
      tl /= self._scale_factors[2]
    l = tf.exp(tl) * la
    ycenter = ty * la + ycenter_a
    xcenter = tx * la + xcenter_a
    ymin = ycenter - l / 2.
    xmin = xcenter - l / 2.
    ymax = ycenter + l / 2.
    xmax = xcenter + l / 2.
    return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))) 
Example #21
Source File: diet.py    From fine-lm with MIT License
def make_diet_var_getter(params):
  """Create a custom variable getter for diet variables according to params."""

  def diet_var_initializer(shape, dtype, partition_info=None):
    """Initializer for a diet variable."""
    del dtype
    del partition_info

    with common_layers.fn_device_dependency("diet_init") as out_deps:
      float_range = math.sqrt(3)
      ret = tf.random_uniform(shape, -float_range, float_range)
      if params.quantize:
        ret = _quantize(ret, params, randomize=False)
      out_deps.append(ret)
      return ret

  def diet_var_getter(getter, **kwargs):
    """Get diet variable and return it dequantized."""
    if params.quantize:
      kwargs["dtype"] = tf.float16
    kwargs["initializer"] = diet_var_initializer
    kwargs["trainable"] = False

    base_var = getter(**kwargs)

    dequantized = _dequantize(base_var, params)

    if not hasattr(params, "dequantized"):
      params.dequantized = defaultdict(list)
    params.dequantized[base_var.name].append(dequantized)

    return dequantized

  return diet_var_getter 
Example #22
Source File: metrics.py    From fine-lm with MIT License
def padded_rmse(predictions, labels, weights_fn=common_layers.weights_all):
  predictions = tf.to_float(predictions)
  labels = tf.to_float(labels)
  predictions, labels = common_layers.pad_with_zeros(predictions, labels)
  weights = weights_fn(labels)
  error = tf.pow(predictions - labels, 2)
  error_sqrt = tf.sqrt(tf.reduce_sum(error * weights))
  return error_sqrt, tf.reduce_sum(weights) 
Example #23
Source File: layers.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def featurenorm(x, epsilon=1e-8, name='fn'):
    """
    Pixelwise feature vector normalization, as in "Progressive Growing of GANs for Improved Quality, Stability, and Variation".
    Uses "NCHW". Works the same for FC layers.
    """
    with tf.variable_scope(name):
        norm = tf.sqrt(tf.reduce_mean(tf.square(x), axis=1, keep_dims=True) + epsilon)
        return x / norm 
Example #24
Source File: layers.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def layernorm(x, epsilon=1e-5, name='lnconv'):
    """Layer Normalization for conv. x must be [NCHW]"""
    shape = x.get_shape().as_list()
    with tf.variable_scope(name):
        beta = tf.get_variable("beta", [1, shape[1], 1, 1], initializer=tf.constant_initializer(0.))
        gamma = tf.get_variable("gamma", [1, shape[1], 1, 1], initializer=tf.constant_initializer(1.))
        mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True)  # list() keeps the axes argument Python 3-safe
        return beta * (x - mean) / tf.sqrt(var + epsilon) + gamma 
Example #25
Source File: layers.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def adain2(net1, shift, scale, epsilon=1e-9, name='in'):
    """use shape NCHW"""
    with tf.variable_scope(name):
        mu, sigma_sq = tf.nn.moments(net1, [2, 3], keep_dims=True)
        normalized = (net1 - mu) / tf.sqrt(sigma_sq + epsilon)
        normalized = tf.sqrt(scale + epsilon) * normalized + shift
        return normalized 
Example #26
Source File: layers.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def adain(net1, net2, epsilon=1e-9, name='in'):
    """use shape NCHW"""
    with tf.variable_scope(name):
        mu, sigma_sq = tf.nn.moments(net1, [2, 3], keep_dims=True)
        normalized = (net1 - mu) / tf.sqrt(sigma_sq + epsilon)

        shift, scale = tf.nn.moments(net2, [2, 3], keep_dims=True)
        normalized = tf.sqrt(scale + epsilon) * normalized + shift
        return normalized 
Example #27
Source File: layers.py    From ArtGAN with BSD 3-Clause "New" or "Revised" License
def instance_norm(net, train=False, epsilon=1e-8, name='in'):
    """use shape NCHW"""
    with tf.variable_scope(name):
        mu, sigma_sq = tf.nn.moments(net, [2, 3], keep_dims=True)
        normalized = (net - mu) / tf.sqrt(sigma_sq + epsilon)
        if train:
            var_shape = net.get_shape().as_list()[1]
            shift = tf.get_variable('shift', shape=[1, var_shape, 1, 1], initializer=tf.constant_initializer(0.))
            scale = tf.get_variable('scale', shape=[1, var_shape, 1, 1], initializer=tf.constant_initializer(1.))
            normalized = scale * normalized + shift
        return normalized 
Example #28
Source File: modeling.py    From BERT-Classification-Tutorial with Apache License 2.0
def gelu(input_tensor):
    """Gaussian Error Linear Unit.

    This is a smoother version of the RELU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
      input_tensor: float Tensor to perform activation.

    Returns:
      `input_tensor` with the GELU activation applied.
    """
    cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
    return input_tensor * cdf 
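A quick sanity check of the exact-erf formulation above (illustrative only; written with tf.math.erf, the TensorFlow 2.x spelling of tf.erf, so it runs eagerly):

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
cdf = 0.5 * (1.0 + tf.math.erf(x / tf.sqrt(2.0)))
print((x * cdf).numpy())   # approx. [-0.1587  0.  0.8413]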
Example #29
Source File: quantization.py    From fine-lm with MIT License
def decode(self, x):
    x = tf.to_float(x)
    # we can't use tf.pow(..., 0.125) because of a high-error approximation
    # on TPU.  Instead we sqrt three times.
    return tf.sign(x) * (tf.sqrt(tf.sqrt(tf.sqrt(tf.abs(x)))) / 128.0)
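Nesting tf.sqrt three times is mathematically the same as raising to the power 1/8; the detour exists only because of the TPU pow approximation mentioned in the comment. A hedged eager-mode check of the equivalence on CPU:

import tensorflow as tf

x = tf.constant([256.0, 1.0])
print(tf.sqrt(tf.sqrt(tf.sqrt(x))).numpy())   # -> [2. 1.]
print(tf.pow(x, 0.125).numpy())               # -> [2. 1.]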