Python tensorflow.compat.v1.truncated_normal() Examples

The following are 8 code examples of tensorflow.compat.v1.truncated_normal(). You can go to the original project or source file by following the link above each example, or check out all other available functions and classes of the module tensorflow.compat.v1.
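All examples assume import tensorflow.compat.v1 as tf. As a minimal, self-contained sketch before the project code (the shape and seed below are illustrative): tf.compat.v1.truncated_normal() samples from a normal distribution but re-draws any value that falls more than two standard deviations from the mean, so every sample is guaranteed to lie within mean ± 2 * stddev.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # required to run a v1-style Session under TF2

# Each entry is re-drawn until it lands within two standard deviations
# of the mean, i.e. inside (-2.0, 2.0) here.
sample = tf.truncated_normal([3, 4], mean=0.0, stddev=1.0, seed=42)
with tf.Session() as sess:
  print(sess.run(sample))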
Example #1
Source File: model.py    From benchmarks with Apache License 2.0
def get_synthetic_inputs(self, input_name, nclass):
    # Synthetic input should be within [0, 255].
    image_shape, label_shape = self.get_input_shapes('train')
    inputs = tf.truncated_normal(
        image_shape,
        dtype=self.data_type,
        mean=127,
        stddev=60,
        name=self.model_name + '_synthetic_inputs')
    inputs = variables_module.VariableV1(
        inputs, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES],
        name=input_name)
    labels = tf.random_uniform(
        label_shape,
        minval=0,
        maxval=nclass - 1,
        dtype=tf.int32,
        name=self.model_name + '_synthetic_labels')
    return (inputs, labels) 
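A note on the constants: because truncated_normal re-draws anything beyond two standard deviations, the synthetic pixels are confined to 127 ± 2 * 60, i.e. [7, 247], which satisfies the [0, 255] requirement stated in the comment without any explicit clipping.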
Example #2
Source File: optimize.py    From tensor2tensor with Apache License 2.0
def weight_noise(noise_rate, learning_rate, var_list):
  """Apply weight noise to vars in var_list."""
  if not noise_rate:
    return [tf.no_op()]

  tf.logging.info("Applying weight noise scaled by learning rate, "
                  "noise_rate: %0.5f", noise_rate)

  noise_ops = []

  for v in var_list:
    with tf.device(v.device):  # pylint: disable=protected-access
      scale = noise_rate * learning_rate * 0.001
      if common_layers.should_generate_summaries():
        tf.summary.scalar("weight_noise_scale", scale)
      noise = tf.truncated_normal(v.shape) * scale
      noise_op = v.assign_add(noise)
      noise_ops.append(noise_op)

  return noise_ops 
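A hedged usage sketch (the toy quadratic loss and learning rate below are illustrative, not part of the original file): grouping the returned assign_add ops with the training op applies one bounded noise draw to every variable per step.

w = tf.get_variable("w", shape=[10], initializer=tf.zeros_initializer())
loss = tf.reduce_sum(tf.square(w))
step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
noise_ops = weight_noise(noise_rate=1.0, learning_rate=0.1,
                         var_list=tf.trainable_variables())
train_op = tf.group(step, *noise_ops)  # one run = one update plus one noise draw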
Example #3
Source File: discretization.py    From tensor2tensor with Apache License 2.0
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
                             discretize_warmup_steps, mode):
  """Simple discretization through tanh, flip bottleneck_noise many bits."""
  x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
  d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
  if mode == tf.estimator.ModeKeys.TRAIN:
    x += tf.truncated_normal(
        common_layers.shape_list(x), mean=0.0, stddev=0.2)
  x = tf.tanh(x)
  d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
  if mode == tf.estimator.ModeKeys.TRAIN:
    noise = tf.random_uniform(common_layers.shape_list(x))
    noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
    d *= noise
  d = common_layers.mix(d, x, discretize_warmup_steps,
                        mode == tf.estimator.ModeKeys.TRAIN)
  return d, d0 
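The line d = x + tf.stop_gradient(...) is a straight-through estimator: in the forward pass the stop_gradient term cancels x, leaving the hard ±1 discretization, while in the backward pass it contributes nothing, so gradients flow through x unchanged. Note also that the truncated normal noise is bounded by ±0.4 (two standard deviations of 0.2), so any pre-activation with |x| > 0.4 keeps its sign under the noise.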
Example #4
Source File: utils.py    From lamb with Apache License 2.0
def make_low_rank_factorization_initializer(shape, rank):
  fan_in = int(shape[0])
  # This is the variance we'd like to see if a matrix of 'shape' was
  # initialized directly.
  variance = 1.0 / fan_in
  # Each element of a*b (the low rank matrices) is the sum of 'rank'
  # terms, each of which is a product of an element from 'a' and
  # 'b'.
  stddev = np.sqrt(np.sqrt(variance / rank))
  return tf.initializers.truncated_normal(stddev=stddev) 
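Why the fourth root: if a weight matrix of the given shape is factored as a product a * b, with a of shape [fan_in, rank] and b of shape [rank, fan_out], each entry of a * b is a sum of rank products of independent zero-mean entries, so its variance is rank * stddev**4 when both factors share one stddev. Solving rank * stddev**4 = variance gives stddev = (variance / rank)**0.25, the double square root above. A hedged usage sketch (shapes and names are illustrative):

init = make_low_rank_factorization_initializer([256, 512], rank=32)
a = tf.get_variable("a", [256, 32], initializer=init)
b = tf.get_variable("b", [32, 512], initializer=init)
w = tf.matmul(a, b)  # Var(w[i, j]) is roughly 1/256, as if w were initialized directly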
Example #5
Source File: discretization.py    From tensor2tensor with Apache License 2.0
def isemhash_bottleneck(x,
                        bottleneck_bits,
                        bottleneck_noise,
                        discretize_warmup_steps,
                        mode,
                        isemhash_noise_dev=0.5,
                        isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
      d = common_layers.mix(
          d,
          2.0 * y - 1.0,
          discretize_warmup_steps,
          mode == tf.estimator.ModeKeys.TRAIN,
          max_prob=isemhash_mix_prob)
    return d, 0.0 
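Two details worth noting: the truncated normal noise is bounded by ±1.0 (two standard deviations of isemhash_noise_dev=0.5), and in tensor2tensor common_layers.saturating_sigmoid is clip(1.2 * sigmoid(x) - 0.1, 0, 1), a sigmoid that actually reaches 0 and 1, so the bottleneck can emit exactly-binary values even before the hard thresholding in d.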
Example #6
Source File: model.py    From Autopilot-TensorFlow with MIT License
def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.1)
  return tf.Variable(initial) 
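This is the classic helper from the early TensorFlow tutorials; truncation keeps every initial weight inside (-0.2, 0.2). A typical companion for biases, sketched here rather than copied from the source file, uses a small positive constant so ReLU units start out active:

def bias_variable(shape):
  # A small positive constant keeps ReLUs from starting out dead.
  initial = tf.constant(0.1, shape=shape)
  return tf.Variable(initial)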
Example #7
Source File: utils.py    From lamb with Apache License 2.0
def variance_scaling_initializer(scale=2.0, mode='fan_in',
                                 distribution='truncated_normal',
                                 mean=0.0, seed=None, dtype=tf.float32):
  """Like tf.variance_scaling_initializer but supports non-zero means."""
  if not dtype.is_floating:
    raise TypeError('Cannot create initializer for non-floating point type.')
  if mode not in ['fan_in', 'fan_out', 'fan_avg']:
    raise TypeError('Unknown mode %s [fan_in, fan_out, fan_avg]' % mode)

  # pylint: disable=unused-argument
  def _initializer(shape, dtype=dtype, partition_info=None):
    """Initializer function."""
    if not dtype.is_floating:
      raise TypeError('Cannot create initializer for non-floating point type.')
    # Estimating fan_in and fan_out is not possible to do perfectly, but we try.
    # This is the right thing for matrix multiply and convolutions.
    if shape:
      fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])
      fan_out = float(shape[-1])
    else:
      fan_in = 1.0
      fan_out = 1.0
    for dim in shape[:-2]:
      fan_in *= float(dim)
      fan_out *= float(dim)
    if mode == 'fan_in':
      # Count only number of input connections.
      n = fan_in
    elif mode == 'fan_out':
      # Count only number of output connections.
      n = fan_out
    elif mode == 'fan_avg':
      # Average number of inputs and output connections.
      n = (fan_in + fan_out) / 2.0
    if distribution == 'truncated_normal':
      # To get stddev = math.sqrt(scale / n) need to adjust for truncated.
      trunc_stddev = math.sqrt(1.3 * scale / n)
      return tf.truncated_normal(shape, mean, trunc_stddev, dtype, seed=seed)
    elif distribution == 'uniform':
      # To get stddev = math.sqrt(scale / n) need to adjust for uniform.
      limit = math.sqrt(3.0 * scale / n)
      return tf.random_uniform(shape, mean-limit, mean+limit, dtype, seed=seed)
    else:
      raise ValueError('Unexpected distribution %s.' % distribution)
  # pylint: enable=unused-argument

  return _initializer 
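The 1.3 factor compensates for the variance lost to truncation: a normal truncated at two standard deviations retains only about 1/1.3 of its nominal variance, so inflating the requested scale restores Var ≈ scale / n. A hedged usage sketch (the layer shape and mean are illustrative):

init = variance_scaling_initializer(scale=2.0, mode='fan_in',
                                    distribution='truncated_normal', mean=0.05)
w = tf.get_variable('w', shape=[784, 256], initializer=init)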
Example #8
Source File: text_cnn.py    From tensor2tensor with Apache License 2.0
def body(self, features):
    """TextCNN main model_fn.

    Args:
      features: Map of features to the model. Should contain the following:
          "inputs": Text inputs.
              [batch_size, input_length, 1, hidden_dim].
          "targets": Target encoder outputs.
              [batch_size, 1, 1, hidden_dim]
    Returns:
      Final encoder representation. [batch_size, 1, 1, hidden_dim]
    """
    hparams = self._hparams
    inputs = features["inputs"]

    xshape = common_layers.shape_list(inputs)

    vocab_size = xshape[3]
    inputs = tf.reshape(inputs, [xshape[0], xshape[1], xshape[3], xshape[2]])

    pooled_outputs = []
    for _, filter_size in enumerate(hparams.filter_sizes):
      with tf.name_scope("conv-maxpool-%s" % filter_size):
        filter_shape = [filter_size, vocab_size, 1, hparams.num_filters]
        filter_var = tf.Variable(
            tf.truncated_normal(filter_shape, stddev=0.1), name="W")
        filter_bias = tf.Variable(
            tf.constant(0.1, shape=[hparams.num_filters]), name="b")
        conv = tf.nn.conv2d(
            inputs,
            filter_var,
            strides=[1, 1, 1, 1],
            padding="VALID",
            name="conv")
        conv_outputs = tf.nn.relu(
            tf.nn.bias_add(conv, filter_bias), name="relu")
        pooled = tf.math.reduce_max(
            conv_outputs, axis=1, keepdims=True, name="max")
        pooled_outputs.append(pooled)

    num_filters_total = hparams.num_filters * len(hparams.filter_sizes)
    h_pool = tf.concat(pooled_outputs, 3)
    h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])

    # Add dropout
    output = tf.nn.dropout(h_pool_flat, 1 - hparams.output_dropout)
    output = tf.reshape(output, [-1, 1, 1, num_filters_total])

    return output
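Shape walkthrough: the reshape moves the hidden dimension into the conv2d width axis, giving inputs of [batch_size, input_length, vocab_size, 1]; each VALID convolution with a [filter_size, vocab_size, 1, num_filters] filter (initialized with tf.truncated_normal, so all weights start inside (-0.2, 0.2)) produces [batch_size, input_length - filter_size + 1, 1, num_filters]; the max over axis 1 pools each filter across time to [batch_size, 1, 1, num_filters]; and concatenating along axis 3 over hparams.filter_sizes yields the final [batch_size, 1, 1, num_filters_total].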