Python tensorflow.truncated_normal() Examples

The following are 30 code examples of tensorflow.truncated_normal(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
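Before the project examples, here is a minimal, self-contained sketch of the call itself (assuming TensorFlow 1.x, where tf.truncated_normal is a top-level symbol): it draws samples from a normal distribution and re-draws any value that falls more than two standard deviations from the mean.

import tensorflow as tf

# Draw a 3x4 tensor of samples; values beyond 2 standard deviations are re-drawn.
noise = tf.truncated_normal([3, 4], mean=0.0, stddev=0.1, dtype=tf.float32, seed=42)

with tf.Session() as sess:
    print(sess.run(noise))  # every value lies within [mean - 2*stddev, mean + 2*stddev]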
Example #1
Source File: discretization.py    From fine-lm with MIT License
def isemhash_bottleneck(x, bottleneck_bits, bottleneck_noise,
                        discretize_warmup_steps, mode,
                        isemhash_noise_dev=0.5, isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
      d = common_layers.mix(d, 2.0 * y - 1.0, discretize_warmup_steps,
                            mode == tf.estimator.ModeKeys.TRAIN,
                            max_prob=isemhash_mix_prob)
    return d, 0.0 
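The line d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y) above is a straight-through estimator: the forward value is the hard 0/1 threshold of y, while the gradient flows through y unchanged. A minimal standalone sketch of that pattern (TF 1.x, values are illustrative):

import tensorflow as tf

y = tf.sigmoid(tf.truncated_normal([4], stddev=2.0))  # soft values in (0, 1)
hard = tf.to_float(tf.less(0.5, y))                   # hard 0/1 forward value
d = hard + y - tf.stop_gradient(y)                    # forward: hard, gradient: same as y

with tf.Session() as sess:
    print(sess.run([y, d]))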
Example #2
Source File: block_util.py    From DOTA_models with Apache License 2.0
def __init__(self, dims=(0,), scale=2.0, **kwargs):
    """Creates an initializer.

    Args:
      dims: Dimension(s) index to compute standard deviation:
        sqrt(scale / product(shape[dims]))
      scale: A constant scaling for the initialization used as
        sqrt(scale / product(shape[dims])).
      **kwargs: Extra keyword arguments to pass to tf.truncated_normal.
    """
    if isinstance(dims, (int, long)):
      self._dims = [dims]
    else:
      self._dims = dims
    self._kwargs = kwargs
    self._scale = scale 
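The matching __call__ (a scaled variant of Example #24 below) would compute the standard deviation described in the docstring. A hedged sketch of that computation, with illustrative shape, dims and scale values that are not from the original:

import numpy as np
import tensorflow as tf

shape = [3, 3, 64, 128]           # e.g. a conv kernel: height, width, in, out channels
dims, scale = (0, 1, 2), 2.0      # fan-in dimensions and scale, as in the docstring
stddev = np.sqrt(scale / np.prod([shape[d] for d in dims]))
init_values = tf.truncated_normal(shape, stddev=stddev, dtype=tf.float32)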
Example #3
Source File: optimize.py    From fine-lm with MIT License
def weight_noise(noise_rate, learning_rate, var_list):
  """Apply weight noise to vars in var_list."""
  if not noise_rate:
    return [tf.no_op()]

  tf.logging.info("Applying weight noise scaled by learning rate, "
                  "noise_rate: %0.5f", noise_rate)

  noise_ops = []

  for v in var_list:
    with tf.device(v._ref().device):  # pylint: disable=protected-access
      scale = noise_rate * learning_rate * 0.001
      tf.summary.scalar("weight_noise_scale", scale)
      noise = tf.truncated_normal(v.shape) * scale
      noise_op = v.assign_add(noise)
      noise_ops.append(noise_op)

  return noise_ops 
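A hedged usage sketch: the returned ops would typically be grouped with the training op and run in the same session call. The noise_rate value and the lr/train_op tensors below are illustrative assumptions, not part of the original.

# lr and train_op are assumed to come from the surrounding training setup
noise_ops = weight_noise(noise_rate=0.01, learning_rate=lr, var_list=tf.trainable_variables())
train_with_noise = tf.group(train_op, *noise_ops)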
Example #4
Source File: tf_cnn.py    From tf-example-models with Apache License 2.0
def dense_layer(x, in_dim, out_dim, layer_name, act):
    """Creates a single densely connected layer of a NN"""
    with tf.name_scope(layer_name):
        # layer weights corresponding to the input / output dimensions
        weights = tf.Variable(
            tf.truncated_normal(
                [in_dim, out_dim], 
                stddev=1.0 / tf.sqrt(float(out_dim))
            ), name="weights"
        )

        # layer biases corresponding to output dimension
        biases = tf.Variable(tf.zeros([out_dim]), name="biases")

        # layer activations applied to Wx+b
        layer = act(tf.matmul(x, weights) + biases, name="activations")

    return layer 
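A hedged sketch of wiring this helper into a small classifier (the placeholder shape and layer sizes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 784])             # e.g. flattened MNIST images
hidden = dense_layer(x, 784, 256, "hidden", tf.nn.relu)       # 784 -> 256 with ReLU
logits = dense_layer(hidden, 256, 10, "logits", tf.identity)  # 256 -> 10 raw class scores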
Example #5
Source File: tf_cnn.py    From tf-example-models with Apache License 2.0
def conv_pool_layer(x, in_channels, out_channels, layer_name):
    """Creates a single convpool layer of a NN"""
    with tf.name_scope(layer_name):
        # layer weights corresponding to the input / output channels
        weights = tf.Variable(tf.truncated_normal([5, 5, in_channels, out_channels], stddev=0.1))

        # layer biases corresponding to output channels
        biases = tf.Variable(tf.constant(0.1, shape=[out_channels]))

        # convolution layer: convolving inputs with the weights and applying ReLU
        conv = tf.nn.relu(tf.nn.conv2d(x, weights, strides=[1, 1, 1, 1], padding='SAME') + biases)

        # max-pooling layer: pooling convolutions (after applying ReLU) by 2x2 windows
        pool = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        return pool
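A hedged sketch of stacking two of these conv-pool blocks on an MNIST-shaped input (shapes and channel counts are illustrative):

import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
pool1 = conv_pool_layer(images, 1, 32, "conv_pool_1")   # 28x28x1  -> 14x14x32
pool2 = conv_pool_layer(pool1, 32, 64, "conv_pool_2")   # 14x14x32 -> 7x7x64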


Example #6
Source File: tf_mlp.py    From tf-example-models with Apache License 2.0
def dense_layer(x, in_dim, out_dim, layer_name, act):
    """Creates a single densely connected layer of a NN"""
    with tf.name_scope(layer_name):
        # layer weights corresponding to the input / output dimensions
        weights = tf.Variable(
            tf.truncated_normal(
                [in_dim, out_dim], 
                stddev=1.0 / tf.sqrt(float(out_dim))
            ), name="weights"
        )

        # layer biases corresponding to output dimension
        biases = tf.Variable(tf.zeros([out_dim]), name="biases")

        # layer activations applied to Wx+b
        layer = act(tf.matmul(x, weights) + biases, name="activations")

    return layer


Example #7
Source File: layers.py    From tensorflow-u-net with GNU General Public License v3.0
def conv_den(x, dendrites, out_channels, variances=[1., 1., 2.], width=11, data_format="NHWC"):
    """Creates a dendrite layer: the dendrite positions are trainable variables
    initialized with tf.truncated_normal, combined by dendrite_layer and a ReLU."""
    if data_format not in ["NHWC", "NCHW"]:
        raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")

    if data_format == "NHWC":
        raise NotImplementedError("data_format \"NHWC\" is not yet implemented!")

    shape = x.shape.as_list()
    depth = shape[1]

    positions_height = tf.Variable(initial_value=tf.truncated_normal([dendrites], stddev=(width - 3) / 4.), name="dendrite_height", dtype=tf.float32)
    positions_width = tf.Variable(initial_value=tf.truncated_normal([dendrites], stddev=(width - 3) / 4.), name="dendrite_width", dtype=tf.float32)
    positions_depth = tf.Variable(initial_value=tf.abs(tf.truncated_normal([dendrites], stddev=(depth - 2) / 2.)) + 0.5, name="dendrite_depth", dtype=tf.float32)

    positions = [positions_height, positions_width, positions_depth]

    weights = weight_variable([out_channels, dendrites], name="weights")
    bias = bias_variable([out_channels, 1, 1], name="biases")

    output = dendrite_layer(x, positions, weights, variances=variances, width=width, data_format=data_format)

    return tf.nn.relu(output + bias, name="relu") 
Example #8
Source File: layers.py    From tensorflow-u-net with GNU General Public License v3.0
def weight_variable(shape, stddev=0.1, name=None):
    """
    Creates a weight variable initialized with a truncated normal distribution.

    Parameters
    ----------
    shape: list or tuple of ints
        The shape of the weight variable.
    stddev: float
        The standard deviation of the truncated normal distribution.
    name : string
        The name of the variable in TensorFlow.

    Returns
    -------
    weights: TF variable
        The weight variable.   
    """
    return tf.Variable(initial_value=tf.truncated_normal(shape, stddev=stddev),
                       name=name,
                       dtype=tf.float32) 
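A hedged usage sketch (shapes are illustrative): creating a 3x3 convolution kernel with this helper and applying it to an assumed NHWC feature map x with 64 channels.

kernel = weight_variable([3, 3, 64, 128], stddev=0.05, name="conv_kernel")
features = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding="SAME")  # x is assumed defined elsewhere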
Example #9
Source File: multitask_regressor.py    From deepchem with MIT License
def build(self):
    # Create target inputs
    self.label_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="label_placeholder")
    self.weight_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="weight_placeholder")

    feat = self.model.return_outputs()
    feat_size = feat.get_shape()[-1].value
    outputs = []
    for task in range(self.n_tasks):
      outputs.append(
          tf.squeeze(
              model_ops.fully_connected_layer(
                  tensor=feat,
                  size=1,
                  weight_init=tf.truncated_normal(
                      shape=[feat_size, 1], stddev=0.01),
                  bias_init=tf.constant(value=0., shape=[1]))))
    return outputs 
Example #10
Source File: trainer.py    From StackGAN with MIT License
def sample_encoded_context(self, embeddings):
        '''Helper function for init_opt'''
        c_mean_logsigma = self.model.generate_condition(embeddings)
        mean = c_mean_logsigma[0]
        if cfg.TRAIN.COND_AUGMENTATION:
            # epsilon = tf.random_normal(tf.shape(mean))
            epsilon = tf.truncated_normal(tf.shape(mean))
            stddev = tf.exp(c_mean_logsigma[1])
            c = mean + stddev * epsilon

            kl_loss = KL_loss(c_mean_logsigma[0], c_mean_logsigma[1])
        else:
            c = mean
            kl_loss = 0

        return c, cfg.TRAIN.COEFF.KL * kl_loss 
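The conditioning-augmentation branch above is the reparameterization trick, with tf.truncated_normal used in place of tf.random_normal so the noise is clipped to two standard deviations. A minimal standalone sketch of the sampling step (shapes are illustrative):

import tensorflow as tf

mean = tf.zeros([8, 128])                      # conditioning mean for a batch of 8
log_sigma = tf.zeros([8, 128])                 # log standard deviation
epsilon = tf.truncated_normal(tf.shape(mean))  # noise clipped to +/- 2 stddev
c = mean + tf.exp(log_sigma) * epsilon         # sampled conditioning vector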
Example #11
Source File: trainer.py    From StackGAN with MIT License
def sample_encoded_context(self, embeddings):
        '''Helper function for init_opt'''
        # Build conditioning augmentation structure for text embedding
        # under different variable_scope: 'g_net' and 'hr_g_net'
        c_mean_logsigma = self.model.generate_condition(embeddings)
        mean = c_mean_logsigma[0]
        if cfg.TRAIN.COND_AUGMENTATION:
            # epsilon = tf.random_normal(tf.shape(mean))
            epsilon = tf.truncated_normal(tf.shape(mean))
            stddev = tf.exp(c_mean_logsigma[1])
            c = mean + stddev * epsilon

            kl_loss = KL_loss(c_mean_logsigma[0], c_mean_logsigma[1])
        else:
            c = mean
            kl_loss = 0
        # TODO: play with the coefficient for KL
        return c, cfg.TRAIN.COEFF.KL * kl_loss 
Example #12
Source File: Model.py    From Handwritten-Line-Text-Recognition-using-Deep-Learning-with-Tensorflow with Apache License 2.0
def setupRNN(self):
        """ Create RNN layers and return output of these layers """
        # Collapse layer to remove dimension 100 x 1 x 512 --> 100 x 512 on axis=2
        rnnIn3d = tf.squeeze(self.cnnOut4d, axis=[2])

        # 2 layers of LSTM cell used to build RNN
        numHidden = 512
        cells = [tf.contrib.rnn.LSTMCell(
            num_units=numHidden, state_is_tuple=True, name='basic_lstm_cell') for _ in range(2)]
        stacked = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
        # Bi-directional RNN
        # BxTxF -> BxTx2H
        ((forward, backward), _) = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=stacked, cell_bw=stacked, inputs=rnnIn3d, dtype=rnnIn3d.dtype)

        # BxTxH + BxTxH -> BxTx2H -> BxTx1X2H
        concat = tf.expand_dims(tf.concat([forward, backward], 2), 2)

        # Project output to chars (including blank): BxTx1x2H -> BxTx1xC -> BxTxC
        kernel = tf.Variable(tf.truncated_normal(
            [1, 1, numHidden * 2, len(self.charList) + 1], stddev=0.1))
        self.rnnOut3d = tf.squeeze(tf.nn.atrous_conv2d(value=concat, filters=kernel, rate=1, padding='SAME'), axis=[2]) 
Example #13
Source File: optimize.py    From BERT with Apache License 2.0
def weight_noise(noise_rate, learning_rate, var_list):
  """Apply weight noise to vars in var_list."""
  if not noise_rate:
    return [tf.no_op()]

  tf.logging.info("Applying weight noise scaled by learning rate, "
                  "noise_rate: %0.5f", noise_rate)

  noise_ops = []

  for v in var_list:
    with tf.device(v.device):  # pylint: disable=protected-access
      scale = noise_rate * learning_rate * 0.001
      if common_layers.should_generate_summaries():
        tf.summary.scalar("weight_noise_scale", scale)
      noise = tf.truncated_normal(v.shape) * scale
      noise_op = v.assign_add(noise)
      noise_ops.append(noise_op)

  return noise_ops 
Example #14
Source File: discretization.py    From BERT with Apache License 2.0
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
                             discretize_warmup_steps, mode):
  """Simple discretization through tanh, flip bottleneck_noise many bits."""
  x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
  d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
  if mode == tf.estimator.ModeKeys.TRAIN:
    x += tf.truncated_normal(
        common_layers.shape_list(x), mean=0.0, stddev=0.2)
  x = tf.tanh(x)
  d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
  if mode == tf.estimator.ModeKeys.TRAIN:
    noise = tf.random_uniform(common_layers.shape_list(x))
    noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
    d *= noise
  d = common_layers.mix(d, x, discretize_warmup_steps,
                        mode == tf.estimator.ModeKeys.TRAIN)
  return d, d0 
Example #15
Source File: tf_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _weight_variable(shape,name=None):
    """weight_variable generates a weigh  t        variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.01)+0.01
    return tf.Variable(initial,name=name) 
Example #16
Source File: tf_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _weight_variable(shape,name=None):
    """weight_variable generates a weigh  t        variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.01)+0.01
    return tf.Variable(initial,name=name) 
Example #17
Source File: tf_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _weight_variable(shape,name=None):
    """weight_variable generates a weigh  t        variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.01)+0.01
    return tf.Variable(initial,name=name) 
Example #18
Source File: madry_mnist_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial) 
Example #19
Source File: model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, input_shape):
        batch_size, dim = input_shape
        self.input_shape = [batch_size, dim]
        self.output_shape = [batch_size, self.num_hid]
        shape = [dim, self.num_hid]
        with tf.variable_scope(self.name):
            init = tf.truncated_normal(shape, stddev=0.1)
            self.W = self.get_variable(self.w_name, init)
            self.b = self.get_variable('b', .1 + np.zeros(
                (self.num_hid,)).astype('float32')) 
Example #20
Source File: 2_mnist.py    From deep-learning-note with MIT License
def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial) 
Example #21
Source File: 1_mnist_before.py    From deep-learning-note with MIT License
def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial) 
Example #22
Source File: neural_gpu.py    From DOTA_models with Apache License 2.0
def autoenc_quantize(x, nbits, nmaps, do_training, layers=1):
  """Autoencoder into nbits vectors of bits, using noise and sigmoids."""
  enc_x = tf.reshape(x, [-1, nmaps])
  for i in xrange(layers - 1):
    enc_x = tf.layers.dense(enc_x, nmaps, name="autoenc_%d" % i)
  enc_x = tf.layers.dense(enc_x, nbits, name="autoenc_%d" % (layers - 1))
  noise = tf.truncated_normal(tf.shape(enc_x), stddev=2.0)
  dec_x = sigmoid_cutoff_12(enc_x + noise * do_training)
  dec_x = tf.reshape(dec_x, [-1, nbits])
  for i in xrange(layers):
    dec_x = tf.layers.dense(dec_x, nmaps, name="autodec_%d" % i)
  return tf.reshape(dec_x, tf.shape(x)) 
Example #23
Source File: block_util.py    From DOTA_models with Apache License 2.0
def __init__(self, dims=(0,), **kwargs):
    """Creates an initializer.

    Args:
      dims: Dimension(s) index to compute standard deviation:
        1.0 / sqrt(product(shape[dims]))
      **kwargs: Extra keyword arguments to pass to tf.truncated_normal.
    """
    if isinstance(dims, (int, long)):
      self._dims = [dims]
    else:
      self._dims = dims
    self._kwargs = kwargs 
Example #24
Source File: block_util.py    From DOTA_models with Apache License 2.0
def __call__(self, shape, dtype):
    stddev = 1.0 / np.sqrt(np.prod([shape[x] for x in self._dims]))
    return tf.truncated_normal(
        shape=shape, dtype=dtype, stddev=stddev, **self._kwargs) 
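Because __call__ takes (shape, dtype), an instance of this initializer can be called directly to build an initial value. A hedged sketch (the class name is hypothetical, standing in for the block_util class these methods belong to):

init = TruncatedNormalInitializer(dims=(0,))  # hypothetical name for the class defined above
w = tf.Variable(init(shape=[256, 512], dtype=tf.float32), name="w")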
Example #25
Source File: block_util.py    From DOTA_models with Apache License 2.0
def __call__(self, shape, dtype):
    return tf.truncated_normal(shape=shape, dtype=dtype, stddev=self._stddev) 
Example #26
Source File: resnet.py    From deep-models with Apache License 2.0
def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.01)
  return tf.Variable(initial) 
Example #27
Source File: densenet.py    From deep-models with Apache License 2.0
def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.01)
  return tf.Variable(initial) 
Example #28
Source File: ssresnet.py    From deep-models with Apache License 2.0
def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.01)
  return tf.Variable(initial) 
Example #29
Source File: models.py    From nlp-tensorflow with MIT License
def _build_net(self):
        with tf.variable_scope("placeholder"):
            self.input_x = tf.placeholder(tf.float32, shape=(None, self.vocab_size))
            self.input_y = tf.placeholder(tf.int32, shape=(None,))
            Y_one_hot = tf.one_hot(self.input_y, self.n_class)
        
        with tf.variable_scope("output", reuse=tf.AUTO_REUSE):
            W = tf.get_variable('W', dtype=tf.float32,
                               initializer=tf.truncated_normal((self.vocab_size, self.n_class)))
            b = tf.get_variable('b', dtype=tf.float32,
                               initializer=tf.constant(0.1, shape=(self.n_class,)))
            logits = tf.nn.xw_plus_b(self.input_x, W, b)
            self.prob = tf.reduce_max(tf.nn.softmax(logits), axis=1)
            self.prediction = tf.cast(tf.argmax(logits, axis=1), tf.int32)
            
            
        with tf.variable_scope("loss"):
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y_one_hot))
        
        with tf.variable_scope("train", reuse=tf.AUTO_REUSE):
            optimizer = tf.train.AdamOptimizer(self.lr)
            self.train_op = optimizer.minimize(self.loss)
            
        with tf.variable_scope("accuracy"):
            correct = tf.equal(self.prediction, self.input_y)
            self.accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        
        self.sess.run(tf.global_variables_initializer()) 
Example #30
Source File: models.py    From nlp-tensorflow with MIT License
def _build_net(self):
        with tf.variable_scope("placeholder"):
            self.input_x = tf.placeholder(tf.float32, shape=(None, self.vocab_size))
            self.input_y = tf.placeholder(tf.int32, shape=(None,))
        
        with tf.variable_scope("output", reuse=tf.AUTO_REUSE):
            W1 = tf.get_variable("W1", dtype=tf.float32,
                                 initializer=tf.truncated_normal((self.vocab_size, self.hidden_size)))
            b1 = tf.get_variable("b1", dtype=tf.float32,
                                initializer=tf.constant(0.1, shape=(self.hidden_size,)))
            W2 = tf.get_variable("W2", dtype=tf.float32,
                                initializer=tf.truncated_normal((self.hidden_size, self.n_class)))
            b2 = tf.get_variable("b2", dtype=tf.float32,
                                initializer=tf.constant(0.1, shape=(self.n_class,)))
            h = tf.nn.relu(tf.nn.xw_plus_b(self.input_x, W1, b1))
            logits = tf.nn.xw_plus_b(h, W2, b2)
            self.prob = tf.reduce_max(tf.nn.softmax(logits), axis=1)
            self.prediction = tf.cast(tf.argmax(logits, axis=1), tf.int32)
            
        with tf.variable_scope("loss"):
            self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self.input_y))
        
        with tf.variable_scope("train", reuse=tf.AUTO_REUSE):
            optimizer = tf.train.AdamOptimizer(self.lr)
            self.train_op = optimizer.minimize(self.loss)

        with tf.variable_scope("accuracy"):
            correct = tf.equal(self.prediction, self.input_y)
            self.accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        
        self.sess.run(tf.global_variables_initializer())
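Note that tf.truncated_normal is a TensorFlow 1.x symbol; in TensorFlow 2.x the same op lives under tf.random, so the equivalent call is:

import tensorflow as tf

# TF 2.x equivalent of tf.truncated_normal(shape, mean=..., stddev=...)
noise = tf.random.truncated_normal([3, 4], mean=0.0, stddev=0.1)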