Python tensorflow.truncated_normal() Examples

The following are 30 code examples of tensorflow.truncated_normal(), drawn from open-source projects; each example notes its source file and the project's license. You may also want to check out the other available functions and classes of the tensorflow module.
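Before the examples, a minimal sketch of the call itself (TF 1.x graph-mode API; newer TensorFlow versions expose the same sampler as tf.random.truncated_normal):

import tensorflow as tf

# Draw a 3x4 tensor from a normal distribution truncated at two standard
# deviations from the mean; values outside that range are re-drawn.
samples = tf.truncated_normal([3, 4], mean=0.0, stddev=0.1, seed=42)

with tf.Session() as sess:
    print(sess.run(samples))  # all values lie in [-0.2, 0.2]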
Example #1
Source File: discretization.py    From fine-lm with MIT License
def isemhash_bottleneck(x, bottleneck_bits, bottleneck_noise,
                        discretize_warmup_steps, mode,
                        isemhash_noise_dev=0.5, isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
      d = common_layers.mix(d, 2.0 * y - 1.0, discretize_warmup_steps,
                            mode == tf.estimator.ModeKeys.TRAIN,
                            max_prob=isemhash_mix_prob)
    return d, 0.0 
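The bit line above, d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y), is a straight-through estimator. A minimal standalone sketch of the trick (hypothetical code, not from fine-lm):

import tensorflow as tf

# Forward pass: d equals the hard 0/1 bit. Backward pass: hard and
# -stop_gradient(y) contribute no gradient, so d behaves like y to autodiff.
y = tf.sigmoid(tf.random_normal([4, 8]))   # continuous surrogate in (0, 1)
hard = tf.to_float(tf.less(0.5, y))        # binarized value, no gradient
d = hard + y - tf.stop_gradient(y)         # forward: hard; gradient: dy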
Example #2
Source File: block_util.py    From DOTA_models with Apache License 2.0
def __init__(self, dims=(0,), scale=2.0, **kwargs):
    """Creates an initializer.

    Args:
      dims: Dimension(s) index to compute standard deviation:
        sqrt(scale / product(shape[dims]))
      scale: A constant scaling for the initialization used as
        sqrt(scale / product(shape[dims])).
      **kwargs: Extra keyword arguments to pass to tf.truncated_normal.
    """
    if isinstance(dims, int):
      self._dims = [dims]
    else:
      self._dims = dims
    self._kwargs = kwargs
    self._scale = scale 
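The constructor above only stores configuration; the repo's sampling method is not shown. A hedged sketch of what drawing values with the stored settings might look like (hypothetical _sample helper, assuming a static shape list and that _kwargs does not already set stddev):

import numpy as np
import tensorflow as tf

def _sample(self, shape, dtype=tf.float32):
    # stddev = sqrt(scale / product(shape[dims])), as in the docstring.
    fan = np.prod([shape[d] for d in self._dims])
    stddev = np.sqrt(self._scale / fan)
    return tf.truncated_normal(shape, stddev=stddev, dtype=dtype,
                               **self._kwargs)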
Example #3
Source File: Model.py    From Handwritten-Line-Text-Recognition-using-Deep-Learning-with-Tensorflow with Apache License 2.0
def setupRNN(self):
        """ Create RNN layers and return output of these layers """
        # Collapse the singleton dimension (axis 2): 100 x 1 x 512 --> 100 x 512
        rnnIn3d = tf.squeeze(self.cnnOut4d, axis=[2])

        # 2 layers of LSTM cell used to build RNN
        numHidden = 512
        cells = [tf.contrib.rnn.LSTMCell(
            num_units=numHidden, state_is_tuple=True, name='basic_lstm_cell') for _ in range(2)]
        stacked = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
        # Bi-directional RNN
        # BxTxF -> BxTx2H
        ((forward, backward), _) = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=stacked, cell_bw=stacked, inputs=rnnIn3d, dtype=rnnIn3d.dtype)

        # BxTxH + BxTxH -> BxTx2H -> BxTx1X2H
        concat = tf.expand_dims(tf.concat([forward, backward], 2), 2)

        # Project output to chars (including blank): BxTx1x2H -> BxTx1xC -> BxTxC
        kernel = tf.Variable(tf.truncated_normal(
            [1, 1, numHidden * 2, len(self.charList) + 1], stddev=0.1))
        self.rnnOut3d = tf.squeeze(tf.nn.atrous_conv2d(value=concat, filters=kernel, rate=1, padding='SAME'), axis=[2]) 
Example #4
Source File: trainer.py    From StackGAN with MIT License
def sample_encoded_context(self, embeddings):
        '''Helper function for init_opt'''
        # Build conditioning augmentation structure for text embedding
        # under different variable_scope: 'g_net' and 'hr_g_net'
        c_mean_logsigma = self.model.generate_condition(embeddings)
        mean = c_mean_logsigma[0]
        if cfg.TRAIN.COND_AUGMENTATION:
            # epsilon = tf.random_normal(tf.shape(mean))
            epsilon = tf.truncated_normal(tf.shape(mean))
            stddev = tf.exp(c_mean_logsigma[1])
            c = mean + stddev * epsilon

            kl_loss = KL_loss(c_mean_logsigma[0], c_mean_logsigma[1])
        else:
            c = mean
            kl_loss = 0
        # TODO: play with the coefficient for KL
        return c, cfg.TRAIN.COEFF.KL * kl_loss 
Example #5
Source File: multitask_regressor.py    From deepchem with MIT License
def build(self):
    # Create target inputs
    self.label_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="label_placeholder")
    self.weight_placeholder = tf.placeholder(
        dtype='float32', shape=(None, self.n_tasks), name="weight_placeholder")

    feat = self.model.return_outputs()
    feat_size = feat.get_shape()[-1].value
    outputs = []
    for task in range(self.n_tasks):
      outputs.append(
          tf.squeeze(
              model_ops.fully_connected_layer(
                  tensor=feat,
                  size=1,
                  weight_init=tf.truncated_normal(
                      shape=[feat_size, 1], stddev=0.01),
                  bias_init=tf.constant(value=0., shape=[1]))))
    return outputs 
Example #6
Source File: trainer.py    From StackGAN with MIT License
def sample_encoded_context(self, embeddings):
        '''Helper function for init_opt'''
        c_mean_logsigma = self.model.generate_condition(embeddings)
        mean = c_mean_logsigma[0]
        if cfg.TRAIN.COND_AUGMENTATION:
            # epsilon = tf.random_normal(tf.shape(mean))
            epsilon = tf.truncated_normal(tf.shape(mean))
            stddev = tf.exp(c_mean_logsigma[1])
            c = mean + stddev * epsilon

            kl_loss = KL_loss(c_mean_logsigma[0], c_mean_logsigma[1])
        else:
            c = mean
            kl_loss = 0

        return c, cfg.TRAIN.COEFF.KL * kl_loss 
Example #7
Source File: optimize.py    From fine-lm with MIT License
def weight_noise(noise_rate, learning_rate, var_list):
  """Apply weight noise to vars in var_list."""
  if not noise_rate:
    return [tf.no_op()]

  tf.logging.info("Applying weight noise scaled by learning rate, "
                  "noise_rate: %0.5f", noise_rate)

  noise_ops = []

  for v in var_list:
    with tf.device(v._ref().device):  # pylint: disable=protected-access
      scale = noise_rate * learning_rate * 0.001
      tf.summary.scalar("weight_noise_scale", scale)
      noise = tf.truncated_normal(v.shape) * scale
      noise_op = v.assign_add(noise)
      noise_ops.append(noise_op)

  return noise_ops 
Example #8
Source File: layers.py    From tensorflow-u-net with GNU General Public License v3.0
def weight_variable(shape, stddev=0.1, name=None):
    """
    Creates a weight variable initialized with a truncated normal distribution.

    Parameters
    ----------
    shape: list or tuple of ints
        The shape of the weight variable.
    stddev: float
        The standard deviation of the truncated normal distribution.
    name : string
        The name of the variable in TensorFlow.

    Returns
    -------
    weights: TF variable
        The weight variable.   
    """
    return tf.Variable(initial_value=tf.truncated_normal(shape, stddev=stddev),
                       name=name,
                       dtype=tf.float32) 
Example #9
Source File: layers.py    From tensorflow-u-net with GNU General Public License v3.0
def conv_den(x, dendrites, out_channels, variances=[1., 1., 2.], width=11, data_format="NHWC"):

    if data_format not in ["NHWC", "NCHW"]:
        raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")

    if data_format == "NHWC":
        raise NotImplementedError("data_format \"NHWC\" is not yet implemented!")

    shape = x.shape.as_list()
    depth = shape[1]

    positions_height = tf.Variable(initial_value=tf.truncated_normal([dendrites], stddev=(width - 3) / 4.), name="dendrite_height", dtype=tf.float32)
    positions_width = tf.Variable(initial_value=tf.truncated_normal([dendrites], stddev=(width - 3) / 4.), name="dendrite_width", dtype=tf.float32)
    positions_depth = tf.Variable(initial_value=tf.abs(tf.truncated_normal([dendrites], stddev=(depth - 2) / 2.)) + 0.5, name="dendrite_depth", dtype=tf.float32)

    positions = [positions_height, positions_width, positions_depth]

    weights = weight_variable([out_channels, dendrites], name="weights")
    bias = bias_variable([out_channels, 1, 1], name="biases")

    output = dendrite_layer(x, positions, weights, variances=variances, width=width, data_format=data_format)

    return tf.nn.relu(output + bias, name="relu") 
Example #10
Source File: tf_cnn.py    From tf-example-models with Apache License 2.0
def dense_layer(x, in_dim, out_dim, layer_name, act):
    """Creates a single densely connected layer of a NN"""
    with tf.name_scope(layer_name):
        # layer weights corresponding to the input / output dimensions
        weights = tf.Variable(
            tf.truncated_normal(
                [in_dim, out_dim], 
                stddev=1.0 / tf.sqrt(float(out_dim))
            ), name="weights"
        )

        # layer biases corresponding to output dimension
        biases = tf.Variable(tf.zeros([out_dim]), name="biases")

        # layer activations applied to Wx+b
        layer = act(tf.matmul(x, weights) + biases, name="activations")

    return layer 
Example #11
Source File: tf_cnn.py    From tf-example-models with Apache License 2.0
def conv_pool_layer(x, in_channels, out_channels, layer_name):
    """Creates a single convpool layer of a NN"""
    with tf.name_scope(layer_name):
        # layer weights corresponding to the input / output channels
        weights = tf.Variable(tf.truncated_normal([5, 5, in_channels, out_channels], stddev=0.1))

        # layer biases corresponding to output channels
        biases = tf.Variable(tf.constant(0.1, shape=[out_channels]))

        # convolution layer: convolving inputs with the weights and applying ReLU
        conv = tf.nn.relu(tf.nn.conv2d(x, weights, strides=[1, 1, 1, 1], padding='SAME') + biases)

        # max-pooling layer: pooling convolutions (after applying ReLU) by 2x2 windows
        pool = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

        return pool


# PREPARING DATA

# downloading (on first run) and extracting MNIST data 
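Taken together with dense_layer from Example #10, the helper above composes into a small network. A hedged sketch of the wiring (hypothetical, for 28x28x1 MNIST inputs; not code from tf-example-models):

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
pool1 = conv_pool_layer(x, 1, 32, "conv_pool_1")       # -> 14x14x32
pool2 = conv_pool_layer(pool1, 32, 64, "conv_pool_2")  # -> 7x7x64
flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
logits = dense_layer(flat, 7 * 7 * 64, 10, "output", tf.identity)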
Example #12
Source File: tf_mlp.py    From tf-example-models with Apache License 2.0
def dense_layer(x, in_dim, out_dim, layer_name, act):
    """Creates a single densely connected layer of a NN"""
    with tf.name_scope(layer_name):
        # layer weights corresponding to the input / output dimensions
        weights = tf.Variable(
            tf.truncated_normal(
                [in_dim, out_dim], 
                stddev=1.0 / tf.sqrt(float(out_dim))
            ), name="weights"
        )

        # layer biases corresponding to output dimension
        biases = tf.Variable(tf.zeros([out_dim]), name="biases")

        # layer activations applied to Wx+b
        layer = act(tf.matmul(x, weights) + biases, name="activations")

    return layer


# PREPARING DATA

# downloading (on first run) and extracting MNIST data 
Example #13
Source File: optimize.py    From BERT with Apache License 2.0
def weight_noise(noise_rate, learning_rate, var_list):
  """Apply weight noise to vars in var_list."""
  if not noise_rate:
    return [tf.no_op()]

  tf.logging.info("Applying weight noise scaled by learning rate, "
                  "noise_rate: %0.5f", noise_rate)

  noise_ops = []

  for v in var_list:
    with tf.device(v.device):
      scale = noise_rate * learning_rate * 0.001
      if common_layers.should_generate_summaries():
        tf.summary.scalar("weight_noise_scale", scale)
      noise = tf.truncated_normal(v.shape) * scale
      noise_op = v.assign_add(noise)
      noise_ops.append(noise_op)

  return noise_ops 
Example #14
Source File: discretization.py    From BERT with Apache License 2.0
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
                             discretize_warmup_steps, mode):
  """Simple discretization through tanh, flip bottleneck_noise many bits."""
  x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
  d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
  if mode == tf.estimator.ModeKeys.TRAIN:
    x += tf.truncated_normal(
        common_layers.shape_list(x), mean=0.0, stddev=0.2)
  x = tf.tanh(x)
  d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
  if mode == tf.estimator.ModeKeys.TRAIN:
    noise = tf.random_uniform(common_layers.shape_list(x))
    noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
    d *= noise
  d = common_layers.mix(d, x, discretize_warmup_steps,
                        mode == tf.estimator.ModeKeys.TRAIN)
  return d, d0 
Example #15
Source File: mnist.py    From lambda-packs with MIT License
def inference(images, hidden1_units, hidden2_units):
  """Build the MNIST model up to where it may be used for inference.

  Args:
    images: Images placeholder, from inputs().
    hidden1_units: Size of the first hidden layer.
    hidden2_units: Size of the second hidden layer.

  Returns:
    softmax_linear: Output tensor with the computed logits.
  """
  # Hidden 1
  with tf.name_scope('hidden1'):
    weights = tf.Variable(
        tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                            stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden1_units]),
                         name='biases')
    hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
  # Hidden 2
  with tf.name_scope('hidden2'):
    weights = tf.Variable(
        tf.truncated_normal([hidden1_units, hidden2_units],
                            stddev=1.0 / math.sqrt(float(hidden1_units))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden2_units]),
                         name='biases')
    hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
  # Linear
  with tf.name_scope('softmax_linear'):
    weights = tf.Variable(
        tf.truncated_normal([hidden2_units, NUM_CLASSES],
                            stddev=1.0 / math.sqrt(float(hidden2_units))),
        name='weights')
    biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                         name='biases')
    logits = tf.matmul(hidden2, weights) + biases
  return logits 
Example #16
Source File: discretization.py    From BERT with Apache License 2.0
def isemhash_bottleneck(x,
                        bottleneck_bits,
                        bottleneck_noise,
                        discretize_warmup_steps,
                        mode,
                        isemhash_noise_dev=0.5,
                        isemhash_mix_prob=0.5):
  """Improved semantic hashing bottleneck."""
  with tf.variable_scope("isemhash_bottleneck"):
    x = tf.layers.dense(x, bottleneck_bits, name="dense")
    y = common_layers.saturating_sigmoid(x)
    if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
      noise = tf.truncated_normal(
          common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)
      y = common_layers.saturating_sigmoid(x + noise)
    d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)
    d = 2.0 * d - 1.0  # Move from [0, 1] to [-1, 1].
    if mode == tf.estimator.ModeKeys.TRAIN:  # Flip some bits.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
      d *= noise
      d = common_layers.mix(
          d,
          2.0 * y - 1.0,
          discretize_warmup_steps,
          mode == tf.estimator.ModeKeys.TRAIN,
          max_prob=isemhash_mix_prob)
    return d, 0.0 
Example #17
Source File: seq_nn_3d.py    From TNT with GNU General Public License v3.0
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial) 
Example #18
Source File: example.py    From CRFasRNNLayer with MIT License
def xavier_initializer_convolution(shape, dist='uniform', lambda_initializer=True):
    """
    Xavier initializer for N-D convolution patches. input_activations = patch_volume * in_channels;
    output_activations = patch_volume * out_channels; Uniform: lim = sqrt(3/(input_activations + output_activations))
    Normal: stddev =  sqrt(6/(input_activations + output_activations))
    :param shape: The shape of the convolution patch i.e. spatial_shape + [input_channels, output_channels]. The order of
    input_channels and output_channels is irrelevant, hence this can be used to initialize deconvolution parameters.
    :param dist: A string either 'uniform' or 'normal' determining the type of distribution
    :param lambda_initializer: Whether to return the initial actual values of the parameters (True) or placeholders that
    are initialized when the session is initiated
    :return: A numpy araray with the initial values for the parameters in the patch
    """
    s = len(shape) - 2
    num_activations = np.prod(shape[:s]) * np.sum(shape[s:])  # input_activations + output_activations
    if dist == 'uniform':
        lim = np.sqrt(6. / num_activations)
        if lambda_initializer:
            return np.random.uniform(-lim, lim, shape).astype(np.float32)
        else:
            return tf.random_uniform(shape, minval=-lim, maxval=lim)
    if dist == 'normal':
        stddev = np.sqrt(3. / num_activations)
        if lambda_initializer:
            return np.random.normal(0, stddev, shape).astype(np.float32)
        else:
            return tf.truncated_normal(shape, mean=0, stddev=stddev)
    raise ValueError('Distribution must be either "uniform" or "normal".') 
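A hypothetical call for a 3-D 3x3x3 convolution kernel mapping 16 to 32 channels (names and shapes are illustrative):

# shape = spatial_shape + [in_channels, out_channels]
init = xavier_initializer_convolution([3, 3, 3, 16, 32], dist='normal')
kernel = tf.Variable(init, name='kernel', dtype=tf.float32)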
Example #19
Source File: model_ops.py    From PADME with MIT License
def fully_connected_layer(tensor,
                          size=None,
                          weight_init=None,
                          bias_init=None,
                          name=None):
  """Fully connected layer.

  Parameters
  ----------
  tensor: tf.Tensor
    Input tensor.
  size: int
    Number of output nodes for this layer.
  weight_init: tf.Tensor, optional
    Tensor of initial weight values; defaults to a truncated normal.
  bias_init: tf.Tensor, optional
    Tensor of initial bias values; defaults to zeros.
  name: str
    Name for this op. Defaults to 'fully_connected'.

  Returns
  -------
  tf.Tensor:
    A new tensor representing the output of the fully connected layer.

  Raises
  ------
  ValueError
    If input tensor is not 2D.
  """
  if weight_init is None:
    num_features = tensor.get_shape()[-1].value
    weight_init = tf.truncated_normal([num_features, size], stddev=0.01)
  if bias_init is None:
    bias_init = tf.zeros([size])

  with tf.name_scope(name, 'fully_connected', [tensor]):
    w = tf.Variable(weight_init, name='w', dtype=tf.float32)
    b = tf.Variable(bias_init, name='b', dtype=tf.float32)
    return tf.nn.xw_plus_b(tensor, w, b) 
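Example #5 above shows a call with an explicit weight_init; relying on the defaults instead might look like this (hypothetical shapes):

features = tf.placeholder(tf.float32, [None, 128])
logits = fully_connected_layer(features, size=10, name='logits')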
Example #20
Source File: linear.py    From TensorFlow-in-a-Nutshell with MIT License
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=1)
    return tf.Variable(initial)

# dataset 
Example #21
Source File: layer.py    From LiTS---Liver-Tumor-Segmentation-Challenge with MIT License
def weight_xavier_init(shape, n_inputs, n_outputs, activefunction='sigmoid', uniform=True, variable_name=None):
    with tf.device('/cpu:0'):
        if activefunction == 'sigmoid':
            if uniform:
                init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
                initial = tf.random_uniform(shape, -init_range, init_range)
                return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
            else:
                stddev = tf.sqrt(2.0 / (n_inputs + n_outputs))
                initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
                return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
        elif activefunction == 'relu':
            if uniform:
                init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * np.sqrt(2)
                initial = tf.random_uniform(shape, -init_range, init_range)
                return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
            else:
                stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * np.sqrt(2)
                initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
                return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
        elif activefunction == 'tan':
            if uniform:
                init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * 4
                initial = tf.random_uniform(shape, -init_range, init_range)
                return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
            else:
                stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * 4
                initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
                return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
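A hypothetical call to the helper above, for a 3x3 convolution kernel with 64 input and 128 output channels feeding a ReLU layer:

W = weight_xavier_init(shape=[3, 3, 64, 128],
                       n_inputs=3 * 3 * 64, n_outputs=128,
                       activefunction='relu', uniform=False,
                       variable_name='conv_W')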


# Bias initialization 
Example #22
Source File: demo.py    From StackGAN with MIT License
def sample_encoded_context(embeddings, model, bAugmentation=True):
    '''Helper function for init_opt'''
    # Build conditioning augmentation structure for text embedding
    # under different variable_scope: 'g_net' and 'hr_g_net'
    c_mean_logsigma = model.generate_condition(embeddings)
    mean = c_mean_logsigma[0]
    if bAugmentation:
        # epsilon = tf.random_normal(tf.shape(mean))
        epsilon = tf.truncated_normal(tf.shape(mean))
        stddev = tf.exp(c_mean_logsigma[1])
        c = mean + stddev * epsilon
    else:
        c = mean
    return c 
Example #23
Source File: roi_pooling_op_test.py    From TFFRCNN with MIT License
def weight_variable(shape):
  initial = tf.truncated_normal(shape, stddev=0.1)
  return tf.Variable(initial) 
Example #24
Source File: atomicnet_ops.py    From deepchem with MIT License
def InitializeWeightsBiases(prev_layer_size,
                            size,
                            weights=None,
                            biases=None,
                            name=None):
  """Initializes weights and biases to be used in a fully-connected layer.

  Parameters
  ----------
  prev_layer_size: int
    Number of features in previous layer.
  size: int 
    Number of nodes in this layer.
  weights: tf.Tensor, optional (Default None)
    Weight tensor.
  biases: tf.Tensor, optional (Default None)
    Bias tensor.
  name: str 
    Name for this op, optional (Defaults to 'fully_connected' if None)

  Returns
  -------
  weights: tf.Variable
    Initialized weights.
  biases: tf.Variable
    Initialized biases.

  """

  if weights is None:
    weights = tf.truncated_normal([prev_layer_size, size], stddev=0.01)
  if biases is None:
    biases = tf.zeros([size])

  with tf.name_scope(name, 'fully_connected', [weights, biases]):
    w = tf.Variable(weights, name='w')
    b = tf.Variable(biases, name='b')
  return w, b 
Example #25
Source File: layers.py    From tensorflow-u-net with GNU General Public License v3.0
def weight_variable(shape, stddev=0.1, name=None):
    return tf.Variable(initial_value=tf.truncated_normal(shape, stddev=stddev),
                       name=name,
                       dtype=tf.float32) 
Example #26
Source File: srez_model.py    From srez with MIT License
def _glorot_initializer_conv2d(self, prev_units, num_units, mapsize, stddev_factor=1.0):
        """Initialization in the style of Glorot 2010.

        stddev_factor should be 1.0 for linear activations, and 2.0 for ReLUs"""

        stddev  = np.sqrt(stddev_factor / (np.sqrt(prev_units*num_units)*mapsize*mapsize))
        return tf.truncated_normal([mapsize, mapsize, prev_units, num_units],
                                    mean=0.0, stddev=stddev) 
Example #27
Source File: srez_model.py    From srez with MIT License
def _glorot_initializer(self, prev_units, num_units, stddev_factor=1.0):
        """Initialization in the style of Glorot 2010.

        stddev_factor should be 1.0 for linear activations, and 2.0 for ReLUs"""
        stddev  = np.sqrt(stddev_factor / np.sqrt(prev_units*num_units))
        return tf.truncated_normal([prev_units, num_units],
                                    mean=0.0, stddev=stddev) 
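As a quick numeric check of the two Glorot formulas above (illustrative values, not from srez), take prev_units=64, num_units=64, mapsize=3 and stddev_factor=1.0:

import numpy as np

# Dense: sqrt(1.0 / sqrt(64 * 64)) = sqrt(1/64) = 0.125
print(np.sqrt(1.0 / np.sqrt(64 * 64)))            # 0.125
# Conv2d: sqrt(1.0 / (sqrt(64 * 64) * 3 * 3)) = sqrt(1/576) ~ 0.0417
print(np.sqrt(1.0 / (np.sqrt(64 * 64) * 3 * 3)))  # ~0.0417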
Example #28
Source File: models.py    From adversarial_audio with MIT License
def create_single_fc_model(fingerprint_input, model_settings, is_training):
  """Builds a model with a single hidden fully-connected layer.

  This is a very simple model with just one matmul and bias layer. As you'd
  expect, it doesn't produce very accurate results, but it is very fast and
  simple, so it's useful for sanity testing.

  Here's the layout of the graph:

  (fingerprint_input)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v

  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.

  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  """
  if is_training:
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  fingerprint_size = model_settings['fingerprint_size']
  label_count = model_settings['label_count']
  weights = tf.Variable(
      tf.truncated_normal([fingerprint_size, label_count], stddev=0.001))
  bias = tf.Variable(tf.zeros([label_count]))
  logits = tf.matmul(fingerprint_input, weights) + bias
  if is_training:
    return logits, dropout_prob
  else:
    return logits 
Example #29
Source File: cnn.py    From anticipating-activities with MIT License
def __weight_variable(self, shape, myName):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial, name=myName)
Example #30
Source File: tf_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _weight_variable(shape,name=None):
    """weight_variable generates a weigh  t        variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.01)+0.01
    return tf.Variable(initial,name=name)