Python tensorflow.random_normal_initializer() Examples

The following are 30 code examples of tensorflow.random_normal_initializer(), drawn from open-source projects; each example's header lists its source file, project, and license. You may also want to check out all available functions and classes of the module tensorflow, or try the search function.
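Before working through the examples, here is a minimal, self-contained sketch of the initializer itself, written in the same TF 1.x graph style as the examples below (the variable name and shape are arbitrary):

import tensorflow as tf

# Draw initial weights from N(mean=0.0, stddev=0.02), a common choice in GAN code.
init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
w = tf.get_variable("w", shape=[3, 3, 64, 128], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).std())  # should come out close to 0.02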
Example #1
Source File: pix2pix.py    From DeepLab_v3 with MIT License
def pix2pix_arg_scope():
  """Returns a default argument scope for isola_net.

  Returns:
    An arg scope.
  """
  # These parameters come from the online port; they don't necessarily match
  # those in the paper.
  # TODO(nsilberman): confirm these values with Philip.
  instance_norm_params = {
      'center': True,
      'scale': True,
      'epsilon': 0.00001,
  }

  with tf.contrib.framework.arg_scope(
      [layers.conv2d, layers.conv2d_transpose],
      normalizer_fn=layers.instance_norm,
      normalizer_params=instance_norm_params,
      weights_initializer=tf.random_normal_initializer(0, 0.02)) as sc:
    return sc 
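The returned scope dict can be re-entered wherever the generator layers are built. A hypothetical usage sketch, assuming layers is tf.contrib.layers and images is an NHWC batch:

with tf.contrib.framework.arg_scope(pix2pix_arg_scope()):
    # conv2d now defaults to instance norm and N(0, 0.02) weight initialization
    net = layers.conv2d(images, num_outputs=64, kernel_size=4, stride=2)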
Example #2
Source File: utility.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def define_network(constructor, config, action_size):
  """Constructor for the recurrent cell for the algorithm.

  Args:
    constructor: Callable returning the network as RNNCell.
    config: Object providing configurations via attributes.
    action_size: Integer indicating the number of action dimensions.

  Returns:
    Created recurrent cell object.
  """
  mean_weights_initializer = (
      tf.contrib.layers.variance_scaling_initializer(
          factor=config.init_mean_factor))
  logstd_initializer = tf.random_normal_initializer(
      config.init_logstd, 1e-10)
  network = constructor(
      config.policy_layers, config.value_layers, action_size,
      mean_weights_initializer=mean_weights_initializer,
      logstd_initializer=logstd_initializer)
  return network 
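Note the tiny stddev of 1e-10 on logstd_initializer: every draw from N(config.init_logstd, 1e-10) equals the mean up to float noise, so this is effectively a constant initializer routed through the normal-initializer API. A quick sketch of the equivalence (the value -1.0 is illustrative):

# effectively constant: samples of N(-1.0, 1e-10) are all -1.0 up to float noise
logstd_init = tf.random_normal_initializer(-1.0, 1e-10)
# ... which behaves like:
logstd_init = tf.constant_initializer(-1.0)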
Example #3
Source File: universal_transformer_util.py    From fine-lm with MIT License
def add_depth_embedding(x):
  """Add n-dimensional embedding as the depth embedding (timing signal).

  Adds embeddings to represent the position of the step in the recurrent
  tower.

  Args:
    x: a tensor with shape [max_step, batch, length, depth]

  Returns:
    a Tensor the same shape as x.
  """
  x_shape = common_layers.shape_list(x)
  depth = x_shape[-1]
  num_steps = x_shape[0]
  shape = [num_steps, 1, 1, depth]
  depth_embedding = (
      tf.get_variable(
          "depth_embedding",
          shape,
          initializer=tf.random_normal_initializer(0, depth**-0.5))
      * (depth**0.5))

  x += depth_embedding
  return x 
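The init-then-rescale pattern (initialize with stddev depth**-0.5, then multiply by depth**0.5) leaves the effective embedding with roughly unit variance, since the stddevs multiply. A small numpy check (the depth value is arbitrary):

import numpy as np

depth = 512
v = np.random.normal(0.0, depth ** -0.5, size=[4, 1, 1, depth])
print((v * depth ** 0.5).std())  # close to 1.0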
Example #4
Source File: ops.py    From SSGAN-Tensorflow with MIT License
def instance_norm(input):
    """
    Instance normalization
    """
    with tf.variable_scope('instance_norm'):
        num_out = input.get_shape()[-1]
        scale = tf.get_variable(
            'scale', [num_out],
            initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02))
        offset = tf.get_variable(
            'offset', [num_out],
            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02))
        mean, var = tf.nn.moments(input, axes=[1, 2], keep_dims=True)
        epsilon = 1e-6
        inv = tf.rsqrt(var + epsilon)
        return scale * (input - mean) * inv + offset 
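Calling it is straightforward. A sketch with a placeholder NHWC input (shapes are hypothetical; only the channel dimension must be static):

images = tf.placeholder(tf.float32, [None, 64, 64, 3])
normalized = instance_norm(images)  # per-image, per-channel normalization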
Example #5
Source File: common_attention.py    From fine-lm with MIT License
def get_layer_timing_signal_learned_1d(channels, layer, num_layers):
  """get n-dimensional embedding as the layer (vertical) timing signal.

  Adds embeddings to represent the position of the layer in the tower.

  Args:
    channels: dimension of the timing signal
    layer: layer num
    num_layers: total number of layers

  Returns:
    a Tensor of timing signals [1, 1, channels].
  """
  shape = [num_layers, 1, 1, channels]
  layer_embedding = (
      tf.get_variable(
          "layer_embedding",
          shape,
          initializer=tf.random_normal_initializer(0, channels**-0.5)) *
      (channels**0.5))
  return layer_embedding[layer, :, :, :] 
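The returned [1, 1, channels] tensor is intended to be broadcast-added onto a [batch, length, channels] activation. A hedged usage sketch (the argument values are illustrative):

# x: [batch, length, channels]; the signal broadcasts over batch and length
x += get_layer_timing_signal_learned_1d(channels=512, layer=3, num_layers=6)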
Example #6
Source File: resnet_model.py    From benchmarks with The Unlicense
def resnet_backbone(image, num_blocks, group_func, block_func):
    """
    Sec 5.1: We adopt the initialization of [15] for all convolutional layers.
    TensorFlow does not have the true "MSRA init". We use variance_scaling as an approximation.
    """
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')):
        l = Conv2D('conv0', image, 64, 7, strides=2, activation=BNReLU)
        l = MaxPooling('pool0', l, pool_size=3, strides=2, padding='SAME')
        l = group_func('group0', l, block_func, 64, num_blocks[0], 1)
        l = group_func('group1', l, block_func, 128, num_blocks[1], 2)
        l = group_func('group2', l, block_func, 256, num_blocks[2], 2)
        l = group_func('group3', l, block_func, 512, num_blocks[3], 2)
        l = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linear', l, 1000,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    """
    Sec 5.1:
    The 1000-way fully-connected layer is initialized by
    drawing weights from a zero-mean Gaussian with standard
    deviation of 0.01.
    """
    return logits 
Example #7
Source File: net.py    From progressive_growing_of_GANs with MIT License
def conv2d(self, input_, n_filters, k_size, padding='same'):
        if not self.cfg.weight_scale:
            return tf.layers.conv2d(input_, n_filters, k_size, padding=padding)

        n_feats_in = input_.get_shape().as_list()[-1]
        fan_in = k_size * k_size * n_feats_in
        c = tf.constant(np.sqrt(2. / fan_in), dtype=tf.float32)
        kernel_init = tf.random_normal_initializer(stddev=1.)
        w_shape = [k_size, k_size, n_feats_in, n_filters]
        w = tf.get_variable('kernel', shape=w_shape, initializer=kernel_init)
        w = c * w
        strides = [1, 1, 1, 1]
        net = tf.nn.conv2d(input_, w, strides, padding=padding.upper())
        b = tf.get_variable('bias', [n_filters],
                            initializer=tf.constant_initializer(0.))
        net = tf.nn.bias_add(net, b)
        return net 
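This is the "equalized learning rate" trick from the Progressive GAN paper: weights are stored as N(0, 1) and the He constant c = sqrt(2 / fan_in) is applied at graph-build time rather than baked into the initializer, so every layer sees the same per-weight update scale. A quick sanity check of the constant (a hypothetical 3x3 conv with 64 input channels):

import numpy as np

fan_in = 3 * 3 * 64
print(np.sqrt(2.0 / fan_in))  # ~0.059, what a plain He initializer would use as stddev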
Example #8
Source File: resnet_model_reusable.py    From blackbox-attacks with MIT License
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
    """Convolution."""
    with tf.variable_scope(name):
      n = filter_size * filter_size * out_filters
      kernel = tf.get_variable(
          'DW', [filter_size, filter_size, in_filters, out_filters],
          tf.float32, initializer=tf.random_normal_initializer(
              stddev=np.sqrt(2.0/n)))
      return tf.nn.conv2d(x, kernel, strides, padding='SAME') 
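stddev = sqrt(2/n) with n = filter_size * filter_size * out_filters is He/MSRA initialization in its fan-out form. In TensorFlow 1.13+, the same distribution can be requested directly (a hedged equivalent, not what this file uses):

kernel_init = tf.variance_scaling_initializer(
    scale=2.0, mode='fan_out', distribution='untruncated_normal')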
Example #9
Source File: cnn.py    From GroundeR with MIT License
def deconv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
                 bias_term=True, weights_initializer=None, biases_initializer=None):
    # input_shape is [batch, in_height, in_width, in_channels]
    input_shape = bottom.get_shape().as_list()
    batch_size, input_height, input_width, input_dim = input_shape
    output_shape = [batch_size, input_height*stride, input_width*stride, output_dim]

    # weights and biases variables
    with tf.variable_scope(name):
        # initialize the variables
        if weights_initializer is None:
            weights_initializer = tf.random_normal_initializer()
        if bias_term and biases_initializer is None:
            biases_initializer = tf.constant_initializer(0.)

        # filter has shape [filter_height, filter_width, out_channels, in_channels]
        weights = tf.get_variable("weights",
            [kernel_size, kernel_size, output_dim, input_dim],
            initializer=weights_initializer)
        if bias_term:
            biases = tf.get_variable("biases", output_dim,
                initializer=biases_initializer)

    deconv = tf.nn.conv2d_transpose(bottom, filter=weights,
        output_shape=output_shape, strides=[1, stride, stride, 1],
        padding=padding)
    if bias_term:
        deconv = tf.nn.bias_add(deconv, biases)
    return deconv 
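A hypothetical call (note the function needs a static batch size, since output_shape is built from get_shape()):

feat = tf.placeholder(tf.float32, [8, 16, 16, 256])  # batch size must be static
up = deconv_layer('deconv1', feat, kernel_size=4, stride=2, output_dim=128)
# up: [8, 32, 32, 128]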
Example #10
Source File: madry_thin_model.py    From blackbox-attacks with MIT License
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
    """Convolution."""
    with tf.variable_scope(name):
      n = filter_size * filter_size * out_filters
      kernel = tf.get_variable(
          'DW', [filter_size, filter_size, in_filters, out_filters],
          tf.float32, initializer=tf.random_normal_initializer(
              stddev=np.sqrt(2.0/n)))
      return tf.nn.conv2d(x, kernel, strides, padding='SAME') 
Example #11
Source File: cnn.py    From GroundeR with MIT License
def conv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
               bias_term=True, weights_initializer=None, biases_initializer=None, input_dim=None):
    # input has shape [batch, in_height, in_width, in_channels]
    if input_dim is None:
        input_dim = bottom.get_shape().as_list()[-1]

    # weights and biases variables
    with tf.variable_scope(name):
        # initialize the variables
        if weights_initializer is None:
            weights_initializer = tf.random_normal_initializer()
        if bias_term and biases_initializer is None:
            biases_initializer = tf.constant_initializer(0.)

        # filter has shape [filter_height, filter_width, in_channels, out_channels]
        weights = tf.get_variable("weights",
            [kernel_size, kernel_size, input_dim, output_dim],
            initializer=weights_initializer)
        if bias_term:
            biases = tf.get_variable("biases", output_dim,
                initializer=biases_initializer)

    conv = tf.nn.conv2d(bottom, filter=weights,
        strides=[1, stride, stride, 1], padding=padding)
    if bias_term:
        conv = tf.nn.bias_add(conv, biases)
    return conv 
Example #12
Source File: discriminator.py    From dcnn_textvae with MIT License
def __init__(self, encoder_rnn_output, temperature, is_training=True, ru=False):
        with tf.variable_scope("Discriminator_input"):
            self.encoder_rnn_output = encoder_rnn_output
            self.temperature = temperature

            self.is_training = is_training

        with tf.variable_scope("discriminator_linear1"):
            discriminator_W1 = tf.get_variable(name="discriminator_W1",
                                              shape=(FLAGS.RNN_SIZE, 100),
                                              dtype=tf.float32,
                                              initializer=tf.random_normal_initializer(stddev=0.1))
            discriminator_b1 = tf.get_variable(name="discriminator_b1",
                                              shape=(100),
                                              dtype=tf.float32)

        with tf.variable_scope("discriminator_linear2"):
            discriminator_W2 = tf.get_variable(name="discriminator_W2",
                                              shape=(100, FLAGS.LABEL_CLASS),
                                              dtype=tf.float32,
                                              initializer=tf.random_normal_initializer(stddev=0.1))
            discriminator_b2 = tf.get_variable(name="discriminator_b2",
                                              shape=(FLAGS.LABEL_CLASS),
                                              dtype=tf.float32)

        with tf.name_scope("hidden"):
            h = tf.nn.relu(tf.matmul(self.encoder_rnn_output, discriminator_W1) + discriminator_b1)

        with tf.name_scope("discriminator_output"):
            self.discriminator_logits = tf.matmul(h, discriminator_W2) + discriminator_b2
            self.discriminator_predict = tf.stop_gradient(tf.argmax(self.discriminator_logits, 1))
            self.discriminator_prob = tf.nn.softmax(self.discriminator_logits, name="discriminator_softmax")

        with tf.name_scope("sampling"):
            # unlabeled
            self.discriminator_sampling_onehot = self.gumbel_softmax(self.discriminator_logits, self.temperature) 
Example #13
Source File: encoder.py    From dcnn_textvae with MIT License
def __init__(self, embedding, encoder_input_list,
                 is_training=True, ru=False):
        with tf.name_scope("encoder_input"):
            self.embedding = embedding
            self.encoder_input_list = encoder_input_list

            self.is_training = is_training

        with tf.variable_scope("encoder_rnn"):
            with tf.variable_scope("rnn_input_weight"):
                self.rnn_input_W = tf.get_variable(name="rnn_input_W",
                                                   shape=(FLAGS.EMBED_SIZE, FLAGS.RNN_SIZE),
                                                   dtype=tf.float32,
                                                   initializer=tf.random_normal_initializer(stddev=0.1))
                self.rnn_input_b = tf.get_variable(name="rnn_input_b",
                                                   shape=(FLAGS.RNN_SIZE),
                                                   dtype=tf.float32)

            with tf.variable_scope("encoder_rnn"):
                cell = tf.contrib.rnn.LayerNormBasicLSTMCell(FLAGS.RNN_SIZE)

                if self.is_training:
                    cell = tf.nn.rnn_cell.DropoutWrapper(cell,
                                                         output_keep_prob=FLAGS.ENCODER_DROPOUT_KEEP)

                self.cell = tf.contrib.rnn.MultiRNNCell([cell] * FLAGS.RNN_NUM)

                self.init_states = [cell.zero_state(FLAGS.BATCH_SIZE, tf.float32)
                                    for _ in range(FLAGS.RNN_NUM)]
                self.states = [tf.placeholder(tf.float32,
                                              (FLAGS.BATCH_SIZE),
                                              name="state")
                               for _ in range(FLAGS.RNN_NUM)]

        with tf.name_scope("encoder_rnn_output"):
            self.encoder_rnn_output = self.rnn_train_predict()


    # input text from dataset 
Example #14
Source File: cnn.py    From GroundeR with MIT License
def fc_layer(name, bottom, output_dim, bias_term=True, weights_initializer=None,
             biases_initializer=None):
    # flatten bottom input
    # input has shape [batch, in_height, in_width, in_channels]
    shape = bottom.get_shape().as_list()
    input_dim = 1
    for d in shape[1:]:
        input_dim *= d
    flat_bottom = tf.reshape(bottom, [-1, input_dim])

    # weights and biases variables
    with tf.variable_scope(name):
        # initialize the variables
        if weights_initializer is None:
            weights_initializer = tf.random_normal_initializer()
        if bias_term and biases_initializer is None:
            biases_initializer = tf.constant_initializer(0.)

        # weights has shape [input_dim, output_dim]
        weights = tf.get_variable("weights", [input_dim, output_dim],
            initializer=weights_initializer)
        if bias_term:
            biases = tf.get_variable("biases", output_dim,
                initializer=biases_initializer)
    if bias_term:
        fc = tf.nn.xw_plus_b(flat_bottom, weights, biases)
    else:
        fc = tf.matmul(flat_bottom, weights)
    return fc 
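Because fc_layer flattens everything past the batch dimension, it accepts conv feature maps directly. A hypothetical call:

feat = tf.placeholder(tf.float32, [None, 7, 7, 512])
logits = fc_layer('fc8', feat, output_dim=1000)  # weights shape: [7*7*512, 1000]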
Example #15
Source File: model.py    From DNA-GAN with MIT License
def make_conv(self, name, X, shape, strides):
        with tf.variable_scope(name) as scope:
            W = tf.get_variable('W',
                                shape=shape,
                                initializer=tf.random_normal_initializer(stddev=0.02),
                                )
            return tf.nn.conv2d(X, W, strides=strides, padding='SAME') 
Example #16
Source File: vae.py    From e2c with Apache License 2.0
def linear(x,output_dim):
  #w=tf.get_variable("w", [x.get_shape()[1], output_dim], initializer=tf.random_normal_initializer(mean=0.0, stddev=.01)) 
  w=tf.get_variable("w", [x.get_shape()[1], output_dim], initializer=orthogonal_initializer(1.1))
  b=tf.get_variable("b", [output_dim], initializer=tf.constant_initializer(0.0))
  return tf.matmul(x,w)+b 
Example #17
Source File: model.py    From DNA-GAN with MIT License
def make_fc(self, name, X, out_dim):
        in_dim = X.get_shape().as_list()[-1]
        with tf.variable_scope(name) as scope:
            W = tf.get_variable('W',
                                shape=[in_dim, out_dim],
                                initializer=tf.random_normal_initializer(stddev=0.02),
                                )
            b = tf.get_variable('b',
                                shape=[out_dim],
                                initializer=tf.zeros_initializer(),
                                )
            return tf.add(tf.matmul(X, W), b) 
Example #18
Source File: resnet_model_reusable_wide.py    From blackbox-attacks with MIT License
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
    """Convolution."""
    with tf.variable_scope(name):
      n = filter_size * filter_size * out_filters
      kernel = tf.get_variable(
          'DW', [filter_size, filter_size, in_filters, out_filters],
          tf.float32, initializer=tf.random_normal_initializer(
              stddev=np.sqrt(2.0/n)))
      return tf.nn.conv2d(x, kernel, strides, padding='SAME') 
Example #19
Source File: sphere_resnet_v1.py    From SphereNet with MIT License
def get_conv_filter(self, shape, reg, stddev):
        init = tf.random_normal_initializer(stddev=stddev)
        if reg:
            regu = tf.contrib.layers.l2_regularizer(self.wd)
            filt = tf.get_variable('filter', shape, initializer=init,regularizer=regu)
        else:
            filt = tf.get_variable('filter', shape, initializer=init)

        return filt 
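A hedged usage sketch, assuming the enclosing class sets self.wd to the L2 weight-decay coefficient (the regularizer is only attached when reg=True):

filt = self.get_conv_filter(shape=[3, 3, 64, 128], reg=True, stddev=0.01)
out = tf.nn.conv2d(images, filt, strides=[1, 1, 1, 1], padding='SAME')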
Example #20
Source File: spherenet.py    From SphereNet with MIT License
def get_conv_filter(self, shape, reg, stddev):
        init = tf.random_normal_initializer(stddev=stddev)
        if reg:
            regu = tf.contrib.layers.l2_regularizer(self.wd)
            filt = tf.get_variable('filter', shape, initializer=init,regularizer=regu)
        else:
            filt = tf.get_variable('filter', shape, initializer=init)

        return filt 
Example #21
Source File: baseline_cnn.py    From SphereNet with MIT License
def get_conv_filter(self, shape, reg, stddev):
        init = tf.random_normal_initializer(stddev=stddev)
        if reg:
            regu = tf.contrib.layers.l2_regularizer(self.wd)
            filt = tf.get_variable('filter', shape, initializer=init,regularizer=regu)
        else:
            filt = tf.get_variable('filter', shape, initializer=init)

        return filt 
Example #22
Source File: spherenet_linear_sphereconv.py    From SphereNet with MIT License
def get_conv_filter(self, shape, reg, stddev):
        init = tf.random_normal_initializer(stddev=stddev)
        if reg:
            regu = tf.contrib.layers.l2_regularizer(self.wd)
            filt = tf.get_variable('filter', shape, initializer=init,regularizer=regu)
        else:
            filt = tf.get_variable('filter', shape, initializer=init)

        return filt 
Example #23
Source File: spherenet_sigmoid_sphereconv.py    From SphereNet with MIT License
def get_conv_filter(self, shape, reg, stddev):
        init = tf.random_normal_initializer(stddev=stddev)
        if reg:
            regu = tf.contrib.layers.l2_regularizer(self.wd)
            filt = tf.get_variable('filter', shape, initializer=init,regularizer=regu)
        else:
            filt = tf.get_variable('filter', shape, initializer=init)

        return filt 
Example #24
Source File: spherenet_linear_sphereconv_wsoftmax.py    From SphereNet with MIT License
def get_conv_filter(self, shape, reg, stddev):
        init = tf.random_normal_initializer(stddev=stddev)
        if reg:
            regu = tf.contrib.layers.l2_regularizer(self.wd)
            filt = tf.get_variable('filter', shape, initializer=init,regularizer=regu)
        else:
            filt = tf.get_variable('filter', shape, initializer=init)

        return filt 
Example #25
Source File: pix2pix.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def deconv2d(name, x, out_nums, ksize, strides, padding="SAME"):
    b = tf.shape(x)[0]
    w = x.shape[2]
    h = x.shape[1]
    c = x.shape[3]
    kernel = tf.get_variable(name + "weight", shape=[ksize, ksize, out_nums, c], initializer=tf.random_normal_initializer(mean=0., stddev=0.02))
    bias = tf.get_variable(name + "bias", shape=[out_nums], initializer=tf.constant_initializer(0.))
    return tf.nn.conv2d_transpose(x, kernel, [b, h*strides, w*strides, out_nums], [1, strides, strides, 1], padding=padding)+bias 
Example #26
Source File: pix2pix.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def conv2d(name, x, out_nums, ksize, strides, padding="SAME"):
    c = int(np.shape(x)[3])
    kernel = tf.get_variable(name+"weight", shape=[ksize, ksize, c, out_nums], initializer=tf.random_normal_initializer(mean=0., stddev=0.02))
    bias = tf.get_variable(name+"bias", shape=[out_nums], initializer=tf.constant_initializer(0.))
    return tf.nn.conv2d(x, kernel, [1, strides, strides, 1], padding) + bias 
Example #27
Source File: test.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def fully_connected(name, x, out_nums=1):
    x_flatten = tf.layers.flatten(x)
    W = tf.get_variable(name+"weight", shape=[int(np.shape(x_flatten)[1]), out_nums], initializer=tf.random_normal_initializer(stddev=0.02))
    b = tf.get_variable(name+"bias", shape=[out_nums], initializer=tf.random_normal_initializer(stddev=0.02))
    return tf.matmul(x_flatten, W) + b 
Example #28
Source File: test.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def deconv2d(name, x, out_nums, ksize, strides, padding="SAME"):
    b = tf.shape(x)[0]
    w = x.shape[2]
    h = x.shape[1]
    c = x.shape[3]
    kernel = tf.get_variable(name + "weight", shape=[ksize, ksize, out_nums, c], initializer=tf.random_normal_initializer(mean=0., stddev=0.02))
    bias = tf.get_variable(name + "bias", shape=[out_nums], initializer=tf.constant_initializer(0.))
    return tf.nn.conv2d_transpose(x, kernel, [b, h*strides, w*strides, out_nums], [1, strides, strides, 1], padding=padding)+bias 
Example #29
Source File: test.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def conv2d(name, x, out_nums, ksize, strides, padding="SAME"):
    c = int(np.shape(x)[3])
    kernel = tf.get_variable(name+"weight", shape=[ksize, ksize, c, out_nums], initializer=tf.random_normal_initializer(mean=0., stddev=0.02))
    bias = tf.get_variable(name+"bias", shape=[out_nums], initializer=tf.constant_initializer(0.))
    return tf.nn.conv2d(x, kernel, [1, strides, strides, 1], padding) + bias 
Example #30
Source File: cyclegan.py    From DeepLab_v3 with MIT License
def cyclegan_arg_scope(instance_norm_center=True,
                       instance_norm_scale=True,
                       instance_norm_epsilon=0.001,
                       weights_init_stddev=0.02,
                       weight_decay=0.0):
  """Returns a default argument scope for all generators and discriminators.

  Args:
    instance_norm_center: Whether instance normalization applies centering.
    instance_norm_scale: Whether instance normalization applies scaling.
    instance_norm_epsilon: Small float added to the variance in the instance
      normalization to avoid dividing by zero.
    weights_init_stddev: Standard deviation of the random values to initialize
      the convolution kernels with.
    weight_decay: Magnitude of weight decay applied to all convolution kernel
      variables of the generator.

  Returns:
    An arg-scope.
  """
  instance_norm_params = {
      'center': instance_norm_center,
      'scale': instance_norm_scale,
      'epsilon': instance_norm_epsilon,
  }

  weights_regularizer = None
  if weight_decay and weight_decay > 0.0:
    weights_regularizer = layers.l2_regularizer(weight_decay)

  with tf.contrib.framework.arg_scope(
      [layers.conv2d],
      normalizer_fn=layers.instance_norm,
      normalizer_params=instance_norm_params,
      weights_initializer=tf.random_normal_initializer(0, weights_init_stddev),
      weights_regularizer=weights_regularizer) as sc:
    return sc
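As with Example #1, the returned scope is re-entered wherever generator or discriminator layers are built. A hypothetical sketch, assuming layers is tf.contrib.layers:

with tf.contrib.framework.arg_scope(cyclegan_arg_scope(weight_decay=1e-4)):
    net = layers.conv2d(images, num_outputs=64, kernel_size=7, stride=1)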