Python tensorflow.contrib.layers.batch_norm() Examples

The following are 30 code examples of tensorflow.contrib.layers.batch_norm(), drawn from open-source projects; the source file and project are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.contrib.layers, or try the search function.
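batch_norm maintains moving averages of the batch mean and variance, and registers their update ops in the tf.GraphKeys.UPDATE_OPS collection unless updates_collections=None is passed (several examples below do exactly that). A minimal, self-contained TF 1.x sketch of the canonical training pattern; the placeholder, loss, and optimizer here are illustrative, not taken from any example below:

import tensorflow as tf
from tensorflow.contrib import layers

inputs = tf.placeholder(tf.float32, [None, 32, 32, 3])
is_training = tf.placeholder(tf.bool, name='is_training')

# Attach batch_norm to a conv layer via normalizer_fn.
net = layers.conv2d(inputs, num_outputs=16, kernel_size=3,
                    normalizer_fn=layers.batch_norm,
                    normalizer_params={'is_training': is_training})
loss = tf.reduce_mean(tf.square(net))

# The moving-average updates must run with the train op, or the
# statistics used at inference are never updated.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)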
Example #1
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testCorrectSourceOpsWithSkipConnection(self):
    inputs = tf.zeros([2, 4, 4, 3])
    x0 = layers.conv2d(
        inputs, num_outputs=8, kernel_size=3, activation_fn=None, scope='conv0')
    x1 = tf.nn.relu(layers.batch_norm(x0, scale=True, scope='bn0'))
    x1 = layers.conv2d(
        x1, num_outputs=8, kernel_size=3, activation_fn=None, scope='conv1')
    x2 = tf.add_n([x0, x1], name='add')
    final_op = tf.nn.relu(layers.batch_norm(x2, scale=True, scope='bn1'))

    op_handler_dict = self._default_op_handler_dict
    op_reg_manager = orm.OpRegularizerManager([final_op.op], op_handler_dict)

    # All ops are in the same group
    group = list(op_reg_manager._op_group_dict.values())[0]
    source_op_names = [s.op.name for s in group.source_op_slices]
    self.assertSetEqual(set(['bn0/FusedBatchNormV3', 'bn1/FusedBatchNormV3']),
                        set(source_op_names)) 
Example #2
Source File: build_resnet.py    From tensorflow-litterbox with Apache License 2.0
def resnet_arg_scope(
        weight_decay=0.0001,
        batch_norm_decay=0.997,
        batch_norm_epsilon=1e-5,
        batch_norm_scale=True,
):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
    }
    l2_regularizer = layers.l2_regularizer(weight_decay)

    arg_scope_layers = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d, layers.fully_connected],
        weights_initializer=layers.variance_scaling_initializer(),
        weights_regularizer=l2_regularizer,
        activation_fn=tf.nn.relu)
    arg_scope_conv = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d],
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params)
    with arg_scope_layers, arg_scope_conv as arg_sc:
        return arg_sc 
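resnet_arg_scope returns the captured scope instead of entering it at the call site. A plausible way to consume it, assuming the contrib arg_scope API and an input tensor named images for illustration:

with arg_scope(resnet_arg_scope(weight_decay=1e-4)):
    # conv layers built here inherit the initializer, regularizer, and
    # batch_norm normalizer configured above
    net = layers.conv2d(images, num_outputs=64, kernel_size=7, stride=2)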
Example #3
Source File: prnet.py    From LipReading with MIT License
def resBlock(x, num_outputs, kernel_size=4, stride=1, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
    scope=None):
  assert num_outputs % 2 == 0  # num_outputs must be divisible by the channel factor (2 here)
  with tf.variable_scope(scope, 'resBlock'):
    shortcut = x
    if stride != 1 or x.get_shape()[3] != num_outputs:
      shortcut = tcl.conv2d(shortcut, num_outputs, kernel_size=1, stride=stride,
        activation_fn=None, normalizer_fn=None, scope='shortcut')
    x = tcl.conv2d(x, num_outputs // 2, kernel_size=1, stride=1, padding='SAME')
    x = tcl.conv2d(x, num_outputs // 2, kernel_size=kernel_size, stride=stride, padding='SAME')
    x = tcl.conv2d(x, num_outputs, kernel_size=1, stride=1, activation_fn=None, padding='SAME', normalizer_fn=None)

    x += shortcut
    x = normalizer_fn(x)
    x = activation_fn(x)
  return x 
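Note that, unlike a standard pre-activation bottleneck, this block normalizes and activates once, after the shortcut addition (and the channel divisions must stay integral for conv2d, hence //). A hypothetical call, assuming an NHWC feature map feat from an earlier layer:

out = resBlock(feat, num_outputs=128, kernel_size=4, stride=2,
               scope='res1')  # halves the spatial dimensions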
Example #4
Source File: tfm_builder_densenet.py    From Centripetal-SGD with Apache License 2.0
def _batch_norm_default(self, bottom, scope, eps=1e-3, center=True, scale=True):
        if hasattr(self, 'bn_decay'):
            # print('bn decay factor: ', self.bn_decay)
            decay = self.bn_decay
        else:
            decay = 0.9
        if hasattr(self, 'need_gamma'):
            need_gamma = self.need_gamma
        else:
            need_gamma = scale
        if hasattr(self, 'need_beta'):
            need_beta = self.need_beta
        else:
            need_beta = center
        return batch_norm(inputs=bottom, decay=decay, center=need_beta, scale=need_gamma,
                          activation_fn=None, is_training=self.training, scope=scope, epsilon=eps) 
Example #5
Source File: gait_nn.py    From gait-recognition with BSD 3-Clause "New" or "Revised" License
def get_arg_scope(is_training):
        weight_decay_l2 = 0.1
        batch_norm_decay = 0.999
        batch_norm_epsilon = 0.0001

        with slim.arg_scope([slim.conv2d, slim.fully_connected, layers.separable_convolution2d],
                            weights_regularizer = slim.l2_regularizer(weight_decay_l2),
                            biases_regularizer = slim.l2_regularizer(weight_decay_l2),
                            weights_initializer = layers.variance_scaling_initializer(),
                            ):
            batch_norm_params = {
                'decay': batch_norm_decay,
                'epsilon': batch_norm_epsilon
            }
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training = is_training):
                with slim.arg_scope([slim.batch_norm],
                                    **batch_norm_params):
                    with slim.arg_scope([slim.conv2d, layers.separable_convolution2d, layers.fully_connected],
                                        activation_fn = tf.nn.elu,
                                        normalizer_fn = slim.batch_norm,
                                        normalizer_params = batch_norm_params) as scope:
                        return scope 
Example #6
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testAddN(self):
    inputs = tf.zeros([2, 4, 4, 3])
    identity1 = tf.identity(inputs)
    identity2 = tf.identity(inputs)
    identity3 = tf.identity(inputs)
    identity4 = tf.identity(inputs)
    add_n = tf.add_n([identity1, identity2, identity3, identity4])
    batch_norm = layers.batch_norm(add_n)

    manager = orm.OpRegularizerManager(
        [batch_norm.op], op_handler_dict=self._default_op_handler_dict)

    op_slices = manager.get_op_slices(identity1.op)
    self.assertLen(op_slices, 1)
    op_group = manager.get_op_group(op_slices[0]).op_slices

    # Verify all ops are in the same group.
    for test_op in (identity1.op, identity2.op, identity3.op, identity4.op,
                    add_n.op, batch_norm.op):
      test_op_slices = manager.get_op_slices(test_op)
      self.assertLen(test_op_slices, 1)
      self.assertIn(test_op_slices[0], op_group) 
Example #7
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testAddN_Duplicates(self):
    inputs = tf.zeros([2, 4, 4, 3])
    identity = tf.identity(inputs)
    add_n = tf.add_n([identity, identity, identity, identity])
    batch_norm = layers.batch_norm(add_n)

    manager = orm.OpRegularizerManager(
        [batch_norm.op], op_handler_dict=self._default_op_handler_dict)

    op_slices = manager.get_op_slices(identity.op)
    self.assertLen(op_slices, 1)
    op_group = manager.get_op_group(op_slices[0]).op_slices

    # Verify all ops are in the same group.
    for test_op in (identity.op, add_n.op, batch_norm.op):
      test_op_slices = manager.get_op_slices(test_op)
      self.assertLen(test_op_slices, 1)
      self.assertIn(test_op_slices[0], op_group) 
Example #8
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testProcessOpsLast(self):
    inputs = tf.zeros([2, 4, 4, 3])
    batch_norm = layers.batch_norm(inputs)
    identity1 = tf.identity(batch_norm)
    identity2 = tf.identity(batch_norm)

    manager = orm.OpRegularizerManager(
        [identity1.op, identity2.op],
        op_handler_dict=self._default_op_handler_dict)
    manager.process_ops([identity1.op])
    manager.process_ops_last([identity2.op, batch_norm.op])

    self.assertLen(manager._op_deque, 3)
    self.assertEqual(identity1.op, manager._op_deque.pop())
    self.assertEqual(identity2.op, manager._op_deque.pop())
    self.assertEqual(batch_norm.op, manager._op_deque.pop()) 
Example #9
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testProcessOps_DuplicatesRemoved(self):
    inputs = tf.zeros([2, 4, 4, 3])
    batch_norm = layers.batch_norm(inputs)
    identity1 = tf.identity(batch_norm)
    identity2 = tf.identity(batch_norm)

    manager = orm.OpRegularizerManager(
        [identity1.op, identity2.op],
        op_handler_dict=self._default_op_handler_dict)
    manager.process_ops([identity1.op, identity2.op, batch_norm.op])
    # Try to process the same ops again.
    manager.process_ops([identity1.op, identity2.op, batch_norm.op])

    self.assertLen(manager._op_deque, 3)
    self.assertEqual(batch_norm.op, manager._op_deque.pop())
    self.assertEqual(identity2.op, manager._op_deque.pop())
    self.assertEqual(identity1.op, manager._op_deque.pop()) 
Example #10
Source File: variational_dropout.py    From zhusuan with MIT License
def var_dropout(x, n, net_size, n_particles, is_training):
    normalizer_params = {'is_training': is_training,
                         'updates_collections': None}
    bn = zs.BayesianNet()
    h = x
    for i, [n_in, n_out] in enumerate(zip(net_size[:-1], net_size[1:])):
        eps_mean = tf.ones([n, n_in])
        eps = bn.normal(
            'layer' + str(i) + '/eps', eps_mean, std=1.,
            n_samples=n_particles, group_ndims=1)
        h = layers.fully_connected(
            h * eps, n_out, normalizer_fn=layers.batch_norm,
            normalizer_params=normalizer_params)
        if i < len(net_size) - 2:
            h = tf.nn.relu(h)
    y = bn.categorical('y', h)
    bn.deterministic('y_logit', h)
    return bn 
Example #11
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testProcessOpsLast_DuplicatesRemoved(self):
    inputs = tf.zeros([2, 4, 4, 3])
    batch_norm = layers.batch_norm(inputs)
    identity1 = tf.identity(batch_norm)
    identity2 = tf.identity(batch_norm)

    manager = orm.OpRegularizerManager(
        [identity1.op, identity2.op],
        op_handler_dict=self._default_op_handler_dict)
    manager.process_ops([identity1.op])
    manager.process_ops_last([identity2.op, batch_norm.op])
    # Try to process the same ops again.
    manager.process_ops_last([identity2.op, batch_norm.op])

    self.assertLen(manager._op_deque, 3)
    self.assertEqual(identity1.op, manager._op_deque.pop())
    self.assertEqual(identity2.op, manager._op_deque.pop())
    self.assertEqual(batch_norm.op, manager._op_deque.pop()) 
Example #12
Source File: generator_conv.py    From VAE-GAN with MIT License
def __call__(self, i):
		with tf.variable_scope(self.name):
			if self.reuse:
				tf.get_variable_scope().reuse_variables()
			else:
				assert tf.get_variable_scope().reuse is False
				self.reuse = True
			g = tcl.fully_connected(i, self.size * self.size * 1024, activation_fn=tf.nn.relu, 
									normalizer_fn=tcl.batch_norm)
			g = tf.reshape(g, (-1, self.size, self.size, 1024))  # size
			g = tcl.conv2d_transpose(g, 512, 3, stride=2, # size*2
									activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			g = tcl.conv2d_transpose(g, 256, 3, stride=2, # size*4
									activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			g = tcl.conv2d_transpose(g, 128, 3, stride=2, # size*8
									activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			
			g = tcl.conv2d_transpose(g, self.channel, 3, stride=2, # size*16
										activation_fn=tf.nn.sigmoid, padding='SAME', weights_initializer=tf.random_normal_initializer(0, 0.02))
			return g 
Example #13
Source File: SE_Inception_v4.py    From SENet-Tensorflow with MIT License
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True):
        return tf.cond(training,
                       lambda: batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda: batch_norm(inputs=x, is_training=training, reuse=True)) 
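Since the branch is selected with tf.cond, training must be a scalar tf.bool tensor rather than a Python bool, and the reuse=True branch shares the variables created by the reuse=None branch. A hedged usage sketch (the placeholder name is illustrative, not from the source):

training_flag = tf.placeholder(tf.bool, name='training_flag')
x = Batch_Normalization(x, training=training_flag, scope='bn0')
# feed {training_flag: True} while training, {training_flag: False} at inference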
Example #14
Source File: gait_nn.py    From gait-recognition with BSD 3-Clause "New" or "Revised" License
def residual_block(net, ch = 256, ch_inner = 128, scope = None, reuse = None, stride = 1):
        """
        Bottleneck v2
        """

        with slim.arg_scope([layers.convolution2d],
                            activation_fn = None,
                            normalizer_fn = None):
            with tf.variable_scope(scope, 'ResidualBlock', reuse = reuse):
                in_net = net

                if stride > 1:
                    net = layers.convolution2d(net, ch, kernel_size = 1, stride = stride)

                in_net = layers.batch_norm(in_net)
                in_net = tf.nn.relu(in_net)
                in_net = layers.convolution2d(in_net, ch_inner, 1)

                in_net = layers.batch_norm(in_net)
                in_net = tf.nn.relu(in_net)
                in_net = layers.convolution2d(in_net, ch_inner, 3, stride = stride)

                in_net = layers.batch_norm(in_net)
                in_net = tf.nn.relu(in_net)
                in_net = layers.convolution2d(in_net, ch, 1, activation_fn = None)

                net = tf.nn.relu(in_net + net)

        return net 
Example #15
Source File: discriminator_conv.py    From VAE-GAN with MIT License
def __call__(self, x):
		with tf.variable_scope(self.name):
			if self.reuse:
				tf.get_variable_scope().reuse_variables()
			else:
				assert tf.get_variable_scope().reuse is False
				self.reuse = True

			end_points = {}
			
			size = 64
			shared = tcl.conv2d(x, num_outputs=size, kernel_size=4, # bzx64x64x3 -> bzx32x32x64
						weights_initializer=tf.random_normal_initializer(0, 0.02), 
						stride=2, activation_fn=lrelu)
			end_points['conv1'] = shared
			shared = tcl.conv2d(shared, num_outputs=size * 2, kernel_size=4, # 16x16x128
						weights_initializer=tf.random_normal_initializer(0, 0.02), 
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			end_points['conv2'] = shared
			shared = tcl.conv2d(shared, num_outputs=size * 4, kernel_size=4, # 8x8x256
						weights_initializer=tf.random_normal_initializer(0, 0.02), 
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			end_points['conv3'] = shared
			shared = tcl.conv2d(shared, num_outputs=size * 8, kernel_size=4, # 4x4x512
						weights_initializer=tf.random_normal_initializer(0, 0.02), 
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			end_points['conv4'] = shared
			shared = tcl.flatten(shared)
			d = tcl.fully_connected(shared, 1, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))
			return d 
Example #16
Source File: SE_ResNeXt.py    From SENet-Tensorflow with MIT License
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True):
        return tf.cond(training,
                       lambda: batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda: batch_norm(inputs=x, is_training=training, reuse=True)) 
Example #17
Source File: denseNet.py    From cnn_lstm_ctc_ocr_for_ICPR with GNU General Public License v3.0
def Batch_Normalization(x, training, scope):
    # The arg_scope below configures contrib's batch_norm, but the body
    # calls tf.layers.batch_normalization, so the scope settings have no
    # effect. The original also misspelled 'updates_collections' and
    # 'decay', and passed reuse=True in both branches; fixed here so the
    # first (training) call creates the variables that inference reuses.
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True):
        if training:
            return tf.layers.batch_normalization(x, axis=3, training=training, reuse=None)
        else:
            return tf.layers.batch_normalization(x, axis=3, training=training, reuse=True)


# def Batch_Normalization(x, training, scope):
#     with arg_scope([batch_norm],
#                    scope=scope,
#                    updates_collections=None,
#                    decay=0.9,
#                    center=True,
#                    scale=True,
#                    zero_debias_moving_mean=True) :
#         return tf.cond(training,
#                        lambda : batch_norm(inputs=x, is_training=training, reuse=None),
#                        lambda : batch_norm(inputs=x, is_training=training, reuse=True)) 
Example #18
Source File: utils.py    From TransferLearningClassification with MIT License
def batch_norm_from_layers(name, inputs, trainable, data_format, mode,
                           use_gamma=True, use_beta=True, bn_epsilon=1e-5, bn_ema=0.9):
    from tensorflow.contrib.layers import batch_norm as bn

    # Note: when using this, the moving-average updates must run with the
    # train op:
    #     update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    #     with tf.control_dependencies(update_ops):
    #         train_op = optimizer.minimize(loss)

    # In contrib's batch_norm, center=True adds the beta offset and
    # scale=True multiplies by gamma, so the flags map as below (the
    # original had use_gamma and use_beta swapped).
    return bn(inputs, decay=bn_ema, center=use_beta, scale=use_gamma, epsilon=bn_epsilon,
              is_training=(mode == 'train'), trainable=trainable, scope=name, data_format=data_format) 
Example #19
Source File: resnn.py    From web_page_classification with MIT License
def BN_ReLU(self, net):
        # Batch normalization followed by ReLU.
        # gamma (scale=False) is omitted: ReLU is positively homogeneous,
        # so the following linear layer can absorb any per-channel scale.
        net = batch_norm(net,
                         center=True,
                         scale=False,
                         activation_fn=tf.nn.relu)
        # net = tf.nn.relu(net)
        self._activation_summary(net)
        return net 
Example #20
Source File: SE_Inception_resnet_v2.py    From SENet-Tensorflow with MIT License
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True):
        return tf.cond(training,
                       lambda: batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda: batch_norm(inputs=x, is_training=training, reuse=True)) 
Example #21
Source File: Densenet_Cifar10.py    From Densenet-Tensorflow with MIT License
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True):
        return tf.cond(training,
                       lambda: batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda: batch_norm(inputs=x, is_training=training, reuse=True)) 
Example #22
Source File: Densenet_MNIST.py    From Densenet-Tensorflow with MIT License
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True):
        return tf.cond(training,
                       lambda: batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda: batch_norm(inputs=x, is_training=training, reuse=True)) 
Example #23
Source File: vae.py    From TensorFlow-VAE-GAN-DRAW with Apache License 2.0
def __init__(self, hidden_size, batch_size, learning_rate):
        self.input_tensor = tf.placeholder(
            tf.float32, [None, 28 * 28])

        with arg_scope([layers.conv2d, layers.conv2d_transpose],
                       activation_fn=tf.nn.elu,
                       normalizer_fn=layers.batch_norm,
                       normalizer_params={'scale': True}):
            with tf.variable_scope("model") as scope:
                encoded = encoder(self.input_tensor, hidden_size * 2)

                mean = encoded[:, :hidden_size]
                stddev = tf.sqrt(tf.exp(encoded[:, hidden_size:]))

                epsilon = tf.random_normal([tf.shape(mean)[0], hidden_size])
                input_sample = mean + epsilon * stddev

                output_tensor = decoder(input_sample)

            with tf.variable_scope("model", reuse=True) as scope:
                self.sampled_tensor = decoder(tf.random_normal(
                    [batch_size, hidden_size]))

        vae_loss = self.__get_vae_cost(mean, stddev)
        rec_loss = self.__get_reconstruction_cost(
            output_tensor, self.input_tensor)

        loss = vae_loss + rec_loss
        global_step = tf.contrib.framework.get_or_create_global_step()
        self.train = layers.optimize_loss(loss, global_step,
                                          learning_rate=learning_rate,
                                          optimizer='Adam', update_ops=[])

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer()) 
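__get_vae_cost is not shown in this snippet; for a diagonal-Gaussian posterior against a unit-Gaussian prior it plausibly computes the standard KL term sketched below (an assumption about the repository, not its actual code):

def kl_cost(mean, stddev, eps=1e-8):
    # KL(N(mean, stddev) || N(0, 1)), summed over batch and latent dims
    return tf.reduce_sum(0.5 * (tf.square(mean) + tf.square(stddev)
                                - 2.0 * tf.log(stddev + eps) - 1.0))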
Example #24
Source File: gan.py    From TensorFlow-VAE-GAN-DRAW with Apache License 2.0
def __init__(self, hidden_size, batch_size, learning_rate):
        self.input_tensor = tf.placeholder(tf.float32, [None, 28 * 28])

        with arg_scope([layers.conv2d, layers.conv2d_transpose],
                       activation_fn=concat_elu,
                       normalizer_fn=layers.batch_norm,
                       normalizer_params={'scale': True}):
            with tf.variable_scope("model"):
                D1 = discriminator(self.input_tensor)  # positive examples
                D_params_num = len(tf.trainable_variables())
                G = decoder(tf.random_normal([batch_size, hidden_size]))
                self.sampled_tensor = G

            with tf.variable_scope("model", reuse=True):
                D2 = discriminator(G)  # generated examples

        D_loss = self.__get_discrinator_loss(D1, D2)
        G_loss = self.__get_generator_loss(D2)

        params = tf.trainable_variables()
        D_params = params[:D_params_num]
        G_params = params[D_params_num:]
        #    train_discrimator = optimizer.minimize(loss=D_loss, var_list=D_params)
        # train_generator = optimizer.minimize(loss=G_loss, var_list=G_params)
        global_step = tf.contrib.framework.get_or_create_global_step()
        self.train_discrimator = layers.optimize_loss(
            D_loss, global_step, learning_rate / 10, 'Adam', variables=D_params, update_ops=[])
        self.train_generator = layers.optimize_loss(
            G_loss, global_step, learning_rate, 'Adam', variables=G_params, update_ops=[])

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer()) 
Example #25
Source File: text_rnn.py    From Multi-Label-Text-Classification with Apache License 2.0
def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__, reuse=self._reuse):
            c, h = state
            input_size = inputs.get_shape().as_list()[1]
            W_xh = tf.get_variable('W_xh',
                                   [input_size, 4 * self._num_units],
                                   initializer=orthogonal_initializer())
            W_hh = tf.get_variable('W_hh',
                                   [self._num_units, 4 * self._num_units],
                                   initializer=bn_lstm_identity_initializer(0.95))
            bias = tf.get_variable('bias', [4 * self._num_units])

            xh = tf.matmul(inputs, W_xh)
            hh = tf.matmul(h, W_hh)

            bn_xh = batch_norm(xh, self._is_training)
            bn_hh = batch_norm(hh, self._is_training)

            hidden = bn_xh + bn_hh + bias

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = array_ops.split(value=hidden, num_or_size_splits=4, axis=1)

            new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
            bn_new_c = batch_norm(new_c, 'c', self._is_training)
            new_h = self._activation(bn_new_c) * sigmoid(o)
            new_state = rnn.LSTMStateTuple(new_c, new_h)

            return new_h, new_state 
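Note the inconsistent call signatures above: batch_norm(xh, self._is_training) passes two arguments while batch_norm(new_c, 'c', self._is_training) passes three, so batch_norm here is a project-local wrapper rather than tensorflow.contrib.layers.batch_norm itself (the same cell recurs in Examples #27 and #29). A sketch of such a wrapper under the three-argument (inputs, scope, is_training) form used by the widely circulated BN-LSTM recipe; the hyperparameters are illustrative assumptions, and the two-argument calls would then be missing their scope names:

from tensorflow.contrib.layers import batch_norm as contrib_bn

def batch_norm(x, scope, is_training):
    # Hypothetical wrapper matching the three-argument call site above.
    return contrib_bn(x, decay=0.999, center=True, scale=False,
                      is_training=is_training, scope=scope,
                      updates_collections=None)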
Example #26
Source File: models_collection.py    From SketchySceneColorization with MIT License
def image_encoder_residual(x, num_residual_units, num_classes, reuse=False, data_format='NCHW', labels=None, scope_name=None):
    """
    :param x: [batch_size, 3, H, W]
    :return: list of per-stage encoder feature maps
    """
    assert data_format == 'NCHW'
    size = SIZE

    if normalizer_params_e is not None and normalizer_fn_e != ly.batch_norm and normalizer_fn_e != ly.layer_norm:
        normalizer_params_e['labels'] = labels
        normalizer_params_e['n_labels'] = num_classes

    output_list = []

    # encoder_1: [batch, 3, 192, 192] => [batch, 64, 96, 96]
    with tf.variable_scope("encoder_1"):
        output = nchw_conv_ex(x, size, stride=2, filter_size=7)
        output = batchnorm(output, data_format=data_format)
        output = lrelu(output, 0.2)
        output_list.append(output)

    layer_specs = [
        size * 2,  # encoder_2: [batch, 64, 96, 96] => [batch, 128, 48, 48]
        size * 4,  # encoder_3: [batch, 128, 48, 48] => [batch, 256, 24, 24]
        size * 8,  # encoder_4: [batch, 256, 24, 24] => [batch, 512, 12, 12]
        size * 8,  # encoder_5: [batch, 512, 12, 12] => [batch, 512, 6, 6]
    ]
    for encoder_layer, out_channels in enumerate(layer_specs):
        with tf.variable_scope("encoder_%d_0" % (len(output_list) + 1)):
            output = bottleneck_residual_en(output_list[-1], out_channels, stride=2)
        for uId in range(1, num_residual_units[encoder_layer]):
            with tf.variable_scope("encoder_%d_%d" % (len(output_list) + 1, uId)):
                output = bottleneck_residual_pu(output, out_channels, True)
        output_list.append(output)

    return output_list 
Example #27
Source File: text_sann.py    From Multi-Label-Text-Classification with Apache License 2.0
def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__, reuse=self._reuse):
            c, h = state
            input_size = inputs.get_shape().as_list()[1]
            W_xh = tf.get_variable('W_xh',
                                   [input_size, 4 * self._num_units],
                                   initializer=orthogonal_initializer())
            W_hh = tf.get_variable('W_hh',
                                   [self._num_units, 4 * self._num_units],
                                   initializer=bn_lstm_identity_initializer(0.95))
            bias = tf.get_variable('bias', [4 * self._num_units])

            xh = tf.matmul(inputs, W_xh)
            hh = tf.matmul(h, W_hh)

            bn_xh = batch_norm(xh, self._is_training)
            bn_hh = batch_norm(hh, self._is_training)

            hidden = bn_xh + bn_hh + bias

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = array_ops.split(value=hidden, num_or_size_splits=4, axis=1)

            new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
            bn_new_c = batch_norm(new_c, 'c', self._is_training)
            new_h = self._activation(bn_new_c) * sigmoid(o)
            new_state = rnn.LSTMStateTuple(new_c, new_h)

            return new_h, new_state 
Example #28
Source File: dg_mnist.py    From deligan with MIT License
def discriminator(image, Reuse=False):
    with tf.variable_scope('disc', reuse=Reuse):
        image = tf.reshape(image, [-1, 28, 28, 1])
        h0 = lrelu(conv(image, 5, 5, 1, df_dim, stridex=2, stridey=2, name='d_h0_conv'))
        h1 = lrelu(batch_norm(conv(h0, 5, 5, df_dim, df_dim * 2, stridex=2, stridey=2, name='d_h1_conv'),
                              decay=0.9, scale=True, updates_collections=None,
                              is_training=phase_train, reuse=Reuse, scope='d_bn1'))
        h2 = lrelu(batch_norm(conv(h1, 3, 3, df_dim * 2, df_dim * 4, stridex=2, stridey=2, name='d_h2_conv'),
                              decay=0.9, scale=True, updates_collections=None,
                              is_training=phase_train, reuse=Reuse, scope='d_bn2'))
        h3 = tf.nn.max_pool(h2, ksize=[1,4,4,1], strides=[1,1,1,1],padding='VALID')
        h6 = tf.reshape(h2,[-1, 4*4*df_dim*4])
        h7 = Minibatch_Discriminator(h3, num_kernels=df_dim*4, name = 'd_MD')
        h8 = dense(tf.reshape(h7, [batchsize, -1]), df_dim*4*2, 1, scope='d_h8_lin')
        return tf.nn.sigmoid(h8), h8 
Example #29
Source File: text_sann.py    From Text-Pairs-Relation-Classification with Apache License 2.0
def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__, reuse=self._reuse):
            c, h = state
            input_size = inputs.get_shape().as_list()[1]
            W_xh = tf.get_variable('W_xh',
                                   [input_size, 4 * self._num_units],
                                   initializer=orthogonal_initializer())
            W_hh = tf.get_variable('W_hh',
                                   [self._num_units, 4 * self._num_units],
                                   initializer=bn_lstm_identity_initializer(0.95))
            bias = tf.get_variable('bias', [4 * self._num_units])

            xh = tf.matmul(inputs, W_xh)
            hh = tf.matmul(h, W_hh)

            bn_xh = batch_norm(xh, self._is_training)
            bn_hh = batch_norm(hh, self._is_training)

            hidden = bn_xh + bn_hh + bias

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = array_ops.split(value=hidden, num_or_size_splits=4, axis=1)

            new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
            bn_new_c = batch_norm(new_c, 'c', self._is_training)
            new_h = self._activation(bn_new_c) * sigmoid(o)
            new_state = rnn.LSTMStateTuple(new_c, new_h)

            return new_h, new_state 
Example #30
Source File: layers.py    From BicycleGAN with MIT License
def bn_layer(x, is_training, scope):
	return layers.batch_norm(x, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=is_training, scope=scope)

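A hypothetical way to use bn_layer inside a conv block (layers here is tensorflow.contrib.layers, and is_training stands in for a training-mode tensor or bool):

x = layers.conv2d(x, num_outputs=64, kernel_size=4, stride=2, activation_fn=None)
x = bn_layer(x, is_training=is_training, scope='bn1')
x = tf.nn.relu(x)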