Python tensorflow.contrib.layers.python.layers.layers.batch_norm() Examples

The following are 30 code examples of tensorflow.contrib.layers.python.layers.layers.batch_norm(), taken from open source projects. The project, source file, and license for each example are listed above it. You may also want to check out all available functions and classes of the module tensorflow.contrib.layers.python.layers.layers.
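For orientation before the project snippets, here is a minimal sketch of calling batch_norm() directly on a tensor (assuming TensorFlow 1.x with tf.contrib available; the tensor shape, the name net, and the is_training placeholder are illustrative, not taken from the projects below):

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import layers

# Illustrative input tensor and training flag.
net = tf.placeholder(tf.float32, [None, 32, 32, 64], name='net')
is_training = tf.placeholder(tf.bool, name='is_training')

# Normalize activations; the moving-average update ops are added to
# tf.GraphKeys.UPDATE_OPS and must be run during training.
net = layers.batch_norm(
    net,
    decay=0.997,
    epsilon=1e-5,
    scale=True,
    is_training=is_training,
    updates_collections=tf.GraphKeys.UPDATE_OPS)
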
Example #1
Source File: tf_util.py    From LaneSegmentationNetwork with GNU Lesser General Public License v3.0
def scale_conv2d(inputs, filters: tf.Tensor, bias=None, stride=list([1, 1, 1, 1]), padding='SAME',
                 initial_step=1, number_of_step=5, step_multiplier=1.25,
                 to_batch_norm=False, batch_norm_decay=0.997, is_training=True, activation_fn=None):
    _step = initial_step
    output = bilinear_conv2d(inputs, filters, _step, bias, padding, stride)

    for i in range(1, number_of_step):
        _step *= step_multiplier
        output += bilinear_conv2d(inputs, filters, _step, bias, padding, stride)
    output /= number_of_step

    if to_batch_norm:
        output = batch_norm(output, is_training, batch_norm_decay)

    if activation_fn is not None:
        output = activation_fn(output)

    return output 
Example #2
Source File: nets.py    From Machine-Learning-with-TensorFlow-1.x with MIT License
def _fully_connected(input_data, num_output, name, relu=True):
    with tf.variable_scope(name) as scope:
        input_shape = input_data.get_shape()
        if input_shape.ndims == 5:
            dim = 1
            for d in input_shape[1:].as_list():
                dim *= d
            feed_in = tf.reshape(input_data, [-1, dim])
        else:
            feed_in, dim = (input_data, input_shape[-1].value)
        weights = tf.get_variable(name="weights", shape=[dim, num_output],
                                  regularizer=tf.contrib.layers.l2_regularizer(scale=0.0001),
                                  initializer=tf.truncated_normal_initializer(stddev=1e-1, dtype=tf.float32))
                                  #initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        biases = tf.get_variable(name="biases", shape=[num_output], dtype=tf.float32,
                                 initializer=tf.constant_initializer(value=0.0))
        op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
        output = op(feed_in, weights, biases, name=scope.name)
        return batch_norm(output) 
Example #3
Source File: util.py    From predictron with MIT License
def predictron_arg_scope(weight_decay=0.0001,
                         batch_norm_decay=0.997,
                         batch_norm_epsilon=1e-5,
                         batch_norm_scale=True):
  batch_norm_params = {
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=None,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc 
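The scope object returned by an arg-scope helper like predictron_arg_scope() is typically re-entered when the network is built, so that every conv2d picks up batch_norm as its normalizer_fn. A short usage sketch under that assumption (the input shape and layer sizes are illustrative; predictron_arg_scope is the function defined above):

import tensorflow as tf
from tensorflow.contrib.framework import arg_scope
from tensorflow.contrib.layers.python.layers import layers

inputs = tf.placeholder(tf.float32, [None, 20, 20, 3])

# Re-enter the stored scope: conv2d now defaults to no activation,
# batch_norm as normalizer_fn, and the configured L2 regularizer.
with arg_scope(predictron_arg_scope()):
    net = layers.conv2d(inputs, 32, [3, 3], scope='conv1')
    net = layers.conv2d(net, 32, [3, 3], scope='conv2')
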
Example #4
Source File: tf_util.py    From LaneSegmentationNetwork with GNU Lesser General Public License v3.0
def multi_conv2d(inputs, filters: tf.Tensor, bias=None, stride=list([1, 1, 1, 1]),
                 padding='SAME', basis_rate=list([1, 3, 5]), to_batch_norm=False, batch_norm_decay=0.997,
                 is_training=True, activation_fn=None):
    _number_of_basis = len(basis_rate)
    if _number_of_basis < 2:
        raise ValueError('Number of basis_rate must be greater than or equal to 2')

    output = conv2d(inputs, filters, bias, stride, padding)
    for idx, r in enumerate(basis_rate):
        output += atrous_conv2d(inputs, filters, r, bias, padding, stride)
    output /= _number_of_basis

    if to_batch_norm:
        output = batch_norm(output, is_training, batch_norm_decay)

    if activation_fn is not None:
        output = activation_fn(output)

    return output 
Example #5
Source File: extract_pool5.py    From zero-shot-gcn with MIT License
def inception_arg_scope(is_training=True,
                        batch_norm_decay=0.997,
                        batch_norm_epsilon=1e-5,
                        batch_norm_scale=True):
    batch_norm_params = {
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'trainable': False,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with arg_scope(
            [slim.conv2d],
            weights_initializer=slim.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc 
Example #6
Source File: extract_pool5.py    From zero-shot-gcn with MIT License
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': False,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with arg_scope(
            [slim.conv2d],
            weights_initializer=slim.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc 
Example #7
Source File: tf_util.py    From LaneSegmentationNetwork with GNU Lesser General Public License v3.0
def conv2d(
        inputs, filters, bias=None,
        strides=list([1, 1, 1, 1]), padding='SAME', dilations=list([1, 1, 1, 1]),
        to_batch_norm=False, batch_norm_decay=0.997, is_training=True, activation_fn=None, name=None
):
    output = tf.nn.conv2d(
        input=inputs,
        filter=filters,
        strides=strides,
        padding=padding,
        dilations=dilations,
        name=name
    )

    if bias is not None:
        output = tf.nn.bias_add(output, bias)
    if to_batch_norm:
        output = batch_norm(output, is_training, batch_norm_decay)
    if activation_fn is not None:
        output = activation_fn(output)
    return output 
Example #8
Source File: inception_v2.py    From keras-lambda with MIT License
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v2 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc 
Example #9
Source File: slim_resnet_utils.py    From X-Detector with Apache License 2.0
def resnet_v1_backbone(inputs,
              blocks,
              is_training=True,
              output_stride=None,
              include_root_block=True,
              reuse=None,
              scope=None):
  with variable_scope.variable_scope(
      scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with arg_scope(
        [layers.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
        outputs_collections=end_points_collection):
      with arg_scope([layers.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            output_stride /= 4
          net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # Convert end_points_collection into a dictionary of end_points.
        end_points = utils.convert_collection_to_dict(end_points_collection)

        return net, end_points 
Example #10
Source File: learning_test.py    From keras-lambda with MIT License
def BatchNormClassifier(inputs):
  inputs = layers.batch_norm(inputs, decay=0.1)
  return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid) 
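Because batch_norm here leaves updates_collections at its default (tf.GraphKeys.UPDATE_OPS), the moving averages are only maintained if the training op depends on those update ops; slim's learning.create_train_op adds that dependency automatically. A manual sketch of the same idea (the loss and optimizer are illustrative, assuming TensorFlow 1.x):

import tensorflow as tf

# Illustrative loss and optimizer.
loss = tf.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

# Run the batch-norm moving-average updates before each training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(loss)
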
Example #11
Source File: trainer_test.py    From monopsr with MIT License
def BatchNormClassifier(self, inputs):
        inputs = layers.batch_norm(inputs, decay=0.1, fused=None)
        return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid) 
Example #12
Source File: inception_v2.py    From lambda-packs with MIT License
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v2 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc 
Example #13
Source File: resnet_v1_noise.py    From RGB-N with MIT License
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    # NOTE 'is_training' here does not work because inside resnet it gets reset:
    # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': cfg.RESNET.BN_TRAIN,
    'updates_collections': ops.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc 
Example #14
Source File: learning_test.py    From auto-alt-text-lambda-api with MIT License
def BatchNormClassifier(inputs):
  inputs = layers.batch_norm(inputs, decay=0.1)
  return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid) 
Example #15
Source File: resnet_fusion_noise.py    From RGB-N with MIT License
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    # NOTE 'is_training' here does not work because inside resnet it gets reset:
    # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': cfg.RESNET.BN_TRAIN,
    'updates_collections': ops.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc 
Example #16
Source File: extract_pool5.py    From zero-shot-gcn with MIT License
def inception():
    image = tf.placeholder(tf.float32, [None, 224, 224, 3], 'image')
    with slim.arg_scope(inception_arg_scope(is_training=False)):
        with variable_scope.variable_scope(
                'InceptionV1', 'InceptionV1', [image, 1000], reuse=None) as scope:
            with arg_scope(
                    [layers_lib.batch_norm, layers_lib.dropout], is_training=False):
                net, end_points = inception_v1_base(image, scope=scope)
                with variable_scope.variable_scope('Logits'):
                    net_conv = layers_lib.avg_pool2d(
                        net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
    print(net_conv.shape)

    return net_conv, image 
Example #17
Source File: model.py    From tf-hrnet with BSD 3-Clause "New" or "Revised" License
def forward_train(self, train_input):

        batch_norm_params = {'epsilon': 1e-5,
                             'scale': True,
                             'is_training': True,
                             'updates_collections': ops.GraphKeys.UPDATE_OPS}

        with slim.arg_scope([layers.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d],
                                weights_initializer=he_normal_fanout(),
                                weights_regularizer=slim.l2_regularizer(self.cfg['NET']['weight_l2_scale'])):
                final_logit = self._forward(train_input)

        return final_logit 
Example #18
Source File: model.py    From tf-hrnet with BSD 3-Clause "New" or "Revised" License
def forward_eval(self, eval_input):

        batch_norm_params = {'epsilon': 1e-5,
                             'scale': True,
                             'is_training': False,
                             'updates_collections': ops.GraphKeys.UPDATE_OPS}

        with slim.arg_scope([layers.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d],
                                weights_regularizer=slim.l2_regularizer(self.cfg['NET']['weight_l2_scale'])):
                final_logit = self._forward(eval_input)

        return final_logit 
Example #19
Source File: inception_v2.py    From auto-alt-text-lambda-api with MIT License
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v2 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc 
Example #20
Source File: resnet_fusion.py    From RGB-N with MIT License
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    # NOTE 'is_training' here does not work because inside resnet it gets reset:
    # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': cfg.RESNET.BN_TRAIN,
    'updates_collections': ops.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc 
Example #21
Source File: tf_util.py    From LaneSegmentationNetwork with GNU Lesser General Public License v3.0
def depthwise_conv2d(
        inputs, filters, bias=None,
        strides=list([1, 1, 1, 1]), padding='SAME', dilations=list([1, 1, 1, 1]),
        to_batch_norm=False, batch_norm_decay=0.997, is_training=True, activation_fn=None, name=None
):
    if isinstance(strides, int):
        strides = list([1, strides, strides, 1])
    if isinstance(dilations, int):
        dilations = list([1, dilations, dilations, 1])

    output = tf.nn.depthwise_conv2d(
        input=inputs,
        filter=filters,
        strides=strides,
        padding=padding,
        rate=dilations,
        name=name
    )

    if bias is not None:
        output = tf.nn.bias_add(output, bias)
    if to_batch_norm:
        output = batch_norm(output, is_training, batch_norm_decay)
    if activation_fn is not None:
        output = activation_fn(output)
    return output 
Example #22
Source File: trainer_test.py    From avod with MIT License
def BatchNormClassifier(self, inputs):
        inputs = layers.batch_norm(inputs, decay=0.1, fused=None)
        return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid) 
Example #23
Source File: inception_v2.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v2 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc 
Example #24
Source File: tf_util.py    From LaneSegmentationNetwork with GNU Lesser General Public License v3.0
def batch_norm(x, is_training, decay=0.997, epsilon=1e-5, scale=True, center=True):
    normed = layers.batch_norm(
        x,
        decay=decay,
        epsilon=epsilon,
        is_training=is_training,
        center=center,
        scale=scale
    )
    return normed 
Example #25
Source File: resnet_v1.py    From Faster-RCNN-TensorFlow-Python3 with MIT License
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    # NOTE 'is_training' here does not work because inside resnet it gets reset:
    # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': cfg.RESNET.BN_TRAIN,
    'updates_collections': ops.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc 
Example #26
Source File: resnet_v1.py    From GeetChinese_crack with MIT License
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    # NOTE 'is_training' here does not work because inside resnet it gets reset:
    # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': cfg.RESNET.BN_TRAIN,
    'updates_collections': ops.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc 
Example #27
Source File: nets.py    From Machine-Learning-with-TensorFlow-1.x with MIT License
def _conv3d(input_data, k_d, k_h, k_w, c_o, s_d, s_h, s_w, name, relu=True, padding="SAME"):
    c_i = input_data.get_shape()[-1].value
    convolve = lambda i, k: tf.nn.conv3d(i, k, [1, s_d, s_h, s_w, 1], padding=padding)
    with tf.variable_scope(name) as scope:
        weights = tf.get_variable(name="weights", shape=[k_d, k_h, k_w, c_i, c_o],
                                  regularizer=tf.contrib.layers.l2_regularizer(scale=0.0001),
                                  initializer=tf.truncated_normal_initializer(stddev=1e-1, dtype=tf.float32))
                                  #initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        conv = convolve(input_data, weights)
        biases = tf.get_variable(name="biases", shape=[c_o], dtype=tf.float32,
                                 initializer=tf.constant_initializer(value=0.0))
        output = tf.nn.bias_add(conv, biases)
        if relu:
            output = tf.nn.relu(output, name=scope.name)
        return batch_norm(output) 
Example #28
Source File: trainer_test.py    From avod-ssd with MIT License
def BatchNormClassifier(self, inputs):
        inputs = layers.batch_norm(inputs, decay=0.1, fused=None)
        return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid) 
Example #29
Source File: resnet_v1.py    From RGB-N with MIT License
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    # NOTE 'is_training' here does not work because inside resnet it gets reset:
    # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': cfg.RESNET.BN_TRAIN,
    'updates_collections': ops.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc 
Example #30
Source File: resnet_utils.py    From video-to-pose3D with MIT License
def resnet_arg_scope(is_training=True,
                     weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    is_training: Whether or not we are training the parameters in the batch
      normalization layers of the model.
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'is_training': is_training,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
  }

  with arg_scope(
      [layers_lib.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
      with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc