Python tensorflow.contrib.slim.separable_conv2d() Examples

The following are 30 code examples of tensorflow.contrib.slim.separable_conv2d(), drawn from open-source projects; the source file and originating project are noted above each example. You may also want to check out all available functions and classes of the tensorflow.contrib.slim module.
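Before the examples, here is a minimal sketch of the two common calling patterns (TF 1.x style; the input shape and scope names are assumptions for illustration):

import tensorflow as tf
slim = tf.contrib.slim

# Assumed input: a batch of 32x32 feature maps with 16 channels.
inputs = tf.placeholder(tf.float32, [None, 32, 32, 16])

# Full depthwise-separable conv: a 3x3 depthwise convolution followed by a
# 1x1 pointwise convolution that projects to 64 output channels.
net = slim.separable_conv2d(inputs, num_outputs=64, kernel_size=[3, 3],
                            depth_multiplier=1, stride=1, scope='sep')

# Depthwise-only variant: num_outputs=None skips the pointwise convolution,
# so the output keeps 16 * depth_multiplier channels. Several examples below
# rely on this to build standalone depthwise layers.
dw = slim.separable_conv2d(inputs, num_outputs=None, kernel_size=[3, 3],
                           depth_multiplier=1, stride=1, scope='dw')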
Example #1
Source File: base_memory.py    From iter-reason with MIT License
def _context(self, net, is_training, name, iter):
    num_layers = cfg.MEM.CT_L
    xavier = tf.contrib.layers.variance_scaling_initializer()

    assert num_layers % 2 == 1
    conv = cfg.MEM.CT_CONV
    with tf.variable_scope(name):
      with slim.arg_scope([slim.conv2d, slim.separable_conv2d], 
                          activation_fn=None, 
                          trainable=is_training,
                          weights_initializer=xavier,
                          biases_initializer=tf.constant_initializer(0.0)):
        net = self._context_conv(net, cfg.MEM.CT_FCONV, "conv1")
        for i in range(2, num_layers+1, 2):
          net1 = tf.nn.relu(net, name="relu%02d" % (i-1))
          self._act_summaries.append(net1)
          self._score_summaries[iter].append(net1)
          net1 = self._context_conv(net1, conv, "conv%02d" % i)
          net2 = tf.nn.relu(net1, name="relu%02d" % i)
          self._act_summaries.append(net2)
          self._score_summaries[iter].append(net2)
          net2 = self._context_conv(net2, conv, "conv%02d" % (i+1))
          net = tf.add(net, net2, "residual%02d" % i)

    return net 
Example #2
Source File: mobilenet_v2.py    From R2CNN_Faster-RCNN_Tensorflow with MIT License
def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      stddev=0.09,
                      dropout_keep_prob=0.8,
                      bn_decay=0.997):
  """Defines Mobilenet training scope.
  In default. We do not use BN

  ReWrite the scope.
  """
  batch_norm_params = {
      'is_training': False,
      'trainable': False,
      'decay': bn_decay,
  }
  with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
      with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                          trainable=trainable):
          with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
              return sc 
Example #3
Source File: mobilenet_v2.py    From RetinaNet_Tensorflow_Rotation with MIT License
def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      stddev=0.09,
                      dropout_keep_prob=0.8,
                      bn_decay=0.997):
  """Defines Mobilenet training scope.
  In default. We do not use BN

  ReWrite the scope.
  """
  batch_norm_params = {
      'is_training': False,
      'trainable': False,
      'decay': bn_decay,
  }
  with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
      with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                          trainable=trainable):
          with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
              return sc 
Example #4
Source File: mobilenet_v2_r3det_plusplus.py    From R3Det_Tensorflow with MIT License
def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      stddev=0.09,
                      dropout_keep_prob=0.8,
                      bn_decay=0.997):
  """Defines Mobilenet training scope.
  In default. We do not use BN
  ReWrite the scope.
  """
  batch_norm_params = {
      'is_training': False,
      'trainable': False,
      'decay': bn_decay,
  }
  with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
      with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                          trainable=trainable):
          with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
              return sc 
Example #5
Source File: mobilenet_v2.py    From R3Det_Tensorflow with MIT License
def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      stddev=0.09,
                      dropout_keep_prob=0.8,
                      bn_decay=0.997):
  """Defines Mobilenet training scope.
  In default. We do not use BN
  ReWrite the scope.
  """
  batch_norm_params = {
      'is_training': False,
      'trainable': False,
      'decay': bn_decay,
  }
  with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
      with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                          trainable=trainable):
          with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
              return sc 
Example #6
Source File: mobilenetv2.py    From mobilenetv2 with MIT License
def mobilenet_v2_arg_scope(weight_decay, is_training=True, depth_multiplier=1.0, regularize_depthwise=False,
                           dropout_keep_prob=1.0):

    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    if regularize_depthwise:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None

    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm,
                        normalizer_params={'is_training': is_training, 'center': True, 'scale': True}):
        with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
            with slim.arg_scope([slim.separable_conv2d],
                                weights_regularizer=depthwise_regularizer, depth_multiplier=depth_multiplier):
                with slim.arg_scope([slim.dropout], is_training=is_training, keep_prob=dropout_keep_prob) as sc:
                    return sc
Example #7
Source File: mobilenet_v2.py    From R2CNN-Plus-Plus_Tensorflow with MIT License
def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      stddev=0.09,
                      dropout_keep_prob=0.8,
                      bn_decay=0.997):
  """Defines Mobilenet training scope.
  In default. We do not use BN

  ReWrite the scope.
  """
  batch_norm_params = {
      'is_training': False,
      'trainable': False,
      'decay': bn_decay,
  }
  with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
      with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                          trainable=trainable):
          with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
              return sc 
Example #8
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_dilated_depthwise_conv(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_separable_conv2d/input')
      with slim.arg_scope([slim.separable_conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
        net = slim.separable_conv2d(inputs,
            num_outputs=None,
            stride=1,
            depth_multiplier=1,
            kernel_size=[3, 3],
            rate=2,
            scope='conv1')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_separable_conv2d/input:0":[1,16,16,3]},
        output_name, delta=1e-2) 
Example #9
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_separable_conv(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_separable_conv2d/input')
      with slim.arg_scope([slim.separable_conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
        net = slim.separable_conv2d(inputs, 2, [5, 5], 2, scope='conv1')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_separable_conv2d/input:0":[1,16,16,3]},
        output_name, delta=1e-2) 
Example #10
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def _get_num_outputs_kwarg_name(self, function):
    """Gets the `num_outputs`-equivalent kwarg for a supported function."""
    alt_num_outputs_kwarg = {
        tf_layers.conv2d: 'filters',
        tf_layers.separable_conv2d: 'filters',
        tf_layers.dense: 'units',
    }
    return alt_num_outputs_kwarg.get(function, _DEFAULT_NUM_OUTPUTS_KWARG) 
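For context, the table above is needed because tf.layers and slim name the output-width argument differently. A hedged illustration (the `decorator` instance is hypothetical, and 'num_outputs' as the fallback assumes slim's convention for _DEFAULT_NUM_OUTPUTS_KWARG):

# Hypothetical usage of the helper above on a ConfigurableOps-style object.
assert decorator._get_num_outputs_kwarg_name(tf_layers.conv2d) == 'filters'
assert decorator._get_num_outputs_kwarg_name(tf_layers.dense) == 'units'
# Any function not in the table falls back to the default kwarg name:
assert decorator._get_num_outputs_kwarg_name(slim.conv2d) == 'num_outputs'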
Example #11
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_deconv(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_deconv2d/input')
      with slim.arg_scope([slim.separable_conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
        net = slim.conv2d_transpose(inputs, 2, [3, 3], scope='conv1')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_decconv2d/input:0":[1,16,16,3]},
        output_name, delta=1e-2)

  # TODO - this fails due to unsupported op "Tile" 
Example #12
Source File: mobilenet_v1.py    From tf-faster-rcnn with MIT License
def separable_conv2d_same(inputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D separable convolution with 'SAME' padding.
  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    kernel_size: An int with the kernel_size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.
  Returns:
    output: A 4-D tensor of size [batch, height_out, width_out, channels] with
      the convolution output.
  """

  # By passing num_outputs=None,
  # separable_conv2d produces only a depthwise convolution layer.
  if stride == 1:
    return slim.separable_conv2d(inputs, None, kernel_size, 
                                  depth_multiplier=1, stride=1, rate=rate,
                                  padding='SAME', scope=scope)
  else:
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = tf.pad(inputs,
                    [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return slim.separable_conv2d(inputs, None, kernel_size, 
                                  depth_multiplier=1, stride=stride, rate=rate, 
                                  padding='VALID', scope=scope)

# The following is adapted from:
# https://github.com/tensorflow/models/blob/master/slim/nets/mobilenet_v1.py

# The Conv and DepthSepConv namedtuples define the layers of the MobileNet architecture:
# Conv defines a 3x3 convolution layer.
# DepthSepConv defines a 3x3 depthwise convolution followed by a 1x1 convolution.
# stride is the stride of the convolution.
# depth is the number of channels (filters) in a layer.
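The namedtuples referenced above look like the following sketch, adapted from the upstream mobilenet_v1.py linked in the comment (only the first few layer definitions are shown; treat the exact depths as illustrative):

from collections import namedtuple

Conv = namedtuple('Conv', ['kernel', 'stride', 'depth'])
DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])

# The first layers of MobileNet v1: a strided 3x3 conv stem, followed by
# depthwise-separable blocks that alternate stride 1 and stride 2.
_CONV_DEFS = [
    Conv(kernel=[3, 3], stride=2, depth=32),
    DepthSepConv(kernel=[3, 3], stride=1, depth=64),
    DepthSepConv(kernel=[3, 3], stride=2, depth=128),
]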
Example #13
Source File: mobilenet_v1.py    From tf-faster-rcnn with MIT License
def mobilenet_v1_arg_scope(is_training=True,
                           stddev=0.09):
  batch_norm_params = {
      'is_training': False,
      'center': True,
      'scale': True,
      'decay': 0.9997,
      'epsilon': 0.001,
      'trainable': False,
  }

  # Set weight_decay for weights in Conv and DepthSepConv layers.
  weights_init = tf.truncated_normal_initializer(stddev=stddev)
  regularizer = tf.contrib.layers.l2_regularizer(cfg.MOBILENET.WEIGHT_DECAY)
  if cfg.MOBILENET.REGU_DEPTH:
    depthwise_regularizer = regularizer
  else:
    depthwise_regularizer = None

  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      trainable=is_training,
                      weights_initializer=weights_init,
                      activation_fn=tf.nn.relu6, 
                      normalizer_fn=slim.batch_norm,
                      padding='SAME'):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
        with slim.arg_scope([slim.separable_conv2d],
                            weights_regularizer=depthwise_regularizer) as sc:
          return sc 
Example #14
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def separable_conv2d(self, *args, **kwargs):
    """Masks NUM_OUTPUTS from the function pointed to by 'separable_conv2d'.

    The object's parameterization has precedence over the given NUM_OUTPUTS
    argument. The resolution of the op names uses
    tf.contrib.framework.get_name_scope() and kwargs['scope'].

    Args:
      *args: Positional arguments for the operation.
      **kwargs: Keyword arguments for the operation.

    Returns:
      The result of applying function_map['separable_conv2d'] to the given
      'inputs', '*args', and '**kwargs', while possibly overriding NUM_OUTPUTS
      according to the parameterization.

    Raises:
      ValueError: If kwargs does not contain a key named 'scope'.
    """
    # This function actually only decorates the num_outputs of the pointwise
    # Conv2D that follows the depthwise convolution, since the depthwise stage
    # has no free num_outputs parameter.
    fn, suffix = self._get_function_and_suffix('separable_conv2d')
    num_outputs_kwarg_name = self._get_num_outputs_kwarg_name(fn)
    num_outputs = _get_from_args_or_kwargs(
        num_outputs_kwarg_name, 1, args, kwargs, False)
    if num_outputs is None:
      tf.logging.warning(
          'Trying to decorate separable_conv2d with num_outputs = None')
      kwargs[num_outputs_kwarg_name] = None

    return self._mask(fn, suffix, *args, **kwargs) 
Example #15
Source File: mobilenetv2.py    From mobilenetv2 with MIT License
def block(net, input_filters, output_filters, expansion, stride):
    res_block = net
    res_block = slim.conv2d(inputs=res_block, num_outputs=input_filters * expansion, kernel_size=[1, 1])
    res_block = slim.separable_conv2d(inputs=res_block, num_outputs=None, kernel_size=[3, 3], stride=stride)
    res_block = slim.conv2d(inputs=res_block, num_outputs=output_filters, kernel_size=[1, 1], activation_fn=None)
    if stride == 2:
        return res_block
    else:
        if input_filters != output_filters:
            net = slim.conv2d(inputs=net, num_outputs=output_filters, kernel_size=[1, 1], activation_fn=None)
        return tf.add(res_block, net) 
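A usage sketch for block; the expansion factors and filter counts follow the first MobileNetV2 bottleneck stages, and `images` is an assumed input tensor:

# Assumed to run inside an arg_scope such as mobilenet_v2_arg_scope above.
net = slim.conv2d(images, 32, [3, 3], stride=2)  # stem
net = block(net, input_filters=32, output_filters=16, expansion=1, stride=1)
net = block(net, input_filters=16, output_filters=24, expansion=6, stride=2)
net = block(net, input_filters=24, output_filters=24, expansion=6, stride=1)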
Example #16
Source File: layers.py    From mayo with MIT License
def instantiate_depthwise_convolution(self, node, tensor, params):
        multiplier = params.pop('depth_multiplier', 1)
        return slim.separable_conv2d(
            tensor, num_outputs=None, depth_multiplier=multiplier, **params) 
Example #17
Source File: mobilenet_v1.py    From iter-reason with MIT License
def separable_conv2d_same(inputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D separable convolution with 'SAME' padding.
  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    kernel_size: An int with the kernel_size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.
  Returns:
    output: A 4-D tensor of size [batch, height_out, width_out, channels] with
      the convolution output.
  """

  # By passing num_outputs=None,
  # separable_conv2d produces only a depthwise convolution layer.
  if stride == 1:
    return slim.separable_conv2d(inputs, None, kernel_size, 
                                  depth_multiplier=1, stride=1, rate=rate,
                                  padding='SAME', scope=scope)
  else:
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = tf.pad(inputs,
                    [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return slim.separable_conv2d(inputs, None, kernel_size, 
                                  depth_multiplier=1, stride=stride, rate=rate, 
                                  padding='VALID', scope=scope)

# The following is adapted from:
# https://github.com/tensorflow/models/blob/master/slim/nets/mobilenet_v1.py

# The Conv and DepthSepConv namedtuples define the layers of the MobileNet architecture:
# Conv defines a 3x3 convolution layer.
# DepthSepConv defines a 3x3 depthwise convolution followed by a 1x1 convolution.
# stride is the stride of the convolution.
# depth is the number of channels (filters) in a layer.
Example #18
Source File: mobilenet_v1.py    From iter-reason with MIT License
def mobilenet_v1_arg_scope(is_training=True,
                           stddev=0.09):
  batch_norm_params = {
      'is_training': False,
      'center': True,
      'scale': True,
      'decay': 0.9997,
      'epsilon': 0.001,
      'trainable': False,
  }

  # Set weight_decay for weights in Conv and DepthSepConv layers.
  weights_init = tf.truncated_normal_initializer(stddev=stddev)
  regularizer = tf.contrib.layers.l2_regularizer(cfg.MOBILENET.WEIGHT_DECAY)
  if cfg.MOBILENET.REGU_DEPTH:
    depthwise_regularizer = regularizer
  else:
    depthwise_regularizer = None

  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      trainable=is_training,
                      weights_initializer=weights_init,
                      activation_fn=tf.nn.relu6, 
                      normalizer_fn=slim.batch_norm,
                      padding='SAME'):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
        with slim.arg_scope([slim.separable_conv2d],
                            weights_regularizer=depthwise_regularizer) as sc:
          return sc 
Example #19
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_separable_conv2d(self):
    # conv layer with "fused activation"
    graph = tf.Graph()
    with graph.as_default() as g:
      x_image = tf.placeholder(tf.float32, shape=[None,8,8,3],
          name="test_separable_conv2d/input")
      conv1 = tf.layers.separable_conv2d(inputs=x_image, filters=4,
          kernel_size=[3,3], padding='valid', depth_multiplier=2)

    output_name = [conv1.op.name]
    self._test_tf_model(graph,
        {"test_separable_conv2d/input:0":[1,8,8,3]}, output_name, delta=1e-2) 
Example #20
Source File: mobilenet_v1.py    From TwinGAN with Apache License 2.0
def mobilenet_v1_arg_scope(is_training=True,
                           weight_decay=0.00004,
                           stddev=0.09,
                           regularize_depthwise=False):
  """Defines the default MobilenetV1 arg scope.

  Args:
    is_training: Whether or not we're training the model.
    weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
    regularize_depthwise: Whether or not apply regularization on depthwise.

  Returns:
    An `arg_scope` to use for the mobilenet v1 model.
  """
  batch_norm_params = {
      'is_training': is_training,
      'center': True,
      'scale': True,
      'decay': 0.9997,
      'epsilon': 0.001,
  }

  # Set weight_decay for weights in Conv and DepthSepConv layers.
  weights_init = tf.truncated_normal_initializer(stddev=stddev)
  regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
  if regularize_depthwise:
    depthwise_regularizer = regularizer
  else:
    depthwise_regularizer = None
  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      weights_initializer=weights_init,
                      activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
        with slim.arg_scope([slim.separable_conv2d],
                            weights_regularizer=depthwise_regularizer) as sc:
          return sc 
Example #21
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def constructed_ops(self):
    """Returns a dictionary between op names built to their NUM_OUTPUTS.

       The dictionary will contain an op.name: NUM_OUTPUTS pair for each op
       constructed by the decorator. The dictionary is ordered according to the
       order items were added.
       The parameterization is accumulated during all the calls to the object's
       members, such as `conv2d`, `fully_connected` and `separable_conv2d`.
       The values used are either the values from the parameterization set for
       the object, or the values that where passed to the members.
    """
    return self._constructed_ops 
Example #22
Source File: mobilenet_v1.py    From densecap-tensorflow with MIT License
def mobilenet_v1_arg_scope(is_training=True,
                           stddev=0.09):
    batch_norm_params = {
        'is_training': False,
        'center': True,
        'scale': True,
        'decay': 0.9997,
        'epsilon': 0.001,
        'trainable': False,
    }

    # Set weight_decay for weights in Conv and DepthSepConv layers.
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = tf.contrib.layers.l2_regularizer(cfg.MOBILENET.WEIGHT_DECAY)
    if cfg.MOBILENET.REGU_DEPTH:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None

    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        trainable=is_training,
                        weights_initializer=weights_init,
                        activation_fn=tf.nn.relu6,
                        normalizer_fn=slim.batch_norm,
                        padding='SAME'):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
                with slim.arg_scope([slim.separable_conv2d],
                                    weights_regularizer=depthwise_regularizer) as sc:
                    return sc 
Example #23
Source File: mobilenet_v1.py    From densecap-tensorflow with MIT License
def separable_conv2d_same(inputs, kernel_size, stride, rate=1, scope=None):
    """Strided 2-D separable convolution with 'SAME' padding.
  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    kernel_size: An int with the kernel_size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.
  Returns:
    output: A 4-D tensor of size [batch, height_out, width_out, channels] with
      the convolution output.
  """

    # By passing num_outputs=None,
    # separable_conv2d produces only a depthwise convolution layer.
    if stride == 1:
        return slim.separable_conv2d(inputs, None, kernel_size,
                                     depth_multiplier=1, stride=1, rate=rate,
                                     padding='SAME', scope=scope)
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        inputs = tf.pad(inputs,
                        [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
        return slim.separable_conv2d(inputs, None, kernel_size,
                                     depth_multiplier=1, stride=stride, rate=rate,
                                     padding='VALID', scope=scope)


# The following is adapted from:
# https://github.com/tensorflow/models/blob/master/slim/nets/mobilenet_v1.py

# The Conv and DepthSepConv namedtuples define the layers of the MobileNet architecture:
# Conv defines a 3x3 convolution layer.
# DepthSepConv defines a 3x3 depthwise convolution followed by a 1x1 convolution.
# stride is the stride of the convolution.
# depth is the number of channels (filters) in a layer.
Example #24
Source File: mobilenet_v1.py    From SSH-TensorFlow with MIT License
def mobilenet_v1_arg_scope(is_training=True,
                           stddev=0.09):
    batch_norm_params = {
        'is_training': False,
        'center': True,
        'scale': True,
        'decay': 0.9997,
        'epsilon': 0.001,
        'trainable': False,
    }

    # Set weight_decay for weights in Conv and DepthSepConv layers.
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = tf.contrib.layers.l2_regularizer(cfg.MOBILENET.WEIGHT_DECAY)
    if cfg.MOBILENET.REGU_DEPTH:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None

    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        trainable=is_training,
                        weights_initializer=weights_init,
                        activation_fn=tf.nn.relu6,
                        normalizer_fn=slim.batch_norm,
                        padding='SAME'):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
                with slim.arg_scope([slim.separable_conv2d],
                                    weights_regularizer=depthwise_regularizer) as sc:
                    return sc 
Example #25
Source File: mobilenet_v1.py    From SSH-TensorFlow with MIT License
def separable_conv2d_same(inputs, kernel_size, stride, rate=1, scope=None):
    """Strided 2-D separable convolution with 'SAME' padding.
    Args:
      inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
      kernel_size: An int with the kernel_size of the filters.
      stride: An integer, the output stride.
      rate: An integer, rate for atrous convolution.
      scope: Scope.
    Returns:
      output: A 4-D tensor of size [batch, height_out, width_out, channels] with
        the convolution output.
    """

    # By passing num_outputs=None,
    # separable_conv2d produces only a depthwise convolution layer.
    if stride == 1:
        return slim.separable_conv2d(inputs, None, kernel_size,
                                     depth_multiplier=1, stride=1, rate=rate,
                                     padding='SAME', scope=scope)
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        inputs = tf.pad(inputs,
                        [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
        return slim.separable_conv2d(inputs, None, kernel_size,
                                     depth_multiplier=1, stride=stride, rate=rate,
                                     padding='VALID', scope=scope)


# The following is adapted from:
# https://github.com/tensorflow/models/blob/master/slim/nets/mobilenet_v1.py

# The Conv and DepthSepConv namedtuples define the layers of the MobileNet architecture:
# Conv defines a 3x3 convolution layer.
# DepthSepConv defines a 3x3 depthwise convolution followed by a 1x1 convolution.
# stride is the stride of the convolution.
# depth is the number of channels (filters) in a layer.
Example #26
Source File: nasnet_utils.py    From benchmarks with Apache License 2.0
def _stacked_separable_conv(net, stride, operation, filter_size):
  """Takes in an operations and parses it to the correct sep operation."""
  num_layers, kernel_size = _operation_to_info(operation)
  net_type = net.dtype
  net = tf.cast(net, tf.float32) if net_type == tf.float16 else net

  for layer_num in range(num_layers - 1):
    net = tf.nn.relu(net)
    net = slim.separable_conv2d(
        net,
        filter_size,
        kernel_size,
        depth_multiplier=1,
        scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
        stride=stride)
    net = slim.batch_norm(
        net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
    stride = 1
  net = tf.nn.relu(net)
  net = slim.separable_conv2d(
      net,
      filter_size,
      kernel_size,
      depth_multiplier=1,
      scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
      stride=stride)
  net = slim.batch_norm(
      net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
  net = tf.cast(net, net_type)
  return net 
Example #27
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_plane_conv(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_plane_conv2d/input')
      with slim.arg_scope([slim.separable_conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
        net = slim.conv2d_in_plane(inputs, 2, [3, 3], scope='conv1')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_plane_conv2d/input:0":[1,16,16,3]},
        output_name, delta=1e-2)

  # TODO - this fails due to unsupported op "Tile" 
Example #28
Source File: layer_utils.py    From centernet_tensorflow_wilderface_voc with MIT License
def depthwise_conv_bn(x, kernel_size, strides=1, dilation=1):
    with tf.variable_scope(None, 'depthwise_conv_bn'):
        x = slim.separable_conv2d(x, None, kernel_size, depth_multiplier=1, stride=strides,
                                  rate=dilation, activation_fn=None, biases_initializer=None)
        x = slim.batch_norm(x, activation_fn=None, fused=False)
    return x 
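A usage sketch (the input tensor is an assumption; with slim's default 'SAME' padding, stride 2 halves the spatial size while depth_multiplier=1 keeps the channel count):

x = tf.placeholder(tf.float32, [None, 64, 64, 32])
y = depthwise_conv_bn(x, kernel_size=3, strides=2)  # -> [None, 32, 32, 32]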
Example #29
Source File: nasnet_model.py    From benchmarks with Apache License 2.0
def nasnet_large_arg_scope(weight_decay=5e-5,
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=1e-3):
  """Defines the default arg scope for the NASNet-A Large ImageNet model.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
  Returns:
    An `arg_scope` to use for the NASNet Large Model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      'scale': True,
      'fused': True,
  }
  weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
  weights_initializer = contrib_layers.variance_scaling_initializer(
      mode='FAN_OUT')
  with arg_scope(
      [slim.fully_connected, slim.conv2d, slim.separable_conv2d],
      weights_regularizer=weights_regularizer,
      weights_initializer=weights_initializer):
    with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
      with arg_scope(
          [slim.conv2d, slim.separable_conv2d],
          activation_fn=None,
          biases_initializer=None):
        with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
          return sc 
Example #30
Source File: nasnet_model.py    From benchmarks with Apache License 2.0
def nasnet_mobile_arg_scope(weight_decay=4e-5,
                            batch_norm_decay=0.9997,
                            batch_norm_epsilon=1e-3):
  """Defines the default arg scope for the NASNet-A Mobile ImageNet model.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
  Returns:
    An `arg_scope` to use for the NASNet Mobile Model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      'scale': True,
      'fused': True,
  }
  weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
  weights_initializer = contrib_layers.variance_scaling_initializer(
      mode='FAN_OUT')
  with arg_scope(
      [slim.fully_connected, slim.conv2d, slim.separable_conv2d],
      weights_regularizer=weights_regularizer,
      weights_initializer=weights_initializer):
    with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
      with arg_scope(
          [slim.conv2d, slim.separable_conv2d],
          activation_fn=None,
          biases_initializer=None):
        with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
          return sc