Python tensorflow.contrib.layers.separable_conv2d() Examples

The following are 25 code examples of tensorflow.contrib.layers.separable_conv2d(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.contrib.layers.
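Before diving into the project code, here is a minimal sketch of the two calling modes that recur in the examples below (standard TF 1.x contrib API; the input tensor and sizes are illustrative):

import tensorflow as tf
from tensorflow.contrib import layers

inputs = tf.placeholder(tf.float32, [1, 32, 32, 16])  # NHWC

# Full separable convolution: a depthwise conv (depth_multiplier filters
# per input channel) followed by a pointwise 1x1 conv producing
# `num_outputs` channels.
sep = layers.separable_conv2d(
    inputs, num_outputs=32, kernel_size=3, depth_multiplier=1, scope='sep')

# Depthwise-only mode: with num_outputs=None the pointwise step is skipped
# and the output has in_channels * depth_multiplier channels (see Example #6).
dw = layers.separable_conv2d(
    inputs, num_outputs=None, kernel_size=3, depth_multiplier=2, scope='dw')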
Example #1
Source File: layers.py    From tensornets with MIT License
def gconvbn(*args, **kwargs):
    scope = kwargs.pop('scope', None)
    with tf.variable_scope(scope):
        x = sconv2d(*args, **kwargs)
        c = args[-1]  # number of input channels summed into each output channel
        infilters = int(x.shape[-1]) if tf_later_than('2') else x.shape[-1].value
        f = infilters // c
        g = f // c
        # Fixed binary 1x1 kernel: output channel i sums c input channels,
        # strided by c within its group, emulating a grouped pointwise conv.
        kernel = np.zeros((1, 1, f * c, f), np.float32)
        for i in range(f):
            start = (i // c) * c * c + i % c
            end = start + c * c
            kernel[:, :, start:end:c, i] = 1.
        x = conv2d_primitive(x, tf.constant(kernel), strides=[1, 1, 1, 1],
                             padding='VALID', name='gconv')
        return batch_norm(x)
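The loop above builds a fixed binary 1x1 kernel implementing a grouped channel sum. For intuition, it can be run in isolation; with the illustrative values f = 4 and c = 2, the printed pattern shows each of the f output channels summing exactly c input channels of its group:

import numpy as np

f, c = 4, 2  # illustrative sizes; in gconvbn, f = infilters // c
kernel = np.zeros((1, 1, f * c, f), np.float32)
for i in range(f):
    start = (i // c) * c * c + i % c
    end = start + c * c
    kernel[:, :, start:end:c, i] = 1.
print(kernel[0, 0])  # rows: input channels, columns: output channels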
Example #2
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def __init__(self):
    self.conv2d = layers.conv2d
    self.fully_connected = layers.fully_connected
    self.separable_conv2d = layers.separable_conv2d
    self.concat = tf.concat 
Example #3
Source File: rnn_decoder_test.py    From models with Apache License 2.0
def pre_bottleneck(self, inputs, state, input_index):
    with tf.variable_scope('bottleneck_%d' % input_index, reuse=tf.AUTO_REUSE):
      inputs = contrib_layers.separable_conv2d(
          tf.concat([inputs, state], 3),
          self._input_size,
          self._filter_size,
          depth_multiplier=1,
          activation_fn=tf.nn.relu6,
          normalizer_fn=None)
    return inputs 
Example #4
Source File: hyperparams_builder_test.py    From aster with MIT License
def test_default_arg_scope_has_separable_conv2d_op(self):
    conv_hyperparams_text_proto = """
      regularizer {
        l1_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    conv_hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto)
    scope = hyperparams_builder.build(conv_hyperparams_proto, is_training=True)
    self.assertTrue(self._get_scope_key(layers.separable_conv2d) in scope) 
Example #5
Source File: shufflenet_v2.py    From TF_Face_Toolbox with Apache License 2.0
def separable_resBlock(self, x, 
                         num_outputs, 
                         stride=1, 
                         activation_fn=tf.nn.relu, 
                         normalizer_fn=layers.batch_norm,
                         scope=None):
    residual_flag = self.residual and (stride == 1 and num_outputs == x.get_shape().as_list()[self.channel_axis]) 
    with tf.variable_scope(scope, 'resBlock'):
      # channel_split
      shortcut, x = self._channel_split(x)
      if stride != 1:
        shortcut = layers.separable_conv2d(shortcut, num_outputs, kernel_size=3, stride=stride, 
                                           scope='separable_conv_shortcut_3x3')
        shortcut = layers.conv2d(shortcut, num_outputs, kernel_size=1, stride=1, scope='conv_shortcut_1x1')
      if residual_flag:
        res_shortcut = x
      x = layers.conv2d(x, num_outputs, kernel_size=1, stride=1, scope='conv1_1x1')
      x = layers.separable_conv2d(x, num_outputs, kernel_size=3, stride=stride, scope='separable_conv2_3x3')
      x = layers.conv2d(x, num_outputs, kernel_size=1, stride=1, scope='conv3_1x1')
      if self.se:
        x = self._squeeze_excitation(x)      
      if residual_flag:
        x += res_shortcut

      # concat
      x = tf.concat([shortcut, x], axis=self.channel_axis)
      x = self._channel_shuffle(x)
      
    return x 
Example #6
Source File: depthwise_convolution_op_handler_test.py    From morph-net with Apache License 2.0
def testDepthwiseChannelMapping(self):
    """Verify depth multiplier maps input to output as expected."""
    tf.reset_default_graph()

    # Construct input tensor with shape [1, 4, 4, 5].  There are 5 channels
    # where each channel has values corresponding to the channel index.
    channel0 = tf.ones([1, 4, 4, 1]) * 0
    channel1 = tf.ones([1, 4, 4, 1]) * 1
    channel2 = tf.ones([1, 4, 4, 1]) * 2
    channel3 = tf.ones([1, 4, 4, 1]) * 3
    channel4 = tf.ones([1, 4, 4, 1]) * 4
    inputs = tf.concat(
        [channel0, channel1, channel2, channel3, channel4], axis=3)
    # Sanity check that input tensor is the right shape.
    self.assertAllEqual([1, 4, 4, 5], inputs.shape.as_list())

    conv = layers.separable_conv2d(
        inputs, num_outputs=None, kernel_size=3, depth_multiplier=2,
        weights_initializer=identity_initializer, scope='depthwise_conv')

    with self.cached_session():
      with tf.variable_scope('', reuse=tf.AUTO_REUSE):
        weights = tf.get_variable('depthwise_conv/depthwise_weights')
        biases = tf.get_variable('depthwise_conv/biases', [10],
                                 initializer=tf.zeros_initializer)
      init = tf.variables_initializer([weights, biases])
      init.run()

      # The depth_multiplier replicates channels with [a, a, b, b, c, c, ...]
      # pattern.  Expected output has shape [1, 4, 4, 10].
      expected_output = tf.concat(
          [channel0, channel0,
           channel1, channel1,
           channel2, channel2,
           channel3, channel3,
           channel4, channel4],
          axis=3)
      # Sanity check that output tensor is the right shape.
      self.assertAllEqual([1, 4, 4, 10], expected_output.shape.as_list())

      self.assertAllEqual(expected_output.eval(), conv.eval()) 
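The test above depends on an identity_initializer helper defined elsewhere in the morph-net test file and not shown here. A plausible sketch of such an initializer (an assumption, not the project's exact code): set the center tap of every depthwise filter to 1, so the depthwise convolution copies each input channel unchanged:

import numpy as np
import tensorflow as tf

def identity_initializer(shape, dtype=tf.float32, partition_info=None):
  # shape: [kernel_h, kernel_w, in_channels, depth_multiplier]
  dims = [int(d) for d in shape]
  kernel = np.zeros(dims, dtype=np.float32)
  kernel[dims[0] // 2, dims[1] // 2, :, :] = 1.0  # center tap only
  return tf.constant(kernel, dtype=dtype)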
Example #7
Source File: op_handler_util_test.py    From morph-net with Apache License 2.0
def testOpAssumptions(self):
    # Verify that op assumptions are true.  For example, verify that specific
    # inputs are at expected indices.
    conv_transpose = layers.conv2d_transpose(
        self.batch_norm_op.outputs[0], num_outputs=8, kernel_size=3,
        scope='conv_transpose')
    layers.separable_conv2d(
        conv_transpose, num_outputs=9, kernel_size=3, scope='dwise_conv')
    layers.fully_connected(tf.zeros([1, 7]), 10, scope='fc')

    g = tf.get_default_graph()

    # Verify that FusedBatchNormV3 has gamma as inputs[1].
    self.assertEqual('conv1/BatchNorm/gamma/read:0',
                     self.batch_norm_op.inputs[1].name)

    # Verify that Conv2D has weights at expected index.
    index = op_handler_util.WEIGHTS_INDEX_DICT[self.conv_op.type]
    self.assertEqual('conv1/weights/read:0',
                     self.conv_op.inputs[index].name)

    # Verify that Conv2DBackpropInput has weights at expected index.
    conv_transpose_op = g.get_operation_by_name(
        'conv_transpose/conv2d_transpose')
    index = op_handler_util.WEIGHTS_INDEX_DICT[conv_transpose_op.type]
    self.assertEqual('conv_transpose/weights/read:0',
                     conv_transpose_op.inputs[index].name)

    # Verify that DepthwiseConv2dNative has weights at expected index.
    depthwise_conv_op = g.get_operation_by_name(
        'dwise_conv/separable_conv2d/depthwise')
    index = op_handler_util.WEIGHTS_INDEX_DICT[depthwise_conv_op.type]
    self.assertEqual('dwise_conv/depthwise_weights/read:0',
                     depthwise_conv_op.inputs[index].name)

    # Verify that MatMul has weights at expected index.
    matmul_op = g.get_operation_by_name('fc/MatMul')
    index = op_handler_util.WEIGHTS_INDEX_DICT[matmul_op.type]
    self.assertEqual('fc/weights/read:0',
                     matmul_op.inputs[index].name) 
Example #8
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testHijack(self, fake_module, has_conv2d, has_separable_conv2d,
                 has_fully_connected):
    # This test verifies that hijacking works with arg scope.
    # TODO(e1): Test that all is correct when hijacking a real module.
    def name_and_output_fn(name):
      # By design there is no add arg_scope here.
      def fn(*args, **kwargs):
        return (name, args[1], kwargs['scope'])

      return fn

    function_dict = {
        'fully_connected': name_and_output_fn('testing_fully_connected'),
        'conv2d': name_and_output_fn('testing_conv2d'),
        'separable_conv2d': name_and_output_fn('testing_separable_conv2d')
    }

    decorator = ops.ConfigurableOps(function_dict=function_dict)
    originals = ops.hijack_module_functions(decorator, fake_module)

    self.assertEqual('conv2d' in originals, has_conv2d)
    self.assertEqual('separable_conv2d' in originals, has_separable_conv2d)
    self.assertEqual('fully_connected' in originals, has_fully_connected)

    if has_conv2d:
      with arg_scope([fake_module.conv2d], num_outputs=2):
        out = fake_module.conv2d(
            inputs=tf.zeros([10, 3, 3, 4]), scope='test_conv2d')
      self.assertAllEqual(['testing_conv2d', 2, 'test_conv2d'], out)

    if has_fully_connected:
      with arg_scope([fake_module.fully_connected], num_outputs=3):
        out = fake_module.fully_connected(
            inputs=tf.zeros([10, 4]), scope='test_fc')
      self.assertAllEqual(['testing_fully_connected', 3, 'test_fc'], out)

    if has_separable_conv2d:
      with arg_scope([fake_module.separable_conv2d], num_outputs=4):
        out = fake_module.separable_conv2d(
            inputs=tf.zeros([10, 3, 3, 4]), scope='test_sep')
      self.assertAllEqual(['testing_separable_conv2d', 4, 'test_sep'], out) 
Example #9
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def __init__(self):
    self.fully_connected = layers.fully_connected
    self.separable_conv2d = layers.separable_conv2d 
Example #10
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testDefaultScopesRepeated(self):
    inputs = tf.ones([1, 3, 3, 2])
    parameterization = {
        's1/SeparableConv2d/separable_conv2d': 1,
        's1/SeparableConv2d_1/separable_conv2d': 2,
        's1/s2/SeparableConv2d/separable_conv2d': 3,
        's1/s2/SeparableConv2d_1/separable_conv2d': 4,
    }
    decorator = ops.ConfigurableOps(
        parameterization=parameterization,
        function_dict={'separable_conv2d': tf_contrib.slim.separable_conv2d})

    with tf.variable_scope('s1'):
      # first call in s1: op scope should be `s1/SeparableConv2d`
      _ = decorator.separable_conv2d(inputs, num_outputs=8, kernel_size=2)

      with tf.variable_scope('s2'):
        # first call in s2: op scope should be `s1/s2/SeparableConv2d`
        _ = decorator.separable_conv2d(inputs, num_outputs=8, kernel_size=2)

        # second call in s2: op scope should be `s1/s2/SeparableConv2d_1`
        _ = decorator.separable_conv2d(inputs, num_outputs=8, kernel_size=2)

      # second call in s1: op scope should be `s1/SeparableConv2d_1`
      _ = decorator.separable_conv2d(inputs, num_outputs=8, kernel_size=2)

    conv_op_names = [op.name for op in tf.get_default_graph().get_operations()
                     if op.name.endswith('separable_conv2d')]
    self.assertCountEqual(parameterization, conv_op_names)
    self.assertDictEqual(parameterization, decorator.constructed_ops) 
Example #11
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testSeparableConv2dOp(self):
    parameterization = {'test/separable_conv2d': 12}
    decorator = ops.ConfigurableOps(parameterization=parameterization)
    output = decorator.separable_conv2d(
        self.inputs,
        num_outputs=88,
        kernel_size=3,
        depth_multiplier=1,
        scope='test')
    self.assertEqual(12, output.shape.as_list()[-1]) 
Example #12
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testMapBinding(self):
    # TODO(e1): Clean up this file/test. Split to different tests
    function_dict = {
        'fully_connected': mock_fully_connected,
        'conv2d': mock_conv2d,
        'separable_conv2d': mock_separable_conv2d,
        'concat': mock_concat,
        'add_n': mock_add_n,
    }
    parameterization = {
        'fc/MatMul': 13,
        'conv/Conv2D': 15,
        'sep/separable_conv2d': 17
    }
    num_outputs = lambda res: res['args'][1]
    decorator = ops.ConfigurableOps(
        parameterization=parameterization, function_dict=function_dict)
    fc = decorator.fully_connected(self.fc_inputs, num_outputs=88, scope='fc')
    self.assertEqual('myfully_connected', fc['mock_name'])
    self.assertEqual(parameterization['fc/MatMul'], num_outputs(fc))

    conv2d = decorator.conv2d(
        self.inputs, num_outputs=11, kernel_size=3, scope='conv')
    self.assertEqual('myconv2d', conv2d['mock_name'])
    self.assertEqual(parameterization['conv/Conv2D'], num_outputs(conv2d))

    separable_conv2d = decorator.separable_conv2d(
        self.inputs, num_outputs=88, kernel_size=3, scope='sep')
    self.assertEqual('myseparable_conv2d', separable_conv2d['mock_name'])
    self.assertEqual(parameterization['sep/separable_conv2d'],
                     num_outputs(separable_conv2d))

    concat = decorator.concat(axis=1, values=[1, None, 2])
    self.assertEqual(concat['args'][0], [1, 2])
    self.assertEqual(concat['kwargs']['axis'], 1)
    with self.assertRaises(ValueError):
      _ = decorator.concat(inputs=[1, None, 2])

    add_n = decorator.add_n(name='add_n', inputs=[1, None, 2])
    self.assertEqual(add_n['args'][0], [1, 2]) 
Example #13
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def _get_num_outputs_kwarg_name(self, function):
    """Gets the `num_outputs`-equivalent kwarg for a supported function."""
    alt_num_outputs_kwarg = {
        tf_layers.conv2d: 'filters',
        tf_layers.separable_conv2d: 'filters',
        tf_layers.dense: 'units',
    }
    return alt_num_outputs_kwarg.get(function, _DEFAULT_NUM_OUTPUTS_KWARG) 
Example #14
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def constructed_ops(self):
    """Returns an ordered dictionary mapping constructed op names to NUM_OUTPUTS.

       The dictionary contains an op.name: NUM_OUTPUTS pair for each op
       constructed by the decorator, ordered by the time the ops were added.
       The parameterization is accumulated across all calls to the object's
       members, such as `conv2d`, `fully_connected` and `separable_conv2d`.
       The values used are either the values from the parameterization set on
       the object, or the values that were passed to the members.
    """
    return self._constructed_ops
Example #15
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def separable_conv2d(self, *args, **kwargs):
    """Masks NUM_OUTPUTS from the function pointed to by 'separable_conv2d'.

    The object's parameterization has precedence over the given NUM_OUTPUTS
    argument. The resolution of the op names uses
    tf.contrib.framework.get_name_scope() and kwargs['scope'].

    Args:
      *args: Arguments for the operation.
      **kwargs: Keyword arguments for the operation.

    Returns:
      The result of the application of the function_map['separable_conv2d'] to
      the given 'inputs', '*args', and '**kwargs' while possibly overriding
      NUM_OUTPUTS according to the parameterization.

    Raises:
      ValueError: If kwargs does not contain a key named 'scope'.
    """
    # This function actually only decorates the num_outputs of the Conv2D after
    # the depthwise convolution, as the former does not have any free params.
    fn, suffix = self._get_function_and_suffix('separable_conv2d')
    num_outputs_kwarg_name = self._get_num_outputs_kwarg_name(fn)
    num_outputs = _get_from_args_or_kwargs(
        num_outputs_kwarg_name, 1, args, kwargs, False)
    if num_outputs is None:
      tf.logging.warning(
          'Trying to decorate separable_conv2d with num_outputs = None')
      kwargs[num_outputs_kwarg_name] = None

    return self._mask(fn, suffix, *args, **kwargs) 
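Combining the precedence rule in the docstring with the tests earlier on this page, a minimal usage sketch (the import path is an assumption; Example #11 shows the equivalent in-repo test):

import tensorflow as tf
from morph_net.tools import configurable_ops as ops  # assumed import path

inputs = tf.zeros([1, 8, 8, 3])
decorator = ops.ConfigurableOps(
    parameterization={'features/separable_conv2d': 12})
# The parameterization takes precedence: num_outputs=88 is overridden to 12.
net = decorator.separable_conv2d(
    inputs, num_outputs=88, kernel_size=3, depth_multiplier=1,
    scope='features')
assert net.shape.as_list()[-1] == 12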
Example #16
Source File: cost_calculator_test.py    From morph-net with Apache License 2.0
def test_get_input_activation2(self, rank, fn, op_name):
    g = tf.get_default_graph()
    inputs = tf.zeros([6] * rank)
    with arg_scope([
        layers.conv2d, layers.conv2d_transpose, layers.separable_conv2d,
        layers.conv3d
    ],
                   scope='test_layer'):
      _ = fn(inputs)
    for op in g.get_operations():
      print(op.name)
    self.assertEqual(
        inputs,
        cc.get_input_activation(
            g.get_operation_by_name('test_layer/' + op_name))) 
Example #17
Source File: layers.py    From tensornets with MIT License
def sconvbnrelu6(*args, **kwargs):
    scope = kwargs.pop('scope', None)
    with tf.variable_scope(scope):
        return relu6(batch_norm(sconv2d(*args, **kwargs))) 
Example #18
Source File: layers.py    From tensornets with MIT License
def sconvbnrelu(*args, **kwargs):
    scope = kwargs.pop('scope', None)
    with tf.variable_scope(scope):
        return relu(batch_norm(sconv2d(*args, **kwargs))) 
Example #19
Source File: layers.py    From tensornets with MIT License
def sconvbnact(*args, **kwargs):
    scope = kwargs.pop('scope', None)
    activation_fn = kwargs.pop('activation_fn', None)
    with tf.variable_scope(scope):
        return activation_fn(batch_norm(sconv2d(*args, **kwargs))) 
Example #20
Source File: layers.py    From tensornets with MIT License
def sconvbn(*args, **kwargs):
    scope = kwargs.pop('scope', None)
    with tf.variable_scope(scope):
        return batch_norm(sconv2d(*args, **kwargs)) 
Example #21
Source File: shufflenet_v2.py    From TF_Face_Toolbox with Apache License 2.0
def backbone(self, inputs, is_training=False, reuse=None):
    end_points = OrderedDict()
    with tf.variable_scope(self.name, values=[inputs], reuse=reuse):
      with arg_scope([layers.batch_norm], scale=True, fused=True, 
                      data_format=self.data_format, 
                      is_training=is_training):
        with arg_scope([layers.conv2d], 
                        activation_fn=tf.nn.relu, 
                        normalizer_fn=layers.batch_norm, 
                        biases_initializer=None, 
                        weights_regularizer=layers.l2_regularizer(self.weight_decay),
                        data_format=self.data_format):
          with arg_scope([layers.separable_conv2d], 
                          depth_multiplier=1,
                          activation_fn=None, 
                          normalizer_fn=layers.batch_norm, 
                          biases_initializer=None, 
                          weights_regularizer=layers.l2_regularizer(self.weight_decay),
                          data_format=self.data_format):
            if self.data_format == 'NCHW':
              inputs = tf.transpose(inputs, [0, 3, 1, 2])

            with tf.variable_scope('conv1'):
              net = layers.conv2d(inputs, num_outputs=24, kernel_size=3, stride=2, scope='conv_3x3')
              end_points['conv1/conv_3x3'] = net
              net = layers.max_pool2d(net, kernel_size=3, stride=2, padding='SAME', 
                                      data_format=self.data_format, scope='maxpool_3x3_2')
              end_points['conv1/maxpool_3x3_2'] = net

            with tf.variable_scope('conv2'):
              for idx in xrange(4):
                net = self.separable_resBlock(net, num_outputs=self.num_outputs[0], 
                                    stride=2 if not idx else 1, scope='resBlock_%d'%idx)
                end_points['conv2/resBlock_%d'%idx] = net

            with tf.variable_scope('conv3'):
              for idx in xrange(8):
                net = self.separable_resBlock(net, num_outputs=self.num_outputs[1], 
                                    stride=2 if not idx else 1, scope='resBlock_%d'%idx)
                end_points['conv3/resBlock_%d'%idx] = net

            with tf.variable_scope('conv4'):
              for idx in xrange(4):
                net = self.separable_resBlock(net, num_outputs=self.num_outputs[2], 
                                    stride=2 if not idx else 1, scope='resBlock_%d'%idx)
                end_points['conv4/resBlock_%d'%idx] = net

            with tf.variable_scope('conv5'):
              net = layers.conv2d(net, num_outputs=self.num_outputs[3], kernel_size=1, 
                                  stride=1, scope='conv_1x1')
              end_points['conv5/conv_1x1'] = net
                                    
            net = tf.reduce_mean(net, self.spatial_axis)

    return net, end_points 
Example #22
Source File: shufflenet_v2.py    From TF_Face_Toolbox with Apache License 2.0
def backbone(self, inputs, is_training=False, reuse=None):
    end_points = OrderedDict()
    with tf.variable_scope(self.name, reuse=reuse):
      with arg_scope([layers.batch_norm], scale=True, fused=True, 
                      data_format=self.data_format, 
                      is_training=is_training):
        with arg_scope([layers.conv2d], 
                        activation_fn=tf.nn.relu, 
                        normalizer_fn=layers.batch_norm, 
                        biases_initializer=None, 
                        weights_regularizer=layers.l2_regularizer(self.weight_decay),
                        data_format=self.data_format):
          with arg_scope([layers.separable_conv2d], 
                          depth_multiplier=1,
                          activation_fn=None, 
                          normalizer_fn=layers.batch_norm, 
                          biases_initializer=None, 
                          weights_regularizer=layers.l2_regularizer(self.weight_decay),
                          data_format=self.data_format):
            if self.data_format == 'NCHW':
              inputs = tf.transpose(inputs, [0, 3, 1, 2])

            with tf.variable_scope('conv1'):
              net = layers.conv2d(inputs, num_outputs=64, kernel_size=3, stride=2, scope='conv_3x3')
              end_points['conv1/conv_3x3'] = net
              net = layers.max_pool2d(net, kernel_size=3, stride=2, padding='SAME', 
                                      data_format=self.data_format, scope='maxpool_3x3_2')
              end_points['conv1/maxpool_3x3_2'] = net

            with tf.variable_scope('conv2'):
              for idx in xrange(3):
                net = self.separable_resBlock(net, num_outputs=self.num_outputs[0], 
                                    stride=2 if not idx else 1, scope='resBlock_%d'%idx)
                end_points['conv2/resBlock_%d'%idx] = net

            with tf.variable_scope('conv3'):
              for idx in xrange(4):
                net = self.separable_resBlock(net, num_outputs=self.num_outputs[1], 
                                    stride=2 if not idx else 1, scope='resBlock_%d'%idx)
                end_points['conv3/resBlock_%d'%idx] = net

            with tf.variable_scope('conv4'):
              for idx in xrange(6):
                net = self.separable_resBlock(net, num_outputs=self.num_outputs[2], 
                                    stride=2 if not idx else 1, scope='resBlock_%d'%idx)
                end_points['conv4/resBlock_%d'%idx] = net

            with tf.variable_scope('conv5'):
              for idx in xrange(3):
                net = self.separable_resBlock(net, num_outputs=self.num_outputs[3],
                                    stride=2 if not idx else 1, scope='resBlock_%d'%idx)
                end_points['conv5/resBlock_%d'%idx] = net

            with tf.variable_scope('conv6'):
              net = layers.conv2d(net, num_outputs=self.num_outputs[4], kernel_size=1,
                                  stride=1, scope='conv_1x1')
              end_points['conv6/conv_1x1'] = net
                                    
            net = tf.reduce_mean(net, self.spatial_axis)

    return net, end_points 
Example #23
Source File: hyperparams_builder.py    From aster with MIT License
def build(hyperparams_config, is_training):
  """Builds arg_scope for convolution ops based on the config.

  Returns an arg_scope to use for convolution ops containing weights
  initializer, weights regularizer, activation function, batch norm function
  and batch norm parameters based on the configuration.

  Note that if the batch_norm parameters are not specified in the config
  (i.e. left to default) then batch norm is excluded from the arg_scope.

  The batch norm parameters are set for updates based on `is_training` argument
  and conv_hyperparams_config.batch_norm.train parameter. During training, they
  are updated only if batch_norm.train parameter is true. However, during eval,
  no updates are made to the batch norm variables. In both cases, their current
  values are used during forward pass.

  Args:
    hyperparams_config: hyperparams.proto object containing
      hyperparameters.
    is_training: Whether the network is in training mode.

  Returns:
    arg_scope: arg_scope containing hyperparameters for ops.

  Raises:
    ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
  """
  if not isinstance(hyperparams_config,
                    hyperparams_pb2.Hyperparams):
    raise ValueError('hyperparams_config not of type '
                     'hyperparams_pb.Hyperparams.')

  batch_norm = None
  batch_norm_params = None
  if hyperparams_config.HasField('batch_norm'):
    batch_norm = layers.batch_norm
    batch_norm_params = _build_batch_norm_params(
        hyperparams_config.batch_norm, is_training)

  affected_ops = [layers.conv2d, layers.separable_conv2d, layers.conv2d_transpose]
  if hyperparams_config.HasField('op') and (
      hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
    affected_ops = [layers.fully_connected]
  with arg_scope(
      affected_ops,
      weights_regularizer=_build_regularizer(
          hyperparams_config.regularizer),
      weights_initializer=_build_initializer(
          hyperparams_config.initializer),
      activation_fn=_build_activation_fn(hyperparams_config.activation),
      normalizer_fn=batch_norm,
      normalizer_params=batch_norm_params) as sc:
    return sc 
Example #24
Source File: mobilenetv1_model.py    From rigl with Apache License 2.0
def depthwise_conv2d_fixed_padding(inputs,
                                   kernel_size,
                                   stride,
                                   data_format='channels_first',
                                   name=None):
  """Depthwise Strided 2-D convolution with explicit padding.

  The padding is consistent and is based only on `kernel_size`, not on the
  dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).

  Args:
    inputs:  Input tensor, float32 or bfloat16 of size [batch, channels, height,
      width].
    kernel_size: Int designating size of kernel to be used in the convolution.
    stride: Int specifying the stride. If stride >1, the input is downsampled.
    data_format: String that specifies either "channels_first" for [batch,
      channels, height, width] or "channels_last" for [batch, height, width,
      channels].
    name: String that specifies name for model layer.

  Returns:
    The output activation tensor of size [batch, filters, height_out, width_out]

  Raises:
    ValueError: If the data_format provided is not a valid string.
  """
  if stride > 1:
    inputs = resnet_model.fixed_padding(
        inputs, kernel_size, data_format=data_format)
  padding = 'SAME' if stride == 1 else 'VALID'

  if data_format == 'channels_last':
    data_format_channels = 'NHWC'
  elif data_format == 'channels_first':
    data_format_channels = 'NCHW'
  else:
    raise ValueError('Not a valid channel string:', data_format)

  return contrib_layers.separable_conv2d(
      inputs=inputs,
      num_outputs=None,
      kernel_size=kernel_size,
      stride=stride,
      padding=padding,
      data_format=data_format_channels,
      activation_fn=None,
      weights_regularizer=None,
      biases_initializer=None,
      biases_regularizer=None,
      scope=name) 
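A hedged usage sketch for the helper above (tensor sizes are illustrative; stride=2 exercises the resnet_model.fixed_padding path, and num_outputs=None keeps the input's channel count):

import tensorflow as tf

inputs = tf.zeros([8, 32, 56, 56])  # [batch, channels, height, width]
net = depthwise_conv2d_fixed_padding(
    inputs, kernel_size=3, stride=2, data_format='channels_first',
    name='depthwise_conv')
print(net.shape)  # (8, 32, 28, 28): spatial dims halved, channels unchanged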
Example #25
Source File: mobilenetv2_model.py    From rigl with Apache License 2.0
def depthwise_conv2d_fixed_padding(inputs,
                                   kernel_size,
                                   stride,
                                   data_format='channels_first',
                                   name=None):
  """Depthwise Strided 2-D convolution with explicit padding.

  The padding is consistent and is based only on `kernel_size`, not on the
  dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).

  Args:
    inputs:  Input tensor, float32 or bfloat16 of size [batch, channels, height,
      width].
    kernel_size: Int designating size of kernel to be used in the convolution.
    stride: Int specifying the stride. If stride >1, the input is downsampled.
    data_format: String that specifies either "channels_first" for [batch,
      channels, height, width] or "channels_last" for [batch, height, width,
      channels].
    name: String that specifies name for model layer.

  Returns:
    The output activation tensor of size [batch, filters, height_out, width_out]

  Raises:
    ValueError: If the data_format provided is not a valid string.
  """
  if stride > 1:
    inputs = resnet_model.fixed_padding(
        inputs, kernel_size, data_format=data_format)
  padding = 'SAME' if stride == 1 else 'VALID'

  if data_format == 'channels_last':
    data_format_channels = 'NHWC'
  elif data_format == 'channels_first':
    data_format_channels = 'NCHW'
  else:
    raise ValueError('Not a valid channel string:', data_format)

  return contrib_layers.separable_conv2d(
      inputs=inputs,
      num_outputs=None,
      kernel_size=kernel_size,
      stride=stride,
      padding=padding,
      data_format=data_format_channels,
      activation_fn=None,
      weights_regularizer=None,
      biases_initializer=None,
      biases_regularizer=None,
      scope=name)