Python tensorflow.contrib.layers.max_pool2d() Examples

The following are 30 code examples of tensorflow.contrib.layers.max_pool2d(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module tensorflow.contrib.layers, or try the search function.
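
Before diving in, here is a minimal, self-contained sketch of a typical call (assuming TensorFlow 1.x, where tf.contrib is still available; the shapes and names below are illustrative, not taken from the examples):

import tensorflow as tf
from tensorflow.contrib import layers

# A batch of four 32x32 RGB images in NHWC, the layer's default data format.
images = tf.placeholder(tf.float32, [4, 32, 32, 3])

# 2x2 max pooling with stride 2 halves the spatial dimensions.
pooled = layers.max_pool2d(images, kernel_size=[2, 2], stride=2, scope='pool1')
print(pooled.get_shape())  # (4, 16, 16, 3)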
Example #1
Source File: squeezenet.py    From DirectML with MIT License
def _squeezenet(images, num_classes=200, data_format='NCHW'):
        net = conv2d(images, 96, [2, 2], scope='conv1')
        net = max_pool2d(net, [2, 2], scope='maxpool1')
        net = fire_module(net, 16, 64, scope='fire2', data_format=data_format)
        net = fire_module(net, 16, 64, scope='fire3', data_format=data_format)
        net = fire_module(net, 32, 128, scope='fire4', data_format=data_format)
        net = max_pool2d(net, [2, 2], scope='maxpool4')
        net = fire_module(net, 32, 128, scope='fire5', data_format=data_format)
        net = fire_module(net, 48, 192, scope='fire6', data_format=data_format)
        net = fire_module(net, 48, 192, scope='fire7', data_format=data_format)
        net = fire_module(net, 64, 256, scope='fire8', data_format=data_format)
        net = max_pool2d(net, [2, 2], scope='maxpool8')
        net = fire_module(net, 64, 256, scope='fire9', data_format=data_format)
        net = avg_pool2d(net, [7, 7], scope='avgpool10')
        net = conv2d(net, num_classes, [1, 1],
                     activation_fn=None,
                     normalizer_fn=None,
                     scope='conv10')

        squeeze_axes = [2, 3] if data_format == 'NCHW' else [1, 2]
        logits = tf.squeeze(net, squeeze_axes, name='logits')
        return logits 
Example #2
Source File: stn_convnet.py    From aster with MIT License
def _extract_features(self, preprocessed_inputs):
    """Extract features
    Args:
      preprocessed_inputs: float32 tensor of shape [batch_size, image_height, image_width, 3]
    Return:
      feature_maps: a list of extracted feature maps
    """
    with arg_scope([conv2d], kernel_size=3, activation_fn=tf.nn.relu), \
         arg_scope([max_pool2d], kernel_size=2, stride=2):
      conv1 = conv2d(preprocessed_inputs, 32, scope='conv1') # 64
      pool1 = max_pool2d(conv1, scope='pool1')
      conv2 = conv2d(pool1, 64, scope='conv2')  # 32
      pool2 = max_pool2d(conv2, scope='pool2')
      conv3 = conv2d(pool2, 128, scope='conv3')  # 16
      pool3 = max_pool2d(conv3, scope='pool3')
      conv4 = conv2d(pool3, 256, scope='conv4')  # 8
      pool4 = max_pool2d(conv4, scope='pool4')
      conv5 = conv2d(pool4, 256, scope='conv5')  # 4
      pool5 = max_pool2d(conv5, scope='pool5')
      conv6 = conv2d(pool5, 256, scope='conv6')  # 2
      feature_maps_dict = {
        'conv1': conv1, 'conv2': conv2, 'conv3': conv3,
        'conv4': conv4, 'conv5': conv5, 'conv6': conv6 }
    return feature_maps_dict 
Example #3
Source File: stn_convnet.py    From aster with MIT License
def _extract_features(self, preprocessed_inputs):
    """Extract features
    Args:
      preprocessed_inputs: float32 tensor of shape [batch_size, image_height, image_width, 3]
    Return:
      feature_maps: a list of extracted feature maps
    """
    with arg_scope([conv2d], kernel_size=3, activation_fn=tf.nn.relu), \
         arg_scope([max_pool2d], kernel_size=2, stride=2):
      conv1 = conv2d(preprocessed_inputs, 8, scope='conv1') # 64
      pool1 = max_pool2d(conv1, scope='pool1')
      conv2 = conv2d(pool1, 16, scope='conv2')  # 32
      pool2 = max_pool2d(conv2, scope='pool2')
      conv3 = conv2d(pool2, 32, scope='conv3')  # 16
      pool3 = max_pool2d(conv3, scope='pool3')
      conv4 = conv2d(pool3, 32, scope='conv4')  # 8
      pool4 = max_pool2d(conv4, scope='pool4')
      conv5 = conv2d(pool4, 64, scope='conv5')  # 4
      pool5 = max_pool2d(conv5, scope='pool5')
      conv6 = conv2d(pool5, 64, scope='conv6')  # 2
      feature_maps_dict = {
        'conv1': conv1, 'conv2': conv2, 'conv3': conv3,
        'conv4': conv4, 'conv5': conv5, 'conv6': conv6 }
    return feature_maps_dict 
Example #4
Source File: crnn_net.py    From aster with MIT License
def _extract_features(self, preprocessed_inputs):
    """Extract features
    Args:
      preprocessed_inputs: float32 tensor of shape [batch_size, image_height, image_width, 3]
    Return:
      feature_maps: a list of extracted feature maps
    """
    with arg_scope([conv2d], kernel_size=3, padding='SAME', stride=1), \
         arg_scope([max_pool2d], stride=2):
      conv1 = conv2d(preprocessed_inputs, 64, scope='conv1')
      pool1 = max_pool2d(conv1, 2, scope='pool1')
      conv2 = conv2d(pool1, 128, scope='conv2')
      pool2 = max_pool2d(conv2, 2, scope='pool2')
      conv3 = conv2d(pool2, 256, scope='conv3')
      conv4 = conv2d(conv3, 256, scope='conv4')
      pool4 = max_pool2d(conv4, 2, stride=[2, 1], scope='pool4')
      conv5 = conv2d(pool4, 512, scope='conv5')
      conv6 = conv2d(conv5, 512, scope='conv6')
      pool6 = max_pool2d(conv6, 2, stride=[2, 1], scope='pool6')
      conv7 = conv2d(pool6, 512, kernel_size=[2, 1], padding='VALID', scope='conv7')
      feature_maps_dict = {
        'conv1': conv1, 'conv2': conv2, 'conv3': conv3, 'conv4': conv4,
        'conv5': conv5, 'conv6': conv6, 'conv7': conv7}
    return feature_maps_dict 
Example #5
Source File: crnn_net.py    From aster with MIT License
def _extract_features(self, preprocessed_inputs):
    """Extract features
    Args:
      preprocessed_inputs: float32 tensor of shape [batch_size, image_height, image_width, 3]
    Return:
      feature_maps: a list of extracted feature maps
    """
    with arg_scope([conv2d], kernel_size=3, padding='SAME', stride=1), \
         arg_scope([max_pool2d], stride=2):
      conv1 = conv2d(preprocessed_inputs, 8, scope='conv1')
      pool1 = max_pool2d(conv1, 2, scope='pool1')
      conv2 = conv2d(pool1, 16, scope='conv2')
      pool2 = max_pool2d(conv2, 2, scope='pool2')
      conv3 = conv2d(pool2, 32, scope='conv3')
      conv4 = conv2d(conv3, 64, scope='conv4')
      pool4 = max_pool2d(conv4, 2, stride=[2, 1], scope='pool4')
      conv5 = conv2d(pool4, 128, scope='conv5')
      conv6 = conv2d(conv5, 128, scope='conv6')
      pool6 = max_pool2d(conv6, 2, stride=[2, 1], scope='pool6')
      conv7 = conv2d(pool6, 128, kernel_size=[2, 1], padding='VALID', scope='conv7')
      feature_maps_dict = {
        'conv1': conv1, 'conv2': conv2, 'conv3': conv3, 'conv4': conv4,
        'conv5': conv5, 'conv6': conv6, 'conv7': conv7}
    return feature_maps_dict 
Example #6
Source File: squeezenet.py    From DirectML with MIT License
def _squeezenet(images, num_classes=1000, data_format='NCHW'):
        net = conv2d(images, 96, [2, 2], stride=2, scope='conv1')
        net = max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
        net = fire_module(net, 16, 64, scope='fire2', data_format=data_format)
        net = fire_module(net, 16, 64, scope='fire3', data_format=data_format)
        net = fire_module(net, 32, 128, scope='fire4', data_format=data_format)
        net = max_pool2d(net, [3, 3], stride=2, scope='maxpool4')
        net = fire_module(net, 32, 128, scope='fire5', data_format=data_format)
        net = fire_module(net, 48, 192, scope='fire6', data_format=data_format)
        net = fire_module(net, 48, 192, scope='fire7', data_format=data_format)
        net = fire_module(net, 64, 256, scope='fire8', data_format=data_format)
        net = max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
        net = fire_module(net, 64, 256, scope='fire9', data_format=data_format)
        net = conv2d(net, num_classes, [1, 1], stride=1, scope='conv10')
        net = avg_pool2d(net, [13, 13], stride=1, scope='avgpool10')

        squeeze_axes = [2, 3] if data_format == 'NCHW' else [1, 2]
        logits = tf.squeeze(net, squeeze_axes, name='logits')
        return logits 
Example #7
Source File: squeezenet.py    From DirectML with MIT License
def _squeezenet(images, num_classes=10, data_format='NCHW'):
        net = conv2d(images, 96, [2, 2], scope='conv1')
        net = max_pool2d(net, [2, 2], scope='maxpool1')
        net = fire_module(net, 16, 64, scope='fire2', data_format=data_format)
        net = fire_module(net, 16, 64, scope='fire3', data_format=data_format)
        net = fire_module(net, 32, 128, scope='fire4', data_format=data_format)
        net = max_pool2d(net, [2, 2], scope='maxpool4')
        net = fire_module(net, 32, 128, scope='fire5', data_format=data_format)
        net = fire_module(net, 48, 192, scope='fire6', data_format=data_format)
        net = fire_module(net, 48, 192, scope='fire7', data_format=data_format)
        net = fire_module(net, 64, 256, scope='fire8', data_format=data_format)
        net = max_pool2d(net, [2, 2], scope='maxpool8')
        net = fire_module(net, 64, 256, scope='fire9', data_format=data_format)
        net = avg_pool2d(net, [4, 4], scope='avgpool10')
        net = conv2d(net, num_classes, [1, 1],
                     activation_fn=None,
                     normalizer_fn=None,
                     scope='conv10')

        squeeze_axes = [2, 3] if data_format == 'NCHW' else [1, 2]
        logits = tf.squeeze(net, squeeze_axes, name='logits')
        return logits 
Example #8
Source File: build_inception_v4.py    From tensorflow-litterbox with Apache License 2.0
def _block_b_reduce(net, endpoints, scope='BlockReduceB'):
    # 17 x 17 -> 8 x 8 reduce
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, 192, [1, 1], padding='SAME', scope='Conv1_1x1')
                br2 = layers.conv2d(br2, 192, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br3_7x7x3'):
                br3 = layers.conv2d(net, 256, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, 256, [1, 7], padding='SAME', scope='Conv2_1x7')
                br3 = layers.conv2d(br3, 320, [7, 1], padding='SAME', scope='Conv3_7x1')
                br3 = layers.conv2d(br3, 320, [3, 3], stride=2, scope='Conv4_3x3/2')
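            # NOTE: tf.concat(dim, values) is the pre-1.0 TensorFlow argument
            # order; TensorFlow 1.x and later use tf.concat(values, axis).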
            net = tf.concat(3, [br1, br2, br3], name='Concat1')
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net 
Example #9
Source File: squeezenet.py    From squeezenet with MIT License
def _squeezenet(images, num_classes=1000):
        net = conv2d(images, 96, [7, 7], stride=2, scope='conv1')
        net = max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
        net = fire_module(net, 16, 64, scope='fire2')
        net = fire_module(net, 16, 64, scope='fire3')
        net = fire_module(net, 32, 128, scope='fire4')
        net = max_pool2d(net, [3, 3], stride=2, scope='maxpool4')
        net = fire_module(net, 32, 128, scope='fire5')
        net = fire_module(net, 48, 192, scope='fire6')
        net = fire_module(net, 48, 192, scope='fire7')
        net = fire_module(net, 64, 256, scope='fire8')
        net = max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
        net = fire_module(net, 64, 256, scope='fire9')
        net = conv2d(net, num_classes, [1, 1], stride=1, scope='conv10')
        net = avg_pool2d(net, [13, 13], stride=1, scope='avgpool10')
        logits = tf.squeeze(net, [2], name='logits')
        return logits 
Example #10
Source File: squeezenet.py    From squeezenet with MIT License
def _squeezenet(images, num_classes=10):
        net = conv2d(images, 96, [2, 2], scope='conv1')
        net = max_pool2d(net, [2, 2], scope='maxpool1')
        net = fire_module(net, 16, 64, scope='fire2')
        net = fire_module(net, 16, 64, scope='fire3')
        net = fire_module(net, 32, 128, scope='fire4')
        net = max_pool2d(net, [2, 2], scope='maxpool4')
        net = fire_module(net, 32, 128, scope='fire5')
        net = fire_module(net, 48, 192, scope='fire6')
        net = fire_module(net, 48, 192, scope='fire7')
        net = fire_module(net, 64, 256, scope='fire8')
        net = max_pool2d(net, [2, 2], scope='maxpool8')
        net = fire_module(net, 64, 256, scope='fire9')
        net = avg_pool2d(net, [4, 4], scope='avgpool10')
        net = conv2d(net, num_classes, [1, 1],
                     activation_fn=None,
                     normalizer_fn=None,
                     scope='conv10')
        logits = tf.squeeze(net, [2], name='logits')
        return logits 
Example #11
Source File: slim_resnet_utils.py    From X-Detector with Apache License 2.0
def subsample(inputs, factor, scope=None):
  """Subsamples the input along the spatial dimensions.

  Args:
    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
    factor: The subsampling factor.
    scope: Optional variable_scope.

  Returns:
    output: A `Tensor` of size [batch, height_out, width_out, channels] with the
      input, either intact (if factor == 1) or subsampled (if factor > 1).
  """
  if factor == 1:
    return inputs
  else:
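    # A 1x1 max pool is an identity over each window, so striding it simply
    # keeps every factor-th activation along each spatial dimension.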
    return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope) 
Example #12
Source File: squeezenet.py    From DirectML with MIT License
def _arg_scope(is_training, weight_decay, bn_decay, data_format):
    with arg_scope([conv2d],
                   weights_regularizer=l2_regularizer(weight_decay),
                   normalizer_fn=batch_norm,
                   normalizer_params={'is_training': is_training,
                                      'fused': True,
                                      'decay': bn_decay}):
        with arg_scope([conv2d, avg_pool2d, max_pool2d, batch_norm],
                       data_format=data_format) as sc:
                return sc 
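
A hedged usage sketch (not part of the source): the saved scope returned by _arg_scope can be re-entered with arg_scope, so the data_format and batch-norm settings apply to every conv2d and pooling call made while building one of the _squeezenet variants above. The hyperparameter values here are illustrative:

with arg_scope(_arg_scope(is_training=True, weight_decay=1e-4,
                          bn_decay=0.9, data_format='NCHW')):
    logits = _squeezenet(images, num_classes=1000, data_format='NCHW')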
Example #13
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testCascadedGrouping(self):
    inputs = tf.zeros([6, 8, 8, 10], name='prev')
    with arg_scope(
        [layers.conv2d, layers.max_pool2d],
        kernel_size=1,
        stride=1,
        padding='SAME'):
      net = layers.conv2d(inputs, 17, scope='conv/input')

      first = layers.conv2d(net, num_outputs=17, scope='conv/first')
      add_0 = tf.add(first, net, 'Add/first')  # So conv/first must be 17.
      second = layers.conv2d(add_0, num_outputs=17, scope='conv/second')
      out = tf.add(net, second, 'Add/second')  # So conv/second must be 17.

    # Instantiate OpRegularizerManager.
    op_handler_dict = self._default_op_handler_dict
    op_handler_dict['Conv2D'] = IndexConvSourceOpHandler()
    op_reg_manager = orm.OpRegularizerManager([out.op], op_handler_dict)

    grouped_names = [
        [op_slice.op.name for op_slice in group.op_slices]
        for group in op_reg_manager._op_group_dict.values()]
    expected = set([
        'conv/second/Conv2D', 'Add/second', 'conv/first/Conv2D',
        'conv/input/Conv2D', 'Add/first'
    ])
    groups = []
    for group in grouped_names:
      filtered = []
      for op_name in group:
        if '/Conv2D' in op_name or 'Add/' in op_name:
          filtered.append(op_name)
      if filtered:
        groups.append(set(filtered))
        if DEBUG_PRINTS:
          print('Group Found = ', filtered)
    self.assertIn(expected, groups) 
Example #14
Source File: squeezenet.py    From squeezenet with MIT License
def fire_module(inputs,
                squeeze_depth,
                expand_depth,
                reuse=None,
                scope=None):
    with tf.variable_scope(scope, 'fire', [inputs], reuse=reuse):
        with arg_scope([conv2d, max_pool2d]):
            net = _squeeze(inputs, squeeze_depth)
            net = _expand(net, expand_depth)
        return net 
Example #15
Source File: squeezenet.py    From DirectML with MIT License
def fire_module(inputs,
                squeeze_depth,
                expand_depth,
                reuse=None,
                scope=None,
                data_format='NCHW'):
    with tf.compat.v1.variable_scope(scope, 'fire', [inputs], reuse=reuse):
        with arg_scope([conv2d, max_pool2d]):
            net = _squeeze(inputs, squeeze_depth)
            net = _expand(net, expand_depth, data_format)
        return net 
Example #16
Source File: test_tf_graph.py    From hiddenlayer with MIT License
def pool(self, input):
        return layers.max_pool2d(input, kernel_size=(2, 2), 
                                 stride=(2, 2), padding="SAME") 
Example #17
Source File: pretrained_models.py    From dhSegment with GNU General Public License v3.0
def vgg_16_fn(input_tensor: tf.Tensor, scope='vgg_16', blocks=5, weight_decay=0.0005) \
        -> (tf.Tensor, list):  # list of tf.Tensors (layers)
    intermediate_levels = []
    # intermediate_levels.append(input_tensor)
    with slim.arg_scope(nets.vgg.vgg_arg_scope(weight_decay=weight_decay)):
        with tf.variable_scope(scope, 'vgg_16', [input_tensor]) as sc:
            input_tensor = mean_substraction(input_tensor)
            intermediate_levels.append(input_tensor)
            end_points_collection = sc.original_name_scope + '_end_points'
            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with slim.arg_scope(
                    [layers.conv2d, layers.fully_connected, layers.max_pool2d],
                    outputs_collections=end_points_collection):
                net = layers.repeat(
                    input_tensor, 2, layers.conv2d, 64, [3, 3], scope='conv1')
                intermediate_levels.append(net)
                net = layers.max_pool2d(net, [2, 2], scope='pool1')
                if blocks >= 2:
                    net = layers.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool2')
                if blocks >= 3:
                    net = layers.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool3')
                if blocks >= 4:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool4')
                if blocks >= 5:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool5')

                return net, intermediate_levels 
Example #18
Source File: squeezenet.py    From squeezenet with MIT License
def _arg_scope(is_training, weight_decay, bn_decay):
    with arg_scope([conv2d],
                   weights_regularizer=l2_regularizer(weight_decay),
                   normalizer_fn=batch_norm,
                   normalizer_params={'is_training': is_training,
                                      'fused': True,
                                      'decay': bn_decay}):
        with arg_scope([conv2d, avg_pool2d, max_pool2d, batch_norm],
                       data_format='NCHW') as sc:
                return sc 
Example #19
Source File: slim_resnet_utils.py    From X-Detector with Apache License 2.0
def resnet_v1_backbone(inputs,
              blocks,
              is_training=True,
              output_stride=None,
              include_root_block=True,
              reuse=None,
              scope=None):
  with variable_scope.variable_scope(
      scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with arg_scope(
        [layers.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
        outputs_collections=end_points_collection):
      with arg_scope([layers.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            output_stride /= 4
          net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # Convert end_points_collection into a dictionary of end_points.
        end_points = utils.convert_collection_to_dict(end_points_collection)

        return net, end_points 
Example #20
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def max_pool2d(self, *args, **kwargs):
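    # Pooling has no num_outputs to parameterize, so the call is simply
    # delegated to the wrapped max_pool2d with channel masking passed through.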
    return self._pass_through_mask(
        self._function_dict['max_pool2d'], *args, **kwargs) 
Example #21
Source File: build_vgg.py    From tensorflow-litterbox with Apache License 2.0
def _block_a(net, endpoints, d=64, scope='BlockA'):
    with tf.variable_scope(scope):
        net = endpoints[scope+'/Conv1'] = layers.conv2d(net, d, [3, 3], scope='Conv1_3x3')
        net = endpoints[scope+'/Conv2'] = layers.conv2d(net, d, [3, 3], scope='Conv2_3x3')
        net = endpoints[scope+'/Pool1'] = layers.max_pool2d(net, [2, 2], stride=2, scope='Pool1_2x2/2')
    return net 
Example #22
Source File: build_vgg.py    From tensorflow-litterbox with Apache License 2.0
def _block_b(net, endpoints, d=256, scope='BlockB'):
    with tf.variable_scope(scope):
        net = endpoints[scope+'/Conv1'] = layers.conv2d(net, d, [3, 3], scope='Conv1_3x3')
        net = endpoints[scope+'/Conv2'] = layers.conv2d(net, d, [3, 3], scope='Conv2_3x3')
        net = endpoints[scope+'/Conv3'] = layers.conv2d(net, d, [3, 3], scope='Conv3_3x3')
        net = endpoints[scope+'/Pool1'] = layers.max_pool2d(net, [2, 2], stride=2, scope='Pool1_2x2/2')
    return net 
Example #23
Source File: build_vgg.py    From tensorflow-litterbox with Apache License 2.0
def _block_c(net, endpoints, d=256, scope='BlockC'):
    with tf.variable_scope(scope):
        net = endpoints[scope+'/Conv1'] = layers.conv2d(net, d, [3, 3], scope='Conv1_3x3')
        net = endpoints[scope+'/Conv2'] = layers.conv2d(net, d, [3, 3], scope='Conv2_3x3')
        net = endpoints[scope+'/Conv3'] = layers.conv2d(net, d, [3, 3], scope='Conv3_3x3')
        net = endpoints[scope+'/Conv4'] = layers.conv2d(net, d, [3, 3], scope='Conv4_3x3')
        net = endpoints[scope+'/Pool1'] = layers.max_pool2d(net, [2, 2], stride=2, scope='Pool1_2x2/2')
    return net 
Example #24
Source File: build_vgg.py    From tensorflow-litterbox with Apache License 2.0
def _build_vgg16(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Blah"""

    endpoints = {}
    with tf.name_scope(scope, 'vgg16', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d], 
                    stride=1,
                    padding='SAME'):

                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_b(net, endpoints, d=256, scope='Scale3')
                net = _block_b(net, endpoints, d=512, scope='Scale4')
                net = _block_b(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)

                endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
                return logits, endpoints 
Example #25
Source File: build_vgg.py    From tensorflow-litterbox with Apache License 2.0
def _build_vgg19(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Blah"""

    endpoints = {}
    with tf.name_scope(scope, 'vgg19', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d],
                    stride=1,
                    padding='SAME'):

                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_c(net, endpoints, d=256, scope='Scale3')
                net = _block_c(net, endpoints, d=512, scope='Scale4')
                net = _block_c(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)

                endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
                return logits, endpoints 
Example #26
Source File: build_inception_v4.py    From tensorflow-litterbox with Apache License 2.0
def _block_a_reduce(net, endpoints, k=192, l=224, m=256, n=384, scope='BlockReduceA'):
    # 35 x 35 -> 17 x 17 reduce
    # inception-v4: k=192, l=224, m=256, n=384
    # inception-resnet-v1: k=192, l=192, m=256, n=384
    # inception-resnet-v2: k=256, l=256, m=384, n=384
    # default padding = VALID
    # default stride = 1
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
                # 17 x 17 x input
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, n, [3, 3], stride=2, scope='Conv1_3x3/2')
                # 17 x 17 x n
            with tf.variable_scope('Br3_3x3Dbl'):
                br3 = layers.conv2d(net, k, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, l, [3, 3], padding='SAME', scope='Conv2_3x3')
                br3 = layers.conv2d(br3, m, [3, 3], stride=2, scope='Conv3_3x3/2')
                # 17 x 17 x m
            net = tf.concat(3, [br1, br2, br3], name='Concat1')
            # 17 x 17 x input + n + m
            # 1024 for v4 (384 + 384 + 256)
            # 896 for res-v1 (256 + 384 +256)
            # 1152 for res-v2 (384 + 384 + 384)
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net 
Example #27
Source File: build_inception_v4.py    From tensorflow-litterbox with Apache License 2.0
def _block_stem_res(net, endpoints, scope='Stem'):
    # Simpler _stem for inception-resnet-v1 network
    # NOTE observe endpoints of first 3 layers
    # default padding = VALID
    # default stride = 1
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            # 299 x 299 x 3
            net = layers.conv2d(net, 32, [3, 3], stride=2, scope='Conv1_3x3/2')
            endpoints[scope + '/Conv1'] = net
            # 149 x 149 x 32
            net = layers.conv2d(net, 32, [3, 3], scope='Conv2_3x3')
            endpoints[scope + '/Conv2'] = net
            # 147 x 147 x 32
            net = layers.conv2d(net, 64, [3, 3], padding='SAME', scope='Conv3_3x3')
            endpoints[scope + '/Conv3'] = net
            # 147 x 147 x 64
            net = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            # 73 x 73 x 64
            net = layers.conv2d(net, 80, [1, 1], padding='SAME', scope='Conv4_1x1')
            # 73 x 73 x 80
            net = layers.conv2d(net, 192, [3, 3], scope='Conv5_3x3')
            # 71 x 71 x 192
            net = layers.conv2d(net, 256, [3, 3], stride=2, scope='Conv6_3x3/2')
            # 35 x 35 x 256
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net 
Example #28
Source File: inception_block.py    From VAE-GAN with MIT License
def inception_v3_figure5_downsample(
	name, x, end_points, 
	act_fn, norm_fn, norm_params, winit_fn, 
	filters=[[64,96,96,], [384,],],
	is_avg_pooling=True):


	with tf.variable_scope(name):
		with tf.variable_scope('branch0'):
			branch_0 = tcl.conv2d(x, filters[0][0], 1, 
						stride=1, activation_fn=act_fn, normalizer_fn=norm_fn, normalizer_params=norm_params,
						padding='SAME', weights_initializer=winit_fn, scope='conv2d_0a_1x1')
			branch_0 = tcl.conv2d(branch_0, filters[0][1], 3,
						stride=1, activation_fn=act_fn, normalizer_fn=norm_fn, normalizer_params=norm_params,
						padding='SAME', weights_initializer=winit_fn, scope='conv2d_0b_3x3')
			branch_0 = tcl.conv2d(branch_0, filters[0][2], 3,
						stride=2, activation_fn=act_fn, normalizer_fn=norm_fn, normalizer_params=norm_params,
						padding='VALID', weights_initializer=winit_fn, scope='conv2d_0c_3x3')

		with tf.variable_scope('branch1'):
			branch_1 = tcl.conv2d(x, filters[1][0], 3, 
						stride=2, activation_fn=act_fn, normalizer_fn=norm_fn, normalizer_params=norm_params,
						padding='VALID', weights_initializer=winit_fn, scope='conv2d_1a_3x3')

		with tf.variable_scope('branch2'):
			if is_avg_pooling:
				branch_2 = tcl.avg_pool2d(x, 3, stride=2,
						padding='VALID', scope='avgpool_2a_3x3')
			else:
				branch_2 = tcl.max_pool2d(x, 3, stride=2, 
						padding='VALID', scope='maxpool_2a_3x3')

		x = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])

		end_points[name] = x
	return x, end_points 
Example #29
Source File: build_inception_v4.py    From tensorflow-litterbox with Apache License 2.0
def _block_b_reduce_res(net, endpoints, ver=2, scope='BlockReduceB'):
    # 17 x 17 -> 8 x 8 reduce

    # configure branch filter numbers
    br3_num = 256
    br4_num = 256
    if ver == 1:
        br3_inc = 0
        br4_inc = 0
    else:
        br3_inc = 32
        br4_inc = 32

    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, 256, [1, 1], padding='SAME', scope='Conv1_1x1')
                br2 = layers.conv2d(br2, 384, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br3_3x3'):
                br3 = layers.conv2d(net, br3_num, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, br3_num + br3_inc, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br4_3x3Dbl'):
                br4 = layers.conv2d(net, br4_num, [1, 1], padding='SAME', scope='Conv1_1x1')
                br4 = layers.conv2d(br4, br4_num + 1*br4_inc, [3, 3], padding='SAME', scope='Conv2_3x3')
                br4 = layers.conv2d(br4, br4_num + 2*br4_inc, [3, 3], stride=2, scope='Conv3_3x3/2')
            net = tf.concat(3, [br1, br2, br3, br4], name='Concat1')
            # 8 x 8 x 1792 v1, 2144 v2 (paper indicates 2048 but only get this if we use a v1 config for this block)
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net 
Example #30
Source File: base_network.py    From VAE-GAN with MIT License
def maxpool2d(self, name, x, size, stride, padding='SAME', disp=True, collect_end_points=True):
		_padding = self.config.get(name + ' padding', padding)
		x = tcl.max_pool2d(x, size, stride=stride, padding=_padding, scope=name)
		if disp:
			print('\t\tMaxPool(' + str(name) + ') --> ', x.get_shape())
		if collect_end_points:
			self.end_points[name] = x
		return x
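
Note that the wrapper looks up '<name> padding' in self.config, so padding can be overridden per layer without changing call sites. A minimal sketch of that lookup in isolation (hypothetical values):

config = {'pool1 padding': 'VALID'}
name, padding = 'pool1', 'SAME'
_padding = config.get(name + ' padding', padding)  # per-layer override wins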