Python tensorflow.keras.layers.GlobalAveragePooling2D() Examples

The following are 30 code examples of tensorflow.keras.layers.GlobalAveragePooling2D(), drawn from open-source projects; the source file and originating project are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.layers, or try the search function.
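Before the examples, a minimal sketch of what the layer does (the shapes are illustrative): GlobalAveragePooling2D averages each feature map over its spatial dimensions, collapsing a (batch, height, width, channels) tensor to (batch, channels).

import numpy as np
import tensorflow as tf

x = np.random.uniform(size=(2, 7, 7, 64)).astype("float32")  # (batch, H, W, C)
gap = tf.keras.layers.GlobalAveragePooling2D()
y = gap(x)
print(y.shape)  # (2, 64): one averaged value per channel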
Example #1
Source File: osnet.py    From keras_imagenet with MIT License
def get_aggregation_gate(in_filters, reduction=16):
    """Get the "aggregation gate (AG)" op.

    # Arguments
        in_filters: number of input channels (also the size of the gate output).
        reduction: channel reduction for the hidden layer.

    # Returns
        The AG op (a models.Sequential module).
    """
    gate = models.Sequential()
    gate.add(layers.GlobalAveragePooling2D())
    gate.add(layers.Dense(in_filters // reduction, use_bias=False))
    gate.add(layers.BatchNormalization())
    gate.add(layers.Activation('relu'))
    gate.add(layers.Dense(in_filters))
    gate.add(layers.Activation('sigmoid'))
    gate.add(layers.Reshape((1, 1, -1)))  # reshape to (1, 1, C) so the gate broadcasts over H and W
    return gate 
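A usage sketch for the gate above (not from the original file; it assumes get_aggregation_gate() and its keras imports, layers and models, are in scope): because the gate ends in Reshape((1, 1, -1)), its (batch, 1, 1, C) output broadcasts over the spatial dimensions when multiplied with a feature map.

import tensorflow as tf

gate = get_aggregation_gate(in_filters=64)
features = tf.random.uniform((2, 28, 28, 64))  # (batch, H, W, C)
gated = features * gate(features)              # (2, 1, 1, 64) broadcasts over H and W
print(gated.shape)                             # (2, 28, 28, 64)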
Example #2
Source File: yolo3_nano.py    From keras-YOLOv3-model-set with MIT License
def _fca_block(inputs, reduct_ratio, block_id):
    in_channels = inputs.shape.as_list()[-1]
    #in_shapes = inputs.shape.as_list()[1:3]
    reduct_channels = int(in_channels // reduct_ratio)
    prefix = 'fca_block_{}_'.format(block_id)
    x = GlobalAveragePooling2D(name=prefix + 'average_pooling')(inputs)
    x = Dense(reduct_channels, activation='relu', name=prefix + 'fc1')(x)
    x = Dense(in_channels, activation='sigmoid', name=prefix + 'fc2')(x)

    x = Reshape((1, 1, in_channels), name=prefix + 'reshape')(x)  # prefix the name to avoid collisions across blocks
    x = Multiply(name=prefix + 'multiply')([x, inputs])
    return x 
Example #3
Source File: densenet121_resisc45.py    From armory with MIT License
def make_densenet121_resisc_model(**model_kwargs) -> tf.keras.Model:
    # Load a DenseNet121 base without its classification head (weights=None; weights are loaded separately)
    model_notop = DenseNet121(
        include_top=False, weights=None, input_shape=(224, 224, 3)
    )

    # Add new layers
    x = GlobalAveragePooling2D()(model_notop.output)
    predictions = Dense(num_classes, activation="softmax")(x)  # num_classes is a module-level constant in the original file

    # Create graph of new model and freeze pre-trained layers
    new_model = Model(inputs=model_notop.input, outputs=predictions)

    for layer in new_model.layers[:-1]:
        layer.trainable = False
        if "bn" == layer.name[-2:]:  # allow batchnorm layers to be trainable
            layer.trainable = True

    # compile the model
    new_model.compile(
        optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
    )

    return new_model 
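A quick sanity check one might run on the returned model (num_classes is defined at module level in the original file; for the RESISC-45 dataset it would be 45):

model = make_densenet121_resisc_model()
trainable_names = [layer.name for layer in model.layers if layer.trainable]
# Expect only the backbone's batch-norm layers plus the final Dense head here.
print(len(trainable_names))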
Example #4
Source File: SE.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block
    Args:
        input: input tensor
        ratio: reduction ratio for the squeeze bottleneck
    Returns: a keras tensor
    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    filters = int(init.shape[-1])  # channel count; channels_last assumed (._keras_shape no longer exists in tf.keras)
    se_shape = (1, 1, filters)
    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
    x = multiply([init, se])
    return x 
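A minimal usage sketch (names and shapes are illustrative, not from the original file; it assumes squeeze_excite_block and its layer imports from the example above are in scope): the block can be dropped in after any convolution whose output you want to recalibrate channel-wise.

from tensorflow.keras.layers import Conv2D, Input
from tensorflow.keras.models import Model

inp = Input(shape=(32, 32, 3))
feat = Conv2D(64, 3, padding='same', activation='relu')(inp)
feat = squeeze_excite_block(feat, ratio=16)  # each channel rescaled by a learned weight in (0, 1)
model = Model(inp, feat)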
Example #5
Source File: bisenet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 data_format="channels_last",
                 **kwargs):
        super(PyramidPoolingZeroBranch, self).__init__(**kwargs)
        self.in_size = in_size
        self.data_format = data_format

        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv")
        self.up = InterpolationBlock(
            scale_factor=None,
            interpolation="bilinear",
            data_format=data_format,
            name="up") 
Example #6
Source File: bisenet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(AttentionRefinementBlock, self).__init__(**kwargs)
        self.data_format = data_format

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv1")
        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.conv2 = conv1x1_block(
            in_channels=out_channels,
            out_channels=out_channels,
            activation="sigmoid",
            data_format=data_format,
            name="conv2") 
Example #7
Source File: cnn_cifar_affinity.py    From affinity-loss with MIT License
def create_models():
    input = layers.Input((32,32,3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.BatchNormalization()(x)
    x = ClusteringAffinity(10, 1, 90.0)(x)

    # The output carries one extra dimension used to compute the regularization
    # term; ignore that extra dimension at prediction time.

    return Model(input, x) 
Example #8
Source File: train.py    From object-localization with MIT License
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA)

    # freeze (or unfreeze) the backbone layers according to the trainable flag
    for layer in model.layers:
        layer.trainable = trainable

    out = model.layers[-1].output

    x = Conv2D(4, kernel_size=3)(out)
    x = Reshape((4,), name="coords")(x)

    y = GlobalAveragePooling2D()(out)
    y = Dense(CLASSES, name="classes", activation="softmax")(y)

    return Model(inputs=model.input, outputs=[x, y]) 
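One plausible way to compile this two-headed model (the loss choices are an assumption for illustration, not necessarily what the original repo uses; IMAGE_SIZE, ALPHA, and CLASSES are constants defined elsewhere in the repo). The dict keys match the "coords" and "classes" layer names:

model = create_model()
model.compile(optimizer="adam",
              loss={"coords": "mean_squared_error",
                    "classes": "categorical_crossentropy"})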
Example #9
Source File: mobilenet_v3.py    From keras-YOLOv3-model-set with MIT License
def _se_block(inputs, filters, se_ratio, prefix):
    x = GlobalAveragePooling2D(name=prefix + 'squeeze_excite/AvgPool')(inputs)
    if K.image_data_format() == 'channels_first':
        x = Reshape((filters, 1, 1))(x)
    else:
        x = Reshape((1, 1, filters))(x)
    x = Conv2D(_depth(filters * se_ratio),
               kernel_size=1,
               padding='same',
               name=prefix + 'squeeze_excite/Conv')(x)
    x = ReLU(name=prefix + 'squeeze_excite/Relu')(x)
    x = Conv2D(filters,
               kernel_size=1,
               padding='same',
               name=prefix + 'squeeze_excite/Conv_1')(x)
    x = Activation(hard_sigmoid)(x)
    #if K.backend() == 'theano':
        ## For the Theano backend, we have to explicitly make
        ## the excitation weights broadcastable.
        #x = Lambda(
            #lambda br: K.pattern_broadcast(br, [True, True, True, False]),
            #output_shape=lambda input_shape: input_shape,
            #name=prefix + 'squeeze_excite/broadcast')(x)
    x = Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, x])
    return x 
Example #10
Source File: cnn_cifar_optuna_affinity.py    From affinity-loss with MIT License
def create_models(sigma, m):
    input = layers.Input((32,32,3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.BatchNormalization()(x)
    x = ClusteringAffinity(10, m, sigma)(x)

    return Model(input, x) 
Example #11
Source File: horde_models.py    From ICCV2019-Horde with MIT License
def KOrderModel(extractor_name,
                embedding_sizes,
                high_order_dims,
                ho_trainable=False,
                end_layer=None):
    model = get_extractor(extractor_name, end_layer=end_layer)
    inputs = model.input
    x = model.output

    max_order = len(high_order_dims)
    output_list = [x]

    # Add all high-order approximation layers:
    for k, order_dim in enumerate(high_order_dims, start=2):
        x_ho = CKOP(output_dim=order_dim, name='CKOP_' + str(k), ho_trainable=ho_trainable)([x] * k)
        output_list.append(x_ho)

    # Add pooling and embedding layers:
    for k in range(len(output_list)):
        output_list[k] = GlobalAveragePooling2D(name='GAP_' + extractor_name + '_O' + str(k + 1))(output_list[k])
        if embedding_sizes[k] > 0:
            output_list[k] = Dense(embedding_sizes[k], use_bias=False)(output_list[k])
        output_list[k] = L2Normalisation(name='L2_' + extractor_name + '_O' + str(k + 1))(output_list[k])

    return Model(inputs=inputs, outputs=output_list, name=extractor_name + '_O' + str(max_order)), get_preprocess_method(extractor_name) 
Example #12
Source File: mobilenet_v3_small.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def build(self, plot=False):
        """build MobileNetV3 Small.
        # Arguments
            plot: Boolean, whether to plot the model.
        # Returns
            model: the built Keras Model.
        """
        inputs = Input(shape=self.shape)

        x = self._conv_block(inputs, 16, (3, 3), strides=(2, 2), nl='HS')

        x = self._bottleneck(x, 16, (3, 3), e=16, s=2, squeeze=True, nl='RE')
        x = self._bottleneck(x, 24, (3, 3), e=72, s=2, squeeze=False, nl='RE')
        x = self._bottleneck(x, 24, (3, 3), e=88, s=1, squeeze=False, nl='RE')
        x = self._bottleneck(x, 40, (5, 5), e=96, s=2, squeeze=True, nl='HS')
        x = self._bottleneck(x, 40, (5, 5), e=240, s=1, squeeze=True, nl='HS')
        x = self._bottleneck(x, 40, (5, 5), e=240, s=1, squeeze=True, nl='HS')
        x = self._bottleneck(x, 48, (5, 5), e=120, s=1, squeeze=True, nl='HS')
        x = self._bottleneck(x, 48, (5, 5), e=144, s=1, squeeze=True, nl='HS')
        x = self._bottleneck(x, 96, (5, 5), e=288, s=2, squeeze=True, nl='HS')
        x = self._bottleneck(x, 96, (5, 5), e=576, s=1, squeeze=True, nl='HS')
        x = self._bottleneck(x, 96, (5, 5), e=576, s=1, squeeze=True, nl='HS')

        x = self._conv_block(x, 576, (1, 1), strides=(1, 1), nl='HS')
        x = GlobalAveragePooling2D()(x)
        x = Reshape((1, 1, 576))(x)

        x = Conv2D(1280, (1, 1), padding='same')(x)
        x = self._return_activation(x, 'HS')
        x = Conv2D(self.n_class, (1, 1), padding='same', activation='softmax')(x)

        output = Reshape((self.n_class,))(x)

        model = Model(inputs, output)

        #if plot:
        #    plot_model(model, to_file='images/MobileNetv3_small.png', show_shapes=True)

        return model 
Example #13
Source File: yolo2_darknet.py    From keras-YOLOv3-model-set with MIT License
def darknet19(inputs):
    """Generate Darknet-19 model for Imagenet classification."""
    body = darknet19_body()(inputs)
    x = DarknetConv2D(1000, (1, 1))(body)
    x = GlobalAveragePooling2D()(x)
    logits = Softmax()(x)
    return Model(inputs, logits) 
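Because the head is a 1x1 convolution followed by global average pooling, this classifier is fully convolutional and, in principle, tolerates variable input resolutions. A usage sketch (it assumes darknet19_body, DarknetConv2D, and the keras imports from the repo are in scope):

inputs = Input(shape=(None, None, 3))  # a GAP head works with any spatial size
model = darknet19(inputs)
# model.output_shape -> (None, 1000)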
Example #14
Source File: yamnet.py    From models with Apache License 2.0
def yamnet(features):
  """Define the core YAMNet mode in Keras."""
  net = layers.Reshape(
    (params.PATCH_FRAMES, params.PATCH_BANDS, 1),
    input_shape=(params.PATCH_FRAMES, params.PATCH_BANDS))(features)
  for (i, (layer_fun, kernel, stride, filters)) in enumerate(_YAMNET_LAYER_DEFS):
    net = layer_fun('layer{}'.format(i + 1), kernel, stride, filters)(net)
  net = layers.GlobalAveragePooling2D()(net)
  logits = layers.Dense(units=params.NUM_CLASSES, use_bias=True)(net)
  predictions = layers.Activation(
    name=params.EXAMPLE_PREDICTIONS_LAYER_NAME,
    activation=params.CLASSIFIER_ACTIVATION)(logits)
  return predictions 
Example #15
Source File: SEResNeXt.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def squeeze_excitation_layer(self, x, out_dim):
        '''
        SE module performs inter-channel weighting.
        '''
        squeeze = GlobalAveragePooling2D()(x)
        
        excitation = Dense(units=out_dim // self.ratio)(squeeze)
        excitation = self.activation(excitation)
        excitation = Dense(units=out_dim)(excitation)
        excitation = self.activation(excitation, 'sigmoid')
        excitation = Reshape((1,1,out_dim))(excitation)
        
        scale = multiply([x,excitation])
        
        return scale 
Example #16
Source File: SEResNeXt.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def build_model(self, inputs, num_classes):
        '''
        Build a SENet model.
        '''
        x = self.initial_layer(inputs)
        
        x = self.residual_layer(x, out_dim=64)
        x = self.residual_layer(x, out_dim=128)
        x = self.residual_layer(x, out_dim=256)
        
        x = GlobalAveragePooling2D()(x)
        x = Dense(units=num_classes, activation='softmax')(x)
        
        return Model(inputs, x) 
Example #17
Source File: layers.py    From thundernet-tensorflow2.0 with MIT License
def __init__(self, num_classes, first_channel=24, channels_per_stage=(132, 264, 528)):
        super(ShuffleNetv2, self).__init__(name="ShuffleNetv2")

        self.num_classes = num_classes

        self.conv1_bn_relu = Conv2D_BN_ReLU(first_channel, 3, 2)
        self.pool1 = MaxPool2D(3, strides=2, padding="SAME")
        self.stage2 = ShufflenetStage(first_channel, channels_per_stage[0], 4)
        self.stage3 = ShufflenetStage(channels_per_stage[0], channels_per_stage[1], 8)
        self.stage4 = ShufflenetStage(channels_per_stage[1], channels_per_stage[2], 4)
        #self.conv5_bn_relu = Conv2D_BN_ReLU(1024, 1, 1)
        self.gap = GlobalAveragePooling2D()
        self.linear = Dense(num_classes) 
Example #18
Source File: mobilenet_base.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def _squeeze(self, inputs):
        """Squeeze and Excitation.
        This function defines a squeeze structure.
        # Arguments
            inputs: Tensor, input tensor of conv layer.
        """
        input_channels = int(inputs.shape[-1])

        x = GlobalAveragePooling2D()(inputs)
        x = Dense(input_channels, activation='relu')(x)
        x = Dense(input_channels, activation='hard_sigmoid')(x)
        x = Reshape((1, 1, input_channels))(x)

        return x 
Example #19
Source File: se.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block

    Args:
        input: input tensor
        ratio: reduction ratio for the squeeze bottleneck

    Returns: a keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = K.int_shape(init)[channel_axis]  # ._keras_shape no longer exists in tf.keras
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x 
Example #20
Source File: Darknet53.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def darknet():
    """Darknet-53 classifier.
    """
    inputs = Input(shape=(416, 416, 3))
    x = darknet_base(inputs)

    x = GlobalAveragePooling2D()(x)
    x = Dense(1000, activation='softmax')(x)

    model = Model(inputs, x)

    return model 
Example #21
Source File: chemnet_models.py    From deepchem with MIT License
def _build_graph(self):
    smile_images = Input(shape=self.input_shape)
    stem = chemnet_layers.Stem(self.base_filters)(smile_images)

    inceptionA_out = self.build_inception_module(inputs=stem, type="A")
    reductionA_out = chemnet_layers.ReductionA(
        self.base_filters)(inceptionA_out)

    inceptionB_out = self.build_inception_module(
        inputs=reductionA_out, type="B")
    reductionB_out = chemnet_layers.ReductionB(
        self.base_filters)(inceptionB_out)

    inceptionC_out = self.build_inception_module(
        inputs=reductionB_out, type="C")
    avg_pooling_out = GlobalAveragePooling2D()(inceptionC_out)

    if self.mode == "classification":
      logits = Dense(self.n_tasks * self.n_classes)(avg_pooling_out)
      logits = Reshape((self.n_tasks, self.n_classes))(logits)
      if self.n_classes == 2:
        output = Activation(activation='sigmoid')(logits)
        loss = SigmoidCrossEntropy()
      else:
        output = Softmax()(logits)
        loss = SoftmaxCrossEntropy()
      outputs = [output, logits]
      output_types = ['prediction', 'loss']

    else:
      output = Dense(self.n_tasks * 1)(avg_pooling_out)
      output = Reshape((self.n_tasks, 1))(output)
      outputs = [output]
      output_types = ['prediction']
      loss = L2Loss()

    model = tf.keras.Model(inputs=[smile_images], outputs=outputs)
    return model, loss, output_types 
Example #22
Source File: squeeze_excitation.py    From DeepPoseKit with Apache License 2.0
def channel_squeeze_excite_block(input, ratio=0.25):
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = K.int_shape(init)[channel_axis]  # ._keras_shape no longer exists in tf.keras
    cse_shape = (1, 1, filters)

    cse = layers.GlobalAveragePooling2D()(init)
    cse = layers.Reshape(cse_shape)(cse)
    ratio_filters = int(np.round(filters * ratio))
    if ratio_filters < 1:
        ratio_filters += 1
    cse = layers.Conv2D(
        ratio_filters,
        (1, 1),
        padding="same",
        activation="relu",
        kernel_initializer="he_normal",
        use_bias=False,
    )(cse)
    cse = layers.BatchNormalization()(cse)
    cse = layers.Conv2D(
        filters,
        (1, 1),
        activation="sigmoid",
        kernel_initializer="he_normal",
        use_bias=False,
    )(cse)

    if K.image_data_format() == "channels_first":
        cse = layers.Permute((3, 1, 2))(cse)

    cse = layers.Multiply()([init, cse])
    return cse 
Example #23
Source File: cnn_cifar_softmax.py    From affinity-loss with MIT License
def create_models():
    input = layers.Input((32,32,3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(10, activation="softmax")(x)

    return Model(input, x) 
Example #24
Source File: horde_models.py    From ICCV2019-Horde with MIT License
def CascadedABE(embedding_size,
                high_order_dims,
                features_reduction=256,
                ho_trainable=True,
                n_head=8):
    model, preprocess_method = ABE(embedding_size[0], n_head=n_head)  # pass the parameter through rather than hard-coding 8
    inp = model.input
    multi_head_out = [model.get_layer(name='inception_5b/output').get_output_at(k) for k in range(n_head)]
    concat = Concatenate()(multi_head_out)  # Nx H x W x n_ensemble*1024

    if features_reduction is not None:
        concat = Conv2D(filters=features_reduction,
                        kernel_size=(1, 1),
                        use_bias=False)(concat)

    output_list = [concat]

    # Add all high-order approximation layers:
    for k, order_dim in enumerate(high_order_dims, start=2):
        only_project_second = (k != 2)
        x_ho = PKOB(order_dim,
                    only_project_second=only_project_second,
                    ho_trainable=ho_trainable)([output_list[-1], concat])
        output_list.append(x_ho)

    # Add pooling and embedding layers:
    for k in range(1, len(output_list)):
        output_list[k] = GlobalAveragePooling2D(name='GAP_O' + str(k + 1))(output_list[k])
        if ho_trainable:
            output_list[k] = Dense(embedding_size[k],
                                   use_bias=False,
                                   name='Proj_O' + str(k + 1))(output_list[k])
        output_list[k] = L2Normalisation(name='L2_O' + str(k + 1))(output_list[k])

    # Finally we replace the first order by the true model:
    output_list[0] = model.get_layer(name='ABE'+str(n_head)).output
    return Model(inp, output_list, name='ABE'+str(n_head)+'_O'+str(len(embedding_size))), preprocess_method 
Example #25
Source File: horde_models.py    From ICCV2019-Horde with MIT License
def CascadedKOrder(extractor_name,
                   embedding_sizes,
                   high_order_dims,
                   ho_trainable=True,
                   end_layer=None):
    model = get_extractor(extractor_name, end_layer=end_layer)
    inputs = model.input
    x = model.output

    max_order = len(high_order_dims)
    output_list = [x]

    # Add all high-order approximation layers:
    for k, order_dim in enumerate(high_order_dims, start=2):
        only_project_second = (k != 2)
        x_ho = PKOB(order_dim,
                    only_project_second=only_project_second,
                    ho_trainable=ho_trainable)([output_list[-1], x])
        output_list.append(x_ho)

    # Add pooling and embedding layers:
    for k in range(len(output_list)):
        output_list[k] = GlobalAveragePooling2D(name='GAP_' + extractor_name + '_O' + str(k + 1))(output_list[k])

        if ho_trainable:
            output_list[k] = Dense(embedding_sizes[k],
                                   use_bias=False,
                                   name='Proj_' + extractor_name + '_O' + str(k + 1))(output_list[k])
        elif k == 0:
            output_list[k] = Dense(embedding_sizes[k],
                                   use_bias=False,
                                   name='Proj_' + extractor_name + '_O' + str(k + 1))(output_list[k])

        output_list[k] = L2Normalisation(name='L2_' + extractor_name + '_O' + str(k + 1))(output_list[k])

    return Model(inputs=inputs, outputs=output_list, name=extractor_name + '_O' + str(max_order)), get_preprocess_method(extractor_name) 
Example #26
Source File: dml_models.py    From ICCV2019-Horde with MIT License
def Baseline(extractor_name,
             embedding_size,
             end_layer=None):
    model = get_extractor(extractor_name, end_layer=end_layer)
    inputs = model.input
    x = model.output

    x = Conv2D(embedding_size, (1, 1), use_bias=False, name='Embedding')(x)
    x = GlobalAveragePooling2D(name='GAP')(x)
    x = L2Normalisation(name='L2')(x)

    return Model(inputs=inputs, outputs=x, name=extractor_name), get_preprocess_method(extractor_name) 
Example #27
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def _keras_global_avgpool_core(shape=None, data=None):
    assert shape is None or data is None
    if shape is None:
        shape = data.shape

    model = Sequential()
    layer = GlobalAveragePooling2D(input_shape=shape[1:], data_format="channels_last")
    model.add(layer)

    if data is None:
        data = np.random.uniform(size=shape)
    out = model.predict(data)
    return model, out 
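A usage sketch for the helper above: with a channels_last input of shape (batch, H, W, C), the pooled output has shape (batch, C).

model, out = _keras_global_avgpool_core(shape=(1, 5, 5, 3))
print(out.shape)  # (1, 3): the spatial dimensions are averaged away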
Example #28
Source File: bisenet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 reduction=4,
                 data_format="channels_last",
                 **kwargs):
        super(FeatureFusion, self).__init__(**kwargs)
        self.data_format = data_format
        mid_channels = out_channels // reduction

        self.conv_merge = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv_merge")
        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.conv1 = conv1x1(
            in_channels=out_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.activ = nn.ReLU()
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv2")
        self.sigmoid = tf.nn.sigmoid 
Example #29
Source File: sinet.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 reduction=16,
                 round_mid=False,
                 mid_activation="relu",
                 out_activation="sigmoid",
                 data_format="channels_last",
                 **kwargs):
        super(SEBlock, self).__init__(**kwargs)
        self.data_format = data_format
        self.use_conv2 = (reduction > 1)
        mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)

        self.pool = nn.GlobalAveragePooling2D(
            data_format=data_format,
            name="pool")
        self.fc1 = nn.Dense(
            units=mid_channels,
            input_dim=channels,
            name="fc1")
        if self.use_conv2:
            self.activ = get_activation_layer(mid_activation, name="activ")
            self.fc2 = nn.Dense(
                units=channels,
                input_dim=mid_channels,
                name="fc2")
        self.sigmoid = get_activation_layer(out_activation, name="sigmoid") 
Example #30
Source File: se.py    From keras-squeeze-excite-network with MIT License
def squeeze_excite_block(input_tensor, ratio=16):
    """ Create a channel-wise squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        ratio: reduction ratio for the squeeze bottleneck

    Returns: a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = _tensor_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x