Python keras.layers.GlobalAveragePooling2D() Examples

The following are 30 code examples of keras.layers.GlobalAveragePooling2D(), drawn from open-source projects; the source project, author, file, and license are listed above each example. You may also want to check out all available functions and classes of the module keras.layers.
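As a quick orientation before the project examples, here is a minimal sketch of what the layer does (shapes are illustrative): it averages each feature map over its spatial dimensions, turning a (batch, height, width, channels) tensor into a (batch, channels) vector.

from keras.layers import Input, GlobalAveragePooling2D
from keras.models import Model

inp = Input(shape=(32, 32, 64))        # height x width x channels
out = GlobalAveragePooling2D()(inp)    # averages over the 32x32 spatial grid
model = Model(inp, out)
print(model.output_shape)              # (None, 64)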
Example #1
Source Project: MobileNetV3   Author: xiaochus   File: mobilenet_base.py    License: MIT License
def _squeeze(self, inputs):
        """Squeeze and Excitation.
        This function defines a squeeze structure.

        # Arguments
            inputs: Tensor, input tensor of conv layer.
        """
        input_channels = int(inputs.shape[-1])

        x = GlobalAveragePooling2D()(inputs)
        x = Dense(input_channels, activation='relu')(x)
        x = Dense(input_channels, activation='hard_sigmoid')(x)
        x = Reshape((1, 1, input_channels))(x)
        x = Multiply()([inputs, x])

        return x 
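_squeeze is an instance method and cannot run in isolation; a hedged standalone sketch of the same squeeze-and-excitation wiring (the Conv2D stem and shapes are illustrative, not from the project):

from keras.layers import (Input, Conv2D, GlobalAveragePooling2D, Dense,
                          Reshape, Multiply)
from keras.models import Model

inp = Input(shape=(56, 56, 3))
feats = Conv2D(32, (3, 3), padding='same', activation='relu')(inp)
se = GlobalAveragePooling2D()(feats)           # squeeze: (None, 32)
se = Dense(32, activation='relu')(se)          # excitation MLP
se = Dense(32, activation='hard_sigmoid')(se)  # per-channel gates in [0, 1]
se = Reshape((1, 1, 32))(se)                   # broadcastable over H and W
out = Multiply()([feats, se])                  # channel-wise reweighting
model = Model(inp, out)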
Example #2
Source Project: pygta5   Author: Sentdex   File: xception.py    License: GNU General Public License v3.0
def get_model(session):

    # create the base Xception model (weights=None, so no pre-trained weights are loaded)
    base_model = Xception(weights=None, include_top=False, input_shape=(270, 480, 3))

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # output layer
    predictions = Dense(session.training_dataset_info['number_of_labels'], activation='softmax')(x)
    # model
    model = Model(inputs=base_model.input, outputs=predictions)

    learning_rate = 0.001
    opt = keras.optimizers.Adam(lr=learning_rate, decay=1e-5)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    return model 
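get_model expects a session object exposing training_dataset_info['number_of_labels']; a hedged usage sketch with a hypothetical stand-in (note that weights=None means the Xception trunk is trained from scratch):

from types import SimpleNamespace

# hypothetical stand-in for the project's session object
session = SimpleNamespace(training_dataset_info={'number_of_labels': 9})
model = get_model(session)
model.summary()  # Xception trunk -> GAP -> Dense(1024) -> 9-way softmax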
Example #3
Source Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning.py    License: MIT License
def inception_pseudo(self,dim=224,freeze_layers=30,full_freeze='N'):
		model = InceptionV3(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(5,activation='softmax')(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# ResNet50 Model for transfer Learning 
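All of the transfer-learning heads in this file share the same freeze pattern; a minimal standalone sketch of its effect (weights=None just to keep the sketch light; the book loads 'imagenet'):

from keras.applications.inception_v3 import InceptionV3

base = InceptionV3(weights=None, include_top=False)
for layer in base.layers[:30]:  # mirrors freeze_layers=30
    layer.trainable = False
print(sum(1 for l in base.layers if not l.trainable))  # 30 frozen layers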
Example #4
Source Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning.py    License: MIT License
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
		model = ResNet50(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(5,activation='softmax')(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# VGG16 Model for transfer Learning 
Example #5
Source Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_reg.py    License: MIT License
def inception_pseudo(self,dim=224,freeze_layers=30,full_freeze='N'):
		model = InceptionV3(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(1)(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# ResNet50 Model for transfer Learning 
Example #6
Source Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_reg.py    License: MIT License
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
		model = ResNet50(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(1)(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# VGG16 Model for transfer Learning 
Example #7
Source Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_ffd.py    License: MIT License
def inception_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
        model = InceptionV3(weights='imagenet',include_top=False)
        x = model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        out = Dense(5,activation='softmax')(x)
        model_final = Model(inputs=model.input, outputs=out)
        if full_freeze != 'N':
            for layer in model.layers[0:freeze_layers]:
                layer.trainable = False
        return model_final

# ResNet50 Model for transfer Learning 
Example #8
Source Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_ffd.py    License: MIT License
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
        model = ResNet50(weights='imagenet',include_top=False)
        x = model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        out = Dense(5,activation='softmax')(x)
        model_final = Model(inputs=model.input, outputs=out)
        if full_freeze != 'N':
            for layer in model.layers[0:freeze_layers]:
                layer.trainable = False
        return model_final

# VGG16 Model for transfer Learning 
Example #9
Source Project: Keras-FasterRCNN   Author: you359   File: xception.py    License: MIT License
def classifier_layers(x, input_shape, trainable=False):

    # compile times on theano tend to be very high, so we use smaller ROI pooling regions as a workaround
    # (hence a smaller stride in the region that follows the ROI pool)
    x = TimeDistributed(SeparableConv2D(1536, (3, 3),
                                        padding='same',
                                        use_bias=False),
                        name='block14_sepconv1')(x)
    x = TimeDistributed(BatchNormalization(), name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = TimeDistributed(SeparableConv2D(2048, (3, 3),
                                        padding='same',
                                        use_bias=False),
                        name='block14_sepconv2')(x)
    x = TimeDistributed(BatchNormalization(), name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    x = TimeDistributed(GlobalAveragePooling2D(), name='avg_pool')(x)

    return x 
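The TimeDistributed wrapper applies the pooling to each ROI independently; a minimal shape sketch (the ROI count and feature size are illustrative):

from keras.layers import Input, TimeDistributed, GlobalAveragePooling2D
from keras.models import Model

rois = Input(shape=(4, 7, 7, 2048))                       # 4 pooled ROIs per image
pooled = TimeDistributed(GlobalAveragePooling2D())(rois)  # -> (None, 4, 2048)
print(Model(rois, pooled).output_shape)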
Example #10
Source Project: deep_architect   Author: negrinho   File: keras_ops.py    License: MIT License
def global_pool2d():

    def compile_fn(di, dh):
        layer = layers.GlobalAveragePooling2D()

        def fn(di):
            return {'out': layer(di['in'])}

        return fn

    return siso_keras_module('GlobalAveragePool', compile_fn, {}) 
Example #11
Source Project: 12306   Author: testerSunshine   File: mlearn_for_image.py    License: MIT License
def learn():
    (train_x, train_y, sample_weight), (test_x, test_y) = load_data()
    datagen = ImageDataGenerator(horizontal_flip=True,
                                 vertical_flip=True)
    train_generator = datagen.flow(train_x, train_y, sample_weight=sample_weight)
    base = VGG16(weights='imagenet', include_top=False, input_shape=(None, None, 3))
    for layer in base.layers[:-4]:
        layer.trainable = False
    model = models.Sequential([
        base,
        layers.BatchNormalization(),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.BatchNormalization(),
        layers.Dense(64, activation='relu'),
        layers.BatchNormalization(),
        layers.Dropout(0.20),
        layers.Dense(80, activation='softmax')
    ])
    model.compile(optimizer=optimizers.RMSprop(lr=1e-5),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    reduce_lr = ReduceLROnPlateau(verbose=1)
    model.fit_generator(train_generator, epochs=400,
                        steps_per_epoch=100,
                        validation_data=(test_x[:800], test_y[:800]),
                        callbacks=[reduce_lr])
    result = model.evaluate(test_x, test_y)
    print(result)
    model.save('12306.image.model.h5', include_optimizer=False) 
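Note that the VGG16 base above is built with input_shape=(None, None, 3); GlobalAveragePooling2D is what makes that legal, since it collapses the variable spatial dimensions to a fixed-length vector. A minimal sketch (weights=None to skip the download; sizes are illustrative):

import numpy as np
from keras.applications.vgg16 import VGG16
from keras.layers import GlobalAveragePooling2D, Dense
from keras.models import Sequential

model = Sequential([
    VGG16(weights=None, include_top=False, input_shape=(None, None, 3)),
    GlobalAveragePooling2D(),        # (None, 512) whatever the input size
    Dense(10, activation='softmax'),
])
print(model.predict(np.zeros((1, 64, 96, 3))).shape)     # (1, 10)
print(model.predict(np.zeros((1, 128, 160, 3))).shape)   # (1, 10)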
Example #12
Source Project: keras-image-segmentation   Author: dhkim0225   File: pspnet.py    License: MIT License
def _squeeze_excite_block(input, filters, k=1, name=None):
    init = input
    se_shape = (1, 1, filters * k) if K.image_data_format() == 'channels_last' else (filters * k, 1, 1)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense((filters * k) // 16, activation='relu', kernel_initializer='he_normal', use_bias=False,name=name+'_fc1')(se)
    se = Dense(filters * k, activation='sigmoid', kernel_initializer='he_normal', use_bias=False,name=name+'_fc2')(se)
    return se


# pyramid pooling function 
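Unlike the earlier squeeze blocks, _squeeze_excite_block returns the (1, 1, C) excitation weights rather than the rescaled feature map, so the caller applies them; a hedged sketch of the intended use (feats is an assumed feature tensor):

from keras.layers import multiply

# feats: a (None, H, W, 256) tensor from the surrounding network;
# name must be supplied, since the default None would fail on name + '_fc1'
se = _squeeze_excite_block(feats, filters=256, name='block1_se')
feats = multiply([feats, se])  # broadcast the (1, 1, 256) weights over H and W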
Example #13
Source Project: keras-squeeze-excite-network   Author: titu1994   File: se.py    License: MIT License
def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block

    Args:
        input: input tensor
        ratio: reduction ratio for the squeeze (hidden Dense) layer

    Returns: a keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = init._keras_shape[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x 
Example #14
Source Project: keras-squeeze-excite-network   Author: titu1994   File: se.py    License: MIT License
def squeeze_excite_block(input_tensor, ratio=16):
    """ Create a channel-wise squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        ratio: reduction ratio for the squeeze (hidden Dense) layer

    Returns: a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = _tensor_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x 
Example #15
Source Project: neural-architecture-search   Author: titu1994   File: model.py    License: MIT License
def model_fn(actions):
    # unpack the actions from the list
    kernel_1, filters_1, kernel_2, filters_2, kernel_3, filters_3, kernel_4, filters_4 = actions

    ip = Input(shape=(32, 32, 3))
    x = Conv2D(filters_1, (kernel_1, kernel_1), strides=(2, 2), padding='same', activation='relu')(ip)
    x = Conv2D(filters_2, (kernel_2, kernel_2), strides=(1, 1), padding='same', activation='relu')(x)
    x = Conv2D(filters_3, (kernel_3, kernel_3), strides=(2, 2), padding='same', activation='relu')(x)
    x = Conv2D(filters_4, (kernel_4, kernel_4), strides=(1, 1), padding='same', activation='relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, activation='softmax')(x)

    model = Model(ip, x)
    return model 
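actions is a flat list of alternating kernel sizes and filter counts for the four convolution blocks; an illustrative call:

model = model_fn([3, 32, 3, 64, 3, 64, 5, 128])
model.summary()  # four conv blocks -> GlobalAveragePooling2D -> Dense(10)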
Example #16
Source Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning.py    License: MIT License
def VGG16_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
		model = VGG16(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(5,activation='softmax')(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final 
Example #17
Source Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_ffd.py    License: MIT License
def VGG16_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
        model = VGG16(weights='imagenet',include_top=False)
        x = model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        out = Dense(5,activation='softmax')(x)
        model_final = Model(inputs=model.input, outputs=out)
        if full_freeze != 'N':
            for layer in model.layers[0:freeze_layers]:
                layer.trainable = False
        return model_final 
Example #18
Source Project: sesemi   Author: vuptran   File: utils.py    License: MIT License
def compile_sesemi(network, input_shape, nb_classes,
                   lrate, in_network_dropout, super_dropout):
    weight_decay = 0.0005
    initer = initializers.glorot_uniform()

    fc_params = dict(
            use_bias=True,
            activation='softmax',
            kernel_initializer=initer,
            kernel_regularizer=l2(weight_decay),
        )

    cnn_trunk = network.create_network(input_shape, in_network_dropout)
    
    super_in = Input(shape=input_shape, name='super_data')
    self_in  = Input(shape=input_shape, name='self_data')
    super_out = cnn_trunk(super_in)
    self_out  = cnn_trunk(self_in)
    
    super_out = GlobalAveragePooling2D(name='super_gap')(super_out)
    self_out  = GlobalAveragePooling2D(name='self_gap')(self_out)
    if super_dropout > 0.0:
        super_out = Dropout(super_dropout, name='super_dropout')(super_out)
    
    super_out = Dense(nb_classes, name='super_clf', **fc_params)(super_out)
    self_out  = Dense(proxy_labels, name='self_clf', **fc_params)(self_out)

    sesemi_model = Model(inputs=[self_in, super_in],
                         outputs=[self_out, super_out])
    inference_model = Model(inputs=[super_in], outputs=[super_out])

    sgd = optimizers.SGD(lr=lrate, momentum=0.9, nesterov=True)
    sesemi_model.compile(optimizer=sgd,
                         loss={'super_clf': 'categorical_crossentropy',
                               'self_clf' : 'categorical_crossentropy'},
                         loss_weights={'super_clf': 1.0, 'self_clf': 1.0},
                         metrics=None)
    return sesemi_model, inference_model 
Example #19
Source Project: Aesthetic_attributes_maps   Author: gautamMalu   File: models.py    License: MIT License
def squared_root_normalization(x):
    """
    Squared root normalization for convolution layers` output
    first apply global average pooling followed by squared root on all elements
    then l2 normalize the vector

    :param x: input tensor, output of convolution layer
    :return:
    """
    x = GlobalAveragePooling2D()(x)
    # output shape = (None, nc); the square-root and l2-normalization steps
    # described in the docstring are commented out in this version
    # x = K.sqrt(x)
    # x = K.l2_normalize(x, axis=0)
    return x 
Example #20
Source Project: Aesthetic_attributes_maps   Author: gautamMalu   File: models.py    License: MIT License
def model1(weights_path=None):
    '''
    Basic ResNet-FT for baseline comparisons.
    Creates a model for all aesthetic attributes along
    with the overall aesthetic score by fine-tuning ResNet50.
    :param weights_path: path of the weight file
    :return: Keras model instance
    '''
    _input = Input(shape=(299, 299, 3))
    resnet = ResNet50(include_top=False, weights='imagenet', input_tensor=_input)

    last_layer_output = GlobalAveragePooling2D()(resnet.get_layer('activation_49').output)

    # output of model
    outputs = []
    attrs = ['BalacingElements', 'ColorHarmony', 'Content', 'DoF',
             'Light', 'MotionBlur', 'Object', 'RuleOfThirds', 'VividColor']
    for attribute in attrs:
        outputs.append(Dense(1, kernel_initializer='glorot_uniform', activation='tanh', name=attribute)(last_layer_output))

    non_negative_attrs = ['Repetition', 'Symmetry', 'score']
    for attribute in non_negative_attrs:
        outputs.append(Dense(1, kernel_initializer='glorot_uniform', activation='sigmoid', name=attribute)(last_layer_output))

    model = Model(inputs=_input, outputs=outputs)
    if weights_path:
        model.load_weights(weights_path)
    return model 
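The model has twelve named single-unit outputs sharing the pooled ResNet features; a hedged compile sketch (the regression loss is illustrative, not from the project):

model = model1()
# a single loss string is applied to every named output
model.compile(optimizer='adam', loss='mean_squared_error')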
Example #21
Source Project: Keras-FasterRCNN   Author: you359   File: inception_resnet_v2.py    License: MIT License
def classifier_layers(x, input_shape, trainable=False):

    # compile times on theano tend to be very high, so we use smaller ROI pooling regions as a workaround
    # (hence a smaller stride in the region that follows the ROI pool)

    channel_axis = 1 if K.image_data_format() == 'channels_first' else 4

    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn_td(x, 256, 1, name='Reduction_B_block' + '_conv1')
    branch_0 = conv2d_bn_td(branch_0, 384, 3, strides=2, padding='valid', name='Reduction_B_block' + '_conv2')
    branch_1 = conv2d_bn_td(x, 256, 1, name='Reduction_B_block' + '_conv3')
    branch_1 = conv2d_bn_td(branch_1, 288, 3, strides=2, padding='valid', name='Reduction_B_block' + '_conv4')
    branch_2 = conv2d_bn_td(x, 256, 1, name='Reduction_B_block' + '_conv5')
    branch_2 = conv2d_bn_td(branch_2, 288, 3, name='Reduction_B_block' + '_conv6')
    branch_2 = conv2d_bn_td(branch_2, 320, 3, strides=2, padding='valid', name='Reduction_B_block' + '_conv7')
    branch_pool = TimeDistributed(MaxPooling2D(3, strides=2, padding='valid'))(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)

    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block_td(x,
                                      scale=0.2,
                                      block_type='block8',
                                      block_idx=block_idx)
    x = inception_resnet_block_td(x,
                                  scale=1.,
                                  activation=None,
                                  block_type='block8',
                                  block_idx=10)

    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn_td(x, 1536, 1, name='conv_7b')

    x = TimeDistributed(GlobalAveragePooling2D(), name='avg_pool')(x)

    return x 
Example #22
Source Project: MAX-ResNet-50   Author: IBM   File: image_classification.py    License: Apache License 2.0
def build_model(base_model, num_classes):
    x = base_model.output
    x = GlobalAveragePooling2D(name='Avg_pool_1')(x)
    x = Dense(1024, activation='relu', name='dense_one')(x)
    x = Dense(1024, activation='relu', name='dense_two')(x)
    x = Dense(512, activation='relu', name='dense_three')(x)
    x = Dense(num_classes, activation='softmax', name='main_output')(x)
    return x 
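Unlike most snippets on this page, build_model returns the output tensor rather than a Model, so the caller wraps it; a hedged sketch (the ResNet50 base and class count are illustrative):

from keras.applications.resnet50 import ResNet50
from keras.models import Model

base_model = ResNet50(weights='imagenet', include_top=False)
model = Model(inputs=base_model.input,
              outputs=build_model(base_model, num_classes=5))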
Example #23
def get_model_tocompiled(base_model, num_class):
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(num_class, activation='softmax', name='predictions')(x)
    clf = Model(base_model.input, predictions)
    return clf 
Example #24
Source Project: CBAM-keras   Author: kobiso   File: attention_module.py    License: MIT License
def se_block(input_feature, ratio=8):
	"""Contains the implementation of Squeeze-and-Excitation(SE) block.
	As described in https://arxiv.org/abs/1709.01507.
	"""
	
	channel_axis = 1 if K.image_data_format() == "channels_first" else -1
	channel = input_feature._keras_shape[channel_axis]

	se_feature = GlobalAveragePooling2D()(input_feature)
	se_feature = Reshape((1, 1, channel))(se_feature)
	assert se_feature._keras_shape[1:] == (1,1,channel)
	se_feature = Dense(channel // ratio,
					   activation='relu',
					   kernel_initializer='he_normal',
					   use_bias=True,
					   bias_initializer='zeros')(se_feature)
	assert se_feature._keras_shape[1:] == (1,1,channel//ratio)
	se_feature = Dense(channel,
					   activation='sigmoid',
					   kernel_initializer='he_normal',
					   use_bias=True,
					   bias_initializer='zeros')(se_feature)
	assert se_feature._keras_shape[1:] == (1,1,channel)
	if K.image_data_format() == 'channels_first':
		se_feature = Permute((3, 1, 2))(se_feature)

	se_feature = multiply([input_feature, se_feature])
	return se_feature 
Example #25
Source Project: CBAM-keras   Author: kobiso   File: attention_module.py    License: MIT License
def channel_attention(input_feature, ratio=8):
	
	channel_axis = 1 if K.image_data_format() == "channels_first" else -1
	channel = input_feature._keras_shape[channel_axis]
	
	shared_layer_one = Dense(channel//ratio,
							 activation='relu',
							 kernel_initializer='he_normal',
							 use_bias=True,
							 bias_initializer='zeros')
	shared_layer_two = Dense(channel,
							 kernel_initializer='he_normal',
							 use_bias=True,
							 bias_initializer='zeros')
	
	avg_pool = GlobalAveragePooling2D()(input_feature)    
	avg_pool = Reshape((1,1,channel))(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel)
	avg_pool = shared_layer_one(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel//ratio)
	avg_pool = shared_layer_two(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel)
	
	max_pool = GlobalMaxPooling2D()(input_feature)
	max_pool = Reshape((1,1,channel))(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel)
	max_pool = shared_layer_one(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel//ratio)
	max_pool = shared_layer_two(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel)
	
	cbam_feature = Add()([avg_pool,max_pool])
	cbam_feature = Activation('sigmoid')(cbam_feature)
	
	if K.image_data_format() == "channels_first":
		cbam_feature = Permute((3, 1, 2))(cbam_feature)
	
	return multiply([input_feature, cbam_feature]) 
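A hedged usage sketch of the channel-attention block (assumes the older standalone Keras used here, where tensors carry _keras_shape; the Conv2D stem is illustrative):

from keras.layers import Input, Conv2D
from keras.models import Model

inp = Input(shape=(32, 32, 3))
feats = Conv2D(64, (3, 3), padding='same', activation='relu')(inp)
refined = channel_attention(feats, ratio=8)  # same shape as feats: (None, 32, 32, 64)
model = Model(inp, refined)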
Example #26
Source Project: CBAM-keras   Author: kobiso   File: attention_module-checkpoint.py    License: MIT License
def se_block(input_feature, ratio=8):
	"""Contains the implementation of Squeeze-and-Excitation(SE) block.
	As described in https://arxiv.org/abs/1709.01507.
	"""
	
	channel_axis = 1 if K.image_data_format() == "channels_first" else -1
	channel = input_feature._keras_shape[channel_axis]

	se_feature = GlobalAveragePooling2D()(input_feature)
	se_feature = Reshape((1, 1, channel))(se_feature)
	assert se_feature._keras_shape[1:] == (1,1,channel)
	se_feature = Dense(channel // ratio,
					   activation='relu',
					   kernel_initializer='he_normal',
					   use_bias=True,
					   bias_initializer='zeros')(se_feature)
	assert se_feature._keras_shape[1:] == (1,1,channel//ratio)
	se_feature = Dense(channel,
					   activation='sigmoid',
					   kernel_initializer='he_normal',
					   use_bias=True,
					   bias_initializer='zeros')(se_feature)
	assert se_feature._keras_shape[1:] == (1,1,channel)
	if K.image_data_format() == 'channels_first':
		se_feature = Permute((3, 1, 2))(se_feature)

	se_feature = multiply([input_feature, se_feature])
	return se_feature 
Example #27
Source Project: CBAM-keras   Author: kobiso   File: attention_module-checkpoint.py    License: MIT License
def channel_attention(input_feature, ratio=8):
	
	channel_axis = 1 if K.image_data_format() == "channels_first" else -1
	channel = input_feature._keras_shape[channel_axis]
	
	shared_layer_one = Dense(channel//ratio,
							 activation='relu',
							 kernel_initializer='he_normal',
							 use_bias=True,
							 bias_initializer='zeros')
	shared_layer_two = Dense(channel,
							 kernel_initializer='he_normal',
							 use_bias=True,
							 bias_initializer='zeros')
	
	avg_pool = GlobalAveragePooling2D()(input_feature)    
	avg_pool = Reshape((1,1,channel))(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel)
	avg_pool = shared_layer_one(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel//ratio)
	avg_pool = shared_layer_two(avg_pool)
	assert avg_pool._keras_shape[1:] == (1,1,channel)
	
	max_pool = GlobalMaxPooling2D()(input_feature)
	max_pool = Reshape((1,1,channel))(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel)
	max_pool = shared_layer_one(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel//ratio)
	max_pool = shared_layer_two(max_pool)
	assert max_pool._keras_shape[1:] == (1,1,channel)
	
	cbam_feature = Add()([avg_pool,max_pool])
	cbam_feature = Activation('sigmoid')(cbam_feature)
	
	if K.image_data_format() == "channels_first":
		cbam_feature = Permute((3, 1, 2))(cbam_feature)
	
	return multiply([input_feature, cbam_feature]) 
Example #28
Source Project: kaggle-rsna18   Author: i-pan   File: TrainClassifierEnsemble.py    License: MIT License
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained="imagenet"): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model

##########
## DATA ##
##########

# == PREPROCESSING == # 
Example #29
Source Project: kaggle-rsna18   Author: i-pan   File: TrainOneClassifier.py    License: MIT License
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained="imagenet"): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model

##########
## DATA ##
##########

# == PREPROCESSING == # 
Example #30
Source Project: kaggle-rsna18   Author: i-pan   File: PredictOneClassifier.py    License: MIT License
def get_model(base_model, 
              layer, 
              lr=1e-3, 
              input_shape=(224,224,1), 
              classes=2,
              activation="softmax",
              dropout=None, 
              pooling="avg", 
              weights=None,
              pretrained=None): 
    base = base_model(input_shape=input_shape,
                      include_top=False,
                      weights=pretrained, 
                      channels="gray") 
    if pooling == "avg": 
        x = GlobalAveragePooling2D()(base.output) 
    elif pooling == "max": 
        x = GlobalMaxPooling2D()(base.output) 
    elif pooling is None: 
        x = Flatten()(base.output) 
    if dropout is not None: 
        x = Dropout(dropout)(x) 
    x = Dense(classes, activation=activation)(x) 
    model = Model(inputs=base.input, outputs=x) 
    if weights is not None: 
        model.load_weights(weights) 
    for l in model.layers[:layer]:
        l.trainable = False 
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], 
                  optimizer=optimizers.Adam(lr)) 
    return model

##########
## DATA ##
##########

# == PREPROCESSING == #