Python keras.layers.GlobalAveragePooling3D() Examples

The following are 10 code examples of keras.layers.GlobalAveragePooling3D(). Each example lists its source project, author, file, and license above the code. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
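Before the project examples, here is a minimal sketch (not taken from any of the projects below) of what the layer does: it averages a 5D tensor over its three spatial dimensions, reducing (batch, depth, height, width, channels) to (batch, channels) under the default channels_last data format.

from keras.layers import Input, GlobalAveragePooling3D
from keras.models import Model

# hypothetical 16x16x16 volume with 8 feature channels (channels_last)
inp = Input(shape=(16, 16, 16, 8))
out = GlobalAveragePooling3D()(inp)   # averages over depth, height and width -> (None, 8)
model = Model(inp, out)
model.summary()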
Example #1
Source Project: CNNArt   Author: thomaskuestner   File: squeeze_excitation_block.py    License: Apache License 2.0
# imports needed by this snippet
from keras import backend
from keras.layers import Dense, GlobalAveragePooling3D, multiply


def squeeze_excitation_block_3D(inputSE, ratio=16):
    '''
    Creates a squeeze and excitation block
    :param inputSE: input tensor
    :param ratio: reduction ratio r for bottleneck given by the two FC layers
    :return: keras tensor
    '''

    if backend.image_data_format() == 'channels_first':
        channels = 1
    else:
        channels = -1

    # number of input filters/channels
    inputSE_shape = backend.int_shape(inputSE)
    numChannels = inputSE_shape[channels]

    #squeeze operation
    output = GlobalAveragePooling3D(data_format=backend.image_data_format())(inputSE)

    #excitation operation
    output = Dense(numChannels//ratio, activation='relu', use_bias=True, kernel_initializer='he_normal')(output)
    output = Dense(numChannels, activation='sigmoid', use_bias=True, kernel_initializer='he_normal')(output)

    #scale operation
    output = multiply([inputSE, output])

    return output 
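A possible usage sketch for this block, assuming the default channels_last data format and the imports shown with the function (the feature-map shape here is made up for illustration):

from keras.layers import Input
from keras.models import Model

feat = Input(shape=(8, 8, 8, 32))                      # hypothetical 8x8x8 volume with 32 channels
recalibrated = squeeze_excitation_block_3D(feat, ratio=16)
se_model = Model(feat, recalibrated)                   # channels of feat are rescaled by learned weights
se_model.summary()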
Example #2
Source Project: CNNArt   Author: thomaskuestner   File: MSnetworks.py    License: Apache License 2.0
def fCreateModel_FCN_simple(patchSize, dr_rate=0.0, iPReLU=0, l1_reg=0.0, l2_reg=1e-6):
    # Total params: 1,223,831
    # Replace the dense layer with a convolutional layer with filters=2 for the two classes
    Strides = fgetStrides()
    kernelnumber = fgetKernelNumber()
    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))

    after_Conv_1 = fCreateVNet_Block(inp, kernelnumber[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_1 = fCreateVNet_DownConv_Block(after_Conv_1, after_Conv_1._keras_shape[1], Strides[0],
                                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    after_Conv_2 = fCreateVNet_Block(after_DownConv_1, kernelnumber[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_2 = fCreateVNet_DownConv_Block(after_Conv_2, after_Conv_2._keras_shape[1], Strides[1],
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    after_Conv_3 = fCreateVNet_Block(after_DownConv_2, kernelnumber[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_3 = fCreateVNet_DownConv_Block(after_Conv_3, after_Conv_3._keras_shape[1], Strides[2],
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    dropout_out = Dropout(dr_rate)(after_DownConv_3)
    fclayer = Conv3D(2,
                       kernel_size=(1,1,1),
                       kernel_initializer='he_normal',
                       weights=None,
                       padding='valid',
                       strides=(1, 1, 1),
                       kernel_regularizer=l1_l2(l1_reg, l2_reg),
                       )(dropout_out)
    fclayer = GlobalAveragePooling3D()(fclayer)
    outp = Activation('softmax')(fclayer)
    cnn_spp = Model(inputs=inp, outputs=outp)
    return cnn_spp 
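The classification head used above (a 1x1x1 Conv3D with two filters, then GlobalAveragePooling3D, then a softmax) is the fully convolutional replacement for a Dense layer that the comment mentions. A minimal standalone sketch of just that head, with a made-up feature-map shape:

from keras.layers import Input, Conv3D, GlobalAveragePooling3D, Activation
from keras.models import Model

# hypothetical encoder output: 4x4x4 volume with 128 channels
features = Input(shape=(4, 4, 4, 128))
# one score map per class (here 2 classes), computed voxel-wise
scores = Conv3D(2, kernel_size=(1, 1, 1), padding='valid')(features)
# average the per-voxel scores over the volume -> shape (None, 2)
pooled = GlobalAveragePooling3D()(scores)
probs = Activation('softmax')(pooled)
head = Model(features, probs)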
Example #3
Source Project: Kaggle-DSB   Author: Wrosinski   File: preds3d_models.py    License: MIT License
def preds3d_baseline(width):
    
    learning_rate = 5e-5
    #optimizer = SGD(lr=learning_rate, momentum = 0.9, decay = 1e-3, nesterov = True)
    optimizer = Adam(lr=learning_rate)
    
    inputs = Input(shape=(1, 136, 168, 168))
    conv1 = Convolution3D(width, 3, 3, 3, activation = 'relu', border_mode='same')(inputs)
    conv1 = BatchNormalization(axis = 1)(conv1)
    conv1 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(conv1)
    conv1 = BatchNormalization(axis = 1)(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv1)
    
    conv2 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(pool1)
    conv2 = BatchNormalization(axis = 1)(conv2)
    conv2 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(conv2)
    conv2 = BatchNormalization(axis = 1)(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv2)

    conv3 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(pool2)
    conv3 = BatchNormalization(axis = 1)(conv3)
    conv3 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv3)
    conv3 = BatchNormalization(axis = 1)(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv3)
    
    output = GlobalAveragePooling3D()(pool3)
    output = Dense(2, activation='softmax', name = 'predictions')(output)
    model3d = Model(inputs, output)
    model3d.compile(loss='categorical_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
    return model3d 
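Note that this model and the next two use a channels-first input shape of (1, 136, 168, 168) together with the older Convolution3D/border_mode API, and they call GlobalAveragePooling3D() without an explicit data format, so they rely on Keras being configured for channels-first ordering. As a small sketch (using the Keras 2 data_format argument, an assumption about the reader's Keras version rather than part of the project code), the same pooling call reduces either layout to (batch, channels):

from keras import backend as K
from keras.layers import Input, GlobalAveragePooling3D

# channels_last: the volume is (depth, height, width, channels)
x_last = Input(shape=(136, 168, 168, 1))
print(K.int_shape(GlobalAveragePooling3D(data_format='channels_last')(x_last)))    # (None, 1)

# channels_first: the volume is (channels, depth, height, width), as in the model above
x_first = Input(shape=(1, 136, 168, 168))
print(K.int_shape(GlobalAveragePooling3D(data_format='channels_first')(x_first)))  # (None, 1)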
Example #4
Source Project: Kaggle-DSB   Author: Wrosinski   File: preds3d_models.py    License: MIT License
def preds3d_globalavg(width):
    
    learning_rate = 5e-5
    #optimizer = SGD(lr=learning_rate, momentum = 0.9, decay = 1e-3, nesterov = True)
    optimizer = Adam(lr=learning_rate)
    
    inputs = Input(shape=(1, 136, 168, 168))
    conv1 = Convolution3D(width, 3, 3, 3, activation = 'relu', border_mode='same')(inputs)
    conv1 = BatchNormalization(axis = 1)(conv1)
    conv1 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(conv1)
    conv1 = BatchNormalization(axis = 1)(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv1)
    
    conv2 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(pool1)
    conv2 = BatchNormalization(axis = 1)(conv2)
    conv2 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(conv2)
    conv2 = BatchNormalization(axis = 1)(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv2)

    conv3 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(pool2)
    conv3 = BatchNormalization(axis = 1)(conv3)
    conv3 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv3)
    conv3 = BatchNormalization(axis = 1)(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv3)
    
    conv4 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(pool3)
    conv4 = BatchNormalization(axis = 1)(conv4)
    conv4 = Convolution3D(width*16, 3, 3, 3, activation = 'relu', border_mode='same')(conv4)
    conv4 = BatchNormalization(axis = 1)(conv4)
    pool4 = MaxPooling3D(pool_size=(8, 8, 8), border_mode='same')(conv4)
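    # note: pool4 is defined but not used below; global average pooling is applied directly to conv4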
    
    output = GlobalAveragePooling3D()(conv4)
    output = Dense(2, activation='softmax', name = 'predictions')(output)
    model3d = Model(inputs, output)
    model3d.compile(loss='categorical_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
    return model3d 
Example #5
Source Project: Kaggle-DSB   Author: Wrosinski   File: preds3d_run.py    License: MIT License
def preds3d_baseline(width):
    
    learning_rate = 5e-5
    optimizer = SGD(lr=learning_rate, momentum = 0.9, decay = 1e-3, nesterov = True)
    #optimizer = Adam(lr=learning_rate)
    
    inputs = Input(shape=(1, 136, 168, 168))
    conv1 = Convolution3D(width, 3, 3, 3, activation = 'relu', border_mode='same')(inputs)
    conv1 = BatchNormalization(axis = 1)(conv1)
    conv1 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(conv1)
    conv1 = BatchNormalization(axis = 1)(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv1)
    
    conv2 = Convolution3D(width*2, 3, 3, 3, activation = 'relu', border_mode='same')(pool1)
    conv2 = BatchNormalization(axis = 1)(conv2)
    conv2 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(conv2)
    conv2 = BatchNormalization(axis = 1)(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv2)

    conv3 = Convolution3D(width*4, 3, 3, 3, activation = 'relu', border_mode='same')(pool2)
    conv3 = BatchNormalization(axis = 1)(conv3)
    conv3 = Convolution3D(width*8, 3, 3, 3, activation = 'relu', border_mode='same')(conv3)
    conv3 = BatchNormalization(axis = 1)(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2), border_mode='same')(conv3)
    
    output = GlobalAveragePooling3D()(pool3)
    output = Dense(2, activation='softmax', name = 'predictions')(output)
    model3d = Model(inputs, output)
    model3d.compile(loss='categorical_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
    return model3d


# 1398 stage1 original examples 
Example #6
Source Project: CNNArt   Author: thomaskuestner   File: multiclass_3D_SE-DenseNet-BC.py    License: Apache License 2.0
def createModel(patchSize, numClasses):
    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    growthRate_k = 12
    compressionFactor = 0.5

    input_tensor = Input(shape=(patchSize[0], patchSize[1], patchSize[2], 1))

    # first conv layer
    x = Conv3D(16, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer='he_normal')(input_tensor)

    # 1. Dense Block
    x, numFilters = dense_block_3D(x, numInputFilters=16, numLayers=7, growthRate_k=growthRate_k,
                                   bottleneck_enabled=True)

    # Transition Layer
    x, numFilters = transition_SE_layer_3D(x, numFilters, compressionFactor=compressionFactor, se_ratio=8)

    # 2. Dense Block
    x, numFilters = dense_block_3D(x, numInputFilters=numFilters, numLayers=7, growthRate_k=growthRate_k,
                                   bottleneck_enabled=True)

    # Transition Layer
    x, numFilters = transition_SE_layer_3D(x, numFilters, compressionFactor=compressionFactor, se_ratio=8)

    # 3. Dense Block
    x, numFilters = dense_block_3D(x, numInputFilters=numFilters, numLayers=7, growthRate_k=growthRate_k,
                                   bottleneck_enabled=True)

    # SE Block
    x = squeeze_excitation_block_3D(x, ratio=16)

    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation('relu')(x)

    # global average pooling
    x = GlobalAveragePooling3D(data_format='channels_last')(x)

    # fully-connected layer
    output = Dense(units=numClasses,
                   activation='softmax',
                   kernel_initializer='he_normal',
                   name='fully-connected')(x)

    # create model
    cnn = Model(input_tensor, output, name='3D-DenseNet-34')
    sModelName = '3D-DenseNet-34'

    return cnn, sModelName 
Example #7
Source Project: CNNArt   Author: thomaskuestner   File: multiclass_3D_SE-DenseNet.py    License: Apache License 2.0
def createModel(patchSize, numClasses):
    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    growthRate_k = 12
    compressionFactor = 1.0

    input_tensor = Input(shape=(patchSize[0], patchSize[1], patchSize[2], 1))

    # first conv layer
    x = Conv3D(16, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer='he_normal')(input_tensor)

    # 1. Dense Block
    x, numFilters = dense_block_3D(x, numInputFilters=16, numLayers=10, growthRate_k=growthRate_k,
                                   bottleneck_enabled=True)

    # Transition Layer
    x, numFilters = transition_SE_layer_3D(x, numFilters, compressionFactor=compressionFactor, se_ratio=16)

    # 2. Dense Block
    x, numFilters = dense_block_3D(x, numInputFilters=numFilters, numLayers=10, growthRate_k=growthRate_k,
                                   bottleneck_enabled=True)

    # Transition Layer
    x, numFilters = transition_SE_layer_3D(x, numFilters, compressionFactor=compressionFactor, se_ratio=16)

    # 3. Dense Block
    x, numFilters = dense_block_3D(x, numInputFilters=numFilters, numLayers=10, growthRate_k=growthRate_k,
                                   bottleneck_enabled=True)

    # SE Block
    x = squeeze_excitation_block_3D(x, ratio=16)

    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation('relu')(x)

    # global average pooling
    x = GlobalAveragePooling3D(data_format='channels_last')(x)

    # fully-connected layer
    output = Dense(units=numClasses,
                   activation='softmax',
                   kernel_initializer='he_normal',
                   name='fully-connected')(x)

    # create model
    cnn = Model(input_tensor, output, name='3D-DenseNet-34')
    sModelName = '3D-DenseNet-34'

    return cnn, sModelName 
Example #8
Source Project: CNNArt   Author: thomaskuestner   File: multiclass_3D_SE-DenseNet_BC.py    License: Apache License 2.0
def createModel(patchSize, numClasses):

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    growthRate_k = 12
    compressionFactor = 0.5

    input_tensor = Input(shape=(patchSize[0], patchSize[1], patchSize[2], 1))

    # first conv layer
    x = Conv3D(16, (3,3,3), strides=(1,1,1), padding='same', kernel_initializer='he_normal')(input_tensor)

    # 1. Dense Block
    x, numFilters = dense_block_3D(x, numInputFilters=16, numLayers=7, growthRate_k=growthRate_k, bottleneck_enabled=True)

    # Transition Layer
    x, numFilters = transition_SE_layer_3D(x, numFilters, compressionFactor=compressionFactor, se_ratio=8)

    # 2. Dense Block
    x, numFilters = dense_block_3D(x, numInputFilters=numFilters, numLayers=7, growthRate_k=growthRate_k, bottleneck_enabled=True)

    # Transition Layer
    x, numFilters = transition_SE_layer_3D(x, numFilters, compressionFactor=compressionFactor, se_ratio=8)

    # 3. Dense Block
    x, numFilters = dense_block_3D(x, numInputFilters=numFilters, numLayers=7, growthRate_k=growthRate_k, bottleneck_enabled=True)

    # SE Block
    x = squeeze_excitation_block_3D(x, ratio=16)

    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation('relu')(x)

    # global average pooling
    x = GlobalAveragePooling3D(data_format='channels_last')(x)

    # fully-connected layer
    output = Dense(units=numClasses,
                   activation='softmax',
                   kernel_initializer='he_normal',
                   name='fully-connected')(x)

    # create model
    cnn = Model(input_tensor, output, name='3D-DenseNet-34')
    sModelName = '3D-DenseNet-34'

    return cnn, sModelName 
Example #9
Source Project: CNNArt   Author: thomaskuestner   File: MSnetworks.py    License: Apache License 2.0
def fCreateModel_FCN_MultiFM(patchSize, dr_rate=0.0, iPReLU=0, l1_reg=0, l2_reg=1e-6):
    # Total params: 1,420,549
    # The dense layer is replaced by a convolutional layer with filters=2 for the two classes.
    # The FM from the third down-scaled convolutional layer is upsampled by deconvolution and
    # added to the FM from the second down-scaled convolutional layer.
    # The combined FM goes through a convolutional layer with filters=2 for the two classes.
    # The two predictions are averaged as the final result.
    Strides = fgetStrides()
    kernelnumber = fgetKernelNumber()
    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))

    after_Conv_1 = fCreateVNet_Block(inp, kernelnumber[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_1 = fCreateVNet_DownConv_Block(after_Conv_1, after_Conv_1._keras_shape[1], Strides[0],
                                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    after_Conv_2 = fCreateVNet_Block(after_DownConv_1, kernelnumber[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_2 = fCreateVNet_DownConv_Block(after_Conv_2, after_Conv_2._keras_shape[1], Strides[1],
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    after_Conv_3 = fCreateVNet_Block(after_DownConv_2, kernelnumber[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_3 = fCreateVNet_DownConv_Block(after_Conv_3, after_Conv_3._keras_shape[1], Strides[2],
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    # fully convolution over the FM from the deepest level
    dropout_out1 = Dropout(dr_rate)(after_DownConv_3)
    fclayer1 = Conv3D(2,
                       kernel_size=(1,1,1),
                       kernel_initializer='he_normal',
                       weights=None,
                       padding='valid',
                       strides=(1, 1, 1),
                       kernel_regularizer=l1_l2(l1_reg, l2_reg),
                       )(dropout_out1)
    fclayer1 = GlobalAveragePooling3D()(fclayer1)
    
    # Upsample the FM from the deepest level and add it to the FM from level 2
    UpedFM_Level3 = Conv3DTranspose(filters=97, kernel_size=(3,3,1), strides=(2,2,1), padding='same')(after_DownConv_3)
    combined_FM_Level23 = add([UpedFM_Level3, after_DownConv_2])
    fclayer2 = Conv3D(2,
                       kernel_size=(1,1,1),
                       kernel_initializer='he_normal',
                       weights=None,
                       padding='valid',
                       strides=(1, 1, 1),
                       kernel_regularizer=l1_l2(l1_reg, l2_reg),
                       )(combined_FM_Level23)
    fclayer2 = GlobalAveragePooling3D()(fclayer2)

    # combine the two predictions using average
    fcl_aver = average([fclayer1, fclayer2])
    predict = Activation('softmax')(fcl_aver)
    cnn_fcl_msfm = Model(inputs=inp, outputs=predict)
    return cnn_fcl_msfm 
Example #10
Source Project: 3D-ConvNets-for-Action-Recognition   Author: TianzhongSong   File: densenet_3d.py    License: MIT License
def densenet_3d(nb_classes, input_shape, weight_decay=0.005, dropout_rate=0.2):

    model_input = Input(shape=input_shape)

    # 112x112x8
    # stage 1 Initial convolution
    x = conv_factory(model_input, 64)
    x = MaxPool3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)
    # 56x56x8

    # stage 2
    x = dense_block(x, 32, internal_layers=4,
                             dropout_rate=dropout_rate)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = conv_factory(x, 128, (1, 1, 1), dropout_rate=dropout_rate)
    # 28x28x4

    # stage 3
    x = dense_block(x, 32, internal_layers=4,
                   dropout_rate=dropout_rate)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = conv_factory(x, 128, (1, 1, 1), dropout_rate=dropout_rate)

    # 14x14x2

    # stage 4
    x = dense_block(x, 64, internal_layers=4,
                   dropout_rate=dropout_rate)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = conv_factory(x, 256, (1, 1, 1), dropout_rate=dropout_rate)

    # 7x7x1

    # stage 5
    x = dense_block(x, 64, internal_layers=4,
                   dropout_rate=dropout_rate)

    x = conv_factory(x, 256, (1, 1, 1), dropout_rate=dropout_rate)

    x = GlobalAveragePooling3D()(x)
    x = Dense(nb_classes,
              activation='softmax',
              kernel_regularizer=l2(weight_decay),
              bias_regularizer=l2(weight_decay))(x)

    model = Model(inputs=model_input, outputs=x, name="densenet_3d")

    return model
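As a usage sketch, assuming the project's conv_factory and dense_block helpers are importable and picking an input shape and class count purely for illustration (112x112 frames, 8-frame clips, 3 channels, 10 classes), the model could be built and compiled like this:

# hypothetical clip shape and class count, not taken from the project
model = densenet_3d(nb_classes=10, input_shape=(112, 112, 8, 3),
                    weight_decay=0.005, dropout_rate=0.2)
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()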