Python keras.layers.convolutional.AveragePooling2D() Examples

The following are 24 code examples of keras.layers.convolutional.AveragePooling2D(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all other available functions and classes of the keras.layers.convolutional module.
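Before the project examples, here is a minimal, self-contained sketch of the layer in isolation, assuming a standalone Keras 2.x install where AveragePooling2D is importable directly from keras.layers (older releases expose it through keras.layers.convolutional, and tf.keras through tensorflow.keras.layers). It only illustrates how pool_size, strides, and padding determine the output shape; the shapes are illustrative, not taken from any of the projects below.

import numpy as np
from keras.layers import Input, AveragePooling2D
from keras.models import Model

# Channels-last input: a 32 x 32 spatial grid with 3 channels.
inputs = Input(shape=(32, 32, 3))

# A 2x2 average pool with stride 2 and 'valid' padding halves H and W:
# output size per spatial dimension = floor((32 - 2) / 2) + 1 = 16.
pooled = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(inputs)

model = Model(inputs, pooled)
x = np.random.rand(1, 32, 32, 3).astype('float32')
print(model.predict(x).shape)  # expected: (1, 16, 16, 3)

With padding='same' the output spatial size becomes ceil(input_size / stride) instead; the layer_test calls in the test-suite example below exercise both padding modes as well as the channels_first data format.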
Example #1
Source File: wresnet.py    From Theano-MPI with Educational Community License v2.0
def build_model(self):
    

        img_input = Input(shape=(img_channels, img_rows, img_cols))

        # one conv at the beginning (spatial size: 32x32)
        x = ZeroPadding2D((1, 1))(img_input)
        x = Convolution2D(16, nb_row=3, nb_col=3)(x)

        # Stage 1 (spatial size: 32x32)
        x = bottleneck(x, n, 16, 16 * k, dropout=0.3, subsample=(1, 1))
        # Stage 2 (spatial size: 16x16)
        x = bottleneck(x, n, 16 * k, 32 * k, dropout=0.3, subsample=(2, 2))
        # Stage 3 (spatial size: 8x8)
        x = bottleneck(x, n, 32 * k, 64 * k, dropout=0.3, subsample=(2, 2))

        x = BatchNormalization(mode=0, axis=1)(x)
        x = Activation('relu')(x)
        x = AveragePooling2D((8, 8), strides=(1, 1))(x)
        x = Flatten()(x)
        preds = Dense(nb_classes, activation='softmax')(x)

        self.model = Model(input=img_input, output=preds)
        
        self.keras_get_params() 
Example #2
Source File: convolutional_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_averagepooling_2d():
    layer_test(convolutional.AveragePooling2D,
               kwargs={'strides': (2, 2),
                       'padding': 'same',
                       'pool_size': (2, 2)},
               input_shape=(3, 5, 6, 4))
    layer_test(convolutional.AveragePooling2D,
               kwargs={'strides': (2, 2),
                       'padding': 'valid',
                       'pool_size': (3, 3)},
               input_shape=(3, 5, 6, 4))
    layer_test(convolutional.AveragePooling2D,
               kwargs={'strides': (1, 1),
                       'padding': 'valid',
                       'pool_size': (2, 2),
                       'data_format': 'channels_first'},
               input_shape=(3, 4, 5, 6)) 
Example #3
Source File: inception_v4.py    From Triplet-deep-hash-pytorch with Apache License 2.0
def block_inception_c(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 256, 1, 1)

    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = concatenate([branch_10, branch_11], axis=channel_axis)

    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = concatenate([branch_20, branch_21], axis=channel_axis)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x 
Example #4
Source File: inception_v4.py    From Triplet-deep-hash-pytorch with Apache License 2.0
def block_inception_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 1, 1)

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)

    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x 
Example #5
Source File: inception_v4.py    From Triplet-deep-hash-pytorch with Apache License 2.0
def block_inception_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x 
Example #6
Source File: inception_v4.py    From Inception-v4 with MIT License
def inception_C(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    c1 = conv_block(input, 256, 1, 1)

    c2 = conv_block(input, 384, 1, 1)
    c2_1 = conv_block(c2, 256, 1, 3)
    c2_2 = conv_block(c2, 256, 3, 1)
    c2 = merge([c2_1, c2_2], mode='concat', concat_axis=channel_axis)

    c3 = conv_block(input, 384, 1, 1)
    c3 = conv_block(c3, 448, 3, 1)
    c3 = conv_block(c3, 512, 1, 3)
    c3_1 = conv_block(c3, 256, 1, 3)
    c3_2 = conv_block(c3, 256, 3, 1)
    c3 = merge([c3_1, c3_2], mode='concat', concat_axis=channel_axis)

    c4 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    c4 = conv_block(c4, 256, 1, 1)

    m = merge([c1, c2, c3, c4], mode='concat', concat_axis=channel_axis)
    return m 
Example #7
Source File: inception_v4.py    From Inception-v4 with MIT License
def inception_B(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    b1 = conv_block(input, 384, 1, 1)

    b2 = conv_block(input, 192, 1, 1)
    b2 = conv_block(b2, 224, 1, 7)
    b2 = conv_block(b2, 256, 7, 1)

    b3 = conv_block(input, 192, 1, 1)
    b3 = conv_block(b3, 192, 7, 1)
    b3 = conv_block(b3, 224, 1, 7)
    b3 = conv_block(b3, 224, 7, 1)
    b3 = conv_block(b3, 256, 1, 7)

    b4 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    b4 = conv_block(b4, 128, 1, 1)

    m = merge([b1, b2, b3, b4], mode='concat', concat_axis=channel_axis)
    return m 
Example #8
Source File: inception_v4.py    From Inception-v4 with MIT License
def inception_A(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    a1 = conv_block(input, 96, 1, 1)

    a2 = conv_block(input, 64, 1, 1)
    a2 = conv_block(a2, 96, 3, 3)

    a3 = conv_block(input, 64, 1, 1)
    a3 = conv_block(a3, 96, 3, 3)
    a3 = conv_block(a3, 96, 3, 3)

    a4 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    a4 = conv_block(a4, 96, 1, 1)

    m = merge([a1, a2, a3, a4], mode='concat', concat_axis=channel_axis)
    return m 
Example #9
Source File: inception_v4.py    From keras-inceptionV4 with Apache License 2.0
def block_inception_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 1, 1)

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)

    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)

    branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x 
Example #10
Source File: inception_v4.py    From keras-inceptionV4 with Apache License 2.0
def block_inception_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x 
Example #11
Source File: inception_v4.py    From cnn_evaluation_smoke with GNU General Public License v3.0
def block_inception_a(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3,3), strides=(1,1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x 
Example #12
Source File: inception_v4.py    From cnn_evaluation_smoke with GNU General Public License v3.0
def block_inception_b(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 1, 1)

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)

    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)

    branch_3 = AveragePooling2D((3,3), strides=(1,1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x 
Example #13
Source File: inception_v4.py    From FashionAI_Tianchi_2018 with MIT License
def block_inception_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x 
Example #14
Source File: inception_v4.py    From FashionAI_Tianchi_2018 with MIT License
def block_inception_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 1, 1)

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 1, 7)
    branch_1 = conv2d_bn(branch_1, 256, 7, 1)

    branch_2 = conv2d_bn(input, 192, 1, 1)
    branch_2 = conv2d_bn(branch_2, 192, 7, 1)
    branch_2 = conv2d_bn(branch_2, 224, 1, 7)
    branch_2 = conv2d_bn(branch_2, 224, 7, 1)
    branch_2 = conv2d_bn(branch_2, 256, 1, 7)

    branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 128, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x 
Example #15
Source File: inception_v4.py    From FashionAI_Tianchi_2018 with MIT License
def block_inception_c(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 256, 1, 1)

    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = concatenate([branch_10, branch_11], axis=channel_axis)


    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = concatenate([branch_20, branch_21], axis=channel_axis)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x 
Example #16
Source File: inception_v4.py    From keras-inceptionV4 with Apache License 2.0
def block_inception_c(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 256, 1, 1)

    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = concatenate([branch_10, branch_11], axis=channel_axis)


    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = concatenate([branch_20, branch_21], axis=channel_axis)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x 
Example #17
Source File: minigooglenet.py    From aiexamples with Apache License 2.0
def build(width, height, depth, classes):
    input_shape = (width, height, depth)
    channel_dim = -1

    if K.image_data_format() == "channels_first":
      input_shape = (depth, width, height)
      channel_dim = 1

    inputs = Input(shape=input_shape)
    x = MiniGoogleNet.conv_module(inputs, 96, 3, 3, (1, 1), channel_dim=channel_dim)
    x = MiniGoogleNet.inception_module(x, 32, 32, channel_dim=channel_dim)
    x = MiniGoogleNet.inception_module(x, 32, 48, channel_dim=channel_dim)
    x = MiniGoogleNet.downsample_module(x, 80, channel_dim=channel_dim)

    x = MiniGoogleNet.inception_module(x, 112, 48, channel_dim=channel_dim)
    x = MiniGoogleNet.inception_module(x, 96, 64, channel_dim=channel_dim)
    x = MiniGoogleNet.inception_module(x, 80, 80, channel_dim=channel_dim)
    x = MiniGoogleNet.inception_module(x, 48, 96, channel_dim=channel_dim)
    x = MiniGoogleNet.downsample_module(x, 96, channel_dim=channel_dim)

    x = MiniGoogleNet.inception_module(x, 176, 160, channel_dim=channel_dim)
    x = MiniGoogleNet.inception_module(x, 176, 160, channel_dim=channel_dim)
    x = AveragePooling2D((7, 7))(x)
    x = Dropout(0.5)(x)

    # softmax classifier
    x = Flatten()(x)
    x = Dense(classes)(x)
    x = Activation("softmax")(x)

    model = Model(inputs, x, name="googlenet")

    return model 
Example #18
Source File: inception_v4.py    From cnn_evaluation_smoke with GNU General Public License v3.0
def block_inception_c(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 256, 1, 1)

    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = merge([branch_10, branch_11], mode='concat', concat_axis=channel_axis)


    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = merge([branch_20, branch_21], mode='concat', concat_axis=channel_axis)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x 
Example #19
Source File: train.py    From stochastic_depth_keras with MIT License
def residual_drop(x, input_shape, output_shape, strides=(1, 1)):
    global add_tables

    nb_filter = output_shape[0]
    conv = Convolution2D(nb_filter, 3, 3, subsample=strides,
                         border_mode="same", W_regularizer=l2(weight_decay))(x)
    conv = BatchNormalization(axis=1)(conv)
    conv = Activation("relu")(conv)
    conv = Convolution2D(nb_filter, 3, 3,
                         border_mode="same", W_regularizer=l2(weight_decay))(conv)
    conv = BatchNormalization(axis=1)(conv)

    if strides[0] >= 2:
        x = AveragePooling2D(strides)(x)

    if (output_shape[0] - input_shape[0]) > 0:
        pad_shape = (1,
                     output_shape[0] - input_shape[0],
                     output_shape[1],
                     output_shape[2])
        padding = K.zeros(pad_shape)
        padding = K.repeat_elements(padding, K.shape(x)[0], axis=0)
        x = Lambda(lambda y: K.concatenate([y, padding], axis=1),
                   output_shape=output_shape)(x)

    _death_rate = K.variable(death_rate)
    scale = K.ones_like(conv) - _death_rate
    conv = Lambda(lambda c: K.in_test_phase(scale * c, c),
                  output_shape=output_shape)(conv)

    out = merge([conv, x], mode="sum")
    out = Activation("relu")(out)

    gate = K.variable(1, dtype="uint8")
    add_tables += [{"death_rate": _death_rate, "gate": gate}]
    return Lambda(lambda tensors: K.switch(gate, tensors[0], tensors[1]),
                  output_shape=output_shape)([out, x]) 
Example #20
Source File: inception_v4.py    From FashionAI_Tianchi_2018 with MIT License
def inception_v4(num_classes, dropout_keep_prob, weights, include_top,width):
    '''
    Creates the inception v4 network

    Args:
    	num_classes: number of classes
    	dropout_keep_prob: float, the fraction to keep before final layer.
    
    Returns: 
    	logits: the logits outputs of the model.
    '''

    # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    if K.image_data_format() == 'channels_first':
        inputs = Input((3, width, width))
    else:
        inputs = Input((width, width, 3))

    # Make inception base
    x = inception_v4_base(inputs)


    # Final pooling and prediction
    if include_top:
        # 1 x 1 x 1536
        x = AveragePooling2D((8,8), padding='valid')(x)
        x = Dropout(dropout_keep_prob)(x)
        x = Flatten()(x)
        # 1536
        x = Dense(units=num_classes, activation='softmax')(x)

    model = Model(inputs, x, name='inception_v4')

    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
        if include_top:
            weights_path = get_file(
                'inception-v4_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='9fe79d77f793fe874470d84ca6ba4a3b')
        else:
            weights_path = get_file(
                'inception-v4_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='9296b46b5971573064d12e4669110969')
        model.load_weights(weights_path, by_name=True)
    return model 
Example #21
Source File: inception_v4.py    From keras-inceptionV4 with Apache License 2.0
def inception_v4(num_classes, dropout_keep_prob, weights, include_top):
    '''
    Creates the inception v4 network

    Args:
    	num_classes: number of classes
    	dropout_keep_prob: float, the fraction to keep before final layer.
    
    Returns: 
    	logits: the logits outputs of the model.
    '''

    # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    if K.image_data_format() == 'channels_first':
        inputs = Input((3, 299, 299))
    else:
        inputs = Input((299, 299, 3))

    # Make inception base
    x = inception_v4_base(inputs)


    # Final pooling and prediction
    if include_top:
        # 1 x 1 x 1536
        x = AveragePooling2D((8,8), padding='valid')(x)
        x = Dropout(dropout_keep_prob)(x)
        x = Flatten()(x)
        # 1536
        x = Dense(units=num_classes, activation='softmax')(x)

    model = Model(inputs, x, name='inception_v4')

    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
        if include_top:
            weights_path = get_file(
                'inception-v4_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='9fe79d77f793fe874470d84ca6ba4a3b')
        else:
            weights_path = get_file(
                'inception-v4_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='9296b46b5971573064d12e4669110969')
        model.load_weights(weights_path, by_name=True)
    return model 
Example #22
Source File: inception_resnet_v2.py    From Inception-v4 with MIT License
def create_inception_resnet_v2(nb_classes=1001, scale=True):
    '''
    Creates an inception resnet v2 network

    :param nb_classes: number of classes.txt
    :param scale: flag to add scaling of activations
    :return: Keras Model with 1 input (299x299x3) input shape and 2 outputs (final_output, auxiliary_output)
    '''

    if K.image_dim_ordering() == 'th':
        init = Input((3, 299, 299))
    else:
        init = Input((299, 299, 3))

    # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    x = inception_resnet_stem(init)

    # 10 x Inception Resnet A
    for i in range(10):
        x = inception_resnet_v2_A(x, scale_residual=scale)

    # Reduction A
    x = reduction_A(x, k=256, l=256, m=384, n=384)

    # 20 x Inception Resnet B
    for i in range(20):
        x = inception_resnet_v2_B(x, scale_residual=scale)

    # Auxiliary tower
    aux_out = AveragePooling2D((5, 5), strides=(3, 3))(x)
    aux_out = Convolution2D(128, 1, 1, border_mode='same', activation='relu')(aux_out)
    aux_out = Convolution2D(768, 5, 5, activation='relu')(aux_out)
    aux_out = Flatten()(aux_out)
    aux_out = Dense(nb_classes, activation='softmax')(aux_out)

    # Reduction Resnet B
    x = reduction_resnet_v2_B(x)

    # 10 x Inception Resnet C
    for i in range(10):
        x = inception_resnet_v2_C(x, scale_residual=scale)

    # Average Pooling
    x = AveragePooling2D((8,8))(x)

    # Dropout
    x = Dropout(0.8)(x)
    x = Flatten()(x)

    # Output
    out = Dense(output_dim=nb_classes, activation='softmax')(x)

    model = Model(init, output=[out, aux_out], name='Inception-Resnet-v2')
    return model 
Example #23
Source File: example.py    From residual_block_keras with GNU General Public License v3.0
def design_for_residual_blocks(num_channel_input=1):
    ''''''
    model = Sequential() # it's a CONTAINER, not MODEL
    # set numbers
    num_big_blocks = 3
    image_patch_sizes = [[3,3]]*num_big_blocks
    pool_sizes = [(2,2)]*num_big_blocks
    n_features = [128, 256, 512, 512, 1024]
    n_features_next = [256, 512, 512, 512, 1024]
    height_input = 32
    width_input = 32
    for conv_idx in range(num_big_blocks):    
        n_feat_here = n_features[conv_idx]
        # residual block 0
        model.add(residual_blocks.building_residual_block(  (num_channel_input, height_input, width_input),
                                                            n_feat_here,
                                                            kernel_sizes=image_patch_sizes[conv_idx]
                                                            ))

        # residual block 1 (you can add it as you want (and your resources allow..))
        if False:
            model.add(residual_blocks.building_residual_block(  (n_feat_here, height_input, width_input),
                                                                n_feat_here,
                                                                kernel_sizes=image_patch_sizes[conv_idx]
                                                                ))
        
        # the last residual block N-1
        # the last one : pad zeros, subsamples, and increase #channels
        pad_height = compute_padding_length(height_input, pool_sizes[conv_idx][0], image_patch_sizes[conv_idx][0])
        pad_width = compute_padding_length(width_input, pool_sizes[conv_idx][1], image_patch_sizes[conv_idx][1])
        model.add(ZeroPadding2D(padding=(pad_height,pad_width))) 
        height_input += 2*pad_height
        width_input += 2*pad_width
        n_feat_next = n_features_next[conv_idx]
        model.add(residual_blocks.building_residual_block(  (n_feat_here, height_input, width_input),
                                                            n_feat_next,
                                                            kernel_sizes=image_patch_sizes[conv_idx],
                                                            is_subsample=True,
                                                            subsample=pool_sizes[conv_idx]
                                                            ))

        height_input, width_input = model.output_shape[2:]
        # width_input  = int(width_input/pool_sizes[conv_idx][1])
        num_channel_input = n_feat_next

    # Add average pooling at the end:
    print('Average pooling, from (%d,%d) to (1,1)' % (height_input, width_input))
    model.add(AveragePooling2D(pool_size=(height_input, width_input)))

    return model 
Example #24
Source File: wrn_renorm.py    From BatchRenormalization with MIT License
def create_wide_residual_network(input_dim, nb_classes=100, N=2, k=1, dropout=0.0, verbose=1):
    """
    Creates a Wide Residual Network with specified parameters

    :param input: Input Keras object
    :param nb_classes: Number of output classes
    :param N: Depth of the network. Compute N = (n - 4) / 6.
              Example : For a depth of 16, n = 16, N = (16 - 4) / 6 = 2
              Example2: For a depth of 28, n = 28, N = (28 - 4) / 6 = 4
              Example3: For a depth of 40, n = 40, N = (40 - 4) / 6 = 6
    :param k: Width of the network.
    :param dropout: Adds dropout if value is greater than 0.0
    :param verbose: Debug info to describe created WRN
    :return:
    """
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    ip = Input(shape=input_dim)

    x = initial_conv(ip)
    nb_conv = 4

    x = expand_conv(x, 16, k)

    for i in range(N - 1):
        x = conv1_block(x, k, dropout)
        nb_conv += 2

    x = BatchRenormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_init='uniform')(x)
    x = Activation('relu')(x)

    x = expand_conv(x, 32, k, strides=(2, 2))

    for i in range(N - 1):
        x = conv2_block(x, k, dropout)
        nb_conv += 2

    x = BatchRenormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_init='uniform')(x)
    x = Activation('relu')(x)

    x = expand_conv(x, 64, k, strides=(2, 2))

    for i in range(N - 1):
        x = conv3_block(x, k, dropout)
        nb_conv += 2

    x = AveragePooling2D((8, 8))(x)
    x = Flatten()(x)

    x = Dense(nb_classes, activation='softmax')(x)

    model = Model(ip, x)

    if verbose: print("Wide Residual Network-%d-%d created." % (nb_conv, k))
    return model