Python keras.layers.merge.concatenate() Examples

The following code examples show how to use keras.layers.merge.concatenate(). They are taken from open-source Python projects.
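As a quick orientation, here is a minimal, self-contained sketch of the call itself; the layer names and shapes below are illustrative and not taken from any of the projects. concatenate() takes a list of tensors and joins them along the given axis, so feature vectors of width 16 and 32 merge into one of width 48.

from keras.layers import Input, Dense
from keras.layers.merge import concatenate
from keras.models import Model

# Two hypothetical input branches; the shapes are purely illustrative.
a = Input(shape=(16,))
b = Input(shape=(32,))

# Join the branches along the last (feature) axis -> output shape (None, 48).
merged = concatenate([a, b], axis=-1)
out = Dense(1, activation='sigmoid')(merged)

model = Model(inputs=[a, b], outputs=out)
model.summary()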

Example 1
Project: Inception-v4   Author: ShobhitLamba   File: inception_resnet_v2.py    MIT License
def inception_resnet_v2_A(input, scale_residual = True):
    '''Architecture of Inception_ResNet_A block which is a 35 * 35 grid module.'''
        
    ar1 = Conv2D(32, (1, 1), activation = "relu", padding = "same")(input)
    
    ar2 = Conv2D(32, (1, 1), activation = "relu", padding = "same")(input)
    ar2 = Conv2D(32, (3, 3), activation = "relu", padding = "same")(ar2)
    
    ar3 = Conv2D(32, (1, 1), activation = "relu", padding = "same")(input)
    ar3 = Conv2D(48, (3, 3), activation = "relu", padding = "same")(ar3)
    ar3 = Conv2D(64, (3, 3), activation = "relu", padding = "same")(ar3)
    
    merged = concatenate([ar1, ar2, ar3], axis = -1)
    
    ar = Conv2D(384, (1, 1), activation = "linear", padding = "same")(merged)
    if scale_residual: ar = Lambda(lambda a: a * 0.1)(ar)
    
    output = add([input, ar])
    output = BatchNormalization(axis = -1)(output)
    output = Activation("relu")(output)
    
    return output 
Example 2
Project: Inception-v4   Author: ShobhitLamba   File: inception_resnet_v2.py    MIT License
def inception_resnet_v2_B(input, scale_residual = True):
    '''Architecture of Inception_ResNet_B block which is a 17 * 17 grid module.'''
    
    br1 = Conv2D(192, (1, 1), activation = "relu", padding = "same")(input)
    
    br2 = Conv2D(128, (1, 1), activation = "relu", padding = "same")(input)
    br2 = Conv2D(160, (1, 7), activation = "relu", padding = "same")(br2)
    br2 = Conv2D(192, (7, 1), activation = "relu", padding = "same")(br2)
    
    merged = concatenate([br1, br2], axis = -1)
    
    br = Conv2D(1152, (1, 1), activation = "linear", padding = "same")(merged)
    if scale_residual: br = Lambda(lambda b: b * 0.1)(br)
    
    output = add([input, br])
    output = BatchNormalization(axis = -1)(output)
    output = Activation("relu")(output)
    
    return output 
Example 3
Project: Inception-v4   Author: ShobhitLamba   File: inception_resnet_v2.py    MIT License
def inception_resnet_v2_C(input, scale_residual = True):
    '''Architecture of Inception_ResNet_C block which is an 8 * 8 grid module.'''
    
    cr1 = Conv2D(192, (1, 1), activation = "relu", padding = "same")(input)
    
    cr2 = Conv2D(192, (1, 1), activation = "relu", padding = "same")(input)
    cr2 = Conv2D(224, (1, 3), activation = "relu", padding = "same")(cr2)
    cr2 = Conv2D(256, (3, 1), activation = "relu", padding = "same")(cr2)
    
    merged = concatenate([cr1, cr2], axis = -1)
    
    cr = Conv2D(2144, (1, 1), activation = "linear", padding = "same")(merged)
    if scale_residual: cr = Lambda(lambda c: c * 0.1)(cr)
    
    output = add([input, cr])
    output = BatchNormalization(axis = -1)(output)
    output = Activation("relu")(output)
    
    return output 
Example 4
Project: Inception-v4   Author: ShobhitLamba   File: inception_resnet_v2.py    MIT License
def reduction_resnet_v2_B(input):
    '''Architecture of a 17 * 17 to 8 * 8 Reduction_ResNet_B block.'''
    
    rbr1 = MaxPooling2D((3,3), strides = (2,2), padding = "valid")(input)
    
    rbr2 = Conv2D(256, (1, 1), activation = "relu", padding = "same")(input)
    rbr2 = Conv2D(384, (3, 3), activation = "relu", strides = (2,2))(rbr2)
    
    rbr3 = Conv2D(256, (1, 1), activation = "relu", padding = "same")(input)
    rbr3 = Conv2D(288, (3, 3), activation = "relu", strides = (2,2))(rbr3)
    
    rbr4 = Conv2D(256, (1, 1), activation = "relu", padding = "same")(input)
    rbr4 = Conv2D(288, (3, 3), activation = "relu", padding = "same")(rbr4)
    rbr4 = Conv2D(320, (3, 3), activation = "relu", strides = (2,2))(rbr4)
    
    merged = concatenate([rbr1, rbr2, rbr3, rbr4], axis = -1)
    rbr = BatchNormalization(axis = -1)(merged)
    rbr = Activation("relu")(rbr)
    
    return rbr 
Example 5
Project: Inception-v4   Author: ShobhitLamba   File: inception_v4.py    MIT License
def inception_A(input):
    '''Architecture of Inception_A block which is a 35 * 35 grid module.'''
    
    a1 = AveragePooling2D((3, 3), strides = (1, 1), padding = "same")(input)
    a1 = conv_block(a1, 96, 1, 1)
    
    a2 = conv_block(input, 96, 1, 1)
    
    a3 = conv_block(input, 64, 1, 1)
    a3 = conv_block(a3, 96, 3, 3)
    
    a4 = conv_block(input, 64, 1, 1)
    a4 = conv_block(a4, 96, 3, 3)
    a4 = conv_block(a4, 96, 3, 3)
    
    merged = concatenate([a1, a2, a3, a4], axis = -1)
    
    return merged 
Example 6
Project: Inception-v4   Author: ShobhitLamba   File: inception_v4.py    MIT License
def inception_B(input):
    '''Architecture of Inception_B block which is a 17 * 17 grid module.'''
    
    b1 = AveragePooling2D((3, 3), strides = (1, 1), padding = "same")(input)
    b1 = conv_block(b1, 128, 1, 1)
    
    b2 = conv_block(input, 384, 1, 1)
    
    b3 = conv_block(input, 192, 1, 1)
    b3 = conv_block(b3, 224, 1, 7)
    b3 = conv_block(b3, 256, 7, 1)
    
    b4 = conv_block(input, 192, 1, 1)
    b4 = conv_block(b4, 192, 7, 1)
    b4 = conv_block(b4, 224, 1, 7)
    b4 = conv_block(b4, 224, 7, 1)
    b4 = conv_block(b4, 256, 1, 7)
    
    merged = concatenate([b1, b2, b3, b4], axis = -1)
    
    return merged 
Example 7
Project: Inception-v4   Author: ShobhitLamba   File: inception_v4.py    MIT License
def inception_C(input):
    '''Architecture of Inception_C block which is an 8 * 8 grid module.'''
    
    c1 = AveragePooling2D((3, 3), strides = (1, 1), padding = "same")(input)
    c1 = conv_block(c1, 256, 1, 1)
    
    c2 = conv_block(input, 256, 1, 1)

    # branch 3: 1 * 1 stem followed by parallel 1 * 3 and 3 * 1 convolutions
    c3 = conv_block(input, 384, 1, 1)
    c31 = conv_block(c3, 256, 1, 3)
    c32 = conv_block(c3, 256, 3, 1)
    c3 = concatenate([c31, c32], axis = -1)

    # branch 4: 1 * 1, 3 * 1, 1 * 3 stem followed by parallel 1 * 3 and 3 * 1 convolutions
    c4 = conv_block(input, 384, 1, 1)
    c4 = conv_block(c4, 448, 3, 1)
    c4 = conv_block(c4, 512, 1, 3)
    c41 = conv_block(c4, 256, 1, 3)
    c42 = conv_block(c4, 256, 3, 1)
    c4 = concatenate([c41, c42], axis = -1)
  
    merged = concatenate([c1, c2, c3, c4], axis = -1)
    
    return merged 
Example 8
Project: Inception-v4   Author: ShobhitLamba   File: inception_v4.py    MIT License
def reduction_B(input):
    '''Architecture of a 17 * 17 to 8 * 8 Reduction_B block.'''
    
    rb1 = MaxPooling2D((3, 3), strides = (2, 2), padding = "same")(input)
    
    rb2 = conv_block(input, 192, 1, 1)
    rb2 = conv_block(rb2, 192, 3, 3, strides = (2, 2), padding = "same")
    
    rb3 = conv_block(input, 256, 1, 1)
    rb3 = conv_block(rb3, 256, 1, 7)
    rb3 = conv_block(rb3, 320, 7, 1)
    rb3 = conv_block(rb3, 320, 3, 3, strides = (2, 2), padding = "same")
    
    merged = concatenate([rb1, rb2, rb3], axis = -1)
    
    return merged 
Example 9
Project: Inception-v4   Author: ShobhitLamba   File: inception_resnet_v1.py    MIT License
def inception_resnet_v1_B(input, scale_residual = True):
    '''Architecture of Inception_ResNet_B block which is a 17 * 17 grid module.'''
    
    br1 = Conv2D(128, (1, 1), activation = "relu", padding = "same")(input)
    
    br2 = Conv2D(128, (1, 1), activation = "relu", padding = "same")(input)
    br2 = Conv2D(128, (1, 7), activation = "relu", padding = "same")(br2)
    br2 = Conv2D(128, (7, 1), activation = "relu", padding = "same")(br2)
    
    merged = concatenate([br1, br2], axis = -1)
    
    br = Conv2D(896, (1, 1), activation = "linear", padding = "same")(merged)
    if scale_residual: br = Lambda(lambda b: b * 0.1)(br)
    
    output = add([input, br])
    output = BatchNormalization(axis = -1)(output)
    output = Activation("relu")(output)
    
    return output 
Example 10
Project: Inception-v4   Author: ShobhitLamba   File: inception_resnet_v1.py    MIT License
def inception_resnet_v1_C(input, scale_residual = True):
    '''Architecture of Inception_ResNet_C block which is an 8 * 8 grid module.'''
    
    cr1 = Conv2D(192, (1, 1), activation = "relu", padding = "same")(input)
    
    cr2 = Conv2D(192, (1, 1), activation = "relu", padding = "same")(input)
    cr2 = Conv2D(192, (1, 3), activation = "relu", padding = "same")(cr2)
    cr2 = Conv2D(192, (3, 1), activation = "relu", padding = "same")(cr2)
    
    merged = concatenate([cr1, cr2], axis = -1)
    
    cr = Conv2D(1792, (1, 1), activation = "linear", padding = "same")(merged)
    if scale_residual: cr = Lambda(lambda c: c * 0.1)(cr)
    
    output = add([input, cr])
    output = BatchNormalization(axis = -1)(output)
    output = Activation("relu")(output)
    
    return output 
Example 11
Project: Inception-v4   Author: ShobhitLamba   File: inception_resnet_v1.py    MIT License
def reduction_resnet_A(input, k = 192, l = 224, m = 256, n = 384):
    '''Architecture of a 35 * 35 to 17 * 17 Reduction_ResNet_A block. It is used by both v1 and v2 Inception-ResNets.'''
    
    rar1 = MaxPooling2D((3,3), strides = (2,2))(input)

    rar2 = Conv2D(n, (3, 3), activation = "relu", strides = (2,2))(input)

    rar3 = Conv2D(k, (1, 1), activation = "relu", padding = "same")(input)
    rar3 = Conv2D(l, (3, 3), activation = "relu", padding = "same")(rar3)
    rar3 = Conv2D(m, (3, 3), activation = "relu", strides = (2,2))(rar3)

    merged = concatenate([rar1, rar2, rar3], axis = -1)
    rar = BatchNormalization(axis = -1)(merged)
    rar = Activation("relu")(rar)
    
    return rar 
Example 12
Project: Inception-v4   Author: ShobhitLamba   File: inception_resnet_v1.py    MIT License
def reduction_resnet_v1_B(input):
    '''Architecture of a 17 * 17 to 8 * 8 Reduction_ResNet_B block.'''
    
    rbr1 = MaxPooling2D((3,3), strides = (2,2), padding = "valid")(input)
    
    rbr2 = Conv2D(256, (1, 1), activation = "relu", padding = "same")(input)
    rbr2 = Conv2D(384, (3, 3), activation = "relu", strides = (2,2))(rbr2)
    
    rbr3 = Conv2D(256, (1, 1), activation = "relu", padding = "same")(input)
    rbr3 = Conv2D(256, (3, 3), activation = "relu", strides = (2,2))(rbr3)
    
    rbr4 = Conv2D(256, (1, 1), activation = "relu", padding = "same")(input)
    rbr4 = Conv2D(256, (3, 3), activation = "relu", padding = "same")(rbr4)
    rbr4 = Conv2D(256, (3, 3), activation = "relu", strides = (2,2))(rbr4)
    
    merged = concatenate([rbr1, rbr2, rbr3, rbr4], axis = -1)
    rbr = BatchNormalization(axis = -1)(merged)
    rbr = Activation("relu")(rbr)
    
    return rbr 
Example 13
Project: object-detection   Author: kaka-lin   File: keras_yolo.py    MIT License
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x) 
Example 14
Project: Disaster_management_robot   Author: JoelRaymann   File: keras_yolo.py    BSD 3-Clause "New" or "Revised" License
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x) 
Example 15
Project: SE_Despeckling   Author: adigasu   File: test_SE.py    MIT License
def conv2d(layer_input, filters, f_size=5):
    lay = Conv2D(filters, kernel_size=f_size)(layer_input)
    lay = BatchNormalization()(lay)
    lay_1 = Activation('relu')(lay)

    lay = Dropout(0.2)(lay_1)

    lay = Conv2D(filters, (f_size, f_size))(lay)
    lay = BatchNormalization()(lay)
    lay_2 = Activation('relu')(lay)

    lay = concatenate([Cropping2D(cropping=(2, 2))(lay_1), lay_2], axis=-1)

    return lay

# Encoder network 
Example 16
Project: SE_Despeckling   Author: adigasu   File: train_SE.py    MIT License
def conv2d(layer_input, filters, f_size=5):
    lay = Conv2D(filters, kernel_size=f_size)(layer_input)
    lay = BatchNormalization()(lay)
    lay_1 = Activation('relu')(lay)

    lay = Dropout(0.2)(lay_1)

    lay = Conv2D(filters, (f_size, f_size))(lay)
    lay = BatchNormalization()(lay)
    lay_2 = Activation('relu')(lay)

    lay = concatenate([Cropping2D(cropping=(2, 2))(lay_1), lay_2], axis=-1)

    return lay

# Encoder network 
Example 17
Project: DLToy   Author: Spground   File: keras_yolo.py    GNU General Public License v3.0
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x) 
Example 18
Project: dr.b   Author: taoddiao   File: cheX_net.py    Apache License 2.0
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
    '''

    eps = 1.1e-5
    concat_feat = x

    for i in range(nb_layers):
        branch = i+1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = concatenate([concat_feat, x], axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))

        if grow_nb_filters:
            nb_filter += growth_rate

    return concat_feat, nb_filter 
Example 19
Project: dr.b   Author: taoddiao   File: densenet121.py    Apache License 2.0
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
    '''

    eps = 1.1e-5
    concat_feat = x

    for i in range(nb_layers):
        branch = i+1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = concatenate([concat_feat, x], axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))

        if grow_nb_filters:
            nb_filter += growth_rate

    return concat_feat, nb_filter 
Example 20
Project: FashionAI_Tianchi_2018   Author: Jeremyczhj   File: inception_v4.py    MIT License
def block_inception_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3,3), strides=(1,1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x 
Example 21
Project: FashionAI_Tianchi_2018   Author: Jeremyczhj   File: inception_v4.py    MIT License
def block_reduction_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 3, 3, strides=(2,2), padding='valid')

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, strides=(2,2), padding='valid')

    branch_2 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(input)

    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x 
Example 22
Project: phoneticSimilarity   Author: ronggong   File: models.py    GNU Affero General Public License v3.0
def timbral_layer_schluter_valid(filter_density_layer1, pool_n_row, pool_n_col, dropout, input_dim):
    reshape_dim = (1, input_dim[0], input_dim[1])

    input = Input(shape=reshape_dim)

    x_1 = createModel_schluter_valid(input, 32, 50, 1, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    x_2 = createModel_schluter_valid(input, 16, 50, 5, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    x_3 = createModel_schluter_valid(input, 8, 50, 10, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    x_4 = createModel_schluter_valid(input, 32, 70, 1, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    x_5 = createModel_schluter_valid(input, 16, 70, 5, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    x_6 = createModel_schluter_valid(input, 8, 70, 10, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    merged = concatenate([x_1, x_2, x_3, x_4, x_5, x_6])


    return input, merged 
Example 23
Project: kaggle-carvana-2017   Author: killthekitten   File: models.py    MIT License
def get_unet_resnet(input_shape):
    resnet_base = ResNet50(input_shape=input_shape, include_top=False)

    if args.show_summary:
        resnet_base.summary()

    for l in resnet_base.layers:
        l.trainable = True
    conv1 = resnet_base.get_layer("activation_1").output
    conv2 = resnet_base.get_layer("activation_10").output
    conv3 = resnet_base.get_layer("activation_22").output
    conv4 = resnet_base.get_layer("activation_40").output
    conv5 = resnet_base.get_layer("activation_49").output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 192, "conv7_1")
    conv7 = conv_block_simple(conv7, 192, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    vgg = VGG16(input_shape=input_shape, input_tensor=resnet_base.input, include_top=False)
    for l in vgg.layers:
        l.trainable = False
    vgg_first_conv = vgg.get_layer("block1_conv2").output
    up10 = concatenate([UpSampling2D()(conv9), resnet_base.input, vgg_first_conv], axis=-1)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(resnet_base.input, x)
    return model 
Example 24
Project: kaggle-carvana-2017   Author: killthekitten   File: models.py    MIT License
def get_simple_unet(input_shape):
    img_input = Input(input_shape)
    conv1 = conv_block_simple(img_input, 32, "conv1_1")
    conv1 = conv_block_simple(conv1, 32, "conv1_2")
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool1")(conv1)

    conv2 = conv_block_simple(pool1, 64, "conv2_1")
    conv2 = conv_block_simple(conv2, 64, "conv2_2")
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool2")(conv2)

    conv3 = conv_block_simple(pool2, 128, "conv3_1")
    conv3 = conv_block_simple(conv3, 128, "conv3_2")
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool3")(conv3)

    conv4 = conv_block_simple(pool3, 256, "conv4_1")
    conv4 = conv_block_simple(conv4, 256, "conv4_2")
    conv4 = conv_block_simple(conv4, 256, "conv4_3")

    up5 = concatenate([UpSampling2D()(conv4), conv3], axis=-1)
    conv5 = conv_block_simple(up5, 128, "conv5_1")
    conv5 = conv_block_simple(conv5, 128, "conv5_2")

    up6 = concatenate([UpSampling2D()(conv5), conv2], axis=-1)
    conv6 = conv_block_simple(up6, 64, "conv6_1")
    conv6 = conv_block_simple(conv6, 64, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv1], axis=-1)
    conv7 = conv_block_simple(up7, 32, "conv7_1")
    conv7 = conv_block_simple(conv7, 32, "conv7_2")

    conv7 = SpatialDropout2D(0.2)(conv7)

    prediction = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv7)
    model = Model(img_input, prediction)
    return model 
Example 25
Project: kaggle-carvana-2017   Author: killthekitten   File: models.py    MIT License
def get_unet_mobilenet(input_shape):
    base_model = MobileNet(include_top=False, input_shape=input_shape)

    conv1 = base_model.get_layer('conv_pw_1_relu').output
    conv2 = base_model.get_layer('conv_pw_3_relu').output
    conv3 = base_model.get_layer('conv_pw_5_relu').output
    conv4 = base_model.get_layer('conv_pw_11_relu').output
    conv5 = base_model.get_layer('conv_pw_13_relu').output
    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 256, "conv7_1")
    conv7 = conv_block_simple(conv7, 256, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 192, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 96, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    up10 = concatenate([UpSampling2D()(conv9), base_model.input], axis=-1)
    conv10 = conv_block_simple(up10, 48, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(base_model.input, x)
    return model 
Example 26
Project: kaggle-carvana-2017   Author: killthekitten   File: models.py    MIT License
def get_unet_inception_resnet_v2(input_shape):
    base_model = InceptionResNetV2(include_top=False, input_shape=input_shape)
    conv1 = base_model.get_layer('activation_3').output
    conv2 = base_model.get_layer('activation_5').output
    conv3 = base_model.get_layer('block35_10_ac').output
    conv4 = base_model.get_layer('block17_20_ac').output
    conv5 = base_model.get_layer('conv_7b_ac').output
    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 256, "conv7_1")
    conv7 = conv_block_simple(conv7, 256, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    up10 = concatenate([UpSampling2D()(conv9), base_model.input], axis=-1)
    conv10 = conv_block_simple(up10, 48, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.4)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(base_model.input, x)
    return model 
Example 27
Project: rogueinabox   Author: rogueinabox   File: models.py    GNU General Public License v3.0
def build_model(self):
    
        initializer = initializers.random_normal(stddev=0.02)
    
        input_img = Input(shape=(self.layers, 22, 80))
        input_2 = Lambda(lambda x: x[:, 1:, :, :], output_shape=lambda x: (None, self.layers - 1, 22, 80))(input_img) # no map channel
    
        # whole map
        tower_1 = Conv2D(64, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same")(input_img)
        tower_1 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same")(tower_1)
        tower_1 = MaxPooling2D(pool_size=(22, 80), data_format="channels_first")(tower_1)
    
    
        #tower2
        tower_2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_first")(input_2)
        for _ in range(self.depth):
            tower_2 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same", activation='relu')(tower_2)
        tower_2 = MaxPooling2D(pool_size=(11, 40), data_format="channels_first")(tower_2)
    
        #tower3
        tower_3 = MaxPooling2D(pool_size=(3, 6), data_format="channels_first", padding='same')(input_2)
        for _ in range(self.depth):
            tower_3 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same", activation='relu')(tower_3)
        tower_3 = MaxPooling2D(pool_size=(8, 14), data_format="channels_first", padding='same')(tower_3)
    
        merged_layers = concatenate([tower_1, tower_2, tower_3], axis=1)
    
        flat_layer = Flatten()(merged_layers)
        
        predictions = Dense(5, kernel_initializer=initializer)(flat_layer)
        model = Model(inputs=input_img, outputs=predictions)
        
        rmsprop = RMSprop(lr=0.00025)
        model.compile(loss='mse', optimizer=rmsprop)
        return model 
Example 28
Project: rogueinabox   Author: rogueinabox   File: models.py    GNU General Public License v3.0
def build_model(self):
    
        initializer = initializers.random_normal(stddev=0.02)
    
        input_img = Input(shape=(self.layers, 22, 80))
        input_2 = Lambda(lambda x: x[:, :2, :, :], output_shape=lambda x: (None, 2, 22, 80))(input_img) # no map channel
    
        # whole map 10x1
        tower_1 = ZeroPadding2D(padding=(1, 0), data_format="channels_first")(input_2)
        tower_1 = Conv2D(32, (10, 1), data_format="channels_first", strides=(7, 1), kernel_initializer=initializer, padding="valid")(tower_1)
        tower_1 = Flatten()(tower_1)
    
        # whole map 1x10
        tower_2 = Conv2D(32, (1, 10), data_format="channels_first", strides=(1, 7), kernel_initializer=initializer, padding="valid")(input_2)
        tower_2 = Flatten()(tower_2)
    
        # whole map 3x3 then maxpool 22x80
        tower_3 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same")(input_2)
        tower_3 = MaxPooling2D(pool_size=(22, 80), data_format="channels_first")(tower_3)
        tower_3 = Flatten()(tower_3)
    
        merged_layers = concatenate([tower_1, tower_2, tower_3], axis=1)
    
        predictions = Dense(4, kernel_initializer=initializer)(merged_layers)
        model = Model(inputs=input_img, outputs=predictions)
        
        adam = Adam(lr=1e-6)
        model.compile(loss='mse', optimizer=adam)
        return model 
Example 29
Project: CalibrationNN   Author: Andres-Hernandez   File: neural_network.py    GNU General Public License v3.0
def preprocessing_fnn(x):
        if len(x[0].shape) == 1:
            p = np.concatenate(x)
            p.shape = (1, p.shape[0])
        else:
            p = np.concatenate(x, axis=1)
        return p 
Example 30
Project: ai-platform   Author: produvia   File: yolov3_weights_to_keras.py    MIT License
def _SPP_block(inp, kernels, strides):
    pools = [MaxPool2D(pool_size = pool_size, strides = stride, padding = 'same')(inp) \
             for pool_size, stride in zip(kernels, strides)]
    pools = [inp] + pools
    return concatenate(pools)


#The downsampling block is common to all YOLO-v3 models and is unaffected by the SPP or fully connected blocks or the number of labels
Example 31
Project: ai-platform   Author: produvia   File: yolov3_weights_to_keras.py    MIT License
def upSampling(x, skip_36, skip_61, layer_idx, num_classes=80):
    out_filters = 3*(num_classes+5)
    yolo_83 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx},
            {'filter':  out_filters, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': layer_idx+1}], skip=False)

    x = _conv_block(x, [{'filter':  256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+4}],\
                    skip = False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_61])
    x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+7},
            {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+8},
            {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+9},
            {'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+10},
            {'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': layer_idx+11}], skip=False)
  # Layer 92 => 94
    yolo_95 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True,  'leaky': True, \
                               'layer_idx': layer_idx+12},
                {'filter': out_filters, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': layer_idx+13}],\
                          skip=False)
  # Layer 95 => 98
    x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True,   'layer_idx': layer_idx+16}],\
                    skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_36])
  # Layer 99 => 106
    yolo_107 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True,  'leaky': True, \
                                'layer_idx':layer_idx+19},
                 {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': layer_idx+20},
                 {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': layer_idx+21},
                 {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': layer_idx+22},
                 {'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': layer_idx+23},
                 {'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True,  'leaky': True,  'layer_idx': layer_idx+24},
                 {'filter': out_filters, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': layer_idx+25}],\
                           skip=False)
    
    
    return yolo_83, yolo_95, yolo_107

#The mid-block is where the spatial pyramid pooling and the FC-block changes for the YOLOv3-SPP model are reflected
Example 32
Project: Inception-v4   Author: ShobhitLamba   File: inception_resnet_v2.py    MIT License
def resnet_v2_stem(input):
    '''The stem of the pure Inception-v4 and Inception-ResNet-v2 networks. This is the input part of those networks.'''
    
    # Input shape is 299 * 299 * 3 (TensorFlow dimension ordering)
    x = Conv2D(32, (3, 3), activation = "relu", strides = (2, 2))(input) # 149 * 149 * 32
    x = Conv2D(32, (3, 3), activation = "relu")(x) # 147 * 147 * 32
    x = Conv2D(64, (3, 3), activation = "relu", padding = "same")(x) # 147 * 147 * 64
    
    x1 = MaxPooling2D((3, 3), strides = (2, 2))(x)
    x2 = Conv2D(96, (3, 3), activation = "relu", strides = (2, 2))(x)
    
    x = concatenate([x1, x2], axis = -1) # 73 * 73 * 160
    
    x1 = Conv2D(64, (1, 1), activation = "relu", padding = "same")(x)
    x1 = Conv2D(96, (3, 3), activation = "relu")(x1)
    
    x2 = Conv2D(64, (1, 1), activation = "relu", padding = "same")(x)
    x2 = Conv2D(64, (7, 1), activation = "relu", padding = "same")(x2)
    x2 = Conv2D(64, (1, 7), activation = "relu", padding = "same")(x2)
    x2 = Conv2D(96, (3, 3), activation = "relu", padding = "valid")(x2)
    
    x = concatenate([x1, x2], axis = -1) # 71 * 71 * 192
    
    x1 = Conv2D(192, (3, 3), activation = "relu", strides = (2, 2))(x)
    
    x2 = MaxPooling2D((3, 3), strides = (2, 2))(x)
    
    x = concatenate([x1, x2], axis = -1) # 35 * 35 * 384
    
    x = BatchNormalization(axis = -1)(x)
    x = Activation("relu")(x)
    
    return x 
Example 33
Project: Inception-v4   Author: ShobhitLamba   File: inception_v4.py    MIT License
def reduction_A(input, k = 192, l = 224, m = 256, n = 384):
    '''Architecture of a 35 * 35 to 17 * 17 Reduction_A block.'''

    ra1 = MaxPooling2D((3, 3), strides = (2, 2), padding = "same")(input)
    
    ra2 = conv_block(input, n, 3, 3, strides = (2, 2), padding = "same")

    ra3 = conv_block(input, k, 1, 1)
    ra3 = conv_block(ra3, l, 3, 3)
    ra3 = conv_block(ra3, m, 3, 3, strides = (2, 2), padding = "same")

    merged = concatenate([ra1, ra2, ra3], axis = -1)
    
    return merged 
Example 34
Project: object-detection   Author: kaka-lin   File: keras_yolo.py    MIT License
def yolo_boxes_to_corners(box_xy, box_wh):
    """Convert YOLO box predictions to bounding box corners."""
    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)

    return K.concatenate([
        box_mins[..., 1:2],  # y_min
        box_mins[..., 0:1],  # x_min
        box_maxes[..., 1:2],  # y_max
        box_maxes[..., 0:1]  # x_max
    ]) 
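Note that this example uses K.concatenate, i.e. keras.backend.concatenate: the backend tensor op rather than the keras.layers.merge.concatenate layer function. Both join tensors along an axis, and the backend op also defaults to the last axis. A minimal sketch of the backend call, with purely illustrative values:

import numpy as np
from keras import backend as K

x = K.constant(np.ones((2, 3)))
y = K.constant(np.zeros((2, 3)))

# Backend op on raw tensors; default axis is -1, so the result has shape (2, 6).
z = K.concatenate([x, y])
print(K.int_shape(z))  # (2, 6)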
Example 35
Project: keras-image-segmentation   Author: dhkim0225   File: psp_temp.py    MIT License
def pyramid_pooling_block(input_tensor, bin_sizes):
    concat_list = [input_tensor]
    h = input_tensor.shape[1].value
    w = input_tensor.shape[2].value

    for bin_size in bin_sizes:
        x = AveragePooling2D(pool_size=(h//bin_size, w//bin_size), strides=(h//bin_size, w//bin_size))(input_tensor)
        x = Conv2D(512, kernel_size=1)(x)
        x = Lambda(lambda x: tf.image.resize_images(x, (h, w)))(x)

        concat_list.append(x)

    return concatenate(concat_list) 
Example 36
Project: keras-image-segmentation   Author: dhkim0225   File: pspnet.py    MIT License
def pyramid_pooling_block(input_tensor, bin_sizes):
    concat_list = [input_tensor]
    h = input_tensor.shape[1].value
    w = input_tensor.shape[2].value

    for bin_size in bin_sizes:
        x = AveragePooling2D(pool_size=(h//bin_size, w//bin_size), strides=(h//bin_size, w//bin_size))(input_tensor)
        x = Conv2D(512, kernel_size=1)(x)
        x = Lambda(lambda x: tf.image.resize_images(x, (h, w)))(x)

        concat_list.append(x)

    return concatenate(concat_list) 
Example 37
Project: keras-image-segmentation   Author: dhkim0225   File: pspnet.py    MIT License
def pyramid_pooling_block(input_tensor, bin_sizes):
    concat_list = [input_tensor]
    h = input_tensor.shape[1].value
    w = input_tensor.shape[2].value

    for bin_size in bin_sizes:
        x = AveragePooling2D(pool_size=(h//bin_size, w//bin_size), strides=(h//bin_size, w//bin_size))(input_tensor)
        x = Conv2D(512, kernel_size=1)(x)
        x = Lambda(lambda x: tf.image.resize_images(x, (h, w)))(x)

        concat_list.append(x)

    return concatenate(concat_list) 
Example 38
Project: keras-global-context-networks   Author: titu1994   File: gc_densenet.py    MIT License
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
                  grow_nb_filters=True, return_concat_list=False):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to decide to allow number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output
    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]

    for i in range(nb_layers):
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)

        x = concatenate([x, cb], axis=concat_axis)

        if grow_nb_filters:
            nb_filter += growth_rate

    # global context block
    x = global_context_block(x)

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter 
Example 39
Project: dep-gan-im   Author: febrianrachmadi   File: RLD44-depgan-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License
def log_images(self, tag, images, step, dtype='RGB', denorm=[0,255]):
        """Logs a list of images."""

        im_summaries = []
        for nr, img in enumerate(images):
            # Write the image to a string
            s = StringIO()
            if dtype == 'RGB':
                img = ( (img+1)/2*denorm[1]).clip(denorm[0],denorm[1]).astype('uint8')
                plt.imsave(s, img, format='png')
            else:
#                 img_t = np.concatenate((img, img), axis=-1)
#                 img_t = np.concatenate((img_t, img), axis=-1)
                plt.imsave(s, np.squeeze(img), cmap='viridis', format='png')                

            # Create an Image object
            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
                                       height=img.shape[0],
                                       width=img.shape[1])
            # Create a Summary value
            im_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, nr),
                                                 image=img_sum))

        # Create and write Summary
        summary = tf.Summary(value=im_summaries)
        self.writer.add_summary(summary, step) 
Example 40
Project: dep-gan-im   Author: febrianrachmadi   File: RLD441-depgan-twoCritics-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License
def log_images(self, tag, images, step, dtype='RGB', denorm=[0,255]):
        """Logs a list of images."""

        im_summaries = []
        for nr, img in enumerate(images):
            # Write the image to a string
            s = StringIO()
            if dtype == 'RGB':
                img = ( (img+1)/2*denorm[1]).clip(denorm[0],denorm[1]).astype('uint8')
                plt.imsave(s, img, format='png')
            else:
#                 img_t = np.concatenate((img, img), axis=-1)
#                 img_t = np.concatenate((img_t, img), axis=-1)
                plt.imsave(s, np.squeeze(img), cmap='viridis', format='png')                

            # Create an Image object
            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
                                       height=img.shape[0],
                                       width=img.shape[1])
            # Create a Summary value
            im_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, nr),
                                                 image=img_sum))

        # Create and write Summary
        summary = tf.Summary(value=im_summaries)
        self.writer.add_summary(summary, step) 
Example 41
Project: musical-onset-efficient   Author: ronggong   File: models.py    GNU Affero General Public License v3.0
def temporal_layer_schluter(filter_density_layer1, pool_n_row, pool_n_col, dropout, input_dim):
    reshape_dim = (1, input_dim[0], input_dim[1])

    input = Input(shape=reshape_dim)

    x_1 = createModel_schluter(input, 12, 1, 7, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    x_2 = createModel_schluter(input, 6, 3, 7, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    x_3 = createModel_schluter(input, 3, 5, 7, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    x_4 = createModel_schluter(input, 12, 1, 12, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    x_5 = createModel_schluter(input, 6, 3, 12, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    x_6 = createModel_schluter(input, 3, 5, 12, filter_density_layer1, pool_n_row, pool_n_col,
                          dropout)

    merged = concatenate([x_1, x_2, x_3, x_4, x_5, x_6], axis=1)

    return input, merged 
Example 42
Project: Coloring-greyscale-images   Author: emilwallner   File: colorize_base.py    MIT License
def concatenateNumba(x, y):
    return np.concatenate([x, y], axis=-1) 
Example 43
Project: OCR   Author: DongfeiJi   File: densenet.py    Apache License 2.0
def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=0.2, weight_decay=1e-4):
    for i in range(nb_layers):
        cb = conv_block(x, growth_rate, dropout_rate, weight_decay)
        x = concatenate([x, cb], axis=-1)
        nb_filter += growth_rate
    return x, nb_filter 
Example 44
Project: OCR   Author: DongfeiJi   File: densenet.py    Apache License 2.0
def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=0.2, weight_decay=1e-4):
    for i in range(nb_layers):
        cb = conv_block(x, growth_rate, dropout_rate, weight_decay)
        x = concatenate([x, cb], axis=-1)
        nb_filter += growth_rate
    return x, nb_filter 
Example 45
Project: keras-ocr   Author: AlbanSeurat   File: ocr.py    Apache License 2.0
def __init__(self, lexicon_size, weights="data/dictnet.mat", cnn_trainable=False):
        parent = dict.DictNet(weights=weights, trainable=cnn_trainable)

        layer4_output = parent.get_layer("layer4").output

        x = Reshape(target_shape=(52, 512))(layer4_output)

        gru_1 = GRU(self.rnn_size, return_sequences=True,
                    kernel_initializer='he_normal', name='gru1')(x)

        gru_1b = GRU(self.rnn_size, return_sequences=True,
                     go_backwards=True, kernel_initializer='he_normal',
                     name='gru1_b')(x)
        gru1_merged = add([gru_1, gru_1b])
        gru_2 = GRU(self.rnn_size, return_sequences=True,
                    kernel_initializer='he_normal', name='gru2')(gru1_merged)
        gru_2b = GRU(self.rnn_size, return_sequences=True, go_backwards=True,
                     kernel_initializer='he_normal', name='gru2_b')(gru1_merged)

        gru_concat = concatenate([gru_2, gru_2b])

        # transforms RNN output to character activations:
        inner = Dense(lexicon_size, kernel_initializer='he_normal',
                      name='dense2')(gru_concat)
        y_pred = Activation('softmax', name='softmax')(inner)

        super(_OcrBase, self).__init__(inputs=parent.input, outputs=y_pred, name="_OcrBase") 
Example 46
Project: Multi-level-DCNet   Author: ssrp   File: densenet.py    GNU General Public License v3.0
def DenseBlock(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
                  grow_nb_filters=True, return_concat_list=False):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to decide to allow number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output
    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]

    for i in range(nb_layers):
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)

        x = concatenate([x, cb], axis=concat_axis)

        if grow_nb_filters:
            nb_filter += growth_rate

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter 
Example 47
Project: Model-Playgrounds   Author: OlafenwaMoses   File: densenet.py    MIT License
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
                  grow_nb_filters=True, return_concat_list=False):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to decide to allow number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output
    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]

    for i in range(nb_layers):
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)

        x = concatenate([x, cb], axis=concat_axis)

        if grow_nb_filters:
            nb_filter += growth_rate

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter 
Example 48
Project: Disaster_management_robot   Author: JoelRaymann   File: keras_yolo.py    BSD 3-Clause "New" or "Revised" License
def yolo_boxes_to_corners(box_xy, box_wh):
    """Convert YOLO box predictions to bounding box corners."""
    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)

    return K.concatenate([
        box_mins[..., 1:2],  # y_min
        box_mins[..., 0:1],  # x_min
        box_maxes[..., 1:2],  # y_max
        box_maxes[..., 0:1]  # x_max
    ]) 
Example 49
Project: wmh_ibbmTum   Author: hongweilibran   File: test_leave_one_out.py    GNU General Public License v3.0
def Utrecht_preprocessing(FLAIR_image, T1_image):

    channel_num = 2
    #print(np.shape(FLAIR_image))
    num_selected_slice = np.shape(FLAIR_image)[0]
    image_rows_Dataset = np.shape(FLAIR_image)[1]
    image_cols_Dataset = np.shape(FLAIR_image)[2]
    T1_image = np.float32(T1_image)

    brain_mask_FLAIR = np.ndarray((np.shape(FLAIR_image)[0],image_rows_Dataset, image_cols_Dataset), dtype=np.float32)
    brain_mask_T1 = np.ndarray((np.shape(FLAIR_image)[0],image_rows_Dataset, image_cols_Dataset), dtype=np.float32)
    imgs_two_channels = np.ndarray((num_selected_slice, rows_standard, cols_standard, channel_num), dtype=np.float32)
    imgs_mask_two_channels = np.ndarray((num_selected_slice, rows_standard, cols_standard,1), dtype=np.float32)

    # FLAIR --------------------------------------------
    brain_mask_FLAIR[FLAIR_image >= thresh_FLAIR] = 1
    brain_mask_FLAIR[FLAIR_image < thresh_FLAIR] = 0
    for iii in range(np.shape(FLAIR_image)[0]):
        brain_mask_FLAIR[iii,:,:] = scipy.ndimage.morphology.binary_fill_holes(brain_mask_FLAIR[iii,:,:])  # fill the holes inside the brain
    # crop to the standard size around the image centre (integer division keeps the indices valid)
    FLAIR_image = FLAIR_image[:, (image_rows_Dataset//2-rows_standard//2):(image_rows_Dataset//2+rows_standard//2), (image_cols_Dataset//2-cols_standard//2):(image_cols_Dataset//2+cols_standard//2)]
    brain_mask_FLAIR = brain_mask_FLAIR[:, (image_rows_Dataset//2-rows_standard//2):(image_rows_Dataset//2+rows_standard//2), (image_cols_Dataset//2-cols_standard//2):(image_cols_Dataset//2+cols_standard//2)]
    ###------Gaussian normalization over the brain mask
    FLAIR_image -= np.mean(FLAIR_image[brain_mask_FLAIR == 1])
    FLAIR_image /= np.std(FLAIR_image[brain_mask_FLAIR == 1])
    # T1 -----------------------------------------------
    brain_mask_T1[T1_image >= thresh_T1] = 1
    brain_mask_T1[T1_image < thresh_T1] = 0
    for iii in range(np.shape(T1_image)[0]):
        brain_mask_T1[iii,:,:] = scipy.ndimage.morphology.binary_fill_holes(brain_mask_T1[iii,:,:])  # fill the holes inside the brain
    T1_image = T1_image[:, (image_rows_Dataset//2-rows_standard//2):(image_rows_Dataset//2+rows_standard//2), (image_cols_Dataset//2-cols_standard//2):(image_cols_Dataset//2+cols_standard//2)]
    brain_mask_T1 = brain_mask_T1[:, (image_rows_Dataset//2-rows_standard//2):(image_rows_Dataset//2+rows_standard//2), (image_cols_Dataset//2-cols_standard//2):(image_cols_Dataset//2+cols_standard//2)]
    #------Gaussian normalization
    T1_image -= np.mean(T1_image[brain_mask_T1 == 1])
    T1_image /= np.std(T1_image[brain_mask_T1 == 1])
    #---------------------------------------------------
    FLAIR_image  = FLAIR_image[..., np.newaxis]
    T1_image  = T1_image[..., np.newaxis]
    imgs_two_channels = np.concatenate((FLAIR_image, T1_image), axis = 3)
    #print(np.shape(imgs_two_channels))
    return imgs_two_channels 
Example 50
Project: keras-chinese-resume-parser-and-analyzer   Author: chen0040   File: cnn.py    MIT License
def define_model(self, length, vocab_size):

        embedding_size = 100
        cnn_filter_size = 32

        inputs1 = Input(shape=(length,))
        embedding1 = Embedding(vocab_size, embedding_size)(inputs1)
        conv1 = Conv1D(filters=cnn_filter_size, kernel_size=4, activation='relu')(
            embedding1)
        drop1 = Dropout(0.5)(conv1)
        pool1 = MaxPooling1D(pool_size=2)(drop1)
        flat1 = Flatten()(pool1)

        inputs2 = Input(shape=(length,))
        embedding2 = Embedding(vocab_size, embedding_size)(inputs2)
        conv2 = Conv1D(filters=cnn_filter_size, kernel_size=6, activation='relu')(
            embedding2)
        drop2 = Dropout(0.5)(conv2)
        pool2 = MaxPooling1D(pool_size=2)(drop2)
        flat2 = Flatten()(pool2)

        inputs3 = Input(shape=(length,))
        embedding3 = Embedding(vocab_size, embedding_size)(inputs3)
        conv3 = Conv1D(filters=cnn_filter_size, kernel_size=8, activation='relu')(
            embedding3)
        drop3 = Dropout(0.5)(conv3)
        pool3 = MaxPooling1D(pool_size=2)(drop3)
        flat3 = Flatten()(pool3)

        merged = concatenate([flat1, flat2, flat3])
        # interpretation
        dense1 = Dense(10, activation='relu')(merged)

        outputs = Dense(units=len(self.labels), activation='softmax')(dense1)

        model = Model(inputs=[inputs1, inputs2, inputs3], outputs=outputs)
        # compile
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        # summarize
        print(model.summary())
        return model 
Example 51
Project: Neural-Chatbot   Author: saurabhmathur96   File: sequence_blocks.py    GNU General Public License v3.0
def Encoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _encoder(x):
            if bidirectional:
                branch_1 = GRU(int(hidden_size/2), activation='linear',
                               return_sequences=return_sequences, go_backwards=False)(x)
                branch_2 = GRU(int(hidden_size/2), activation='linear',
                               return_sequences=return_sequences, go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = GRU(hidden_size, activation='linear',
                        return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    else:
        def _encoder(x):
            if bidirectional:
                branch_1 = LSTM(int(hidden_size/2), activation='linear',
                                return_sequences=return_sequences, go_backwards=False)(x)
                branch_2 = LSTM(int(hidden_size/2), activation='linear',
                                return_sequences=return_sequences, go_backwards=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = LSTM(hidden_size, activation='linear',
                         return_sequences=return_sequences)(x)
                x = activation(x)
                return x
    return _encoder 
Example 52
Project: Neural-Chatbot   Author: saurabhmathur96   File: sequence_blocks.py    GNU General Public License v3.0
def AttentionDecoder(hidden_size, activation=None, return_sequences=True, bidirectional=False, use_gru=True):
    if activation is None:
        activation = ELU()
    if use_gru:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                go_backwards=False), attention, single_attention_param=True)(x)
                branch_2 = AttentionWrapper(GRU(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                go_backwards=True), attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                return activation(x)
            else:
                x = AttentionWrapper(GRU(hidden_size, activation='linear',
                                         return_sequences=return_sequences), attention, single_attention_param=True)(x)
                x = activation(x)
                return x
    else:
        def _decoder(x, attention):
            if bidirectional:
                branch_1 = AttentionWrapper(LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                 go_backwards=False), attention, single_attention_param=True)(x)
                branch_2 = AttentionWrapper(LSTM(int(hidden_size/2), activation='linear', return_sequences=return_sequences,
                                                 go_backwards=True), attention, single_attention_param=True)(x)
                x = concatenate([branch_1, branch_2])
                x = activation(x)
                return x
            else:
                x = AttentionWrapper(LSTM(hidden_size, activation='linear', return_sequences=return_sequences),
                                     attention, single_attention_param=True)(x)
                x = activation(x)
                return x

    return _decoder 
Example 53
Project: unet-pspnet-unet2d-segment-implement   Author: qianmingduowan   File: pspnet.py    GNU General Public License v3.0 5 votes
def pyramid_pooling_block(input_tensor, bin_sizes):
    concat_list = [input_tensor]
    h = input_tensor.shape[1].value
    w = input_tensor.shape[2].value

    for bin_size in bin_sizes:
        x = AveragePooling2D(pool_size=(h//bin_size, w//bin_size), strides=(h//bin_size, w//bin_size))(input_tensor)
        x = Conv2D(512, kernel_size=1)(x)
        x = Lambda(lambda x: tf.image.resize_images(x, (h, w)))(x)

        concat_list.append(x)

    return concatenate(concat_list) 
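A minimal usage sketch (not from the project; the 48 x 48 x 1024 shape and the usual PSPNet bin sizes are assumptions): because h and w are read from input_tensor.shape[...].value, the input must have a static spatial shape.

from keras.layers import Input

feats = Input(shape=(48, 48, 1024))                         # static H and W are required
out = pyramid_pooling_block(feats, bin_sizes=[1, 2, 3, 6])
# out carries 1024 + 4 * 512 = 3072 channels at 48 x 48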
Example 54
Project: CBAM-keras   Author: kobiso   File: resnext.py    MIT License 5 votes
def __grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    ''' Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = LeakyReLU()(x)
        return x

    for c in range(cardinality):
        # slice out group c along the channel axis, for either data format
        x = Lambda(lambda z: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last' else
                   z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = LeakyReLU()(x)

    return x 
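A shape sketch (not from the project; the numbers are illustrative): with cardinality 8 over a 256-channel input, each Lambda slice takes 32 channels and the final concatenation restores 8 * 32 = 256 channels.

from keras.layers import Input

x_in = Input(shape=(56, 56, 256))
out = __grouped_convolution_block(x_in, grouped_channels=32, cardinality=8, strides=1)
# eight 32-channel groups, convolved independently, then concatenated back to 256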
Example 55
Project: CBAM-keras   Author: kobiso   File: densenet.py    MIT License 5 votes
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
                  grow_nb_filters=True, return_concat_list=False, attention_module=None):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to decide to allow number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output
    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]

    for i in range(nb_layers):
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)

        x = concatenate([x, cb], axis=concat_axis)

        if grow_nb_filters:
            nb_filter += growth_rate

    # attention_module
    if attention_module is not None:
        x = attach_attention_module(x, attention_module)

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter 
Example 56
Project: CBAM-keras   Author: kobiso   File: densenet-checkpoint.py    MIT License 5 votes
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
                  grow_nb_filters=True, return_concat_list=False, attention_module=None):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to decide to allow number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output
    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]

    for i in range(nb_layers):
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)

        x = concatenate([x, cb], axis=concat_axis)

        if grow_nb_filters:
            nb_filter += growth_rate

    # attention_module
    if attention_module == 'se_block':
        x = se_block(x)
    if attention_module == 'cbam_block':
        x = cbam_block(x)

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter 
Example 57
Project: CBAM-keras   Author: kobiso   File: resnext-checkpoint.py    MIT License 5 votes
def __grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    ''' Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = LeakyReLU()(x)
        return x

    for c in range(cardinality):
        # slice out group c along the channel axis, for either data format
        x = Lambda(lambda z: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last' else
                   z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = LeakyReLU()(x)

    return x 
Example 58
Project: DLToy   Author: Spground   File: keras_yolo.py    GNU General Public License v3.0 5 votes
def yolo_boxes_to_corners(box_xy, box_wh):
    """Convert YOLO box predictions to bounding box corners."""
    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)

    return K.concatenate([
        box_mins[..., 1:2],  # y_min
        box_mins[..., 0:1],  # x_min
        box_maxes[..., 1:2],  # y_max
        box_maxes[..., 0:1]  # x_max
    ]) 
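A worked sketch with hypothetical numbers: a box centred at (x, y) = (0.5, 0.4) with (w, h) = (0.2, 0.3) maps to corners in [y_min, x_min, y_max, x_max] order.

import numpy as np
from keras import backend as K

box_xy = K.constant(np.array([[0.5, 0.4]]))    # predicted centre (x, y)
box_wh = K.constant(np.array([[0.2, 0.3]]))    # predicted width and height
corners = yolo_boxes_to_corners(box_xy, box_wh)
# K.eval(corners) -> [[0.25, 0.4, 0.55, 0.6]]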
Example 59
Project: lsun_2017   Author: ternaus   File: unet_optimized.py    MIT License 5 votes
def get_unet0(num_start_filters=32):
    inputs = Input((img_rows, img_cols, num_channels))
    conv1 = ConvBN2(inputs, num_start_filters)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = ConvBN2(pool1, 2 * num_start_filters)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = ConvBN2(pool2, 4 * num_start_filters)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = ConvBN2(pool3, 8 * num_start_filters)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = ConvBN2(pool4, 16 * num_start_filters)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    conv6 = ConvBN2(up6, 8 * num_start_filters)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    conv7 = ConvBN2(up7, 4 * num_start_filters)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    conv8 = ConvBN2(up8, 2 * num_start_filters)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(up9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('selu')(conv9)
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(conv9)
    crop9 = Cropping2D(cropping=((16, 16), (16, 16)))(conv9)
    conv9 = BatchNormalization()(crop9)
    conv9 = Activation('selu')(conv9)

    conv10 = Conv2D(num_mask_channels, (1, 1))(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    return model 
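A shape note (assuming the module globals img_rows = img_cols = 128): the four MaxPooling2D/UpSampling2D pairs require spatial dims divisible by 16, and the final Cropping2D removes 16 pixels per border, so the model maps (128, 128, num_channels) to (96, 96, num_mask_channels).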
Example 60
Project: lsun_2017   Author: ternaus   File: unet.py    MIT License 5 votes
def get_unet0(num_start_filters=32):
    inputs = Input((img_rows, img_cols, num_channels))
    conv1 = ConvBN2(inputs, num_start_filters)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = ConvBN2(pool1, 2 * num_start_filters)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = ConvBN2(pool2, 4 * num_start_filters)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = ConvBN2(pool3, 8 * num_start_filters)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = ConvBN2(pool4, 16 * num_start_filters)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    conv6 = ConvBN2(up6, 8 * num_start_filters)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    conv7 = ConvBN2(up7, 4 * num_start_filters)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    conv8 = ConvBN2(up8, 2 * num_start_filters)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(up9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('selu')(conv9)
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(conv9)
    crop9 = Cropping2D(cropping=((16, 16), (16, 16)))(conv9)
    conv9 = BatchNormalization()(crop9)
    conv9 = Activation('selu')(conv9)

    conv10 = Conv2D(num_mask_channels, (1, 1))(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    return model 
Example 61
Project: MSCNN_Keras   Author: raven-dehaze-work   File: net_model.py    GNU General Public License v3.0 5 votes
def _build_fineNet(input_img, coarseNet):
    """
    建立fineNet
    :param input_img: 输入图片的tensor
    :param coarseNet: coarseNet的Tensor
    :return: fineNet
    """
    # paper中的fine net 卷积kernel为4. 但经查看作者提供的源代码,第一层设置的6
    conv1 = Conv2D(6, (7, 7), padding='same', activation='relu', name='fineNet/conv1')(input_img)
    pool1 = MaxPooling2D((2, 2), name='fineNet/pool1')(conv1)
    upsample1 = UpSampling2D((2, 2), name='fineNet/upsample1')(pool1)

    # concatenate with the coarseNet output
    concat = concatenate([upsample1, coarseNet], axis=3, name='concat')
    normalize1 = BatchNormalization(axis=3, name='fineNet/bn1')(concat)
    # dropout1 = Dropout(0.5, name='fineNet/dropout1')(normalize1)

    conv2 = Conv2D(5, (5, 5), padding='same', activation='relu',name='fineNet/conv2')(normalize1)
    pool2 = MaxPooling2D((2, 2), name='fineNet/pool2')(conv2)
    upsample2 = UpSampling2D((2, 2), name='fineNet/upsample2')(pool2)
    normalize2 = BatchNormalization(axis=3, name='fineNet/bn2')(upsample2)
    # dropout2 = Dropout(0.5, name='fineNet/dropout2')(normalize2)

    conv3 = Conv2D(10, (3, 3), padding='same', activation='relu',name='fineNet/conv3')(normalize2)
    pool3 = MaxPooling2D((2, 2), name='fineNet/pool3')(conv3)
    upsample3 = UpSampling2D((2, 2), name='fineNet/upsample3')(pool3)
    # dropout3 = Dropout(0.5, name='fineNet/dropout3')(upsample3)

    linear = LinearCombine(1, name='fineNet/linear_combine')(upsample3)
    return linear 
Example 62
Project: fmow-challenge   Author: pfr-opensource   File: densenet.py    Apache License 2.0 5 votes
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
                  grow_nb_filters=True, return_concat_list=False):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to decide to allow number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output
    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]

    for i in range(nb_layers):
        cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(cb)

        x = concatenate([x, cb], axis=concat_axis)

        if grow_nb_filters:
            nb_filter += growth_rate

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter 
Example 63
Project: keras-SparseNet   Author: titu1994   File: sparsenet.py    MIT License 5 votes
def _dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1e-4,
                 grow_nb_filters=True, return_concat_list=False):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        bottleneck: bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: flag to decide to allow number of filters to grow
        return_concat_list: return the list of feature maps along with the actual output
    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_list = [x]
    channel_list = [nb_filter]

    for i in range(nb_layers):
        #nb_channels = sum(_exponential_index_fetch(channel_list))

        x = _conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        x_list.append(x)

        fetch_outputs = _exponential_index_fetch(x_list)
        x = concatenate(fetch_outputs, axis=concat_axis)

        channel_list.append(growth_rate)

    if grow_nb_filters:
        nb_filter = sum(_exponential_index_fetch(channel_list))

    if return_concat_list:
        return x, nb_filter, x_list
    else:
        return x, nb_filter 
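The helper _exponential_index_fetch is defined elsewhere in sparsenet.py; a plausible sketch of it, based on SparseNet's idea of aggregating exponentially spaced predecessors (not the project's exact implementation), is:

def _exponential_index_fetch(x_list):
    # gather entries at offsets 1, 2, 4, ... back from the end of the list
    count = len(x_list)
    i = 1
    fetched = []
    while i <= count:
        fetched.append(x_list[count - i])
        i *= 2
    return fetched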
Example 64
Project: pc2pix   Author: roatienza   File: ptcloud_stacked_ae.py    MIT License 5 votes
def compression_layer(self, x, y, maxpool=True):
        if maxpool:
            y = MaxPooling1D()(y)
        x = concatenate([x, y])

        y = Conv1D(filters=64,
                   kernel_size=1,
                   activation='relu',
                   padding='same')(x)
        return x, y 
Example 65
Project: parapred   Author: eliberis   File: model.py    MIT License 5 votes
def ab_ag_seq_model(max_ag_len, max_cdr_len):
    input_ag = Input(shape=(max_ag_len, NUM_FEATURES))
    ag_seq = Masking()(input_ag)

    enc_ag = Bidirectional(LSTM(128, dropout=0.1, recurrent_dropout=0.1),
                           merge_mode='concat')(ag_seq)

    input_ab = Input(shape=(max_cdr_len, NUM_FEATURES))
    label_mask = Input(shape=(max_cdr_len,))

    seq = Masking()(input_ab)

    loc_fts = MaskedConvolution1D(64, 5, padding='same', activation='elu')(seq)

    glb_fts = Bidirectional(LSTM(256, dropout=0.15, recurrent_dropout=0.2,
                                 return_sequences=True),
                            merge_mode='concat')(loc_fts)

    enc_ag_rep = RepeatVector(max_cdr_len)(enc_ag)
    ab_ag_repr = concatenate([glb_fts, enc_ag_rep])
    ab_ag_repr = MaskingByLambda(mask_by_input(label_mask))(ab_ag_repr)
    ab_ag_repr = Dropout(0.3)(ab_ag_repr)

    aa_probs = TimeDistributed(Dense(1, activation='sigmoid'))(ab_ag_repr)
    model = Model(inputs=[input_ag, input_ab, label_mask], outputs=aa_probs)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['binary_accuracy', false_pos, false_neg],
                  sample_weight_mode="temporal")
    return model 
Example 66
Project: Ruban   Author: RubanSeven   File: densenet.py    Apache License 2.0 5 votes
def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=None):
    for i in range(nb_layers):
        cb = conv_block(x, growth_rate, dropout_rate)
        x = concatenate([x, cb], axis=-1)
        nb_filter += growth_rate
    return x, nb_filter 
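A minimal sketch of the channel bookkeeping (conv_block is the project's helper; a stand-in is used here so the snippet runs): after nb_layers concatenations the output has nb_filter + nb_layers * growth_rate channels.

from keras.layers import Input, Conv2D

def conv_block(x, growth_rate, dropout_rate=None):   # stand-in for the real helper
    return Conv2D(growth_rate, (3, 3), padding='same', activation='relu')(x)

x_in = Input(shape=(32, 32, 16))
y, nb_filter = dense_block(x_in, nb_layers=4, nb_filter=16, growth_rate=12)
# y has 16 + 4 * 12 = 64 channels and nb_filter == 64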
Example 67
Project: kaggle-carvana-2017   Author: killthekitten   File: models.py    MIT License 4 votes
def get_vgg_7conv(input_shape):
    img_input = Input(input_shape)
    vgg16_base = VGG16(input_tensor=img_input, include_top=False)
    for l in vgg16_base.layers:
        l.trainable = True
    conv1 = vgg16_base.get_layer("block1_conv2").output
    conv2 = vgg16_base.get_layer("block2_conv2").output
    conv3 = vgg16_base.get_layer("block3_conv3").output
    pool3 = vgg16_base.get_layer("block3_pool").output

    conv4 = Conv2D(384, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block4_conv1")(pool3)
    conv4 = Conv2D(384, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block4_conv2")(conv4)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(conv4)

    conv5 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block5_conv1")(pool4)
    conv5 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block5_conv2")(conv5)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(conv5)

    conv6 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block6_conv1")(pool5)
    conv6 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block6_conv2")(conv6)
    pool6 = MaxPooling2D((2, 2), strides=(2, 2), name='block6_pool')(conv6)

    conv7 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block7_conv1")(pool6)
    conv7 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal", name="block7_conv2")(conv7)

    up8 = concatenate([Conv2DTranspose(384, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv7), conv6], axis=3)
    conv8 = Conv2D(384, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up8)

    up9 = concatenate([Conv2DTranspose(256, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv8), conv5], axis=3)
    conv9 = Conv2D(256, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up9)

    up10 = concatenate([Conv2DTranspose(192, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv9), conv4], axis=3)
    conv10 = Conv2D(192, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up10)

    up11 = concatenate([Conv2DTranspose(128, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv10), conv3], axis=3)
    conv11 = Conv2D(128, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up11)

    up12 = concatenate([Conv2DTranspose(64, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv11), conv2], axis=3)
    conv12 = Conv2D(64, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up12)

    up13 = concatenate([Conv2DTranspose(32, (3, 3), activation="relu", kernel_initializer="he_normal", strides=(2, 2), padding='same')(conv12), conv1], axis=3)
    conv13 = Conv2D(32, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up13)

    conv13 = Conv2D(1, (1, 1))(conv13)
    conv13 = Activation("sigmoid")(conv13)
    model = Model(img_input, conv13)
    return model 
Example 68
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License 4 votes
def unet(input_shape):
    inputs = Input(input_shape)
    conv0 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(inputs)
    conv0 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv0)
    conv0 = Conv2D(32, kernel_size=(2, 2), strides=(1, 1), activation='relu')(conv0)

    conv1 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv0)
    conv1 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(256, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(pool3)
    conv4 = Conv2D(256, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2D(512, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(pool4)
    conv5 = Conv2D(512, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv5)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=3)
    conv6 = Conv2D(256, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(up6)
    conv6 = Conv2D(256, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv6)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=3)
    conv7 = Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(up7)
    conv7 = Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv7)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=3)
    conv8 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(up8)
    conv8 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv8)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=3)
    conv9 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(up9)
    conv9 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(conv9)

#    conv10 = Conv2D(nb_classes, kernel_size=(1, 1), strides=(1, 1), padding='same')(conv9)

    deconv10 = Conv2DTranspose(nb_classes, kernel_size=(2, 2), strides=(1, 1), activation='relu', trainable=False)(conv9)
    conv10 = Conv2D(nb_classes, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same')(deconv10)
    conv10 = Conv2D(nb_classes, kernel_size=(3, 3), strides=(1, 1), padding='same')(conv10)

    model = Model(inputs=inputs, outputs=conv10)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss=softmax_sparse_crossentropy_ignoring_first_label,
                  optimizer=adam,
                  metrics=[sparse_accuracy])
    return model
Example 69
Project: head-detection-using-yolo   Author: pranoyr   File: backend.py    MIT License 4 votes
def __init__(self, input_size):

        # define some auxiliary variables and the fire module
        sq1x1  = "squeeze1x1"
        exp1x1 = "expand1x1"
        exp3x3 = "expand3x3"
        relu   = "relu_"

        def fire_module(x, fire_id, squeeze=16, expand=64):
            s_id = 'fire' + str(fire_id) + '/'

            x     = Conv2D(squeeze, (1, 1), padding='valid', name=s_id + sq1x1)(x)
            x     = Activation('relu', name=s_id + relu + sq1x1)(x)

            left  = Conv2D(expand,  (1, 1), padding='valid', name=s_id + exp1x1)(x)
            left  = Activation('relu', name=s_id + relu + exp1x1)(left)

            right = Conv2D(expand,  (3, 3), padding='same',  name=s_id + exp3x3)(x)
            right = Activation('relu', name=s_id + relu + exp3x3)(right)

            x = concatenate([left, right], axis=3, name=s_id + 'concat')

            return x

        # define the model of SqueezeNet
        input_image = Input(shape=(input_size, input_size, 3))

        x = Conv2D(64, (3, 3), strides=(2, 2), padding='valid', name='conv1')(input_image)
        x = Activation('relu', name='relu_conv1')(x)
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)

        x = fire_module(x, fire_id=2, squeeze=16, expand=64)
        x = fire_module(x, fire_id=3, squeeze=16, expand=64)
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)

        x = fire_module(x, fire_id=4, squeeze=32, expand=128)
        x = fire_module(x, fire_id=5, squeeze=32, expand=128)
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)

        x = fire_module(x, fire_id=6, squeeze=48, expand=192)
        x = fire_module(x, fire_id=7, squeeze=48, expand=192)
        x = fire_module(x, fire_id=8, squeeze=64, expand=256)
        x = fire_module(x, fire_id=9, squeeze=64, expand=256)

        self.feature_extractor = Model(input_image, x)  
        self.feature_extractor.load_weights(SQUEEZENET_BACKEND_PATH) 
Example 70
Project: RLDonkeycar   Author: downingbots   File: Keras.py    MIT License 4 votes
def default_imu(num_outputs, num_imu_inputs):
    '''
    Notes: this model depends on concatenate which failed on keras < 2.0.8
    '''

    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten, Cropping2D, Lambda
    from keras.layers.merge import concatenate
    
    img_in = Input(shape=(120,160,3), name='img_in')
    imu_in = Input(shape=(num_imu_inputs,), name="imu_in")
    
    x = img_in
    x = Cropping2D(cropping=((60,0), (0,0)))(x) #trim 60 pixels off top
    #x = Lambda(lambda x: x/127.5 - 1.)(x) # normalize and re-center
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)
    
    y = imu_in
    y = Dense(14, activation='relu')(y)
    y = Dense(14, activation='relu')(y)
    y = Dense(14, activation='relu')(y)
    
    z = concatenate([x, y])
    z = Dense(50, activation='relu')(z)
    z = Dropout(.1)(z)
    z = Dense(50, activation='relu')(z)
    z = Dropout(.1)(z)

    outputs = [] 
    
    for i in range(num_outputs):
        outputs.append(Dense(1, activation='linear', name='out_' + str(i))(z))
        
    model = Model(inputs=[img_in, imu_in], outputs=outputs)
    
    model.compile(optimizer='adam',
                  loss='mse')
    
    return model 
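A minimal usage sketch (not from the project; the batch size, IMU width, and the steering/throttle reading of the two outputs are illustrative):

import numpy as np

model = default_imu(num_outputs=2, num_imu_inputs=7)
imgs = np.zeros((8, 120, 160, 3), dtype=np.float32)   # toy camera frames
imus = np.zeros((8, 7), dtype=np.float32)             # toy IMU vectors
steer, throttle = model.predict([imgs, imus])         # each of shape (8, 1)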
Example 71
Project: brats2017   Author: QTIM-Lab   File: model.py    MIT License 4 votes
def n_net_3d(input_shape, output_shape, initial_convolutions_num=3, downsize_filters_factor=1, pool_size=(2, 2, 2), initial_learning_rate=0.00001, dropout=.25, filter_shape=(3,3,3), num_outputs=1, deconvolution=True, regression=True):

    # Convenience variables.
    # For now, we assume that the modalities are ordered by nesting priority.
    output_modalities = output_shape[0]

    # Original input
    inputs = Input(input_shape)

    # Change the space of the input data into something a bit more generalized using consecutive convolutions.
    initial_conv = Conv3D(int(8/downsize_filters_factor), filter_shape, activation='relu', padding='same', data_format='channels_first')(inputs)
    initial_conv = BatchNormalization()(initial_conv)
    if initial_convolutions_num > 1:
        for conv_num in range(initial_convolutions_num-1):

            initial_conv = Conv3D(int(8/downsize_filters_factor), filter_shape, activation='relu', padding='same', data_format='channels_first')(initial_conv)
            initial_conv = BatchNormalization()(initial_conv)

    # Cascading U-Nets
    input_list = [initial_conv] * output_modalities
    output_list = [None] * output_modalities
    for modality in range(output_modalities):

        for output in output_list:
            if output is not None:
                input_list[modality] = concatenate([input_list[modality], output], axis=1)

        print('\n')
        print('MODALITY', modality, 'INPUT LIST', input_list[modality])
        print('\n')

        output_list[modality] = u_net_3d(input_shape=input_shape, input_tensor=input_list[modality], downsize_filters_factor=downsize_filters_factor*4, pool_size=(2, 2, 2), initial_learning_rate=initial_learning_rate, dropout=dropout, filter_shape=(3,3,3), num_outputs=1, deconvolution=True, regression=True)

    # Concatenate results
    print(output_list)
    final_output = output_list[0]
    if len(output_list) > 1:
        for output in output_list[1:]:
            final_output = concatenate([final_output, output], axis=1)

    # Get cost
    if regression:
        act = Activation('relu')(final_output)
        model = Model(inputs=inputs, outputs=act)
        model.compile(optimizer=Adam(lr=initial_learning_rate), loss=msq_loss, metrics=[msq])
    else:
        act = Activation('sigmoid')(final_output)
        model = Model(inputs=inputs, outputs=act)
        model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coef_loss, metrics=[dice_coef])

    return model 
Example 72
Project: sicon   Author: aasensio   File: model_concat.py    MIT License 4 votes
def keepsize(nx, ny, nd, nq, noise, activation='relu', n_filters=32, l2_reg=1e-7):
    """ keepsize - Concatenate
    """

    def minires(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(int(n_filters), (3, 3), padding='valid', 
            kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = ELU(alpha=1.0)(x)
        x = ReflectionPadding2D()(x)
        x = Conv2D(n_filters, (3, 3), padding='valid', 
            kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        return x

    def myblock(inputs, n_filters):
        x = ReflectionPadding2D()(inputs)
        x = Conv2D(n_filters, (3, 3), padding='valid', 
            kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        xo = ELU(alpha=1.0)(x)
        x = ReflectionPadding2D()(xo)
        x = Conv2D(n_filters, (3, 3), padding='valid', 
            kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg))(x)
        x = ELU(alpha=1.0)(x)
        x = add([x, xo])
        return x

    inputs = Input(shape=(nx, ny, nd)) # depth goes last in TensorFlow
    x = GaussianNoise(noise)(inputs)

# mode: concatenate
    x1 = myblock(x, n_filters)
    x1 = minires(x1, int(nq/7))

    x2 = myblock(x, n_filters)
    x2 = minires(x2, int(nq/7))

    x3 = myblock(x, n_filters)
    x3 = minires(x3, int(nq/7))

    x4 = myblock(x, n_filters)
    x4 = minires(x4, int(nq/7))

    x5 = myblock(x, n_filters)
    x5 = minires(x5, int(nq/7))

    x6 = myblock(x, n_filters)
    x6 = minires(x6, int(nq/7))

    x7 = myblock(x, n_filters)
    x7 = minires(x7, int(nq/7))

    final = concatenate([x1, x2, x3, x4, x5, x6, x7])

    return Model(inputs=inputs, outputs=final) 
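A width note (from the code above): each of the seven branches ends with int(nq/7) filters, so the concatenated output has 7 * (nq // 7) channels, which equals nq only when nq is a multiple of 7.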
Example 73
Project: wmh_ibbmTum   Author: hongweilibran   File: train_leave_one_out.py    GNU General Public License v3.0 4 votes
def get_unet(img_shape = None, first5=True):
        inputs = Input(shape = img_shape)
        concat_axis = -1

        if first5: filters = 5
        else: filters = 3
        conv1 = conv_bn_relu(64, filters, inputs)
        conv1 = conv_bn_relu(64, filters, conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = conv_bn_relu(96, 3, pool1)
        conv2 = conv_bn_relu(96, 3, conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = conv_bn_relu(128, 3, pool2)
        conv3 = conv_bn_relu(128, 3, conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = conv_bn_relu(256, 3, pool3)
        conv4 = conv_bn_relu(256, 4, conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = conv_bn_relu(512, 3, pool4)
        conv5 = conv_bn_relu(512, 3, conv5)

        up_conv5 = UpSampling2D(size=(2, 2))(conv5)
        ch, cw = get_crop_shape(conv4, up_conv5)
        crop_conv4 = Cropping2D(cropping=(ch,cw))(conv4)
        up6 = concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = conv_bn_relu(256, 3, up6)
        conv6 = conv_bn_relu(256, 3, conv6)

        up_conv6 = UpSampling2D(size=(2, 2))(conv6)
        ch, cw = get_crop_shape(conv3, up_conv6)
        crop_conv3 = Cropping2D(cropping=(ch,cw))(conv3)
        up7 = concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = conv_bn_relu(128, 3, up7)
        conv7 = conv_bn_relu(128, 3, conv7)

        up_conv7 = UpSampling2D(size=(2, 2))(conv7)
        ch, cw = get_crop_shape(conv2, up_conv7)
        crop_conv2 = Cropping2D(cropping=(ch,cw))(conv2)
        up8 = concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = conv_bn_relu(96, 3, up8)
        conv8 = conv_bn_relu(96, 3, conv8)

        up_conv8 = UpSampling2D(size=(2, 2))(conv8)
        ch, cw = get_crop_shape(conv1, up_conv8)
        crop_conv1 = Cropping2D(cropping=(ch,cw))(conv1)
        up9 = concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = conv_bn_relu(64, 3, up9)
        conv9 = conv_bn_relu(64, 3, conv9)

        ch, cw = get_crop_shape(inputs, conv9)
        conv9 = ZeroPadding2D(padding=(ch, cw))(conv9)
        conv10 = Conv2D(1, 1, activation='sigmoid', padding='same')(conv9) #, kernel_initializer='he_normal'
        model = Model(inputs=inputs, outputs=conv10)
        model.compile(optimizer=Adam(lr=(2e-4)), loss=dice_coef_loss)

        return model 
Example 74
Project: wmh_ibbmTum   Author: hongweilibran   File: test_leave_one_out.py    GNU General Public License v3.0 4 votes
def get_unet(img_shape = None, first5=True):
        inputs = Input(shape = img_shape)
        concat_axis = -1

        if first5: filters = 5
        else: filters = 3
        conv1 = conv_bn_relu(64, filters, inputs)
        conv1 = conv_bn_relu(64, filters, conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = conv_bn_relu(96, 3, pool1)
        conv2 = conv_bn_relu(96, 3, conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = conv_bn_relu(128, 3, pool2)
        conv3 = conv_bn_relu(128, 3, conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = conv_bn_relu(256, 3, pool3)
        conv4 = conv_bn_relu(256, 4, conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = conv_bn_relu(512, 3, pool4)
        conv5 = conv_bn_relu(512, 3, conv5)

        up_conv5 = UpSampling2D(size=(2, 2))(conv5)
        ch, cw = get_crop_shape(conv4, up_conv5)
        crop_conv4 = Cropping2D(cropping=(ch,cw))(conv4)
        up6 = concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = conv_bn_relu(256, 3, up6)
        conv6 = conv_bn_relu(256, 3, conv6)

        up_conv6 = UpSampling2D(size=(2, 2))(conv6)
        ch, cw = get_crop_shape(conv3, up_conv6)
        crop_conv3 = Cropping2D(cropping=(ch,cw))(conv3)
        up7 = concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = conv_bn_relu(128, 3, up7)
        conv7 = conv_bn_relu(128, 3, conv7)

        up_conv7 = UpSampling2D(size=(2, 2))(conv7)
        ch, cw = get_crop_shape(conv2, up_conv7)
        crop_conv2 = Cropping2D(cropping=(ch,cw))(conv2)
        up8 = concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = conv_bn_relu(96, 3, up8)
        conv8 = conv_bn_relu(96, 3, conv8)

        up_conv8 = UpSampling2D(size=(2, 2))(conv8)
        ch, cw = get_crop_shape(conv1, up_conv8)
        crop_conv1 = Cropping2D(cropping=(ch,cw))(conv1)
        up9 = concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = conv_bn_relu(64, 3, up9)
        conv9 = conv_bn_relu(64, 3, conv9)

        ch, cw = get_crop_shape(inputs, conv9)
        conv9 = ZeroPadding2D(padding=(ch, cw))(conv9)
        conv10 = Conv2D(1, 1, activation='sigmoid', padding='same')(conv9) #, kernel_initializer='he_normal'
        model = Model(inputs=inputs, outputs=conv10)
        model.compile(optimizer=Adam(lr=(2e-4)), loss=dice_coef_loss)

        return model 
Example 75
Project: wmh_ibbmTum   Author: hongweilibran   File: test_leave_one_out.py    GNU General Public License v3.0 4 votes
def GE3T_preprocessing(FLAIR_image, T1_image):

    channel_num = 2
    start_cut = 46
    num_selected_slice = np.shape(FLAIR_image)[0]
    image_rows_Dataset = np.shape(FLAIR_image)[1]
    image_cols_Dataset = np.shape(FLAIR_image)[2]
    FLAIR_image = np.float32(FLAIR_image)
    T1_image = np.float32(T1_image)

    brain_mask_FLAIR = np.ndarray((np.shape(FLAIR_image)[0],image_rows_Dataset, image_cols_Dataset), dtype=np.float32)
    brain_mask_T1 = np.ndarray((np.shape(FLAIR_image)[0],image_rows_Dataset, image_cols_Dataset), dtype=np.float32)
    imgs_two_channels = np.ndarray((num_selected_slice, rows_standard, cols_standard, channel_num), dtype=np.float32)
    imgs_mask_two_channels = np.ndarray((num_selected_slice, rows_standard, cols_standard,1), dtype=np.float32)
    FLAIR_image_suitable = np.ndarray((num_selected_slice, rows_standard, cols_standard), dtype=np.float32)
    T1_image_suitable = np.ndarray((num_selected_slice, rows_standard, cols_standard), dtype=np.float32)

    # FLAIR --------------------------------------------
    brain_mask_FLAIR[FLAIR_image >=thresh_FLAIR] = 1
    brain_mask_FLAIR[FLAIR_image < thresh_FLAIR] = 0
    for iii in range(np.shape(FLAIR_image)[0]):
  
        brain_mask_FLAIR[iii,:,:] = scipy.ndimage.morphology.binary_fill_holes(brain_mask_FLAIR[iii,:,:])  #fill the holes inside brain
        #------Gaussian normalization
    FLAIR_image -= np.mean(FLAIR_image[brain_mask_FLAIR == 1])      # Gaussian normalization
    FLAIR_image /= np.std(FLAIR_image[brain_mask_FLAIR == 1])

    FLAIR_image_suitable[...] = np.min(FLAIR_image)
    FLAIR_image_suitable[:, :, (cols_standard//2-image_cols_Dataset//2):(cols_standard//2+image_cols_Dataset//2)] = FLAIR_image[:, start_cut:start_cut+rows_standard, :]
   
    # T1 -----------------------------------------------
    brain_mask_T1[T1_image >=thresh_T1] = 1
    brain_mask_T1[T1_image < thresh_T1] = 0
    for iii in range(np.shape(T1_image)[0]):
 
        brain_mask_T1[iii,:,:] = scipy.ndimage.morphology.binary_fill_holes(brain_mask_T1[iii,:,:])  #fill the holes inside brain
        #------Gaussian normalization
    T1_image -= np.mean(T1_image[brain_mask_T1 == 1])      # Gaussian normalization
    T1_image /= np.std(T1_image[brain_mask_T1 == 1])

    T1_image_suitable[...] = np.min(T1_image)
    T1_image_suitable[:, :, (cols_standard-image_cols_Dataset)//2:(cols_standard+image_cols_Dataset)//2] = T1_image[:, start_cut:start_cut+rows_standard, :]
    #---------------------------------------------------
    FLAIR_image_suitable  = FLAIR_image_suitable[..., np.newaxis]
    T1_image_suitable  = T1_image_suitable[..., np.newaxis]
    
    imgs_two_channels = np.concatenate((FLAIR_image_suitable, T1_image_suitable), axis = 3)
    #print(np.shape(imgs_two_channels))
    return imgs_two_channels 
Example 76
Project: 3D-GAN-for-MRI   Author: joellliu   File: generator.py    MIT License 4 votes
def UNetGenerator(input_dim, output_channels, depth=4, nb_base_filters=32, batch_normalization=True,
                  deconvolution=True, pool_size=(2, 2, 2), activation_name='relu'):

    # depth: the depth of the U-shape structure
    # nb_base_filters: The number of filters that the first layer in the convolution network will have. Following
    # layers will contain a multiple of this number.

    # -------------------
    # build network
    # -------------------

    inputs = Input(batch_shape=input_dim)
    current_layer = inputs
    levels = list()

    # contracting path
    # for each level: Convlayer1 -> Convlayer2(double channels) -> maxpool(halve resolution)
    # for the deepest level: Convlayer1 -> Convlayer2
    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=nb_base_filters*(2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=nb_base_filters*(2**layer_depth)*2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # expanding path
    # add levels with up-convolution or up-sampling
    # Upconv -> Convlayer1 -> Convlayer2
    for layer_depth in range(depth-2, -1, -1):
        up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,
                                            n_filters=current_layer._keras_shape[4])(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=4)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[4],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[4],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    final_convolution = Conv3D(output_channels, (1, 1, 1))(current_layer)
    act = Activation(activation_name)(final_convolution)
    unet_generator = Model(inputs=inputs, outputs=act)
    return unet_generator 
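A minimal instantiation sketch (the 5D channels-last batch shape and the tanh output are illustrative assumptions; create_convolution_block and get_up_convolution are the project's helpers):

gen = UNetGenerator(input_dim=(1, 64, 64, 64, 1), output_channels=1,
                    depth=4, nb_base_filters=32, activation_name='tanh')
gen.summary()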
Example 77
Project: nlp_research   Author: zhufz   File: esim.py    MIT License 4 votes
def __call__(self, x_query, x_sample, reuse = tf.AUTO_REUSE, **kwargs):
        #embedding_sequence_q1 = BatchNormalization(axis=2)(x_query)
        #embedding_sequence_q2 = BatchNormalization(axis=2)(x_sample)
        #final_embedding_sequence_q1 = SpatialDropout1D(0.25)(embedding_sequence_q1)
        #final_embedding_sequence_q2 = SpatialDropout1D(0.25)(embedding_sequence_q2)

        #################### input encoding #######################
        # bidirectionally encode the query and the sample separately
        rnn_layer_q1 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(x_query)
        rnn_layer_q2 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(x_sample)
        #rnn_layer_q1 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(final_embedding_sequence_q1)
        #rnn_layer_q2 = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(final_embedding_sequence_q2)

        ############## local inference modeling ###################
        # compute dot-product attention between the two encodings
        attention = Dot(axes=-1)([rnn_layer_q1, rnn_layer_q2])
        # compute the attention-aligned representations of query and sample
        w_attn_1 = Lambda(lambda x: softmax(x, axis=1))(attention)
        w_attn_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2))(attention))
        align_layer_1 = Dot(axes=1)([w_attn_1, rnn_layer_q1])
        align_layer_2 = Dot(axes=1)([w_attn_2, rnn_layer_q2])

        ############# inference composition #######################

        subtract_layer_1 = subtract([rnn_layer_q1, align_layer_1])
        subtract_layer_2 = subtract([rnn_layer_q2, align_layer_2])

        multiply_layer_1 = multiply([rnn_layer_q1, align_layer_1])
        multiply_layer_2 = multiply([rnn_layer_q2, align_layer_2])

        m_q1 = concatenate([rnn_layer_q1, align_layer_1, subtract_layer_1, multiply_layer_1])
        m_q2 = concatenate([rnn_layer_q2, align_layer_2, subtract_layer_2, multiply_layer_2])

        ############### encoding + pooling #######################
        v_q1_i = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(m_q1)
        v_q2_i = Bidirectional(LSTM(self.recurrent_units, return_sequences=True))(m_q2)

        avgpool_q1 = GlobalAveragePooling1D()(v_q1_i)
        avgpool_q2 = GlobalAveragePooling1D()(v_q2_i)
        maxpool_q1 = GlobalMaxPooling1D()(v_q1_i)
        maxpool_q2 = GlobalMaxPooling1D()(v_q2_i)

        merged_q1 = concatenate([avgpool_q1, maxpool_q1])
        merged_q2 = concatenate([avgpool_q2, maxpool_q2])

        final_v = BatchNormalization()(concatenate([merged_q1, merged_q2]))
        #output = Dense(units=self.dense_units, activation='relu')(final_v)
        output = Dense(units=self.num_output, activation=None)(final_v)
        #output = BatchNormalization()(output)
        #output = Dropout(self.dropout_rate)(output)
        #output = tf.nn.dropout(output, self.keep_prob)
        # the high-level tf.layers.dropout API and Keras Dropout both take a drop rate;
        # tf.nn.dropout takes keep_prob instead
        #output = Dense(units=self.num_output, activation='sigmoid')(output)
        #output = Dense(units=self.num_output, activation=None)(output)
        #output = tf.squeeze(output, -1)
        return output 
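A dimension check (illustrative, with recurrent_units = 300): each BiLSTM emits (batch, len, 600); m_q1/m_q2 concatenate the encoding, its aligned counterpart, their difference, and their product, giving (batch, len, 2400); after the second BiLSTM plus average- and max-pooling, each side contributes 1200 features, so final_v has 2400 dimensions.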
Example 78
Project: Semantic-Relations-Classifier   Author: y95847frank   File: CNN.py    MIT License 4 votes
def build_model(self):
        # maxsen_len = 100, max_pos = 52 (30+22), pos_dim = 50
        
        #input1 is for position1 embedding layer
        input1 = Input(shape = (self.options['maxsen_len'], ))
        x = Embedding(self.options['max_pos'], self.options['pos_dim'], input_length = self.options['maxsen_len'])(input1)
        
        #input2 is for position2 embedding layer
        input2 = Input(shape = (self.options['maxsen_len'], ))
        y = Embedding(self.options['max_pos'], self.options['pos_dim'], input_length = self.options['maxsen_len'])(input2)

        #input3 is for word embedding layer
        input3 = Input(shape = (self.options['maxsen_len'], ))
        z = Embedding(self.options['vocab_size'], self.options['emb_dim'], input_length = self.options['maxsen_len'], weights = [self.options['embedding']], trainable=False)(input3)
        

        #input4 = Input(shape = (1, ))
        #a = Embedding(self.options['POS_size'], self.options['emb_dim'], input_length = 1)(input4)

        #input5 = Input(shape = (1, ))
        #b = Embedding(self.options['POS_size'], self.options['emb_dim'], input_length = 1)(input5)
        
        input6 = Input(shape = (8, ))
        c = Embedding(self.options['vocab_size'], self.options['emb_dim'], input_length = 8, weights = [self.options['embedding']], trainable=False)(input6)

        merge = concatenate([x, y, z])

        submodel = Conv1D(self.options['num_filter'], 2, padding='valid', input_shape=(self.options['maxsen_len'], self.options['emb_dim'] + self.options['pos_dim'] * 2 ))(merge)
        
        submodel = GlobalMaxPooling1D()(submodel)

        submodel = Reshape((1, 500))(submodel)

        submodel = Dense(300, activation='tanh')(submodel)
        
        final_merge = concatenate([c, submodel], axis = 1)

        final_merge = Flatten()(final_merge)
        
        #final_merge = Dropout(0.25)(final_merge)
        
        #output = Dense(128, activation='relu')(final_merge)
        output = Dense(self.options['n_class'], activation='softmax')(final_merge)


        return Model(inputs=[input1, input2, input3, input6], outputs = output) 
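A hypothetical options dict consistent with the hard-coded Reshape((1, 500)) step, which requires num_filter == 500 (all values are illustrative):

options = {'maxsen_len': 100, 'max_pos': 52, 'pos_dim': 50,
           'vocab_size': 20000, 'emb_dim': 300, 'num_filter': 500,
           'n_class': 19, 'embedding': embedding_matrix}   # embedding_matrix: (20000, 300) array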
Example 79
Project: Urban3d   Author: topcoderinc   File: a02_zf_unet_model.py    MIT License 4 votes
def ZF_Seg_Inception_ResNet_v2_288x288():
    from keras.models import Model
    from keras.layers.merge import concatenate
    from keras.layers.convolutional import UpSampling2D
    from keras.layers import Conv2D
    from keras import backend as K

    if K.image_dim_ordering() == 'th':
        inputs = (3, 288, 288)
        axis = 1
    else:
        inputs = (288, 288, 3)
        axis = -1

    base_model = InceptionResNetV2(include_top=False, input_shape=inputs, weights='imagenet')

    if 0:
        conv1 = base_model.get_layer('activation_3').output
        conv2 = base_model.get_layer('activation_5').output
        conv3 = base_model.get_layer('block35_10_ac').output
        conv4 = base_model.get_layer('block17_20_ac').output
        conv5 = base_model.get_layer('conv_7b_ac').output
        for i in range(len(base_model.layers)):
            print(i, base_model.layers[i].name)
        exit()
    else:
        conv1 = base_model.layers[9].output
        conv2 = base_model.layers[16].output
        conv3 = base_model.layers[260].output
        conv4 = base_model.layers[594].output
        conv5 = base_model.layers[779].output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=axis)
    conv6 = multi_conv_layer(up6, 2, 256, 0.0, True)

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=axis)
    conv7 = multi_conv_layer(up7, 2, 256, 0.0, True)

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=axis)
    conv8 = multi_conv_layer(up8, 2, 128, 0.0, True)

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=axis)
    conv9 = multi_conv_layer(up9, 2, 64, 0.0, True)

    up10 = concatenate([UpSampling2D()(conv9), base_model.input], axis=axis)
    conv10 = multi_conv_layer(up10, 2, 48, 0.2, True)

    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(base_model.input, x)

    return model 
Example 80
Project: pc2pix   Author: roatienza   File: ptcloud_stacked_ae.py    MIT License 4 votes
def build_encoder(self, filters=64, activation='linear'):

        self.inputs = Input(shape=self.input_shape, name='encoder_input')
        x = self.inputs
        y = self.inputs
        strides = 2
        maxpool = True
        x1 = self.encoder_layer(x, filters, strides=1, dilation_rate=1)
        x2 = self.encoder_layer(x, filters, strides=1, dilation_rate=2)
        x4 = self.encoder_layer(x, filters, strides=1, dilation_rate=4)
        x8 = self.encoder_layer(x, filters, strides=1, dilation_rate=8)
        x = concatenate([x1, x2, x4, x8])
        x, y = self.compression_layer(x, y, maxpool=False)

        x = self.encoder_layer(x, 128, strides=2, dilation_rate=1)

        x1 = self.encoder_layer(x, filters, strides=1, dilation_rate=1)
        x2 = self.encoder_layer(x, filters, strides=1, dilation_rate=2)
        x4 = self.encoder_layer(x, filters, strides=1, dilation_rate=4)
        x8 = self.encoder_layer(x, filters, strides=1, dilation_rate=8)
        x = concatenate([x1, x2, x4, x8])
        x, y = self.compression_layer(x, y, maxpool=True)

        x = self.encoder_layer(x, 128, strides=2, dilation_rate=1)

        x1 = self.encoder_layer(x, filters, strides=1, dilation_rate=1)
        x2 = self.encoder_layer(x, filters, strides=1, dilation_rate=2)
        x4 = self.encoder_layer(x, filters, strides=1, dilation_rate=4)
        x8 = self.encoder_layer(x, filters, strides=1, dilation_rate=8)
        x = concatenate([x1, x2, x4, x8])
        x, y = self.compression_layer(x, y, maxpool=True)

        x = self.encoder_layer(x, 128, strides=2, dilation_rate=1)

        x1 = self.encoder_layer(x, filters, strides=1, dilation_rate=1)
        x2 = self.encoder_layer(x, filters, strides=1, dilation_rate=2)
        x4 = self.encoder_layer(x, filters, strides=1, dilation_rate=4)
        x8 = self.encoder_layer(x, filters, strides=1, dilation_rate=8)
        x = concatenate([x1, x2, x4, x8])
        x, y = self.compression_layer(x, y, maxpool=True)

        x = self.encoder_layer(x, 32)
        shape = K.int_shape(x)

        x = Flatten()(x)
        # x = Dense(128, activation='relu')(x)
        # experimental tanh activation, revert to none or linear if needed
        outputs = Dense(self.latent_dim, activation=activation, name='ae_encoder_out')(x)
        path = os.path.join(self.model_dir, "ae_encoder.png")
        self.encoder = Model(self.inputs, outputs, name='ae_encoder')

        self.encoder.summary()
        plot_model(self.encoder, to_file=path, show_shapes=True)

        return shape, filters