Python keras.layers.merge.Add() Examples

The following code examples show how to use keras.layers.merge.Add(). They are drawn from open source Python projects.

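Before the project examples, here is a minimal, self-contained sketch of the layer's behavior: Add() takes a list of tensors with identical shapes and returns their element-wise sum, which is the usual building block for a residual (skip) connection. The model below is illustrative only and does not come from any of the projects that follow.

from keras.layers import Activation, Dense, Input
from keras.layers.merge import Add
from keras.models import Model

# Illustrative residual connection: sum the block input with the block output.
inputs = Input(shape=(16,))
branch = Dense(16, activation="relu")(inputs)
branch = Dense(16, activation=None)(branch)
x = Add()([inputs, branch])      # element-wise sum; all input shapes must match
outputs = Activation("relu")(x)

model = Model(inputs, outputs)
model.summary()

Every example below follows the same pattern: build one or more convolutional branches, then merge them with the shortcut via Add().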
Example 1
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: srinets.py    Apache License 2.0
def spatial_residual_inception(inputs, base_filters=256):
    x_short = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None)(inputs)
    x_short = Activation("relu")(x_short)

    x_conv1x1 = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None)(x_short)

    x_conv3x3 = Conv2D(base_filters, (1, 1), use_bias=False, activation=None)(x_short)
    x_conv3x3 = Conv2D(base_filters + 32, (1, 3), padding="same", use_bias=False, activation=None)(x_conv3x3)
    x_conv3x3 = Conv2D(base_filters + 64, (3, 1), padding="same", use_bias=False, activation=None)(x_conv3x3)

    x_conv7x7 = Conv2D(base_filters, (1, 1), use_bias=False, activation=None)(x_short)
    x_conv7x7 = Conv2D(base_filters + 32, (1, 7), padding="same", use_bias=False, activation=None)(x_conv7x7)
    x_conv7x7 = Conv2D(base_filters + 64, (7, 1), padding="same", use_bias=False, activation=None)(x_conv7x7)

    x_conv = Concatenate()([x_conv1x1, x_conv3x3, x_conv7x7])
    x_conv = Conv2D(base_filters+64, (1, 1), use_bias=False, activation=None)(x_conv)

    x = Add()([x_short, x_conv])
    return Activation("relu")(x) 
Example 2
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: unets.py    Apache License 2.0
def convolutional_residual_block(inputs, n_filters, weight_decay=1e-4, kernel_initializer="he_normal", bn_epsilon=1e-3, bn_momentum=0.99):
    x = conv_bn_act_block(inputs, n_filters, weight_decay, kernel_initializer, bn_epsilon, bn_momentum)
    x = conv_bn_act_block(x, n_filters, weight_decay, kernel_initializer, bn_epsilon, bn_momentum)
    x = Conv2D(n_filters, kernel_size=(3, 3), padding="same", activation=None, use_bias=False,
               kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
    x = Add()([inputs, x])
    _x = x
    x = conv_bn_act_block(_x, n_filters, weight_decay, kernel_initializer, bn_epsilon, bn_momentum)
    x = conv_bn_act_block(x, n_filters, weight_decay, kernel_initializer, bn_epsilon, bn_momentum)
    x = Conv2D(n_filters, kernel_size=(3, 3), padding="same", activation=None, use_bias=False,
               kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
    x = Add()([_x, x])
    x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)
    x = Activation("relu")

    return x 
Example 3
Project: alphazero   Author: witchu   File: keras_model.py    Apache License 2.0
def _build_residual_block(args, x):
    cnn_filter_num = args['cnn_filter_num']
    cnn_filter_size = args['cnn_filter_size']
    l2_reg = args['l2_reg']
    
    in_x = x
    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_filter_size, padding="same",
                data_format="channels_first", kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_filter_size, padding="same",
                data_format="channels_first", kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Add()([in_x, x])
    x = Activation("relu")(x)
    return x 
Example 4
Project: neural-tweet-search   Author: jinfengr   File: attention_model.py    Apache License 2.0
def add_conv_layer(input_list, layer_name, nb_filters, kernel_size, padding, dropout_rate=0.1,
                   activation='relu', strides=1, attention_level=0, conv_option="normal", prev_conv_tensors=None):
    conv_layer = Convolution1D(filters=nb_filters, kernel_size=kernel_size, padding=padding,
                               activation=activation, strides=strides, name=layer_name)
    max_pooling_layer = GlobalMaxPooling1D()
    dropout_layer = Dropout(dropout_rate)
    output_list, conv_output_list = [], []
    for i in range(len(input_list)):
        input = input_list[i]
        conv_tensor = conv_layer(input)
        if conv_option == "ResNet":
            conv_tensor = Add()([conv_tensor, prev_conv_tensors[i][-1]])
        dropout_tensor = dropout_layer(conv_tensor)
        #conv_pooling_tensor = max_pooling_layer(conv_tensor)
        output_list.append(dropout_tensor)
        #conv_output_list.append(conv_pooling_tensor)
        conv_output_list.append(conv_tensor)
    return output_list, conv_output_list 
Example 5
Project: cyclegan_keras   Author: alecGraves   File: models.py    The Unlicense
def conv_block(x0, scale):
    x = Conv2D(int(64*scale), (1, 1))(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(64*scale), (3, 3), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(256*scale), (1, 1))(x)
    x = InstanceNormalization()(x)

    x1 = Conv2D(int(256*scale), (1, 1))(x0)
    x1 = InstanceNormalization()(x1)

    x = Add()([x, x1])
    x = LeakyReLU()(x)
    return x 
Example 6
Project: PSPNet-Keras-tensorflow   Author: Vladkryvoruchko   File: layers_builder.py    MIT License
def residual_short(prev_layer, level, pad=1, lvl=1, sub_lvl=1, modify_stride=False):
    prev_layer = Activation('relu')(prev_layer)
    block_1 = residual_conv(prev_layer, level,
                            pad=pad, lvl=lvl, sub_lvl=sub_lvl,
                            modify_stride=modify_stride)

    block_2 = short_convolution_branch(prev_layer, level,
                                       lvl=lvl, sub_lvl=sub_lvl,
                                       modify_stride=modify_stride)
    added = Add()([block_1, block_2])
    return added 
Example 7
Project: PSPNet-Keras-tensorflow   Author: Vladkryvoruchko   File: layers_builder.py    MIT License
def residual_empty(prev_layer, level, pad=1, lvl=1, sub_lvl=1):
    prev_layer = Activation('relu')(prev_layer)

    block_1 = residual_conv(prev_layer, level, pad=pad,
                            lvl=lvl, sub_lvl=sub_lvl)
    block_2 = empty_branch(prev_layer)
    added = Add()([block_1, block_2])
    return added 
Example 8
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: srinets.py    Apache License 2.0
def spatial_residual_inception_v2(inputs, base_filters=192, weight_decay=1e-4, kernel_initializer="he_normal"):
    x_short = Activation("relu")(inputs)
    x_short = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None,
                     kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)

    # 1x1
    x_conv1x1 = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None,
                       kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)

    x_conv3x3_1 = Conv2D(base_filters + 32, (1, 3), padding="same", use_bias=False, activation=None, dilation_rate=1,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)
    x_conv3x3_1 = Conv2D(base_filters + 64, (3, 1), padding="same", use_bias=False, activation=None, dilation_rate=1,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_conv3x3_1)

    x_conv3x3_5 = Conv2D(base_filters + 32, (1, 3), padding="same", use_bias=False, activation=None, dilation_rate=2,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)
    x_conv3x3_5 = Conv2D(base_filters + 64, (3, 1), padding="same", use_bias=False, activation=None, dilation_rate=2,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_conv3x3_5)

    x_conv5x5_1 = Conv2D(base_filters + 32, (1, 3), padding="same", use_bias=False, activation=None, dilation_rate=5,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)
    x_conv5x5_1 = Conv2D(base_filters + 64, (3, 1), padding="same", use_bias=False, activation=None, dilation_rate=5,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_conv5x5_1)

    x_conv5x5_5 = Conv2D(base_filters + 32, (1, 3), padding="same", use_bias=False, activation=None, dilation_rate=7,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)
    x_conv5x5_5 = Conv2D(base_filters + 64, (3, 1), padding="same", use_bias=False, activation=None, dilation_rate=7,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_conv5x5_5)

    x_conv = Concatenate()([x_conv1x1, x_conv3x3_1, x_conv3x3_5, x_conv5x5_1, x_conv5x5_5])

    x_short = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None,
                     kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)
    x_conv = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None,
                    kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_conv)

    x = Add()([x_short, x_conv])
    return Activation("relu")(x) 
Example 9
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: refinenets.py    Apache License 2.0
def residual_conv_unit(inputs,
                       n_filters=256,
                       kernel_size=3,
                       weight_decay=1e-4,
                       kernel_initializer="he_normal",
                       bn_epsilon=1e-3,
                       bn_momentum=0.99):
    """ residual convolutional unit.
    :param inputs: 4-D tensor, shape of (batch_size, height, width, channel).
    :param n_filters: int, number of filters, default 256.
    :param kernel_size: int, default 3.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: 4-D tensor, shape of (batch_size, height, width, channel).
    """
    x = Activation("relu")(inputs)
    x = Conv2D(n_filters, (kernel_size, kernel_size), padding="same", activation=None, use_bias=False,
               kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
    x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)
    x = Activation("relu")(x)
    x = Conv2D(n_filters, (kernel_size, kernel_size), padding="same", activation=None, use_bias=False,
               kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
    x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)
    x = Add()([x, inputs])

    return x 
Example 10
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: refinenets.py    Apache License 2.0
def multi_resolution_fusion(high_inputs=None,
                            low_inputs=None,
                            n_filters=256,
                            weight_decay=1e-4,
                            kernel_initializer="he_normal",
                            bn_epsilon=1e-3,
                            bn_momentum=0.99):
    """ fuse multi resolution features.
    :param high_inputs: 4-D tensor,  shape of (batch_size, height, width, channel),
        features with high spatial resolutions.
    :param low_inputs: 4-D tensor,  shape of (batch_size, height, width, channel),
        features with low spatial resolutions.
    :param n_filters: int, number of filters, default 256.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: 4-D tensor, shape of (batch_size, height, width, channel).
    """
    if high_inputs is None:
        fuse = Conv2D(n_filters, (3, 3), padding="same", activation=None, use_bias=False,
                      kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(low_inputs)
        fuse = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(fuse)
    else:
        conv_low = Conv2D(n_filters, (3, 3), padding="same", activation=None, use_bias=False,
                          kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(low_inputs)
        conv_low = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(conv_low)
        conv_high = Conv2D(n_filters, (3, 3), padding="same", activation=None, use_bias=False,
                           kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(high_inputs)
        conv_high = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(conv_high)
        conv_low = BilinearUpSampling(target_size=(int(conv_high.shape[1]), int(conv_high.shape[2])))(conv_low)
        fuse = Add()([conv_high, conv_low])

    return fuse 
Example 11
Project: deep_learning   Author: jarvisqi   File: layer_utils.py    MIT License
def res_block(input_tensor, filters, kernel_size=(3, 3), strides=(1, 1), use_dropout=False):
    """实例化Keras Resnet块。
    
    Arguments:
        input_tensor {[type]} -- 输入张量
        filters {[type]} -- filters
    
    Keyword Arguments:
        kernel_size {tuple} -- [description] (default: {(3,3)})
        strides {tuple} -- [description] (default: {(1,1)})
        use_dropout {bool} -- [description] (default: {False})
    """
    x = ReflectionPadding2D((1, 1))(input_tensor)
    x = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    if use_dropout:
        x=Dropout(0.5)(x)
    
    x = ReflectionPadding2D((1, 1))(x)
    x = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides)(x)
    x = BatchNormalization()(x)

    merged = Add()([input_tensor, x])
    return merged 
Example 12
Project: cyclegan_keras   Author: alecGraves   File: models.py    The Unlicense
def identity_block(x0, scale):
    x = Conv2D(int(64*scale), (1, 1))(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(64*scale), (3, 3), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(256*scale), (1, 1))(x)
    x = InstanceNormalization()(x)

    x = Add()([x, x0])
    x = LeakyReLU()(x)
    return x 
Example 13
Project: Capstone_fog_vision   Author: sambalshikhar   File: res_blocks.py    MIT License
def res_block(input, filters, kernel_size=(3,3), strides=(1,1), use_dropout=False):
    """
    Instanciate a Keras Resnet Block using sequential API.
    :param input: Input tensor
    :param filters: Number of filters to use
    :param kernel_size: Shape of the kernel for the convolution
    :param strides: Shape of the strides for the convolution
    :param use_dropout: Boolean value to determine the use of dropout
    :return: Keras Model
    """
    x = ReflectionPadding2D((1,1))(input)
    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides,)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    if use_dropout:
        x = Dropout(0.5)(x)

    x = ReflectionPadding2D((1,1))(x)
    x = Conv2D(filters=filters,
                kernel_size=kernel_size,
                strides=strides,)(x)
    x = BatchNormalization()(x)

    # Two convolution layers followed by a direct connection between input and output
    merged = Add()([input, x])
    return merged 
Example 14
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: fcns.py    Apache License 2.0
def FCN_16s(input_shape,
            n_class,
            encoder_name,
            encoder_weights=None,
            fc_num=4096,
            weight_decay=1e-4,
            kernel_initializer="he_normal",
            bn_epsilon=1e-3,
            bn_momentum=0.99,
            dropout=0.5):
    """ implementation of FCN-8s for semantic segmentation.
        ref: Long J, Shelhamer E, Darrell T. Fully Convolutional Networks for Semantic Segmentation[J].
            arXiv preprint arXiv:1411.4038, 2014.
    :param input_shape: tuple, i.e., (height, width, channel).
    :param n_class: int, number of class, must >= 2.
    :param encoder_name: string, name of encoder.
    :param encoder_weights: string, path of weights, default None.
    :param fc_num: int, number of filters of fully convolutions, default 4096.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.
    :param dropout: float, default 0.5.

    :return: a Keras Model instance.
     """
    # adapted from https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/voc-fcn16s/net.py
    encoder = build_encoder(input_shape=input_shape, encoder_name=encoder_name, encoder_weights=encoder_weights,
                                weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                                bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    p4 = encoder.get_layer(scope_table["pool4"]).output
    p5 = encoder.get_layer(scope_table["pool5"]).output

    # # # 1. merge pool5 & pool4
    # upsamples prediction from pool5
    x1 = Conv2D(fc_num, (7, 7), padding="same", activation="relu",
                kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(p5)
    x1 = Dropout(dropout)(x1)
    x1 = Conv2D(fc_num, (1, 1), padding="same", activation="relu",
                kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x1)
    x1 = Dropout(dropout)(x1)
    x1 = Conv2D(n_class, (1, 1), padding="same", activation=None,
                kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x1)
    x1 = Conv2DTranspose(n_class, (4,4), strides=(2,2), use_bias=False, activation=None,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x1)
    # upsamples from pool4
    x2 = Conv2D(n_class, (1,1), padding="same", activation=None,
                kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(p4)
    x1 = Add()([x1, x2])

    # 2. upsample and predict
    x1 = Conv2DTranspose(n_class, (32, 32), strides=(16, 16), use_bias=False, activation=None,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x1)
    output = Activation("softmax")(x1)

    fcn_16s_model = Model(encoder.input, output)
    return fcn_16s_model 
Example 15
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: xceptions.py    Apache License 2.0
def separable_residual_block(inputs,
                           n_filters_list=[256, 256, 256],
                           block_id="entry_block2",
                           skip_type="sum",
                           stride=1,
                           rate=1,
                           weight_decay=1e-4,
                           kernel_initializer="he_normal",
                           bn_epsilon=1e-3,
                           bn_momentum=0.99):
    """ separable residual block
    :param inputs: 4-D tensor, shape of (batch_size, height, width, channel).
    :param n_filters_list: list of int, numbers of filters in the separable convolutions, default [256, 256, 256].
    :param block_id: string, default "entry_block2".
    :param skip_type: string, one of {"sum", "conv", "none"}, default "sum".
    :param stride: int, default 1.
    :param rate: int, default 1.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: 4-D tensor, shape of (batch_size, height, width, channel).
    """
    x = Activation("relu", name=block_id+"sepconv1_act")(inputs)
    x = SeparableConv2D(n_filters_list[0], (3, 3), padding='same', use_bias=False,
                        name=block_id+'_sepconv1', dilation_rate=rate,
                        kernel_initializer=kernel_initializer, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name=block_id+'_sepconv1_bn', epsilon=bn_epsilon, momentum=bn_momentum)(x)

    x = Activation('relu', name=block_id+'_sepconv2_act')(x)
    x = SeparableConv2D(n_filters_list[1], (3, 3), padding='same', use_bias=False,
                        name=block_id+'_sepconv2', dilation_rate=rate,
                        kernel_initializer=kernel_initializer, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name=block_id+'_sepconv2_bn', epsilon=bn_epsilon, momentum=bn_momentum)(x)

    x = Activation("relu", name=block_id+"_sepconv3_act")(x)
    x = SeparableConv2D(n_filters_list[2], (3, 3), padding="same", use_bias=False,
                        strides=stride, name=block_id+"_sepconv3", dilation_rate=rate,
                        kernel_initializer=kernel_initializer, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(name=block_id+"_sepconv3_bn", epsilon=bn_epsilon, momentum=bn_momentum)(x)

    if skip_type=="sum":
        x = Add()([inputs, x])
    elif skip_type=="conv":
        shortcut = Conv2D(n_filters_list[2], (1, 1), strides=stride, padding='same', use_bias=False,
                          kernel_initializer=kernel_initializer, kernel_regularizer=l2(weight_decay))(inputs)
        shortcut = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(shortcut)
        x = Add()([shortcut, x])
    else:
        pass  # skip_type == "none": no skip connection, x is returned unchanged

    return x