Python keras.layers.merge.Add() Examples

The following are 30 code examples of keras.layers.merge.Add(), drawn from open-source projects. Each example notes its original project and source file above the code. You may also want to check out all available functions/classes of the module keras.layers.merge, or try the search function.
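Before the project examples, here is a minimal, self-contained sketch (not taken from any project below; names are illustrative) of what the layer does: Add() takes a list of tensors with identical shapes and returns their element-wise sum.

from keras.layers import Input, Dense
from keras.layers.merge import Add
from keras.models import Model

inp = Input((16,))
branch_a = Dense(8)(inp)                # first branch
branch_b = Dense(8)(inp)                # second branch, same output shape
merged = Add()([branch_a, branch_b])    # element-wise sum; shapes must match
model = Model(inputs=inp, outputs=merged)
model.summary()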
Example #1
Source File: keras_model.py    From alphazero with Apache License 2.0 (6 votes)
def _build_residual_block(args, x):
    cnn_filter_num = args['cnn_filter_num']
    cnn_filter_size = args['cnn_filter_size']
    l2_reg = args['l2_reg']
    
    in_x = x
    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_filter_size, padding="same",
                data_format="channels_first", kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_filter_size, padding="same",
                data_format="channels_first", kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Add()([in_x, x])
    x = Activation("relu")(x)
    return x 
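For context, a hypothetical usage sketch of the block above. The args keys are taken from the function body; the input shape and block count are assumptions, and the names used inside the block (Conv2D, BatchNormalization, Activation, Add, l2) must already be in scope. Note the input must already have cnn_filter_num channels so the Add() shortcut shapes match.

from keras.layers import Input
from keras.models import Model

args = {'cnn_filter_num': 64, 'cnn_filter_size': 3, 'l2_reg': 1e-4}
board_in = Input((64, 8, 8))      # channels_first, matching the block
x = board_in
for _ in range(3):                # stack a few residual blocks
    x = _build_residual_block(args, x)
tower = Model(board_in, x)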
Example #2
Source File: models.py    From cyclegan_keras with The Unlicense (6 votes)
def conv_block(x0, scale):
    x = Conv2D(int(64*scale), (1, 1))(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(64*scale), (3, 3), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(256*scale), (1, 1))(x)
    x = InstanceNormalization()(x)

    x1 = Conv2D(int(256*scale), (1, 1))(x0)
    x1 = InstanceNormalization()(x1)

    x = Add()([x, x1])
    x = LeakyReLU()(x)
    return x 
Example #3
Source File: attention_model.py    From neural-tweet-search with Apache License 2.0 (6 votes)
def add_conv_layer(input_list, layer_name, nb_filters, kernel_size, padding, dropout_rate=0.1,
                   activation='relu', strides=1, attention_level=0, conv_option="normal", prev_conv_tensors=None):
    conv_layer = Convolution1D(filters=nb_filters, kernel_size=kernel_size, padding=padding,
                               activation=activation, strides=strides, name=layer_name)
    max_pooling_layer = GlobalMaxPooling1D()
    dropout_layer = Dropout(dropout_rate)
    output_list, conv_output_list = [], []
    for i in range(len(input_list)):
        input = input_list[i]
        conv_tensor = conv_layer(input)
        if conv_option == "ResNet":
            conv_tensor = Add()([conv_tensor, prev_conv_tensors[i][-1]])
        dropout_tensor = dropout_layer(conv_tensor)
        #conv_pooling_tensor = max_pooling_layer(conv_tensor)
        output_list.append(dropout_tensor)
        #conv_output_list.append(conv_pooling_tensor)
        conv_output_list.append(conv_tensor)
    return output_list, conv_output_list 
Example #4
Source File: layers_builder.py    From PSPNet-Keras-tensorflow with MIT License (5 votes)
def residual_empty(prev_layer, level, pad=1, lvl=1, sub_lvl=1):
    prev_layer = Activation('relu')(prev_layer)

    block_1 = residual_conv(prev_layer, level, pad=pad,
                            lvl=lvl, sub_lvl=sub_lvl)
    block_2 = empty_branch(prev_layer)
    added = Add()([block_1, block_2])
    return added 
Example #5
Source File: resnet_helpers.py    From Keras-FCN with MIT License (5 votes)
def atrous_conv_block(kernel_size, filters, stage, block, weight_decay=0., strides=(1, 1), atrous_rate=(2, 2), batch_momentum=0.99):
    '''atrous_conv_block is the block that has a conv layer at the shortcut.
    # Arguments
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the nb_filters of the 3 conv layers at the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    '''
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = Conv2D(nb_filter1, (1, 1), strides=strides,
                          name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', dilation_rate=atrous_rate,
                          name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,
                                 name=conv_name_base + '1', kernel_regularizer=l2(weight_decay))(input_tensor)
        shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1', momentum=batch_momentum)(shortcut)

        x = Add()([x, shortcut])
        x = Activation('relu')(x)
        return x
    return f 
Example #6
Source File: resnet_helpers.py    From Keras-FCN with MIT License (5 votes)
def atrous_identity_block(kernel_size, filters, stage, block, weight_decay=0., atrous_rate=(2, 2), batch_momentum=0.99):
    '''The atrous_identity_block is the block that has no conv layer at the shortcut.
    # Arguments
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the nb_filters of the 3 conv layers at the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    '''
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter2, (kernel_size, kernel_size), dilation_rate=atrous_rate,
                          padding='same', name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        x = Add()([x, input_tensor])
        x = Activation('relu')(x)
        return x
    return f 
Example #7
Source File: resnet_helpers.py    From Keras-FCN with MIT License (5 votes)
def identity_block(kernel_size, filters, stage, block, weight_decay=0., batch_momentum=0.99):
    '''The identity_block is the block that has no conv layer at the shortcut.
    # Arguments
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the nb_filters of the 3 conv layers at the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    '''
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter2, (kernel_size, kernel_size),
                          padding='same', name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        x = Add()([x, input_tensor])
        x = Activation('relu')(x)
        return x
    return f 
Example #8
Source File: decoder.py    From enet-keras with MIT License (5 votes)
def build(inp, encoder, nc, valid_shapes):
    side = conv_block_side(inp)

    x = Lambda(
        interp,
        arguments={'shape': valid_shapes[3]},
        name='sub24_sum_interp')(encoder)

    main = ConvBN(
        filters=128,
        kernel_size=3,
        dilation_rate=2,
        padding='same',
        name='conv_sub2')(x)

    x = Add(name='sub12_sum')([main, side])
    x = Activation('relu')(x)

    x = Lambda(
        interp,
        arguments={'shape': valid_shapes[2]},
        name='sub12_sum_interp')(x)

    x = Conv2D(
        filters=nc,
        kernel_size=1,
        name='conv6_cls')(x)

    out = Lambda(
        interp,
        arguments={'shape': valid_shapes[0]},
        name='conv6_interp')(x)

    return out 
Example #9
Source File: MaskRCNN.py    From PyTorch-Luna16 with Apache License 2.0 (5 votes)
def compile(self, learning_rate, momentum):
    """Gets the model ready for training. Adds losses, regularization, and
    metrics. Then calls the Keras compile() function.
    """
    # Optimizer object
    optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,
                                     clipnorm=5.0)
    # Add Losses
    # First, clear previously set losses to avoid duplication
    self.keras_model._losses = []
    self.keras_model._per_input_losses = {}
    loss_names = ["rpn_class_loss", "rpn_bbox_loss",
                  "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
    for name in loss_names:
        layer = self.keras_model.get_layer(name)
        if layer.output in self.keras_model.losses:
            continue
        self.keras_model.add_loss(
            tf.reduce_mean(layer.output, keep_dims=True))

    # Add L2 Regularization
    # Skip gamma and beta weights of batch normalization layers.
    reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
                  for w in self.keras_model.trainable_weights
                  if 'gamma' not in w.name and 'beta' not in w.name]
    self.keras_model.add_loss(tf.add_n(reg_losses))

    # Compile
    self.keras_model.compile(optimizer=optimizer,
                             loss=[None] * len(self.keras_model.outputs))

    # Add metrics for losses
    for name in loss_names:
        if name in self.keras_model.metrics_names:
            continue
        layer = self.keras_model.get_layer(name)
        self.keras_model.metrics_names.append(name)
        self.keras_model.metrics_tensors.append(
            tf.reduce_mean(layer.output, keep_dims=True)) 
Example #10
Source File: MaskRCNN.py    From PyTorch-Luna16 with Apache License 2.0 (5 votes)
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: defualt 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter3, (1, 1), name=conv_name_base +
                  '2c', use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)

    shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(axis=3, name=bn_name_base + '1')(shortcut)

    x = Add()([x, shortcut])
    x = Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x 
Example #11
Source File: MaskRCNN.py    From PyTorch-Luna16 with Apache License 2.0 (5 votes)
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)

    x = Add()([x, input_tensor])
    x = Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x 
Example #12
Source File: model.py    From reversi-alpha-zero with MIT License (5 votes)
def _build_residual_block(self, x):
    mc = self.config.model
    in_x = x
    x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size, padding="same",
               data_format="channels_first", kernel_regularizer=l2(mc.l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size, padding="same",
               data_format="channels_first", kernel_regularizer=l2(mc.l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Add()([in_x, x])
    x = Activation("relu")(x)
    return x 
Example #13
Source File: models.py    From cyclegan_keras with The Unlicense (5 votes)
def identity_block(x0, scale):
    x = Conv2D(int(64*scale), (1, 1))(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(64*scale), (3, 3), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(256*scale), (1, 1))(x)
    x = InstanceNormalization()(x)

    x = Add()([x, x0])
    x = LeakyReLU()(x)
    return x 
Example #14
Source File: blocks.py    From CSBDeep with BSD 3-Clause "New" or "Revised" License (5 votes)
def resnet_block(n_filter, kernel_size=(3,3), pool=(1,1), n_conv_per_block=2,
                 batch_norm=False, kernel_initializer='he_normal', activation='relu'):

    n_conv_per_block >= 2 or _raise(ValueError('required: n_conv_per_block >= 2'))
    len(pool) == len(kernel_size) or _raise(ValueError('kernel and pool sizes must match.'))
    n_dim = len(kernel_size)
    n_dim in (2,3) or _raise(ValueError('resnet_block only 2d or 3d.'))

    conv_layer = Conv2D if n_dim == 2 else Conv3D
    conv_kwargs = dict (
        padding            = 'same',
        use_bias           = not batch_norm,
        kernel_initializer = kernel_initializer,
    )
    channel_axis = -1 if backend_channels_last() else 1

    def f(inp):
        x = conv_layer(n_filter, kernel_size, strides=pool, **conv_kwargs)(inp)
        if batch_norm:
            x = BatchNormalization(axis=channel_axis)(x)
        x = Activation(activation)(x)

        for _ in range(n_conv_per_block-2):
            x = conv_layer(n_filter, kernel_size, **conv_kwargs)(x)
            if batch_norm:
                x = BatchNormalization(axis=channel_axis)(x)
            x = Activation(activation)(x)

        x = conv_layer(n_filter, kernel_size, **conv_kwargs)(x)
        if batch_norm:
            x = BatchNormalization(axis=channel_axis)(x)

        if any(p!=1 for p in pool) or n_filter != K.int_shape(inp)[-1]:
            inp = conv_layer(n_filter, (1,)*n_dim, strides=pool, **conv_kwargs)(inp)

        x = Add()([inp, x])
        x = Activation(activation)(x)
        return x

    return f 
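A hedged usage sketch for resnet_block (input shape and filter counts are illustrative; assumes a channels_last backend and that CSBDeep's helpers such as _raise and backend_channels_last are importable):

from keras.layers import Input
from keras.models import Model

inp = Input((128, 128, 16))
x = resnet_block(32, pool=(2, 2), batch_norm=True)(inp)  # channels/stride change -> projection shortcut
x = resnet_block(32)(x)                                  # shapes already match -> identity shortcut
model = Model(inp, x)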
Example #15
Source File: layer_utils.py    From deep_learning with MIT License (5 votes)
def res_block(input_tensor, filters, kernel_size=(3, 3), strides=(1, 1), use_dropout=False):
    """实例化Keras Resnet块。
    
    Arguments:
        input_tensor {[type]} -- 输入张量
        filters {[type]} -- filters
    
    Keyword Arguments:
        kernel_size {tuple} -- [description] (default: {(3,3)})
        strides {tuple} -- [description] (default: {(1,1)})
        use_dropout {bool} -- [description] (default: {False})
    """
    x = ReflectionPadding2D((1, 1))(input_tensor)
    x = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    if use_dropout:
        x = Dropout(0.5)(x)
    
    x = ReflectionPadding2D((1, 1))(x)
    x = Conv2D(filters=filters, kernel_size=kernel_size, strides=strides)(x)
    x = BatchNormalization()(x)

    merged = Add()([input_tensor, x])
    return merged 
Example #16
Source File: multi.py    From costar_plan with Apache License 2.0 (5 votes)
def GetPoseModel(x, num_options, arm_size, gripper_size,
        dropout_rate=0.5, batchnorm=True):
    '''
    Make an "actor" network that takes in an encoded image and an "option"
    label and produces the next command to execute.
    '''
    img_shape = [int(d) for d in x.shape[1:]]
    img_in = Input(img_shape,name="policy_img_in")
    img0_in = Input(img_shape,name="policy_img0_in")
    arm = Input((arm_size,), name="ee_in")
    gripper = Input((gripper_size,), name="gripper_in")
    option_in = Input((48,), name="actor_o_in")

    ins = [img0_in, img_in, option_in, arm, gripper]
    x0, x = img0_in, img_in
    dr, bn = dropout_rate, False
    use_lrelu = False

    x = Concatenate(axis=-1)([x, x0])
    x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn)

    # Add arm, gripper
    y = Concatenate()([arm, gripper])
    y = AddDense(y, 32, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y, 32, (8,8), add=False)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    # Add the option label
    y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y2, 64, (6,6), add=False)
    x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    x = Flatten()(x)
    x = AddDense(x, 512, "relu", 0., output=True, bn=False)
    x = AddDense(x, 512, "relu", 0., output=True, bn=False)    # Same setup as the state decoders
    arm = AddDense(x, arm_size, "linear", 0., output=True)
    gripper = AddDense(x, gripper_size, "sigmoid", 0., output=True)
    actor = Model(ins, [arm, gripper], name="pose")
    return actor 
Example #17
Source File: husky.py    From costar_plan with Apache License 2.0 (5 votes)
def GetHuskyPoseModel(x, num_options, pose_size,
        dropout_rate=0.5, batchnorm=True):
    '''
    Make an "actor" network that takes in an encoded image and an "option"
    label and produces the next command to execute.
    '''
    xin = Input([int(d) for d in x.shape[1:]], name="pose_h_in")
    x0in = Input([int(d) for d in x.shape[1:]], name="pose_h0_in")

    pose_in = Input((pose_size,), name="pose_pose_in")
    option_in = Input((num_options,), name="pose_o_in")
    x = xin
    x0 = x0in
    dr, bn = dropout_rate, False
    use_lrelu = False

    x = Concatenate(axis=-1)([x, x0])
    x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn)

    # Add pose
    y = pose_in
    y = AddDense(y, 32, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y, 32, (8,8), add=False)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    # Add the option label
    y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y2, 64, (6,6), add=False)
    x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    x = Flatten()(x)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)    # Same setup as the state decoders


    pose = AddDense(x, pose_size, "linear", 0., output=True)
    pose = Model([x0in, xin, option_in, pose_in], [pose], name="pose")
    return pose 
Example #18
Source File: husky.py    From costar_plan with Apache License 2.0 (5 votes)
def GetHuskyActorModel(x, num_options, pose_size,
        dropout_rate=0.5, batchnorm=True):
    '''
    Make an "actor" network that takes in an encoded image and an "option"
    label and produces the next command to execute.
    '''
    xin = Input([int(d) for d in x.shape[1:]], name="actor_h_in")
    x0in = Input([int(d) for d in x.shape[1:]], name="actor_h0_in")

    pose_in = Input((pose_size,), name="actor_pose_in")
    option_in = Input((num_options,), name="actor_o_in")
    x = xin
    x0 = x0in
    dr, bn = dropout_rate, False
    use_lrelu = False

    x = Concatenate(axis=-1)([x, x0])
    x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn)

    # Add pose
    y = pose_in
    y = AddDense(y, 32, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y, 32, (8,8), add=False)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    # Add the option label
    y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y2, 64, (6,6), add=False)
    x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    x = Flatten()(x)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)    # Same setup as the state decoders


    pose = AddDense(x, pose_size, "linear", 0., output=True)
    actor = Model([x0in, xin, option_in, pose_in], [pose], name="actor")
    return actor 
Example #19
Source File: planner.py    From costar_plan with Apache License 2.0 (5 votes)
def DenseHelper(x, dense_size, dropout_rate, repeat):
    '''
    Add a repeated number of dense layers of the same size.
    '''
    for i in range(repeat):
        if i < repeat - 1:
            dr = 0.
        else:
            dr = dropout_rate
        # Reassign so each dense layer actually chains onto the previous one.
        x = AddDense(x, dense_size, "relu", dr)
    return x 
Example #20
Source File: planner.py    From costar_plan with Apache License 2.0 (5 votes)
def TileOnto(x, z, zlen, xsize, add=False):
    z = Reshape([1, 1, zlen])(z)
    tile_shape = (int(1), int(xsize[0]), int(xsize[1]), 1)
    z = Lambda(lambda x: K.tile(x, tile_shape))(z)
    if not add:
        x = Concatenate(axis=-1)([x, z])
    else:
        x = Add()([x, z])
    return x 
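A sketch of how TileOnto might be called (shapes are assumptions): broadcast a 32-dim vector across an 8x8 feature map, either as extra channels (add=False) or summed into them (add=True, which requires matching channel counts).

from keras.layers import Input

feat = Input((8, 8, 64))               # spatial feature map
vec = Input((32,))                     # flat conditioning vector
out = TileOnto(feat, vec, 32, (8, 8))  # concatenated -> shape (None, 8, 8, 96)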
Example #21
Source File: hypertree_model.py    From costar_plan with Apache License 2.0 (5 votes)
def add_images_with_tiled_vector_layer(images, vector, image_shape=None, vector_shape=None):
    """Tile a vector as if it were channels onto every pixel of an image.

    This version is designed to be used as layers within a Keras model.

    # Params
       images: a list of images to combine, must have equal dimensions
       vector: the 1D vector to tile onto every pixel
       image_shape: Tuple with 3 entries defining the shape (height, width,
           channels) the images are expected to have; do not include the
           batch dimension.
       vector_shape: Tuple defining the shape the vector is expected to
           have; do not include the batch dimension.
    """
    with K.name_scope('add_images_with_tiled_vector_layer'):
        if not isinstance(images, list):
            images = [images]
        if vector_shape is None:
            # check if K.shape, K.int_shape, or vector.get_shape().as_list()[1:] is better
            # https://github.com/fchollet/keras/issues/5211
            vector_shape = K.int_shape(vector)[1:]
        if image_shape is None:
            # check if K.shape, K.int_shape, or image.get_shape().as_list()[1:] is better
            # https://github.com/fchollet/keras/issues/5211
            image_shape = K.int_shape(images[0])[1:]
        vector = Reshape([1, 1, vector_shape[-1]])(vector)
        tile_shape = (int(1), int(image_shape[0]), int(image_shape[1]), int(1))
        tiled_vector = Lambda(lambda x: K.tile(x, tile_shape))(vector)
        x = Add()(images + [tiled_vector])
    return x 
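A short hypothetical call of the helper above (shapes assumed): the vector length must equal the images' channel count so that Add() sees matching shapes.

from keras.layers import Input

images = [Input((8, 8, 32)), Input((8, 8, 32))]
vector = Input((32,))
combined = add_images_with_tiled_vector_layer(images, vector)
# combined: shape (None, 8, 8, 32), the element-wise sum of both
# images and the tiled vector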
Example #22
Source File: model_connect4.py    From connect4-alpha-zero with MIT License (5 votes)
def _build_residual_block(self, x):
    mc = self.config.model
    in_x = x
    x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size, padding="same",
               data_format="channels_first", kernel_regularizer=l2(mc.l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=mc.cnn_filter_num, kernel_size=mc.cnn_filter_size, padding="same",
               data_format="channels_first", kernel_regularizer=l2(mc.l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Add()([in_x, x])
    x = Activation("relu")(x)
    return x 
Example #23
Source File: _pspnet_2.py    From image-segmentation-keras with MIT License (5 votes)
def residual_empty(prev_layer, level, pad=1, lvl=1, sub_lvl=1):
    prev_layer = Activation('relu')(prev_layer)

    block_1 = residual_conv(prev_layer, level, pad=pad,
                            lvl=lvl, sub_lvl=sub_lvl)
    block_2 = empty_branch(prev_layer)
    added = Add()([block_1, block_2])
    return added 
Example #24
Source File: _pspnet_2.py    From image-segmentation-keras with MIT License (5 votes)
def residual_short(prev_layer, level, pad=1, lvl=1, sub_lvl=1,
                   modify_stride=False):
    prev_layer = Activation('relu')(prev_layer)
    block_1 = residual_conv(prev_layer, level,
                            pad=pad, lvl=lvl, sub_lvl=sub_lvl,
                            modify_stride=modify_stride)

    block_2 = short_convolution_branch(prev_layer, level,
                                       lvl=lvl, sub_lvl=sub_lvl,
                                       modify_stride=modify_stride)
    added = Add()([block_1, block_2])
    return added 
Example #25
Source File: layers_builder.py    From PSPNet-Keras-tensorflow with MIT License (5 votes)
def residual_short(prev_layer, level, pad=1, lvl=1, sub_lvl=1, modify_stride=False):
    prev_layer = Activation('relu')(prev_layer)
    block_1 = residual_conv(prev_layer, level,
                            pad=pad, lvl=lvl, sub_lvl=sub_lvl,
                            modify_stride=modify_stride)

    block_2 = short_convolution_branch(prev_layer, level,
                                       lvl=lvl, sub_lvl=sub_lvl,
                                       modify_stride=modify_stride)
    added = Add()([block_1, block_2])
    return added 
Example #26
Source File: multi.py    From costar_plan with Apache License 2.0 (4 votes)
def GetActorModel(x, num_options, arm_size, gripper_size,
        dropout_rate=0.5, batchnorm=True):
    '''
    Make an "actor" network that takes in an encoded image and an "option"
    label and produces the next command to execute.
    '''
    xin = Input([int(d) for d in x.shape[1:]], name="actor_h_in")
    x0in = Input([int(d) for d in x.shape[1:]], name="actor_h0_in")
    arm_in = Input((arm_size,), name="ee_in")
    gripper_in = Input((gripper_size,), name="gripper_in")
    option_in = Input((48,), name="actor_o_in")

    #dr, bn = dropout_rate, batchnorm
    x0, x = x0in, xin
    dr, bn = dropout_rate, False
    use_lrelu = False

    x = Concatenate(axis=-1)([x, x0])
    x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn)

    # Add arm, gripper
    y = Concatenate()([arm_in, gripper_in])
    y = AddDense(y, 32, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y, 32, (8,8), add=False)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    # Add the option label
    y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y2, 64, (6,6), add=False)
    x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    x = Flatten()(x)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)    # Same setup as the state decoders

    arm = AddDense(x, arm_size, "linear", 0., output=True, bn=False)
    gripper = AddDense(x, gripper_size, "sigmoid", 0., output=True, bn=False)
    #value = Dense(1, activation="sigmoid", name="V",)(x1)
    actor = Model([x0in, xin, arm_in, gripper_in, option_in], [arm, gripper], name="actor")
    return actor 
Example #27
Source File: nets.py    From CSBDeep with BSD 3-Clause "New" or "Revised" License (4 votes)
def custom_unet(input_shape,
                last_activation,
                n_depth=2,
                n_filter_base=16,
                kernel_size=(3,3,3),
                n_conv_per_depth=2,
                activation="relu",
                batch_norm=False,
                dropout=0.0,
                pool_size=(2,2,2),
                n_channel_out=1,
                residual=False,
                prob_out=False,
                eps_scale=1e-3):
    """ TODO """

    if last_activation is None:
        raise ValueError("last activation has to be given (e.g. 'sigmoid', 'relu')!")

    all((s % 2 == 1 for s in kernel_size)) or _raise(ValueError('kernel size should be odd in all dimensions.'))

    channel_axis = -1 if backend_channels_last() else 1

    n_dim = len(kernel_size)
    conv = Conv2D if n_dim==2 else Conv3D

    input = Input(input_shape, name = "input")
    unet = unet_block(n_depth, n_filter_base, kernel_size,
                      activation=activation, dropout=dropout, batch_norm=batch_norm,
                      n_conv_per_depth=n_conv_per_depth, pool=pool_size)(input)

    final = conv(n_channel_out, (1,)*n_dim, activation='linear')(unet)
    if residual:
        if not (n_channel_out == input_shape[-1] if backend_channels_last() else n_channel_out == input_shape[0]):
            raise ValueError("number of input and output channels must be the same for a residual net.")
        final = Add()([final, input])
    final = Activation(activation=last_activation)(final)

    if prob_out:
        scale = conv(n_channel_out, (1,)*n_dim, activation='softplus')(unet)
        scale = Lambda(lambda x: x+np.float32(eps_scale))(scale)
        final = Concatenate(axis=channel_axis)([final,scale])

    return Model(inputs=input, outputs=final) 
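A hypothetical call of custom_unet (values illustrative; depends on CSBDeep's unet_block and helpers being importable). With residual=True, n_channel_out must equal the number of input channels so the final Add() with the input is valid:

model = custom_unet((128, 128, 1), last_activation='linear',
                    kernel_size=(3, 3), pool_size=(2, 2),
                    residual=True)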
Example #28
Source File: planner.py    From costar_plan with Apache License 2.0 (4 votes)
def TileArmAndGripper(x, arm_in, gripper_in, tile_width, tile_height,
        option=None, option_in=None,
        time_distributed=None, dim=64,
        concatenate=True):
    arm_size = int(arm_in.shape[-1])
    gripper_size = int(gripper_in.shape[-1])

    # handle error: options and grippers
    if option is None and option_in is not None \
        or option is not None and option_in is None:
            raise RuntimeError('must provide both #opts and input')

    # generate options and tile things together
    if option is None:
        robot = CombineArmAndGripper(arm_in, gripper_in, dim=dim)
        #reshape_size = arm_size+gripper_size
        reshape_size = dim
    else:
        robot = CombineArmAndGripperAndOption(arm_in,
                                              gripper_in,
                                              option_in,
                                              dim=dim)
        reshape_size = dim
        #reshape_size = arm_size+gripper_size+option

    # time distributed or not
    robot0 = robot
    if time_distributed is not None and time_distributed > 0:
        tile_shape = (1, 1, tile_width, tile_height, 1)
        robot = Reshape([time_distributed, 1, 1, reshape_size])(robot)
    else:
        tile_shape = (1, tile_width, tile_height, 1)
        robot = Reshape([1, 1, reshape_size])(robot)

    # finally perform the actual tiling
    robot = Lambda(lambda x: K.tile(x, tile_shape))(robot)
    if concatenate:
        x = Concatenate(axis=-1)([x,robot])
    else:
        x = Add()([x, robot])

    return x, robot0 
Example #29
Source File: planner.py    From costar_plan with Apache License 2.0 4 votes vote down vote up
def AddDense(x, size, activation, dropout_rate, output=False, momentum=MOMENTUM,
    constraint=3,
    bn=True,
    kr=0.,
    ar=0.,
    perm_drop=False):
    '''
    Add a single dense block with batchnorm and activation.

    Parameters:
    -----------
    x: input tensor
    size: number of dense neurons
    activation: activation fn to use
    dropout_rate: dropout to use after activation

    Returns:
    --------
    x: output tensor
    '''

    if isinstance(kr, float):
        kr = keras.regularizers.l2(kr) if kr > 0 else None

    if isinstance(ar, float):
        ar = keras.regularizers.l1(ar) if ar > 0 else None

    if constraint is not None:
        x = Dense(size, kernel_constraint=maxnorm(constraint),
                  kernel_regularizer=kr,
                  activity_regularizer=ar,)(x)
    else:
        x = Dense(size,
                  kernel_regularizer=kr,
                  activity_regularizer=ar,)(x)

    if not output and bn:
        #x = BatchNormalization(momentum=momentum)(x)
        x = InstanceNormalization()(x)

    if activation == "lrelu":
        x = LeakyReLU(alpha=0.2)(x)
    else:
        x = Activation(activation)(x)
    if dropout_rate > 0:
        if perm_drop:
            x = PermanentDropout(dropout_rate)(x)
        else:
            x = Dropout(dropout_rate)(x)
    return x 
Example #30
Source File: dvrk.py    From costar_plan with Apache License 2.0 (4 votes)
def GetJigsawsNextModel(x, num_options, dense_size, dropout_rate=0.5, batchnorm=True):
    '''
    Next actions
    '''

    xin = Input([int(d) for d in x.shape[1:]], name="Nx_prev_h_in")
    # x0in is used below but was missing from this snippet; restored here
    # (the name is assumed, by analogy with the other costar_plan models).
    x0in = Input([int(d) for d in x.shape[1:]], name="Nx_prev_h0_in")
    option_in = Input((1,), name="Nx_prev_o_in")
    x = xin
    x0 = x0in
    if len(x.shape) > 2:
        # Project
        x = AddConv2D(x, 32, [1,1], 1, dropout_rate, "same",
                bn=batchnorm,
                lrelu=True,
                name="Nx_project",
                constraint=None)
        x0 = AddConv2D(x0, 32, [1,1], 1, dropout_rate, "same",
                bn=batchnorm,
                lrelu=True,
                name="Nx_project0",
                constraint=None)
        x = Add()([x,x0])

        if num_options > 0:
            option_x = OneHot(num_options)(option_in)
            option_x = Flatten()(option_x)
            x = TileOnto(x, option_x, num_options, x.shape[1:3])

        # conv down
        x = AddConv2D(x, 64, [3,3], 1, dropout_rate, "valid",
                bn=batchnorm,
                lrelu=True,
                name="Nx_C64A",
                constraint=None)

        x = AddConv2D(x, 32, [3,3], 1, dropout_rate, "valid",
                bn=batchnorm,
                lrelu=True,
                name="Nx_C32A",
                constraint=None)
        # This is the hidden representation of the world, but it should be flat
        # for our classifier to work.
        x = Flatten()(x)

    # Next options
    x1 = AddDense(x, dense_size, "relu", dropout_rate, constraint=None,
            output=False,)
    x1 = AddDense(x1, dense_size, "relu", 0., constraint=None,
            output=False,)

    next_option_out = Dense(num_options,
            activation="sigmoid", name="lnext",)(x1)
    next_model = Model([x0in, xin, option_in], next_option_out, name="next")
    #next_model = Model([xin, option_in], next_option_out, name="next")
    return next_model