Python keras.layers.merge.Concatenate() Examples

The following code examples show how to use keras.layers.merge.Concatenate(). They are drawn from open-source Python projects.
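As a minimal, self-contained illustration of the layer before the project examples (the names and sizes here are ours, purely for demonstration):

from keras.layers import Input, Dense
from keras.layers.merge import Concatenate
from keras.models import Model

# Two branches joined on the last axis (the default), giving a (None, 48) tensor.
a_in = Input(shape=(10,))
b_in = Input(shape=(20,))
a = Dense(16, activation='relu')(a_in)
b = Dense(32, activation='relu')(b_in)
merged = Concatenate()([a, b])
out = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[a_in, b_in], outputs=out)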

Example 1
Project: RMDL   Author: eric-erki   File: BuildModel.py    GNU General Public License v3.0
def to_multi_gpu(model, n_gpus=2):
    """
    Given a keras [model], return an equivalent model which parallelizes
    the computation over [n_gpus] GPUs.

    Each GPU gets a slice of the input batch, applies the model to that
    slice, and the per-GPU outputs are then concatenated into a single
    tensor, so the returned model behaves the same as the original.
    """

    with tf.device('/cpu:0'):
        x = Input(model.input_shape[1:], name="input1")

    towers = []
    for g in range(n_gpus):
        with tf.device('/gpu:' + str(g)):
            slice_g = Lambda(slice_batch,
                             lambda shape: shape,
                             arguments={'n_gpus':n_gpus, 'part':g})(x)
            towers.append(model(slice_g))

    with tf.device('/cpu:0'):
        merged = Concatenate(axis=0)(towers)

    return Model(inputs=[x], outputs=[merged]) 
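slice_batch is referenced above but not shown. A sketch consistent with the Lambda call (assuming K is keras.backend) looks like this; the last slice absorbs any remainder when the batch size is not divisible by n_gpus:

import keras.backend as K

def slice_batch(x, n_gpus, part):
    # Split the batch dimension into n_gpus contiguous slices and
    # return slice number `part`.
    batch = K.shape(x)[0]
    size = batch // n_gpus
    if part == n_gpus - 1:
        return x[part * size:]
    return x[part * size:(part + 1) * size]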
Example 2
Project: PSPNet-Keras-tensorflow   Author: Vladkryvoruchko   File: layers_builder.py    MIT License
def build_pyramid_pooling_module(res, input_shape):
    """Build the Pyramid Pooling Module."""
    # ---PSPNet concat layers with Interpolation
    feature_map_size = tuple(int(ceil(input_dim / 8.0))
                             for input_dim in input_shape)
    print("PSP module will interpolate to a final feature map size of %s" %
          (feature_map_size, ))

    interp_block1 = interp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = interp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = interp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = interp_block(res, 6, feature_map_size, input_shape)

    # concatenate all branches; the resulting shape is
    # (1, feature_map_size_x, feature_map_size_y, 4096)
    res = Concatenate()([res,
                         interp_block6,
                         interp_block3,
                         interp_block2,
                         interp_block1])
    return res 
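interp_block is defined elsewhere in layers_builder.py. Schematically, each branch average-pools the feature map to a level x level grid, reduces it with a 1x1 convolution, and resizes back to feature_map_size so that all branches can be concatenated channel-wise with res. A simplified sketch (Interp stands in for the project's bilinear-resize layer):

def interp_block(prev_layer, level, feature_map_size, input_shape):
    # Pool to roughly a level-by-level grid, project to 512 channels,
    # then resize so every branch matches feature_map_size.
    kernel = (feature_map_size[0] // level, feature_map_size[1] // level)
    x = AveragePooling2D(kernel, strides=kernel)(prev_layer)
    x = Conv2D(512, (1, 1), use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return Interp(feature_map_size)(x)  # project-specific resize layer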
Example 3
Project: keras-image-captioning   Author: danieljl   File: models.py    MIT License
def build(self, vocabs=None):
        if self._keras_model:
            return
        if vocabs is None and self._word_vector_init is not None:
            raise ValueError('If word_vector_init is not None, build method '
                             'must be called with vocabs that are not None!')

        image_input, image_embedding = self._build_image_embedding()
        sentence_input, word_embedding = self._build_word_embedding(vocabs)
        sequence_input = Concatenate(axis=1)([image_embedding, word_embedding])
        sequence_output = self._build_sequence_model(sequence_input)

        model = Model(inputs=[image_input, sentence_input],
                      outputs=sequence_output)
        model.compile(optimizer=Adam(lr=self._learning_rate, clipnorm=5.0),
                      loss=categorical_crossentropy_from_logits,
                      metrics=[categorical_accuracy_with_variable_timestep])

        self._keras_model = model 
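Here axis=1 is the timestep axis: the image embedding contributes a single leading timestep that is prepended to the word embeddings, so the sequence model conditions on the image before the caption words. A shape-only sketch (the 300-dimensional embedding size is illustrative):

from keras.layers import Input
from keras.layers.merge import Concatenate

img_emb = Input(shape=(1, 300))        # one image "pseudo-word" timestep
word_emb = Input(shape=(None, 300))    # T caption timesteps
seq = Concatenate(axis=1)([img_emb, word_emb])  # shape (None, 1 + T, 300)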
Example 4
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: srinets.py    Apache License 2.0
def spatial_residual_inception(inputs, base_filters=256):
    x_short = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None)(inputs)
    x_short = Activation("relu")(x_short)

    x_conv1x1 = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None)(x_short)

    x_conv3x3 = Conv2D(base_filters, (1, 1), use_bias=False, activation=None)(x_short)
    x_conv3x3 = Conv2D(base_filters + 32, (1, 3), padding="same", use_bias=False, activation=None)(x_conv3x3)
    x_conv3x3 = Conv2D(base_filters + 64, (3, 1), padding="same", use_bias=False, activation=None)(x_conv3x3)

    x_conv7x7 = Conv2D(base_filters, (1, 1), use_bias=False, activation=None)(x_short)
    x_conv7x7 = Conv2D(base_filters + 32, (1, 7), padding="same", use_bias=False, activation=None)(x_conv7x7)
    x_conv7x7 = Conv2D(base_filters + 64, (7, 1), padding="same", use_bias=False, activation=None)(x_conv7x7)

    x_conv = Concatenate()([x_conv1x1, x_conv3x3, x_conv7x7])
    x_conv = Conv2D(base_filters+64, (1, 1), use_bias=False, activation=None)(x_conv)

    x = Add()([x_short, x_conv])
    return Activation("relu")(x) 
Example 5
Project: DenseNet-Cifar10   Author: Kexiii   File: DenseNet.py    MIT License
def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1E-4):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    feature_list = [x]

    for i in range(nb_layers):
        x = conv_block(x, growth_rate, dropout_rate, weight_decay)
        feature_list.append(x)
        x = Concatenate(axis=concat_axis)(feature_list)
        nb_filter += growth_rate

    return x, nb_filter 
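conv_block is the DenseNet composite function. A sketch consistent with how dense_block calls it above (BN -> ReLU -> 3x3 conv with optional dropout; the regularizer details are our assumption):

import keras.backend as K
from keras.layers import Activation, Conv2D, Dropout
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2

def conv_block(x, nb_filter, dropout_rate=None, weight_decay=1E-4):
    # Produces growth_rate new feature maps that dense_block appends
    # to the running feature list.
    axis = 1 if K.image_dim_ordering() == "th" else -1
    x = BatchNormalization(axis=axis)(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (3, 3), padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x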
Example 6
Project: BipedalWalker-v2   Author: Kyziridis   File: DQNlalala.py    GNU General Public License v2.0
def Actor(self):                     
        # Build Network for Actor
        #input=[lidar_input,state_input]
        lidar_input = Input(shape=(self.nx_lidar,))
        lidar_conv = Dense(self.layers[0], activation='relu', kernel_regularizer=self.k_r)(lidar_input)
        #pool = MaxPooling1D(4)(lidar_conv)
        #flat = Flatten()(lidar_conv)
               
        state_input = Input(shape=(self.nx_obs,))
        state_h1 = Dense(self.layers[0], activation='relu', kernel_regularizer=self.k_r)(state_input)
        gauss = GaussianNoise(1.0)(state_h1)
        #gauss = Flatten()(gauss)
        
        merged = Concatenate()([lidar_conv,gauss])
        #merged_reshaped = Reshape((256,1))(merged)
        merged_lstm = Dense(self.layers[1],activation='relu')(merged)
        gauss_ = GaussianNoise(1.0)(merged_lstm)
        output = Dense(self.ny, activation='tanh', kernel_initializer=self.k_init,\
                       bias_initializer=self.b_init)(gauss_)
        
        model = Model(inputs=[lidar_input, state_input], outputs=output)
        adam = Adam(lr=self.lr_actor)
        model.compile(loss='mse', optimizer=adam)
        return lidar_input,state_input, model 
Example 7
Project: BipedalWalker-v2   Author: Kyziridis   File: actor_Lstm.py    GNU General Public License v2.0
def Actor(self):                     
        # Build Network for Actor
        #input=[lidar_input,state_input]
        lidar_input = Input(shape=(self.nx_lidar,1))
        lidar_conv = Conv1D(64, 4, activation='relu')(lidar_input)
        pool = MaxPooling1D(4)(lidar_conv)
        flat = Flatten()(pool)
               
        state_input = Input(shape=(self.nx_obs,))
        state_h1 = Dense(192, activation='relu')(state_input)
        
        merged = Concatenate()([flat,state_h1])
        merged_reshaped = Reshape((256,1))(merged)
        merged_lstm = LSTM(256,activation='relu',input_shape=(1,256,1))(merged_reshaped)
        output = Dense(self.ny, activation='tanh')(merged_lstm)
        
        model = Model(inputs=[lidar_input, state_input], outputs=output)
        adam = Adam(lr=self.lr_actor)
        model.compile(loss='mse', optimizer=adam)
        return lidar_input,state_input, model 
Example 8
Project: BipedalWalker-v2   Author: Kyziridis   File: PPO_e.py    GNU General Public License v2.0
def sample_Critic(self):                     
        state_input = Input(shape=(self.nx,))
        action_input = Input(shape=(self.ny,))
        #
        h0 = Dense(self.layers[1], activation='relu',kernel_regularizer=self.k_r)(state_input)
        h1 = Dense(self.layers[1], activation='tanh',kernel_regularizer=self.k_r)(action_input)
        #
        conc = Concatenate()([h0,h1])
        #conc = Flatten()(conc)
        h2 = Dense(64, activation='relu', kernel_regularizer=self.k_r)(conc)
        out = Dense(1, activation='linear', kernel_regularizer=self.k_r,\
                    kernel_initializer=self.final_initializer)(h2)
        #       
        model  = Model(inputs=[state_input, action_input], outputs=out)
        adam  = Adam(lr=self.lr_critic)
        model.compile(loss="mse", optimizer=adam)
        return model 
Example 9
Project: BipedalWalker-v2   Author: Kyziridis   File: PPO_simple2.py    GNU General Public License v2.0
def sample_Critic(self):                     
        state_input = Input(shape=(self.nx,))
        action_input = Input(shape=(self.ny,))
        #
        h0 = Dense(self.layers[1], activation='relu',kernel_regularizer=self.k_r)(state_input)
        h1 = Dense(self.layers[1], activation='tanh',kernel_regularizer=self.k_r)(action_input)
        #
        conc = Concatenate()([h0,h1])
        #conc = Flatten()(conc)
        h2 = Dense(64, activation='relu', kernel_regularizer=self.k_r)(conc)
        out = Dense(1, activation='linear', kernel_regularizer=self.k_r,\
                    kernel_initializer=self.final_initializer)(h2)
        #       
        model  = Model(inputs=[state_input, action_input], outputs=out)
        adam  = Adam(lr=LR)
        model.compile(loss="mse", optimizer=adam)
        return model 
Example 10
Project: BipedalWalker-v2   Author: Kyziridis   File: PPO_simple.py    GNU General Public License v2.0
def sample_Critic(self):                     
        state_input = Input(shape=(self.nx,))
        action_input = Input(shape=(self.ny,))
        #
        h0 = Dense(self.layers[1], activation='relu',kernel_regularizer=self.k_r)(state_input)
        h1 = Dense(self.layers[1], activation='tanh',kernel_regularizer=self.k_r)(action_input)
        #
        conc = Concatenate()([h0,h1])
        #conc = Flatten()(conc)
        h2 = Dense(64, activation='relu', kernel_regularizer=self.k_r)(conc)
        out = Dense(1, activation='linear', kernel_regularizer=self.k_r,\
                    kernel_initializer=self.final_initializer)(h2)
        #       
        model = Model(inputs=[state_input, action_input], outputs=out)
        adam  = Adam(lr=self.lr_critic)
        model.compile(loss="mse", optimizer=adam)
        return model 
Example 11
Project: faceswap   Author: deepfakes   File: dlight.py    GNU General Public License v3.0
def upscale2x_hyb(self, inp, filters, kernel_size=3, padding='same',
                  sr_ratio=0.5, scale_factor=2, interpolation='bilinear',
                  res_block_follows=False, **kwargs):
    """Hybrid Upscale Layer"""
    name = self.get_name("upscale2x_hyb")
    var_x = inp

    sr_filters = int(filters * sr_ratio)
    upscale_filters = filters - sr_filters

    var_x_sr = self.upscale(var_x, upscale_filters, kernel_size=kernel_size,
                            padding=padding, scale_factor=scale_factor,
                            res_block_follows=res_block_follows, **kwargs)
    if upscale_filters > 0:
        var_x_us = self.conv2d(var_x, upscale_filters,  kernel_size=3, padding=padding,
                               name="{}_conv2d".format(name), **kwargs)
        var_x_us = UpSampling2D(size=(scale_factor, scale_factor), interpolation=interpolation,
                                name="{}_upsampling2D".format(name))(var_x_us)
        var_x = Concatenate(name="{}_concatenate".format(name))([var_x_sr, var_x_us])
    else:
        var_x = var_x_sr

    return var_x 
Example 12
Project: PADify   Author: rodrigobressan   File: multi_gpu.py    MIT License
def to_multi_gpu(model, n_gpus=2):
    if n_gpus == 1:
        return model

    with tf.device('/cpu:0'):
        x = Input(model.input_shape[1:])
    towers = []
    for g in range(n_gpus):
        with tf.device('/gpu:' + str(g)):
            slice_g = Lambda(slice_batch, lambda shape: shape, arguments={'n_gpus':n_gpus, 'part':g})(x)
            towers.append(model(slice_g))

    with tf.device('/cpu:0'):
        # Deprecated
        #merged = merge(towers, mode='concat', concat_axis=0)
        merged = Concatenate(axis=0)(towers)
    return Model(inputs=[x], outputs=merged) 
Example 13
Project: cyclegan-keras-art-attrs   Author: hollygrimm   File: cyclegan_attr_model.py    MIT License
def build_generator(self, shape, name):

        def conv2d(layer_input, filters, f_size=4):
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            d = InstanceNormalization()(d)
            return d

        def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
            u = UpSampling2D(size=2)(layer_input)
            u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            u = InstanceNormalization()(u)
            u = Concatenate()([u, skip_input])
            return u

        d0 = Input(shape=shape)

        d1 = conv2d(d0, self.gf)
        d2 = conv2d(d1, self.gf*2)
        d3 = conv2d(d2, self.gf*4)
        d4 = conv2d(d3, self.gf*8)

        u1 = deconv2d(d4, d3, self.gf*4)
        u2 = deconv2d(u1, d2, self.gf*2)
        u3 = deconv2d(u2, d1, self.gf)

        u4 = UpSampling2D(size=2)(u3)
        output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)

        return Model(d0, output_img, name=name) 
Example 14
Project: DEXTR-KerasTensorflow   Author: scaelles   File: classifiers.py    GNU General Public License v3.0
def build_pyramid_pooling_module(res, input_shape, nb_classes, sigmoid=False, output_size=None):
    """Build the Pyramid Pooling Module."""
    # ---PSPNet concat layers with Interpolation
    feature_map_size = tuple(int(ceil(input_dim / 8.0)) for input_dim in input_shape)
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    print("PSP module will interpolate to a final feature map size of %s" %
          (feature_map_size, ))

    interp_block1 = psp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = psp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = psp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = psp_block(res, 6, feature_map_size, input_shape)

    # concatenate all interpolation branches with the original feature map
    res = Concatenate()([interp_block1,
                         interp_block2,
                         interp_block3,
                         interp_block6,
                         res])
    x = Conv2D(512, (1, 1), strides=(1, 1), padding="same", name="class_psp_reduce_conv", use_bias=False)(res)
    x = resnet.BN(bn_axis, name="class_psp_reduce_bn")(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="class_psp_final_conv")(x)

    if output_size:
        x = Upsampling(output_size)(x)

    if sigmoid:
        x = Activation('sigmoid')(x)
    return x 
Example 15
Project: hfusion   Author: SenticNet   File: hfusion.py    MIT License
def output_of_lambda2(input_shape):
    return (input_shape[0], audio_dim)

# ################################################################################
# #Level 2

# #concatenate level 1 output to be sent to hfusion
# fused_tensor=Concatenate(axis=2)([context_1_2,context_1_3,context_2_3]) 
Example 16
Project: text-classifier   Author: shibing624   File: deep_model.py    Apache License 2.0
def cnn_model(max_len=400,
              vocabulary_size=20000,
              embedding_dim=128,
              hidden_dim=128,
              num_filters=512,
              filter_sizes="3,4,5",
              num_classses=4,
              dropout=0.5):
    print("Creating text CNN Model...")
    # a tensor
    inputs = Input(shape=(max_len,), dtype='int32')
    # emb
    embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim,
                          input_length=max_len, name="embedding")(inputs)
    # convolution block
    if "," in filter_sizes:
        filter_sizes = filter_sizes.split(",")
    else:
        filter_sizes = [3, 4, 5]
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=int(sz),
                             strides=1,
                             padding='valid',
                             activation='relu')(embedding)
        conv = MaxPooling1D()(conv)
        conv = Flatten()(conv)
        conv_blocks.append(conv)
    conv_concate = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    dropout_layer = Dropout(dropout)(conv_concate)
    output = Dense(hidden_dim, activation='relu')(dropout_layer)
    output = Dense(num_classses, activation='softmax')(output)
    # model
    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model 
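Note the guard on conv_blocks: Concatenate must be called on a list of at least two tensors and raises a ValueError for a single-element list, so when only one filter size is configured the lone pooled tensor is passed through unchanged.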
Example 17
Project: vess2ret   Author: costapt   File: models.py    MIT License
def concatenate_layers(inputs, concat_axis, mode='concat'):
    if KERAS_2:
        assert mode == 'concat', "Only concatenation is supported in this wrapper"
        return Concatenate(axis=concat_axis)(inputs)
    else:
        return merge(inputs=inputs, concat_axis=concat_axis, mode=mode) 
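Under Keras 2, a call such as concatenate_layers([a, b], concat_axis=-1) is therefore equivalent to Concatenate(axis=-1)([a, b]); under Keras 1 the same call falls through to the legacy functional merge.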
Example 18
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: srinets.py    Apache License 2.0
def spatial_residual_inception_v2(inputs, base_filters=192, weight_decay=1e-4, kernel_initializer="he_normal"):
    x_short = Activation("relu")(inputs)
    x_short = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None,
                     kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)

    # 1x1
    x_conv1x1 = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None,
                       kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)

    x_conv3x3_1 = Conv2D(base_filters + 32, (1, 3), padding="same", use_bias=False, activation=None, dilation_rate=1,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)
    x_conv3x3_1 = Conv2D(base_filters + 64, (3, 1), padding="same", use_bias=False, activation=None, dilation_rate=1,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_conv3x3_1)

    x_conv3x3_5 = Conv2D(base_filters + 32, (1, 3), padding="same", use_bias=False, activation=None, dilation_rate=2,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)
    x_conv3x3_5 = Conv2D(base_filters + 64, (3, 1), padding="same", use_bias=False, activation=None, dilation_rate=2,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_conv3x3_5)

    x_conv5x5_1 = Conv2D(base_filters + 32, (1, 3), padding="same", use_bias=False, activation=None, dilation_rate=5,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)
    x_conv5x5_1 = Conv2D(base_filters + 64, (3, 1), padding="same", use_bias=False, activation=None, dilation_rate=5,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_conv5x5_1)

    x_conv5x5_5 = Conv2D(base_filters + 32, (1, 3), padding="same", use_bias=False, activation=None, dilation_rate=7,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)
    x_conv5x5_5 = Conv2D(base_filters + 64, (3, 1), padding="same", use_bias=False, activation=None, dilation_rate=7,
                         kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_conv5x5_5)

    x_conv = Concatenate()([x_conv1x1, x_conv3x3_1, x_conv3x3_5, x_conv5x5_1, x_conv5x5_5])

    x_short = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None,
                     kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_short)
    x_conv = Conv2D(base_filters + 64, (1, 1), use_bias=False, activation=None,
                    kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x_conv)

    x = Add()([x_short, x_conv])
    return Activation("relu")(x) 
Example 19
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: pspnets.py    Apache License 2.0
def pyramid_scene_pooling(inputs,
                          feature_map_shape,
                          weight_decay=1e-4,
                          kernel_initializer="he_normal",
                          bn_epsilon=1e-3,
                          bn_momentum=0.99):
    """ PSP module.
    :param inputs: 4-D tensor, shape of (batch_size, height, width, channel).
    :param feature_map_shape: tuple, target shape of feature map.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: 4-D tensor, shape of (batch_size, height, width, channel).
    """
    interp_block1 = interp_block(inputs, feature_map_shape, level=1,
                                 weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                                 bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    interp_block2 = interp_block(inputs, feature_map_shape, level=2,
                                 weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                                 bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    interp_block3 = interp_block(inputs, feature_map_shape, level=3,
                                 weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                                 bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    interp_block6 = interp_block(inputs, feature_map_shape, level=6,
                                 weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                                 bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)

    return Concatenate()([interp_block1, interp_block2, interp_block3, interp_block6]) 
Example 20
Project: rnn-bitflyer-predictor   Author: GINK03   File: 400-model.py    MIT License
def getModel():
  TIME_SIZE     = 20
  PERD_SIZE     = 14
  input_tensor1 = Input(shape=(1, 20))
  x1             = Bi(LSTM(256, dropout=0.0, recurrent_dropout=0.1, activation='relu', recurrent_activation='tanh', return_sequences=False))(input_tensor1)

  input_tensor2 = Input(shape=(5, 20))
  x2             = Bi(LSTM(256, dropout=0.0, recurrent_dropout=0.2, activation='relu', recurrent_activation='tanh', return_sequences=False))(input_tensor2)
  
  input_tensor3 = Input(shape=(5, 20))
  x3             = Bi(LSTM(256, dropout=0.0, recurrent_dropout=0.2, activation='relu', recurrent_activation='tanh', return_sequences=False))(input_tensor3)
  
  x             = Concatenate(axis=1)([x1, x2, x3])
  x             = Dense(1024, activation='relu')(x)
  #x             = x1
  print(x.shape)
  x             = RepeatVector(PERD_SIZE)(x)
  x             = Bi(LSTM(256, return_sequences=True))(x)
  x             = Dense(256, activation='relu')(x)
  x             = Dense(1, activation='linear')(x)
  prediction    = Reshape((1, PERD_SIZE))(x)

  print(prediction.shape)
  model         = Model([input_tensor1, input_tensor2, input_tensor3], prediction)
  #model.compile(Adam(), loss=root_mean_squared_error)
  model.compile(Adam(), loss='mae')
  return model 
Example 21
Project: deep_pt_srl   Author: dfalci   File: lstm_model.py    BSD 2-Clause "Simplified" License
def create(self, tokenMatrix, predMatrix):

        nn = None

        inputSentence = Input(shape=(None,), dtype='int32', name='InputSentence')
        inputAux = Input(batch_shape=(None, None, 5), name='InputAux')
        inputPredicate = Input(shape=(None,), dtype='int32', name='InputPredicate')

        embedding = Embedding(tokenMatrix.shape[0], self.config.embeddingSize, weights=[tokenMatrix], trainable=self.config.trainableEmbeddings, name='Embedding')(inputSentence)
        embeddingPredicate = Embedding(predMatrix.shape[0], self.config.embeddingSize,  weights=[predMatrix], trainable=self.config.trainableEmbeddings, name='EmbeddingPred')(inputPredicate)

        conc = Concatenate(axis=-1, name='concatenate')([embedding, embeddingPredicate, inputAux])

        bi = Bidirectional(LSTM(self.config.lstmCells, activation=self.config.activation, recurrent_activation=self.config.recurrentActivation, recurrent_dropout=self.config.recurrentDropout, dropout=self.config.dropout, return_sequences=True))(conc)
        bi = Dropout(self.config.dropout)(bi)

        bi = Bidirectional(LSTM(self.config.lstmCells, activation=self.config.activation, recurrent_activation=self.config.recurrentActivation, recurrent_dropout=self.config.recurrentDropout, dropout=self.config.dropout, return_sequences=True))(bi)
        bi = Dropout(self.config.dropout)(bi)

        bi = Bidirectional(LSTM(self.config.lstmCells, activation=self.config.activation, recurrent_activation=self.config.recurrentActivation, recurrent_dropout=self.config.recurrentDropout, dropout=self.config.dropout, return_sequences=True))(bi)
        bi = Dropout(self.config.dropout)(bi)

        bi = Bidirectional(LSTM(self.config.lstmCells, activation=self.config.activation, recurrent_activation=self.config.recurrentActivation, recurrent_dropout=self.config.recurrentDropout, dropout=self.config.dropout, return_sequences=True))(bi)
        bi = Dropout(self.config.dropout)(bi)

        output = TimeDistributed(Dense(units=self.config.classes, activation='softmax'), name='output')(bi)

        nn = Model(inputs=[inputSentence, inputPredicate, inputAux], outputs=[output])

        nn.compile(optimizer=self.config.optimizer, loss=self.config.lossFunction, metrics=['accuracy'])
        return nn 
Example 22
Project: Malware-GAN   Author: yanminglai   File: MalGAN_v1.py    GNU General Public License v3.0
def build_generator(self):

        example = Input(shape=(self.apifeature_dims,))
        noise = Input(shape=(self.apifeature_dims,))
        x = Concatenate(axis=1)([example, noise])
        for dim in self.generator_layers[1:]:
            x = Dense(dim)(x)
            x = Activation(activation='sigmoid')(x)
        x = Maximum()([example, x])
        generator = Model([example, noise], x, name='generator')
        generator.summary()
        return generator 
Example 23
Project: Malware-GAN   Author: yanminglai   File: MalGAN_v2.py    GNU General Public License v3.0
def build_generator(self):

        example = Input(shape=(self.apifeature_dims,))
        noise = Input(shape=(self.z_dims,))
        x = Concatenate(axis=1)([example, noise])
        for dim in self.generator_layers[1:]:
            x = Dense(dim)(x)
        x = Activation(activation='sigmoid')(x)
        x = Maximum()([example, x])
        generator = Model([example, noise], x, name='generator')
        generator.summary()
        return generator 
Example 24
Project: Malware-GAN   Author: yanminglai   File: exp.py    GNU General Public License v3.0
def build_generator(self):

        example = Input(shape=(self.apifeature_dims,))
        noise = Input(shape=(self.z_dims,))
        x = Concatenate(axis=1)([example, noise])
        for dim in self.generator_layers[1:]:
            x = Dense(dim)(x)
            x = Activation(activation='sigmoid')(x)
        x = Maximum()([example, x])
        generator = Model([example, noise], x, name='generator')
        generator.summary()
        return generator 
Example 25
Project: Malware-GAN   Author: yanminglai   File: MalGAN__v3.py    GNU General Public License v3.0
def build_generator(self):

        example = Input(shape=(self.apifeature_dims,))
        noise = Input(shape=(self.z_dims,))
        x = Concatenate(axis=1)([example, noise])
        for dim in self.generator_layers[1:]:
            x = Dense(dim)(x)
        x = Activation(activation='sigmoid')(x)
        x = Maximum()([example, x])
        generator = Model([example, noise], x, name='generator')
        generator.summary()
        return generator 
Example 26
Project: lrn   Author: bzhangGo   File: models.py    BSD 3-Clause "New" or "Revised" License
def build(self):
        # build word embedding
        word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')
        print(self._embeddings)
        if self._embeddings is None:
            word_embeddings = Embedding(input_dim=self._word_vocab_size,
                                        output_dim=self._word_embedding_dim,
                                        mask_zero=True,
                                        name='word_embedding', trainable=False)(word_ids)
        else:
            word_embeddings = Embedding(input_dim=self._embeddings.shape[0],
                                        output_dim=self._embeddings.shape[1],
                                        mask_zero=True,
                                        weights=[self._embeddings],
                                        name='word_embedding', trainable=False)(word_ids)

        # build character based word embedding
        char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')
        char_embeddings = Embedding(input_dim=self._char_vocab_size,
                                    output_dim=self._char_embedding_dim,
                                    mask_zero=True,
                                    name='char_embedding')(char_ids)
        char_embeddings = TimeDistributed(Bidirectional(rnn.get_rnn_layer(
            self._cell_type, self._char_lstm_size, return_sequences=False)))(char_embeddings)

        elmo_embeddings = Input(shape=(None, 1024), dtype='float32')

        word_embeddings = Concatenate()([word_embeddings, char_embeddings, elmo_embeddings])

        word_embeddings = Dropout(self._dropout)(word_embeddings)
        z = Bidirectional(rnn.get_rnn_layer(
            self._cell_type, self._word_lstm_size, return_sequences=True))(word_embeddings)
        z = Dense(self._fc_dim, activation='tanh')(z)

        crf = CRF(self._num_labels, sparse_target=False)
        loss = crf.loss_function
        pred = crf(z)

        model = Model(inputs=[word_ids, char_ids, elmo_embeddings], outputs=pred)

        return model, loss 
Example 27
Project: BipedalWalker-v2   Author: Kyziridis   File: DQNlalala.py    GNU General Public License v2.0
def Critic(self):                     
        # Build Network for Critic       
        #input=[lidar_input,state_input,action_input]
        lidar_input = Input(shape=(self.nx_lidar,))
        lidar_conv = Dense(self.layers[0], activation='relu', kernel_regularizer=self.k_r)(lidar_input)
        #flat= Flatten()(lidar_conv)
        
        state_input = Input(shape=(self.nx_obs,))
        state_h1 = Dense(self.layers[0], activation='relu', kernel_regularizer=self.k_r)(state_input)
        #state_h1 = Flatten()(state_h1)
               
        merge1 = Concatenate()([lidar_conv,state_h1])
        #merged_dense = Dense(self.layers[0], activation='relu')(merge1)

        action_input = Input(shape=(self.ny,))
        #action_h1    = Dense(64, activation='relu')(action_input)
        
        merge2 = Concatenate()([merge1,action_input])
        #merge2reshaped = Reshape((320,1))(merge2)
        merge_lstm = Dense(self.layers[1], activation='relu')(merge2)
        output= Dense(1,activation='linear', kernel_initializer=self.k_init,\
                      bias_initializer=self.b_init)(merge_lstm)
        
        model = Model(inputs=[lidar_input, state_input, action_input], outputs=output)
        adam  = Adam(lr=self.lr_critic)
        model.compile(loss="mse", optimizer=adam)
        return lidar_input,state_input, action_input, model 
Example 28
Project: BipedalWalker-v2   Author: Kyziridis   File: actor_Lstm.py    GNU General Public License v2.0
def Critic(self):                     
        # Build Network for Critic       
        #input=[lidar_input,state_input,action_input]
        lidar_input = Input(shape=(self.nx_lidar,1))
        lidar_conv = Conv1D(64, 4, activation='relu',input_shape=(self.nx_lidar,1))(lidar_input)
        pool = MaxPooling1D(4)(lidar_conv)
        flat= Flatten()(pool)
        
        state_input = Input(shape=(self.nx_obs,))
        state_h1 = Dense(192, activation='relu')(state_input)
        
        action_input = Input(shape=(self.ny,))
        action_h1    = Dense(64, activation='relu')(action_input)
        
        merge1 = Concatenate()([flat,state_h1])
        merged_dense = Dense(256, activation='relu')(merge1)
        
        merge2 = Concatenate()([merged_dense,action_h1])
        merge2reshaped = Reshape((320,1))(merge2)
        merge_lstm = LSTM(320, activation='relu',input_shape=(1,320,1))(merge2reshaped)
        output= Dense(1,activation='linear')(merge_lstm)
        
        model = Model(inputs=[lidar_input, state_input, action_input], outputs=output)
        adam  = Adam(lr=self.lr_critic)
        model.compile(loss="mse", optimizer=adam)
        return lidar_input,state_input, action_input, model 
Example 29
Project: BipedalWalker-v2   Author: Kyziridis   File: ddpg_batch.py    GNU General Public License v2.0
def Actor(self):
        # Build Network for Actor
        #input=[lidar_input,state_input]
        old_act = Input(shape=(4,))
        advantage = Input(shape=(1,))
        #
        lidar_input = Input(shape=(self.nx_lidar,))
        lidar_conv = Dense(self.layers[0], activation='relu', kernel_regularizer=self.k_r)(lidar_input)
        #pool = MaxPooling1D(4)(lidar_conv)
        #flat = Flatten()(lidar_conv)
        lidar_conv = Dropout(0.05)(lidar_conv)
        #
        state_input = Input(shape=(self.nx_obs,))
        state_h1 = Dense(self.layers[0], activation='relu', kernel_regularizer=self.k_r)(state_input)
        #state_h1 = GaussianNoise(1.0)(state_h1)
        #gauss = Flatten()(gauss)
        #
        merged = Concatenate()([lidar_conv,state_h1])
        if self.lstm:
            merged = Reshape((self.layers[0]*2,1))(merged)
            merged_lstm = LSTM(self.layers[1],activation='relu',kernel_regularizer=self.k_r ,\
                                kernel_initializer=self.initializer)(merged)
        else:
            merged_lstm = Dense(self.layers[1],activation='relu',kernel_regularizer=self.k_r ,\
                                kernel_initializer=self.initializer)(merged)

        output = Dense(self.ny, activation='tanh', kernel_regularizer=self.k_r,\
                       kernel_initializer=self.final_initializer)(merged_lstm)

        ##############
        model = Model(inputs=[old_act, advantage, lidar_input, state_input], outputs=output)
        adam = Adam(lr=self.lr_actor, decay=0.5)
        model.compile(loss=actor_loss(advantage,old_act), optimizer=adam)
        return lidar_input,state_input, model 
Example 30
Project: fake-news-detector   Author: shibing624   File: textcnn_model.py    Apache License 2.0
def create_model(self):
        print("Creating text CNN Model...")
        # a tensor
        inputs = Input(shape=(self.max_len,), dtype='int32')
        # emb
        embedding = Embedding(input_dim=self.vocabulary_size,
                              output_dim=self.embedding_dim,
                              input_length=self.max_len,
                              name="embedding")(inputs)
        # convolution block
        conv_blocks = []
        for sz in self.filter_sizes:
            conv = Convolution1D(filters=self.num_filters,
                                 kernel_size=int(sz),
                                 strides=1,
                                 padding='valid',
                                 activation='relu')(embedding)
            conv = MaxPooling1D()(conv)
            conv = Flatten()(conv)
            conv_blocks.append(conv)
        conv_concate = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
        dropout_layer = Dropout(rate=self.dropout)(conv_concate)
        output = Dense(self.hidden_dim, activation='relu')(dropout_layer)
        output = Dense(self.num_classes, activation='softmax')(output)
        # model
        model = Model(inputs=inputs, outputs=output)
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        model.summary()
        return model 
Example 31
Project: CDRextraction   Author: Xls1994   File: can_model.py    Apache License 2.0
def CAN(maxlen,max_features,embedding_size,WordEM,
                maxlen2,max_features2,embedding_size2,WordEM2):

    nb_filter = 100
    filter_sizes = (1,2,3,4)
    convs = []
    left_input = Input(shape=(maxlen,), dtype='int32', name='left_input')
    inputs = Embedding(max_features, embedding_size, input_length=maxlen,
                       weights=[WordEM],
                     )(left_input)
    right_input = Input(shape=(maxlen2,), dtype='int32', name='right_input')
    right = Embedding(max_features2, embedding_size2, input_length=maxlen2,trainable=True,
                      weights=[WordEM2],

                      )(right_input)
    for fsz in filter_sizes:
        conv = Conv1D(filters=nb_filter,
                      kernel_size=fsz,
                      padding='valid',
                      activation='relu',
                      )(inputs)

        # phrase-level attention
        pool = AttMemoryLayer(name='patt_' + str(fsz))([right, conv])
        relation = Flatten()(right)
        pool = Concatenate(axis=1)([pool, relation])
        convs.append(pool)
    if len(filter_sizes) > 1:
        out = Concatenate(axis=1)(convs)
    else:
        out = convs[0]

    out = Dense(100, activation='relu')(out)
    out = Dropout(0.5)(out)
    out = Dense(50, activation='relu')(out)
    out = Dropout(0.5)(out)
    predict = Dense(2, activation='softmax')(out)

    model = Model(inputs=[left_input, right_input], outputs=predict)
    return model 
Example 32
Project: tensorflow-nlp-examples   Author: Hironsan   File: model.py    MIT License
def build(self):
        left_context = Input(batch_shape=(None, None), dtype='int32')
        mention = Input(batch_shape=(None, None), dtype='int32')
        mention_char = Input(batch_shape=(None, None, None), dtype='int32')
        right_context = Input(batch_shape=(None, None), dtype='int32')

        embeddings = Embedding(input_dim=self._embeddings.shape[0],
                               output_dim=self._embeddings.shape[1],
                               mask_zero=True,
                               weights=[self._embeddings])
        left_embeddings = embeddings(left_context)
        mention_embeddings = embeddings(mention)
        right_embeddings = embeddings(right_context)
        char_embeddings = Embedding(input_dim=self._char_vocab_size,
                                    output_dim=self._char_emb_size,
                                    mask_zero=True
                                    )(mention_char)

        char_embeddings = TimeDistributed(Bidirectional(LSTM(self._char_lstm_units)))(char_embeddings)
        mention_embeddings = Concatenate(axis=-1)([mention_embeddings, char_embeddings])

        x1 = Bidirectional(LSTM(units=self._word_lstm_units))(left_embeddings)
        x2 = Bidirectional(LSTM(units=self._word_lstm_units))(mention_embeddings)
        x3 = Bidirectional(LSTM(units=self._word_lstm_units))(right_embeddings)

        x = Concatenate()([x1, x2, x3])
        x = BatchNormalization()(x)
        x = Dense(self._word_lstm_units, activation='tanh')(x)
        pred = Dense(self._num_labels, activation='softmax')(x)

        model = Model(inputs=[left_context, mention, mention_char, right_context], outputs=[pred])

        return model 
Example 33
Project: ecir2019-qac   Author: jantrienes   File: baseline_cnn_cv.py    MIT License
def build_model(dropout_prob, filter_sizes, num_filters, hidden_dims, optimizer, lr, decay):
    input_shape = (SEQUENCE_LENGTH, EMBEDDING_DIM)
    model_input = Input(shape=input_shape)

    z = model_input
    z = Dropout(dropout_prob[0])(z)

    # Convolutional block
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation="relu",
                             strides=1)(z)
        conv = MaxPooling1D(pool_size=2)(conv)
        conv = Flatten()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]

    z = Dropout(dropout_prob[1])(z)

    for dim in hidden_dims:
        z = Dense(dim, activation="relu")(z)
    model_output = Dense(1, activation="sigmoid")(z)

    model = Model(model_input, model_output)

    opt = None
    if optimizer == 'adam':
        opt = Adam(lr=lr, decay=decay)
    else:
        raise ValueError('Unknown optimizer {}'.format(optimizer))

    model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

    return model 
Example 34
Project: faceswap   Author: deepfakes   File: dlight.py    GNU General Public License v3.0
def encoder(self):
        """ DeLight Encoder Network """
        input_ = Input(shape=self.input_shape)
        var_x = input_

        var_x1 = self.blocks.conv(var_x, self.encoder_filters // 2)
        var_x2 = AveragePooling2D()(var_x)
        var_x2 = LeakyReLU(0.1)(var_x2)
        var_x = Concatenate()([var_x1, var_x2])

        var_x1 = self.blocks.conv(var_x, self.encoder_filters)
        var_x2 = AveragePooling2D()(var_x)
        var_x2 = LeakyReLU(0.1)(var_x2)
        var_x = Concatenate()([var_x1, var_x2])

        var_x1 = self.blocks.conv(var_x, self.encoder_filters * 2)
        var_x2 = AveragePooling2D()(var_x)
        var_x2 = LeakyReLU(0.1)(var_x2)
        var_x = Concatenate()([var_x1, var_x2])

        var_x1 = self.blocks.conv(var_x, self.encoder_filters * 4)
        var_x2 = AveragePooling2D()(var_x)
        var_x2 = LeakyReLU(0.1)(var_x2)
        var_x = Concatenate()([var_x1, var_x2])

        var_x1 = self.blocks.conv(var_x, self.encoder_filters * 8)
        var_x2 = AveragePooling2D()(var_x)
        var_x2 = LeakyReLU(0.1)(var_x2)
        var_x = Concatenate()([var_x1, var_x2])

        var_x = Dense(self.encoder_dim)(Flatten()(var_x))
        var_x = Dropout(0.05)(var_x)
        var_x = Dense(4 * 4 * 1024)(var_x)
        var_x = Dropout(0.05)(var_x)
        var_x = Reshape((4, 4, 1024))(var_x)

        return KerasModel(input_, var_x) 
Example 35
Project: cifar-10-cnn   Author: BIGBALLON   File: densenet_multi_gpu.py    MIT License
def to_multi_gpu(model, n_gpus=2):
    if n_gpus == 1:
        return model
    
    with tf.device('/cpu:0'):
        x = Input(model.input_shape[1:])
    towers = []
    for g in range(n_gpus):
        with tf.device('/gpu:' + str(g)):
            slice_g = Lambda(slice_batch, lambda shape: shape, arguments={'n_gpus':n_gpus, 'part':g})(x)
            towers.append(model(slice_g))

    with tf.device('/cpu:0'):
        merged = Concatenate(axis=0)(towers)
    return Model(inputs=[x], outputs=merged) 
Example 36
Project: ncc   Author: spcl   File: train_task_devmap.py    BSD 3-Clause "New" or "Revised" License
def init(self, seed: int, maxlen: int, embedding_dim: int, dense_layer_size: int):
        from keras.layers import Input, LSTM, Dense
        from keras.layers.merge import Concatenate
        from keras.layers.normalization import BatchNormalization
        from keras.models import Model

        np.random.seed(seed)

        # Keras model
        inp = Input(shape=(maxlen, embedding_dim,), dtype="float32", name="code_in")
        x = LSTM(embedding_dim, implementation=1, return_sequences=True, name="lstm_1")(inp)
        x = LSTM(embedding_dim, implementation=1, name="lstm_2")(x)
        langmodel_out = Dense(2, activation="sigmoid")(x)

        # Auxiliary inputs. wgsize and dsize.
        auxiliary_inputs = Input(shape=(2,))
        x = Concatenate()([auxiliary_inputs, x])
        x = BatchNormalization()(x)
        x = Dense(dense_layer_size, activation="relu")(x)
        out = Dense(2, activation="sigmoid")(x)

        self.model = Model(inputs=[auxiliary_inputs, inp], outputs=[out, langmodel_out])
        self.model.compile(
            optimizer="adam",
            metrics=['accuracy'],
            loss=["categorical_crossentropy", "categorical_crossentropy"],
            loss_weights=[1., .2])
        print('\tbuilt Keras model')

        return self 
Example 37
Project: phinet   Author: sremedios   File: phinet.py    MIT License
def phinet(n_classes, n_channels=1, learning_rate=1e-3):

    inputs = Input(shape=(None,None,None,n_channels))

    x = Conv3D(8, (3,3,3), strides=(2,2,2), padding='same')(inputs)
    x = MaxPooling3D(pool_size=(3,3,3), strides=(1,1,1), padding='same')(x)

    x = Conv3D(16, (3,3,3), strides=(2,2,2), padding='same')(x)
    x = BatchNormalization()(x)
    y = Activation('relu')(x)
    x = Conv3D(16, (3,3,3), strides=(1,1,1), padding='same')(y)
    x = BatchNormalization()(x)
    x = add([x, y])
    x = Activation('relu')(x)

    # this block will pool a handful of times to get the "big picture" 
    y = MaxPooling3D(pool_size=(5,5,5), strides=(2,2,2), padding='same')(inputs)
    y = AveragePooling3D(pool_size=(3,3,3), strides=(2,2,2), padding='same')(y)
    y = Conv3D(16, (3,3,3), strides=(1,1,1), padding='same')(y)

    # this layer will preserve original signal
    z = Conv3D(8, (3,3,3), strides=(2,2,2), padding='same')(inputs)
    z = Conv3D(12, (3,3,3), strides=(2,2,2), padding='same')(z)
    z = Conv3D(16, (3,3,3), strides=(1,1,1), padding='same')(z)

    x = Concatenate(axis=4)([x, y, z])

    # global avg pooling before FC
    x = GlobalAveragePooling3D()(x)
    x = Dense(n_classes)(x)

    pred = Activation('softmax')(x)
    
    model = Model(inputs=inputs, outputs=pred)

    model.compile(optimizer=Adam(lr=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print(model.summary())

    return model 
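For the 5-D channels_last tensors produced by Conv3D, i.e. (batch, depth, height, width, channels), axis=4 is the channel axis, so this concatenation stacks the three branches' feature maps channel-wise (equivalent to axis=-1 here).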
Example 38
Project: pre-trained-keras-example   Author: innolitics   File: train.py    MIT License
def get_model(pretrained_model, all_character_names):
    if pretrained_model == 'inception':
        model_base = keras.applications.inception_v3.InceptionV3(include_top=False, input_shape=(*IMG_SIZE, 3), weights='imagenet')
        output = Flatten()(model_base.output)
    elif pretrained_model == 'xception':
        model_base = keras.applications.xception.Xception(include_top=False, input_shape=(*IMG_SIZE, 3), weights='imagenet')
        output = Flatten()(model_base.output)
    elif pretrained_model == 'resnet50':
        model_base = keras.applications.resnet50.ResNet50(include_top=False, input_shape=(*IMG_SIZE, 3), weights='imagenet')
        output = Flatten()(model_base.output)
    elif pretrained_model == 'vgg19':
        model_base = keras.applications.vgg19.VGG19(include_top=False, input_shape=(*IMG_SIZE, 3), weights='imagenet')
        output = Flatten()(model_base.output)
    elif pretrained_model == 'all':
        input = Input(shape=(*IMG_SIZE, 3))
        inception_model = keras.applications.inception_v3.InceptionV3(include_top=False, input_tensor=input, weights='imagenet')
        xception_model = keras.applications.xception.Xception(include_top=False, input_tensor=input, weights='imagenet')
        resnet_model = keras.applications.resnet50.ResNet50(include_top=False, input_tensor=input, weights='imagenet')

        flattened_outputs = [Flatten()(inception_model.output),
                             Flatten()(xception_model.output),
                             Flatten()(resnet_model.output)]
        output = Concatenate()(flattened_outputs)
        model_base = Model(input, output)

    output = BatchNormalization()(output)
    output = Dropout(0.5)(output)
    output = Dense(128, activation='relu')(output)
    output = BatchNormalization()(output)
    output = Dropout(0.5)(output)
    output = Dense(len(all_character_names), activation='softmax')(output)
    model = Model(model_base.input, output)
    for layer in model_base.layers:
        layer.trainable = False
    model.summary(line_length=200)

    # Generate a plot of the model; the lambda bypasses Keras'
    # pydot.find_graphviz availability check (removed in newer pydot).
    import pydot
    pydot.find_graphviz = lambda: True
    from keras.utils import plot_model
    plot_model(model, show_shapes=True, to_file='../model_pdfs/{}.pdf'.format(pretrained_model))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model 
Example 39
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: deeplabs.py    Apache License 2.0
def Deeplab_v3p(input_shape,
                n_class,
                encoder_name,
                encoder_weights=None,
                weight_decay=1e-4,
                kernel_initializer="he_normal",
                bn_epsilon=1e-3,
                bn_momentum=0.99):
    """ implementation of Deeplab v3+ for semantic segmentation.
        ref: Chen L C, Zhu Y, Papandreou G, et al. Encoder-Decoder with Atrous Separable
             Convolution for Semantic Image Segmentation. arXiv preprint arXiv:1802.02611, 2018.
    :param input_shape: tuple, i.e., (height, width, channel).
    :param n_class: int, number of classes, must be >= 2.
    :param encoder_name: string, name of encoder.
    :param encoder_weights: string, path of weights, default None.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: a Keras Model instance.
    """
    encoder = build_encoder(input_shape, encoder_name, encoder_weights=encoder_weights,
                            weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                            bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    net = encoder.get_layer(scope_table[encoder_name]["pool4"]).output
    net = atrous_spatial_pyramid_pooling(net, n_filters=256, rates=[6, 12, 18], imagelevel=True,
                                         weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                                         bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    net = Conv2D(256, (1, 1), use_bias=False, activation=None, kernel_regularizer=l2(weight_decay),
                 kernel_initializer=kernel_initializer)(net)
    net = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(net)
    net = Activation("relu")(net)
    net = Dropout(0.1)(net)
    decoder_features = BilinearUpSampling(target_size=(input_shape[0] // 4, input_shape[1] // 4))(net)

    encoder_features = encoder.get_layer(scope_table[encoder_name]["pool2"]).output
    encoder_features = Conv2D(48, (1, 1), use_bias=False, activation=None, kernel_regularizer=l2(weight_decay),
                              kernel_initializer=kernel_initializer)(encoder_features)
    encoder_features = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(encoder_features)
    encoder_features = Activation("relu")(encoder_features)
    net = Concatenate()([encoder_features, decoder_features])

    net = separable_conv_bn(net, 256, 'decoder_conv1', depth_activation=True,
                            weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                            bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    net = separable_conv_bn(net, 256, 'decoder_conv2', depth_activation=True,
                            weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                            bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    net = Dropout(0.1)(net)

    net = BilinearUpSampling(target_size=(input_shape[0], input_shape[1]))(net)
    output = Conv2D(n_class, (1, 1), activation=None, kernel_regularizer=l2(weight_decay),
                    kernel_initializer=kernel_initializer)(net)
    output = Activation("softmax")(output)

    return Model(encoder.input, output) 
Example 40
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: srinets.py    Apache License 2.0
def sri_net(input_shape,
            n_class,
            encoder_name="resnet_v2_101",
            encoder_weights=None,
            weight_decay=1e-4,
            kernel_initializer="he_normal",
            bn_epsilon=1e-3,
            bn_momentum=0.99):
    """ spatial residual inception net.
    :param input_shape: tuple, i.e., (height, width, channel).
    :param n_class: int, number of classes, at least 2.
    :param encoder_name: string, default "resnet_v2_101".
    :param encoder_weights: string, path of weights.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: a Keras Model instance.
    """
    encoder = build_encoder(input_shape=input_shape, encoder_name=encoder_name, encoder_weights=encoder_weights,
                            weight_decay=weight_decay, kernel_initializer=kernel_initializer,
                            bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    p2 = encoder.get_layer(scope_table[encoder_name]["pool2"]).output  # 64 channels
    p3 = encoder.get_layer(scope_table[encoder_name]["pool3"]).output  # 256 channels
    p4 = encoder.get_layer(scope_table[encoder_name]["pool4"]).output  # 512 channels

    # 32->64
    net = spatial_residual_inception_v2(p4, 192, weight_decay=weight_decay, kernel_initializer=kernel_initializer)
    net = BilinearUpSampling(target_size=(input_shape[0] // 8, input_shape[1] // 8))(net)

    # 64->128
    p3 = Conv2D(int(net.shape[-1]//4), (1, 1), use_bias=False, activation=None,
                kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(p3)
    p3 = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(p3)
    p3 = Activation("relu")(p3)
    net = Concatenate()([net, p3])
    net = spatial_residual_inception_v2(net, 192, weight_decay=weight_decay, kernel_initializer=kernel_initializer)
    net = BilinearUpSampling(target_size=(input_shape[0] // 4, input_shape[1] // 4))(net)

    # 128->512
    p2 = Conv2D(int(net.shape[-1]//4), (1, 1), use_bias=False, activation=None,
                kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(p2)
    p2 = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(p2)
    p2 = Activation("relu")(p2)
    net = Concatenate()([net, p2])
    net = spatial_residual_inception_v2(net, 192, weight_decay=weight_decay, kernel_initializer=kernel_initializer)
    net = BilinearUpSampling(target_size=(input_shape[0], input_shape[1]))(net)

    net = Conv2D(256, (3, 3), use_bias=False, activation=None, padding="same",
                 kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(net)
    net = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(net)
    net = Activation("relu")(net)

    output = Conv2D(n_class, (1, 1), activation=None,
                    kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(net)
    output = Activation("softmax")(output)

    return Model(encoder.input, output) 
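A minimal usage sketch for sri_net, assuming the helpers it relies on (build_encoder, scope_table, spatial_residual_inception_v2, BilinearUpSampling) are importable from the project; the input size and class count here are illustrative:

# Hypothetical usage -- input size and class count are illustrative.
model = sri_net(input_shape=(256, 256, 3),    # (height, width, channels)
                n_class=21,
                encoder_name="resnet_v2_101")
model.compile(optimizer="adam", loss="categorical_crossentropy")
model.summary()    # final output: (None, 256, 256, 21) softmax maps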
Example 41
Project: lrn   Author: bzhangGo   File: models.py    BSD 3-Clause "New" or "Revised" License 4 votes
def build(self):
        # build word embedding
        word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')
        inputs = [word_ids]
        if self._embeddings is None:
            word_embeddings = Embedding(input_dim=self._word_vocab_size,
                                        output_dim=self._word_embedding_dim,
                                        mask_zero=True,
                                        name='word_embedding')(word_ids)
        else:
            word_embeddings = Embedding(input_dim=self._embeddings.shape[0],
                                        output_dim=self._embeddings.shape[1],
                                        mask_zero=True,
                                        weights=[self._embeddings],
                                        name='word_embedding')(word_ids)

        # build character based word embedding
        if self._use_char:
            char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')
            inputs.append(char_ids)
            char_embeddings = Embedding(input_dim=self._char_vocab_size,
                                        output_dim=self._char_embedding_dim,
                                        mask_zero=True,
                                        name='char_embedding')(char_ids)
            char_embeddings = TimeDistributed(Bidirectional(rnn.get_rnn_layer(
                self._cell_type, self._char_lstm_size, return_sequences=False)))(char_embeddings)
            word_embeddings = Concatenate()([word_embeddings, char_embeddings])

        word_embeddings = Dropout(self._dropout)(word_embeddings)
        z = Bidirectional(rnn.get_rnn_layer(
            self._cell_type, self._word_lstm_size, return_sequences=True))(word_embeddings)
        z = Dense(self._fc_dim, activation='tanh')(z)

        if self._use_crf:
            crf = CRF(self._num_labels, sparse_target=False)
            loss = crf.loss_function
            pred = crf(z)
        else:
            loss = 'categorical_crossentropy'
            pred = Dense(self._num_labels, activation='softmax')(z)

        model = Model(inputs=inputs, outputs=pred)

        return model, loss 
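Unlike the previous examples, this build method returns the loss alongside the uncompiled model (crf.loss_function when a CRF output layer is used, plain 'categorical_crossentropy' otherwise), so the caller finishes setup. A sketch, assuming tagger is an instance of the surrounding class:

# Hypothetical usage -- 'tagger' stands for an instance of this model class.
model, loss = tagger.build()
model.compile(optimizer='adam', loss=loss)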
Example 42
Project: CDRextraction   Author: Xls1994   File: can_model.py    Apache License 2.0 4 votes
def CAN_V2(maxlen, max_features, embedding_size, WordEM,
           maxlen2, max_features2, embedding_size2, WordEM2):
    nb_filter = 100
    filter_sizes = (1, 2, 3, 4)
    convs = []
    left_input = Input(shape=(maxlen,), dtype='int32', name='left_input')
    inputs = Embedding(max_features, embedding_size, input_length=maxlen,
                       weights=[WordEM])(left_input)
    right_input = Input(shape=(maxlen2,), dtype='int32', name='right_input')
    right = Embedding(max_features2, embedding_size2, input_length=maxlen2,
                      trainable=True, weights=[WordEM2])(right_input)
    for fsz in filter_sizes:
        conv = Conv1D(filters=nb_filter,
                      kernel_size=fsz,
                      padding='valid',
                      activation='relu',
                      )(inputs)

        # phrase-level attention
        relation = Flatten()(right)
        emb2 = RepeatVector(maxlen-fsz+1)(relation)
        conv = concatenate([conv, emb2], 2)
        pool = AttentivePoolingLayer(name="Attention")(conv)

        convs.append(pool)
    if len(filter_sizes) > 1:
        out = Concatenate(axis=1)(convs)
    else:
        out = convs[0]

    out = Dense(100, activation='relu')(out)
    out = Dropout(0.5)(out)
    out = Dense(50, activation='relu')(out)
    out = Dropout(0.5)(out)
    predict = Dense(2, activation='softmax')(out)

    model = Model(inputs=[left_input, right_input], outputs=predict)
    return model 
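A usage sketch for CAN_V2 with stand-in random embedding matrices (real code would load pretrained vectors); it assumes the custom AttentivePoolingLayer used above is defined and importable:

import numpy as np

# Hypothetical usage -- WordEM/WordEM2 are random stand-ins shaped
# (vocab_size, embedding_dim); all sizes below are illustrative.
WordEM = np.random.randn(5000, 100).astype('float32')
WordEM2 = np.random.randn(50, 50).astype('float32')
model = CAN_V2(maxlen=80, max_features=5000, embedding_size=100, WordEM=WordEM,
               maxlen2=10, max_features2=50, embedding_size2=50, WordEM2=WordEM2)
model.compile(optimizer='adam', loss='categorical_crossentropy')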
Example 43
Project: DeepPhotoshop   Author: KhyatiGanatra   File: pconv_model_ori.py    MIT License 4 votes
def build_pconv_unet(self, train_bn=True, lr=0.0002):      

        # INPUTS
        inputs_img = Input((self.img_rows, self.img_cols, 3))
        inputs_mask = Input((self.img_rows, self.img_cols, 3))
        
        # ENCODER
        def encoder_layer(img_in, mask_in, filters, kernel_size, bn=True):
            conv, mask = PConv2D(filters, kernel_size, strides=2, padding='same')([img_in, mask_in])
            if bn:
                conv = BatchNormalization(name='EncBN'+str(encoder_layer.counter))(conv, training=train_bn)
            conv = Activation('relu')(conv)
            encoder_layer.counter += 1
            return conv, mask
        encoder_layer.counter = 0
        
        e_conv1, e_mask1 = encoder_layer(inputs_img, inputs_mask, 64, 7, bn=False)
        e_conv2, e_mask2 = encoder_layer(e_conv1, e_mask1, 128, 5)
        e_conv3, e_mask3 = encoder_layer(e_conv2, e_mask2, 256, 5)
        e_conv4, e_mask4 = encoder_layer(e_conv3, e_mask3, 512, 3)
        e_conv5, e_mask5 = encoder_layer(e_conv4, e_mask4, 512, 3)
        e_conv6, e_mask6 = encoder_layer(e_conv5, e_mask5, 512, 3)
        e_conv7, e_mask7 = encoder_layer(e_conv6, e_mask6, 512, 3)
        e_conv8, e_mask8 = encoder_layer(e_conv7, e_mask7, 512, 3)
        
        # DECODER
        def decoder_layer(img_in, mask_in, e_conv, e_mask, filters, kernel_size, bn=True):
            up_img = UpSampling2D(size=(2,2))(img_in)
            up_mask = UpSampling2D(size=(2,2))(mask_in)
            concat_img = Concatenate(axis=3)([e_conv,up_img])
            concat_mask = Concatenate(axis=3)([e_mask,up_mask])
            conv, mask = PConv2D(filters, kernel_size, padding='same')([concat_img, concat_mask])
            if bn:
                conv = BatchNormalization()(conv)
            conv = LeakyReLU(alpha=0.2)(conv)
            return conv, mask
            
        d_conv9, d_mask9 = decoder_layer(e_conv8, e_mask8, e_conv7, e_mask7, 512, 3)
        d_conv10, d_mask10 = decoder_layer(d_conv9, d_mask9, e_conv6, e_mask6, 512, 3)
        d_conv11, d_mask11 = decoder_layer(d_conv10, d_mask10, e_conv5, e_mask5, 512, 3)
        d_conv12, d_mask12 = decoder_layer(d_conv11, d_mask11, e_conv4, e_mask4, 512, 3)
        d_conv13, d_mask13 = decoder_layer(d_conv12, d_mask12, e_conv3, e_mask3, 256, 3)
        d_conv14, d_mask14 = decoder_layer(d_conv13, d_mask13, e_conv2, e_mask2, 128, 3)
        d_conv15, d_mask15 = decoder_layer(d_conv14, d_mask14, e_conv1, e_mask1, 64, 3)
        d_conv16, d_mask16 = decoder_layer(d_conv15, d_mask15, inputs_img, inputs_mask, 3, 3, bn=False)
        outputs = Conv2D(3, 1, activation='sigmoid')(d_conv16)
        
        # Setup the model inputs / outputs
        model = Model(inputs=[inputs_img, inputs_mask], outputs=outputs)

        # Compile the model
        model.compile(
            optimizer=Adam(lr=lr),
            loss=self.loss_total(inputs_mask)
        )

        return model 
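The paired Concatenate(axis=3) calls in the decoder are U-Net style skip connections: each stage upsamples the previous decoder output and concatenates the matching encoder feature map (and its mask) along the channel axis before the next convolution. A stripped-down sketch of the image path alone, with a stock Conv2D standing in for the custom PConv2D:

from keras.layers import Input, Conv2D, UpSampling2D
from keras.layers.merge import Concatenate
from keras.models import Model

inp = Input((64, 64, 3))
e1 = Conv2D(16, 3, strides=2, padding='same', activation='relu')(inp)  # 32x32x16
e2 = Conv2D(32, 3, strides=2, padding='same', activation='relu')(e1)   # 16x16x32
up = UpSampling2D((2, 2))(e2)                                          # 32x32x32
skip = Concatenate(axis=3)([e1, up])                                   # 32x32x48
out = Conv2D(16, 3, padding='same', activation='relu')(skip)
Model(inp, out).summary()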