Python keras.layers.merge.Concatenate() Examples

The following are 30 code examples of keras.layers.merge.Concatenate(), drawn from open source projects. The source file, project, and license are listed above each example. You may also want to check out all available functions/classes of the module keras.layers.merge.
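Before the project examples, here is a minimal standalone sketch of the layer itself (shapes and layer names are illustrative, not taken from any project below): Concatenate joins a list of tensors along one axis; every other axis must match.

from keras.layers import Input, Dense
from keras.layers.merge import Concatenate
from keras.models import Model

a = Input(shape=(16,), name="a_in")    # (batch, 16)
b = Input(shape=(8,), name="b_in")     # (batch, 8)
merged = Concatenate(axis=-1)([a, b])  # (batch, 24)
out = Dense(1, activation="sigmoid")(merged)
model = Model(inputs=[a, b], outputs=out)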
Example #1
Source File: DenseNet.py    From DenseNet-Cifar10 with MIT License
def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1E-4):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
    Args:
        x: keras tensor
        nb_layers: the number of layers of conv_block to append to the model.
        nb_filter: number of filters
        growth_rate: growth rate
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    feature_list = [x]

    for i in range(nb_layers):
        x = conv_block(x, growth_rate, dropout_rate, weight_decay)
        feature_list.append(x)
        x = Concatenate(axis=concat_axis)(feature_list)
        nb_filter += growth_rate

    return x, nb_filter 
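Note that conv_block is defined elsewhere in DenseNet.py. A self-contained sketch of the same concatenation pattern, with a plain Conv2D standing in for conv_block (an assumption for illustration, not the project's block) and channels_last ordering assumed:

from keras.layers import Input, Conv2D
from keras.layers.merge import Concatenate

x = Input(shape=(32, 32, 16))             # assumes channels_last
features = [x]
for _ in range(3):                        # nb_layers=3, growth_rate=12
    y = Conv2D(12, (3, 3), padding="same")(x)
    features.append(y)
    x = Concatenate(axis=-1)(features)    # channels: 16 -> 28 -> 40 -> 52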
Example #2
Source File: dvrk.py    From costar_plan with Apache License 2.0
def MakeJigsawsMultiDecoder(model, decoder, num_images=4, h_dim=(12,16)):
    '''
    Make multiple images
    '''
    h = Input((h_dim[0], h_dim[1], 64),name="h_in")

    xs = []
    for i in range(num_images):
        xi = h
        xi = AddConv2D(xi, 64, [5, 5], stride=1,
                dropout_rate=0.)
        xi = AddConv2D(xi, model.encoder_channels, [5, 5], stride=1,
                dropout_rate=0.)
        xi = decoder(xi)
        img_x = Lambda(
            lambda y: K.expand_dims(y, 1),
            name="img_hypothesis_%d"%i)(xi)
        xs.append(img_x)
    img_out = Concatenate(axis=1)(xs)

    mm = Model(h, img_out, name="multi")
    mm.compile(loss="mae", optimizer=model.getOptimizer())

    return mm 
Example #3
Source File: model.py    From WeSTClass with Apache License 2.0
def ConvolutionLayer(input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False, vocab_sz=None,
                     embedding_matrix=None, word_embedding_dim=100, hidden_dim=20, act='relu', init='ones'):
    x = Input(shape=(input_shape,), name='input')
    z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), name="embedding", 
                    weights=[embedding_matrix], trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y, name='classifier') 
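A hedged usage sketch for the function above (the vocabulary size, sequence length, and embedding matrix are invented for illustration):

import numpy as np

vocab_sz, emb_dim = 5000, 100
emb = np.random.rand(vocab_sz, emb_dim)  # placeholder pretrained embeddings
clf = ConvolutionLayer(input_shape=50, n_classes=4,
                       vocab_sz=vocab_sz, embedding_matrix=emb)
clf.summary()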
Example #4
Source File: dense.py    From costar_plan with Apache License 2.0
def GetLSTMEncoder(xin, uin, dense_size, lstm_size, dense_layers=1,
        lstm_layers=1):
    '''
    Get LSTM encoder.
    '''
    x = xin
    for _ in range(dense_layers):
        if uin is not None:
            x = Concatenate(axis=-1)([x, uin])
        x = TimeDistributed(Dense(dense_size))(x)
        x = TimeDistributed(Activation('relu'))(x)
    for i in range(lstm_layers):
        if i == lstm_layers - 1:
            sequence_out = False
        else:
            sequence_out = True
        #sequence_out = True
        x = LSTM(lstm_size, return_sequences=sequence_out)(x)
        x = Activation('relu')(x)
    return x 
Example #5
Source File: models.py    From WeSHClass with Apache License 2.0
def ConvolutionLayer(x, input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False,
                     vocab_sz=None,
                     embedding_matrix=None, word_embedding_dim=100, hidden_dim=100, act='relu', init='ones'):
    if embedding_matrix is not None:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                      weights=[embedding_matrix], trainable=word_trainable)(x)
    else:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y) 
Example #6
Source File: BuildModel.py    From RMDL with GNU General Public License v3.0
def to_multi_gpu(model, n_gpus=2):
    """
    Given a keras [model], return an equivalent model which parallelizes
    the computation over [n_gpus] GPUs.

    Each GPU gets a slice of the input batch, applies the model on that slice
    and later the outputs of the models are concatenated to a single tensor,
    hence the user sees a model that behaves the same as the original.
    """

    with tf.device('/cpu:0'):
        x = Input(model.input_shape[1:], name="input1")

    towers = []
    for g in range(n_gpus):
        with tf.device('/gpu:' + str(g)):
            slice_g = Lambda(slice_batch,
                             lambda shape: shape,
                             arguments={'n_gpus':n_gpus, 'part':g})(x)
            towers.append(model(slice_g))

    with tf.device('/cpu:0'):
        merged = Concatenate(axis=0)(towers)

    return Model(inputs=[x], outputs=[merged]) 
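slice_batch is referenced above but not shown. A common implementation consistent with the call signature here (a sketch, not necessarily the project's exact code) splits the batch axis into n_gpus contiguous slices:

from keras import backend as K

def slice_batch(x, n_gpus, part):
    # take the part-th of n_gpus contiguous slices along the batch axis
    sh = K.shape(x)[0]
    size = sh // n_gpus
    if part == n_gpus - 1:
        return x[part * size:]                 # last slice absorbs any remainder
    return x[part * size:(part + 1) * size]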
Example #7
Source File: models.py    From keras-image-captioning with MIT License
def build(self, vocabs=None):
        if self._keras_model:
            return
        if vocabs is None and self._word_vector_init is not None:
            raise ValueError('If word_vector_init is not None, build method '
                             'must be called with vocabs that are not None!')

        image_input, image_embedding = self._build_image_embedding()
        sentence_input, word_embedding = self._build_word_embedding(vocabs)
        sequence_input = Concatenate(axis=1)([image_embedding, word_embedding])
        sequence_output = self._build_sequence_model(sequence_input)

        model = Model(inputs=[image_input, sentence_input],
                      outputs=sequence_output)
        model.compile(optimizer=Adam(lr=self._learning_rate, clipnorm=5.0),
                      loss=categorical_crossentropy_from_logits,
                      metrics=[categorical_accuracy_with_variable_timestep])

        self._keras_model = model 
Example #8
Source File: _pspnet_2.py    From image-segmentation-keras with MIT License
def build_pyramid_pooling_module(res, input_shape):
    """Build the Pyramid Pooling Module."""
    # ---PSPNet concat layers with Interpolation
    feature_map_size = tuple(int(ceil(input_dim / 8.0))
                             for input_dim in input_shape)

    interp_block1 = interp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = interp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = interp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = interp_block(res, 6, feature_map_size, input_shape)

    # concatenate all these layers; resulting
    # shape=(1, feature_map_size_x, feature_map_size_y, 4096)
    res = Concatenate()([res,
                         interp_block6,
                         interp_block3,
                         interp_block2,
                         interp_block1])
    return res 
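interp_block is defined elsewhere in _pspnet_2.py. In PSPNet, each block pools the feature map down to a level x level grid, projects it with a 1x1 convolution, and resizes it back to feature_map_size before the concatenation above. A rough sketch under those assumptions (the filter count and resize call are illustrative, TF1-era API, not the project's exact code):

import tensorflow as tf
from keras.layers import AveragePooling2D, Conv2D, BatchNormalization, Activation, Lambda

def interp_block_sketch(x, level, feature_map_size):
    pool = (feature_map_size[0] // level, feature_map_size[1] // level)
    y = AveragePooling2D(pool_size=pool, strides=pool)(x)  # level x level grid
    y = Conv2D(512, (1, 1), use_bias=False)(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    # resize back so every branch matches the base feature map spatially
    return Lambda(lambda t: tf.image.resize_images(t, feature_map_size))(y)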
Example #9
Source File: demand_simulation_mnist.py    From DeepIV with MIT License
def conv_embedding(images, output, other_features = [], dropout_rate=0.1,
                   embedding_dropout=0.1, embedding_l2=0.05, constrain_norm=True):
    print("Building conv net")
    x_embedding = architectures.convnet(images, Dense(64, activation='linear'),
                        dropout_rate=embedding_dropout,
                        activations='relu',
                        l2_rate=embedding_l2, constrain_norm=constrain_norm)

    if len(other_features) > 0:
        embedd = Concatenate(axis=1)([x_embedding] + other_features)
    else:
        embedd = x_embedding
    out = architectures.feed_forward_net(embedd, output,
                        hidden_layers=[32],
                        dropout_rate=dropout_rate,
                        activations='relu', constrain_norm=constrain_norm)
    return out 
Example #10
Source File: layers_builder.py    From PSPNet-Keras-tensorflow with MIT License
def build_pyramid_pooling_module(res, input_shape):
    """Build the Pyramid Pooling Module."""
    # ---PSPNet concat layers with Interpolation
    feature_map_size = tuple(int(ceil(input_dim / 8.0))
                             for input_dim in input_shape)
    print("PSP module will interpolate to a final feature map size of %s" %
          (feature_map_size, ))

    interp_block1 = interp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = interp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = interp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = interp_block(res, 6, feature_map_size, input_shape)

    # concatenate all these layers; resulting
    # shape=(1, feature_map_size_x, feature_map_size_y, 4096)
    res = Concatenate()([res,
                         interp_block6,
                         interp_block3,
                         interp_block2,
                         interp_block1])
    return res 
Example #11
Source File: husky.py    From costar_plan with Apache License 2.0
def GetHuskyPoseModel(x, num_options, pose_size,
        dropout_rate=0.5, batchnorm=True):
    '''
    Make an "actor" network that takes in an encoded image and an "option"
    label and produces the next command to execute.
    '''
    xin = Input([int(d) for d in x.shape[1:]], name="pose_h_in")
    x0in = Input([int(d) for d in x.shape[1:]], name="pose_h0_in")

    pose_in = Input((pose_size,), name="pose_pose_in")
    option_in = Input((num_options,), name="pose_o_in")
    x = xin
    x0 = x0in
    dr, bn = dropout_rate, False
    use_lrelu = False

    x = Concatenate(axis=-1)([x, x0])
    x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn)

    # Add pose input
    y = pose_in
    y = AddDense(y, 32, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y, 32, (8,8), add=False)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    # Add option input
    y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y2, 64, (6,6), add=False)
    x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    x = Flatten()(x)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)    # Same setup as the state decoders


    pose = AddDense(x, pose_size, "linear", 0., output=True)
    pose = Model([x0in, xin, option_in, pose_in], [pose], name="pose")
    return pose 
Example #12
Source File: multi.py    From costar_plan with Apache License 2.0
def GetPoseModel(x, num_options, arm_size, gripper_size,
        dropout_rate=0.5, batchnorm=True):
    '''
    Make an "actor" network that takes in an encoded image and an "option"
    label and produces the next command to execute.
    '''
    img_shape = [int(d) for d in x.shape[1:]]
    img_in = Input(img_shape,name="policy_img_in")
    img0_in = Input(img_shape,name="policy_img0_in")
    arm = Input((arm_size,), name="ee_in")
    gripper = Input((gripper_size,), name="gripper_in")
    option_in = Input((48,), name="actor_o_in")

    ins = [img0_in, img_in, option_in, arm, gripper]
    x0, x = img0_in, img_in
    dr, bn = dropout_rate, False
    use_lrelu = False

    x = Concatenate(axis=-1)([x, x0])
    x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn)

    # Add arm, gripper
    y = Concatenate()([arm, gripper])
    y = AddDense(y, 32, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y, 32, (8,8), add=False)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    # Add option input
    y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y2, 64, (6,6), add=False)
    x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    x = Flatten()(x)
    x = AddDense(x, 512, "relu", 0., output=True, bn=False)
    x = AddDense(x, 512, "relu", 0., output=True, bn=False)    # Same setup as the state decoders
    arm = AddDense(x, arm_size, "linear", 0., output=True)
    gripper = AddDense(x, gripper_size, "sigmoid", 0., output=True)
    actor = Model(ins, [arm, gripper], name="pose")
    return actor 
Example #13
Source File: planner.py    From costar_plan with Apache License 2.0
def AddOptionTiling(x, option_length, option_in, height, width):
    tile_shape = (1, width, height, 1)
    option = Reshape([1,1,option_length])(option_in)
    option = Lambda(lambda x: K.tile(x, tile_shape))(option)
    x = Concatenate(
            axis=-1,
            name="add_option_%dx%d"%(width,height),
        )([x, option])
    return x 
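A hedged illustration of the effect (shapes invented): the option vector is reshaped to (1, 1, option_length), tiled over every spatial position, and joined onto the channel axis.

from keras.layers import Input

feat = Input((8, 8, 32), name="features")   # illustrative feature map
opt = Input((4,), name="option_onehot")     # illustrative option label
merged = AddOptionTiling(feat, 4, opt, height=8, width=8)
# merged: (batch, 8, 8, 36) = 32 feature channels + 4 tiled option channels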
Example #14
Source File: models.py    From delft with Apache License 2.0
def __init__(self, config, ntags=None):

        # build input, directly feed with word embedding by the data generator
        word_input = Input(shape=(None, config.word_embedding_size), name='word_input')

        # build character based embedding
        char_input = Input(shape=(None, config.max_char_length), dtype='int32', name='char_input')
        char_embeddings = TimeDistributed(Embedding(input_dim=config.char_vocab_size,
                                    output_dim=config.char_embedding_size,
                                    #mask_zero=True,
                                    #embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5),
                                    name='char_embeddings'
                                    ))(char_input)

        chars = TimeDistributed(Bidirectional(LSTM(config.num_char_lstm_units, return_sequences=False)))(char_embeddings)

        # length of sequence not used for the moment (but used for f1 computation)
        length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')

        # combine characters and word embeddings
        x = Concatenate()([word_input, chars])
        x = Dropout(config.dropout)(x)

        x = Bidirectional(LSTM(units=config.num_word_lstm_units, 
                               return_sequences=True, 
                               recurrent_dropout=config.recurrent_dropout))(x)
        x = Dropout(config.dropout)(x)
        x = Dense(config.num_word_lstm_units, activation='tanh')(x)
        x = Dense(ntags)(x)
        self.crf = ChainCRF()
        pred = self.crf(x)

        self.model = Model(inputs=[word_input, char_input, length_input], outputs=[pred])
        self.config = config 
Example #15
Source File: models.py    From delft with Apache License 2.0
def __init__(self, config, ntags=None):

        # build input, directly feed with word embedding by the data generator
        word_input = Input(shape=(None, config.word_embedding_size), name='word_input')

        # build character based embedding
        char_input = Input(shape=(None, config.max_char_length), dtype='int32', name='char_input')
        char_embeddings = TimeDistributed(Embedding(input_dim=config.char_vocab_size,
                                    output_dim=config.char_embedding_size,
                                    mask_zero=True,
                                    #embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5),
                                    name='char_embeddings'
                                    ))(char_input)

        chars = TimeDistributed(Bidirectional(LSTM(config.num_char_lstm_units, return_sequences=False)))(char_embeddings)

        # length of sequence not used for the moment (but used for f1 computation)
        length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')

        # combine characters and word embeddings
        x = Concatenate()([word_input, chars])
        x = Dropout(config.dropout)(x)

        x = Bidirectional(GRU(units=config.num_word_lstm_units, 
                               return_sequences=True, 
                               recurrent_dropout=config.recurrent_dropout))(x)
        x = Dropout(config.dropout)(x)
        x = Bidirectional(GRU(units=config.num_word_lstm_units, 
                               return_sequences=True, 
                               recurrent_dropout=config.recurrent_dropout))(x)
        x = Dense(config.num_word_lstm_units, activation='tanh')(x)
        x = Dense(ntags)(x)
        self.crf = ChainCRF()
        pred = self.crf(x)

        self.model = Model(inputs=[word_input, char_input, length_input], outputs=[pred])
        self.config = config 
Example #16
Source File: model.py    From tensorflow-nlp-examples with MIT License
def build(self):
        left_context = Input(batch_shape=(None, None), dtype='int32')
        mention = Input(batch_shape=(None, None), dtype='int32')
        mention_char = Input(batch_shape=(None, None, None), dtype='int32')
        right_context = Input(batch_shape=(None, None), dtype='int32')

        embeddings = Embedding(input_dim=self._embeddings.shape[0],
                               output_dim=self._embeddings.shape[1],
                               mask_zero=True,
                               weights=[self._embeddings])
        left_embeddings = embeddings(left_context)
        mention_embeddings = embeddings(mention)
        right_embeddings = embeddings(right_context)
        char_embeddings = Embedding(input_dim=self._char_vocab_size,
                                    output_dim=self._char_emb_size,
                                    mask_zero=True
                                    )(mention_char)

        char_embeddings = TimeDistributed(Bidirectional(LSTM(self._char_lstm_units)))(char_embeddings)
        mention_embeddings = Concatenate(axis=-1)([mention_embeddings, char_embeddings])

        x1 = Bidirectional(LSTM(units=self._word_lstm_units))(left_embeddings)
        x2 = Bidirectional(LSTM(units=self._word_lstm_units))(mention_embeddings)
        x3 = Bidirectional(LSTM(units=self._word_lstm_units))(right_embeddings)

        x = Concatenate()([x1, x2, x3])
        x = BatchNormalization()(x)
        x = Dense(self._word_lstm_units, activation='tanh')(x)
        pred = Dense(self._num_labels, activation='softmax')(x)

        model = Model(inputs=[left_context, mention, mention_char, right_context], outputs=[pred])

        return model 
Example #17
Source File: classifiers.py    From DEXTR-KerasTensorflow with GNU General Public License v3.0
def build_pyramid_pooling_module(res, input_shape, nb_classes, sigmoid=False, output_size=None):
    """Build the Pyramid Pooling Module."""
    # ---PSPNet concat layers with Interpolation
    feature_map_size = tuple(int(ceil(input_dim / 8.0)) for input_dim in input_shape)
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    print("PSP module will interpolate to a final feature map size of %s" %
          (feature_map_size, ))

    interp_block1 = psp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = psp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = psp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = psp_block(res, 6, feature_map_size, input_shape)

    # concatenate all these layers
    res = Concatenate()([interp_block1,
                         interp_block2,
                         interp_block3,
                         interp_block6,
                         res])
    x = Conv2D(512, (1, 1), strides=(1, 1), padding="same", name="class_psp_reduce_conv", use_bias=False)(res)
    x = resnet.BN(bn_axis, name="class_psp_reduce_bn")(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="class_psp_final_conv")(x)

    if output_size:
        x = Upsampling(output_size)(x)

    if sigmoid:
        x = Activation('sigmoid')(x)
    return x 
Example #18
Source File: dlight.py    From faceswap with GNU General Public License v3.0
def encoder(self):
        """ DeLight Encoder Network """
        input_ = Input(shape=self.input_shape)
        var_x = input_

        var_x1 = self.blocks.conv(var_x, self.encoder_filters // 2)
        var_x2 = AveragePooling2D()(var_x)
        var_x2 = LeakyReLU(0.1)(var_x2)
        var_x = Concatenate()([var_x1, var_x2])

        var_x1 = self.blocks.conv(var_x, self.encoder_filters)
        var_x2 = AveragePooling2D()(var_x)
        var_x2 = LeakyReLU(0.1)(var_x2)
        var_x = Concatenate()([var_x1, var_x2])

        var_x1 = self.blocks.conv(var_x, self.encoder_filters * 2)
        var_x2 = AveragePooling2D()(var_x)
        var_x2 = LeakyReLU(0.1)(var_x2)
        var_x = Concatenate()([var_x1, var_x2])

        var_x1 = self.blocks.conv(var_x, self.encoder_filters * 4)
        var_x2 = AveragePooling2D()(var_x)
        var_x2 = LeakyReLU(0.1)(var_x2)
        var_x = Concatenate()([var_x1, var_x2])

        var_x1 = self.blocks.conv(var_x, self.encoder_filters * 8)
        var_x2 = AveragePooling2D()(var_x)
        var_x2 = LeakyReLU(0.1)(var_x2)
        var_x = Concatenate()([var_x1, var_x2])

        var_x = Dense(self.encoder_dim)(Flatten()(var_x))
        var_x = Dropout(0.05)(var_x)
        var_x = Dense(4 * 4 * 1024)(var_x)
        var_x = Dropout(0.05)(var_x)
        var_x = Reshape((4, 4, 1024))(var_x)

        return KerasModel(input_, var_x) 
Example #19
Source File: husky.py    From costar_plan with Apache License 2.0
def GetHuskyActorModel(x, num_options, pose_size,
        dropout_rate=0.5, batchnorm=True):
    '''
    Make an "actor" network that takes in an encoded image and an "option"
    label and produces the next command to execute.
    '''
    xin = Input([int(d) for d in x.shape[1:]], name="actor_h_in")
    x0in = Input([int(d) for d in x.shape[1:]], name="actor_h0_in")

    pose_in = Input((pose_size,), name="actor_pose_in")
    option_in = Input((num_options,), name="actor_o_in")
    x = xin
    x0 = x0in
    dr, bn = dropout_rate, False
    use_lrelu = False

    x = Concatenate(axis=-1)([x, x0])
    x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn)

    # Add pose input
    y = pose_in
    y = AddDense(y, 32, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y, 32, (8,8), add=False)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    # Add option input
    y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y2, 64, (6,6), add=False)
    x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    x = Flatten()(x)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)    # Same setup as the state decoders


    pose = AddDense(x, pose_size, "linear", 0., output=True)
    actor = Model([x0in, xin, option_in, pose_in], [pose], name="actor")
    return actor 
Example #20
Source File: densenet_multi_gpu.py    From cifar-10-cnn with MIT License
def to_multi_gpu(model, n_gpus=2):
    if n_gpus == 1:
        return model

    with tf.device('/cpu:0'):
        x = Input(model.input_shape[1:])
    towers = []
    for g in range(n_gpus):
        with tf.device('/gpu:' + str(g)):
            slice_g = Lambda(slice_batch, lambda shape: shape, arguments={'n_gpus':n_gpus, 'part':g})(x)
            towers.append(model(slice_g))

    with tf.device('/cpu:0'):
        merged = Concatenate(axis=0)(towers)
    return Model(inputs=[x], outputs=merged) 
Example #21
Source File: train_task_devmap.py    From ncc with BSD 3-Clause "New" or "Revised" License
def init(self, seed: int, maxlen: int, embedding_dim: int, dense_layer_size: int):
        from keras.layers import Input, LSTM, Dense
        from keras.layers.merge import Concatenate
        from keras.layers.normalization import BatchNormalization
        from keras.models import Model

        np.random.seed(seed)

        # Keras model
        inp = Input(shape=(maxlen, embedding_dim,), dtype="float32", name="code_in")
        x = LSTM(embedding_dim, implementation=1, return_sequences=True, name="lstm_1")(inp)
        x = LSTM(embedding_dim, implementation=1, name="lstm_2")(x)
        langmodel_out = Dense(2, activation="sigmoid")(x)

        # Auxiliary inputs. wgsize and dsize.
        auxiliary_inputs = Input(shape=(2,))
        x = Concatenate()([auxiliary_inputs, x])
        x = BatchNormalization()(x)
        x = Dense(dense_layer_size, activation="relu")(x)
        out = Dense(2, activation="sigmoid")(x)

        self.model = Model(inputs=[auxiliary_inputs, inp], outputs=[out, langmodel_out])
        self.model.compile(
            optimizer="adam",
            metrics=['accuracy'],
            loss=["categorical_crossentropy", "categorical_crossentropy"],
            loss_weights=[1., .2])
        print('\tbuilt Keras model')

        return self 
Example #22
Source File: models.py    From WeSHClass with Apache License 2.0
def ensemble_classifier(self, level):
        outputs = self.ensemble(self.class_tree, level, self.input_shape[1], None)
        outputs = [ExpanLayer(-1)(output) if len(output.get_shape()) < 2 else output for output in outputs]
        z = Concatenate()(outputs) if len(outputs) > 1 else outputs[0]
        return Model(inputs=self.x, outputs=z) 
Example #23
Source File: models.py    From anago with MIT License
def build(self):
        # build word embedding
        word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')
        if self._embeddings is None:
            word_embeddings = Embedding(input_dim=self._word_vocab_size,
                                        output_dim=self._word_embedding_dim,
                                        mask_zero=True,
                                        name='word_embedding')(word_ids)
        else:
            word_embeddings = Embedding(input_dim=self._embeddings.shape[0],
                                        output_dim=self._embeddings.shape[1],
                                        mask_zero=True,
                                        weights=[self._embeddings],
                                        name='word_embedding')(word_ids)

        # build character based word embedding
        char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')
        char_embeddings = Embedding(input_dim=self._char_vocab_size,
                                    output_dim=self._char_embedding_dim,
                                    mask_zero=True,
                                    name='char_embedding')(char_ids)
        char_embeddings = TimeDistributed(Bidirectional(LSTM(self._char_lstm_size)))(char_embeddings)

        elmo_embeddings = Input(shape=(None, 1024), dtype='float32')

        word_embeddings = Concatenate()([word_embeddings, char_embeddings, elmo_embeddings])

        word_embeddings = Dropout(self._dropout)(word_embeddings)
        z = Bidirectional(LSTM(units=self._word_lstm_size, return_sequences=True))(word_embeddings)
        z = Dense(self._fc_dim, activation='tanh')(z)

        crf = CRF(self._num_labels, sparse_target=False)
        loss = crf.loss_function
        pred = crf(z)

        model = Model(inputs=[word_ids, char_ids, elmo_embeddings], outputs=pred)

        return model, loss 
Example #24
Source File: model.py    From awesome-text-classification with MIT License
def build(self):
        sequence_input = Input(shape=(self.max_sequence_length,), dtype='int32')
        if self.weights is None:
            embedding = Embedding(
                self.vocab_size + 1,  # due to mask_zero
                self.embedding_dim,
                input_length=self.max_sequence_length,
            )(sequence_input)
        else:
            embedding = Embedding(
                self.weights.shape[0],
                self.weights.shape[1],
                input_length=self.max_sequence_length,
                weights=[self.weights],
            )(sequence_input)

        convs = []
        for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
            conv = Conv1D(filters=num_filter,
                          kernel_size=filter_size,
                          activation='relu')(embedding)
            pool = GlobalMaxPooling1D()(conv)
            convs.append(pool)

        z = Concatenate()(convs)
        z = Dense(self.num_units)(z)
        z = Dropout(self.keep_prob)(z)
        z = Activation('relu')(z)
        pred = Dense(self.num_tags, activation='softmax')(z)
        model = Model(inputs=[sequence_input], outputs=[pred])

        return model 
Example #25
Source File: keras_mt_shared_cnn.py    From Benchmarks with MIT License
def init_export_network(num_classes,
                        in_seq_len,
                        vocab_size,
                        wv_space,
                        filter_sizes,
                        num_filters,
                        concat_dropout_prob,
                        emb_l2,
                        w_l2,
                        optimizer):


    # define network layers ----------------------------------------------------
    input_shape = tuple([in_seq_len])
    model_input = Input(shape=input_shape, name="Input")
    # embedding lookup
    emb_lookup = Embedding(vocab_size,
                           wv_space,
                           input_length=in_seq_len,
                           name="embedding",
                           #embeddings_initializer=RandomUniform,
                           embeddings_regularizer=l2(emb_l2))(model_input)
    # convolutional layer and dropout
    conv_blocks = []
    for ith_filter, sz in enumerate(filter_sizes):
        conv = Convolution1D(filters=num_filters[ith_filter],
                             kernel_size=sz,
                             padding="same",
                             activation="relu",
                             strides=1,
                             # kernel_initializer='lecun_uniform',
                             name=str(ith_filter) + "_thfilter")(emb_lookup)
        conv_blocks.append(GlobalMaxPooling1D()(conv))
    concat = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    concat_drop = Dropout(concat_dropout_prob)(concat)

    # different dense layer per tasks
    FC_models = []
    for i in range(len(num_classes)):
        outlayer = Dense(num_classes[i], name="Dense" + str(i), activation='softmax')(concat_drop)  # optionally: kernel_regularizer=l2(0.01)
        FC_models.append(outlayer)


    # the multitask model
    model = Model(inputs=model_input, outputs=FC_models)
    model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["acc"])

    return model 
Example #26
Source File: densities.py    From DeepIV with MIT License
def mixture_of_gaussian_output(x, n_components):
    mu = keras.layers.Dense(n_components, activation='linear')(x)
    log_sig = keras.layers.Dense(n_components, activation='linear')(x)
    pi = keras.layers.Dense(n_components, activation='softmax')(x)
    return Concatenate(axis=1)([pi, mu, log_sig]) 
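The three heads are packed into a single tensor so that a downstream mixture-density loss can unpack them. A sketch of the matching unpacking (the slice order follows directly from the Concatenate above; the function name is hypothetical):

def split_mixture_params(y_pred, n_components):
    pi = y_pred[:, :n_components]                   # mixture weights
    mu = y_pred[:, n_components:2 * n_components]   # component means
    log_sig = y_pred[:, 2 * n_components:]          # log standard deviations
    return pi, mu, log_sig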
Example #27
Source File: models.py    From indic_tagger with Apache License 2.0
def build(self):
        # build word embedding
        word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')
        if self._embeddings is None:
            word_embeddings = Embedding(input_dim=self._word_vocab_size,
                                        output_dim=self._word_embedding_dim,
                                        mask_zero=True,
                                        name='word_embedding')(word_ids)
        else:
            word_embeddings = Embedding(input_dim=self._embeddings.shape[0],
                                        output_dim=self._embeddings.shape[1],
                                        mask_zero=True,
                                        weights=[self._embeddings],
                                        name='word_embedding')(word_ids)

        # build character based word embedding
        char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')
        char_embeddings = Embedding(input_dim=self._char_vocab_size,
                                    output_dim=self._char_embedding_dim,
                                    mask_zero=True,
                                    name='char_embedding')(char_ids)
        char_embeddings = TimeDistributed(Bidirectional(LSTM(self._char_lstm_size)))(char_embeddings)

        elmo_embeddings = Input(shape=(None, 1024), dtype='float32')

        word_embeddings = Concatenate()([word_embeddings, char_embeddings, elmo_embeddings])

        word_embeddings = Dropout(self._dropout)(word_embeddings)
        z = Bidirectional(LSTM(units=self._word_lstm_size, return_sequences=True))(word_embeddings)
        z = Dense(self._fc_dim, activation='tanh')(z)

        crf = CRF(self._num_labels, sparse_target=False)
        loss = crf.loss_function
        pred = crf(z)

        model = Model(inputs=[word_ids, char_ids, elmo_embeddings], outputs=pred)

        return model, loss 
Example #28
Source File: models.py    From chemical_vae with Apache License 2.0
def variational_layers(z_mean, enc, kl_loss_var, params):
    # @inp z_mean : mean generated by the encoder
    # @inp enc : output generated by the encoder
    # @inp kl_loss_var : scalar variable that scales the sampling noise
    # @inp params : parameter dictionary passed throughout the entire model.

    def sampling(args):
        z_mean, z_log_var = args

        epsilon = K.random_normal_variable(shape=(params['batch_size'], params['hidden_dim']),
                                           mean=0., scale=1.)
        # insert kl loss here

        z_rand = z_mean + K.exp(z_log_var / 2) * kl_loss_var * epsilon
        return K.in_train_phase(z_rand, z_mean)


    # variational encoding
    z_log_var_layer = Dense(params['hidden_dim'], name='z_log_var_sample')
    z_log_var = z_log_var_layer(enc)

    z_mean_log_var_output = Concatenate(
        name='z_mean_log_var')([z_mean, z_log_var])

    z_samp = Lambda(sampling)([z_mean, z_log_var])

    if params['batchnorm_vae']:
        z_samp = BatchNormalization(axis=-1)(z_samp)

    return z_samp, z_mean_log_var_output


# ====================
# Property Prediction
# ==================== 
Example #29
Source File: multi_sampler.py    From costar_plan with Apache License 2.0
def _makeActorPolicy(self):
        '''
        Helper function: creates a model for the "actor" policy that will
        generate the controls to move towards a particular end effector pose.
        The job of this policy should be pretty simple.

        The actor policy is trained separately from the predictor/sampler
        policies, but using the same underlying representation.
        '''
        enc = Input((self.img_col_dim,))
        arm_goal = Input((self.num_arm_vars,),name="actor_arm_goal_in")
        gripper_goal = Input((1,),name="actor_gripper_goal_in")
        y = enc
        if not self.dense_representation:
            raise RuntimeError('Not yet supported!')
            y = Conv2D(int(self.img_num_filters/4),
                    kernel_size=[5,5],
                    strides=(2, 2),
                    padding='same')(y)
            y = Dropout(self.dropout_rate)(y)
            y = LeakyReLU(0.2)(y)
            y = BatchNormalization(momentum=0.9)(y)
            y = Flatten()(y)
        else:
            y = Concatenate()([y, arm_goal, gripper_goal])
            for _ in range(self.num_actor_policy_layers):
                y = Dense(self.combined_dense_size)(y)
                y = BatchNormalization(momentum=0.9)(y)
                y = LeakyReLU(0.2)(y)
                y = Dropout(self.dropout_rate)(y)
        arm_cmd_out = Lambda(lambda x: K.expand_dims(x, axis=1),name="arm_action")(
                Dense(self.arm_cmd_size)(y))
        gripper_cmd_out = Lambda(lambda x: K.expand_dims(x, axis=1),name="gripper_action")(
                Dense(self.gripper_cmd_size)(y))
        actor = Model([enc, arm_goal, gripper_goal], [arm_cmd_out,
            gripper_cmd_out], name="actor")
        return actor 
Example #30
Source File: hfusion.py    From hfusion with MIT License
def output_of_lambda2(input_shape):
    return (input_shape[0], audio_dim)


# ################################################################################
# #Level 2

# #concatenate level 1 output to be sent to hfusion
# fused_tensor=Concatenate(axis=2)([context_1_2,context_1_3,context_2_3])