Python keras.layers.merge.Concatenate() Examples

The following are 30 code examples showing how to use keras.layers.merge.Concatenate(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module keras.layers.merge, or try the search function.

Example 1
Project: PSPNet-Keras-tensorflow   Author: Vladkryvoruchko   File: layers_builder.py    License: MIT License 6 votes vote down vote up
def build_pyramid_pooling_module(res, input_shape):
    """Build the PSPNet pyramid pooling module on top of *res*.

    Pools the backbone features at four pyramid levels (1, 2, 3, 6),
    interpolates each branch back to the 1/8-resolution feature grid,
    and concatenates all branches with the original features along the
    channel axis.
    """
    # The backbone downsamples by a factor of 8, hence ceil(dim / 8).
    feature_map_size = tuple(int(ceil(dim / 8.0)) for dim in input_shape)
    print("PSP module will interpolate to a final feature map size of %s" %
          (feature_map_size, ))

    # One interpolation branch per pyramid level, coarsest level first
    # so the concatenation order matches [res, 6, 3, 2, 1].
    branches = [interp_block(res, level, feature_map_size, input_shape)
                for level in (6, 3, 2, 1)]

    # Resulting shape: (1, feature_map_size_x, feature_map_size_y, 4096).
    return Concatenate()([res] + branches)
Example 2
Project: image-segmentation-keras   Author: divamgupta   File: _pspnet_2.py    License: MIT License 6 votes vote down vote up
def build_pyramid_pooling_module(res, input_shape):
    """Build the Pyramid Pooling Module over the backbone output *res*.

    Each pyramid level (1, 2, 3, 6) is pooled and interpolated back to
    the 1/8-scale feature grid; all levels are concatenated with *res*
    on the channel axis.
    """
    # Backbone output stride is 8 -> target grid is ceil(dim / 8).
    pooled_size = tuple(int(ceil(d / 8.0)) for d in input_shape)

    levels = {n: interp_block(res, n, pooled_size, input_shape)
              for n in (1, 2, 3, 6)}

    # Concatenated shape: (1, feature_map_size_x, feature_map_size_y, 4096).
    return Concatenate()([res, levels[6], levels[3], levels[2], levels[1]])
Example 3
Project: keras-image-captioning   Author: danieljl   File: models.py    License: MIT License 6 votes vote down vote up
def build(self, vocabs=None):
        """Build and compile the captioning model (idempotent).

        Joins the image embedding and the word-embedding sequence into
        one input sequence, runs the sequence model, and compiles the
        resulting Model with Adam.

        Args:
            vocabs: vocabulary used for word-vector initialization;
                required when word_vector_init was set, else may be None.

        Raises:
            ValueError: if word_vector_init is set but vocabs is None.
        """
        # Already built -- building is a one-time operation.
        if self._keras_model:
            return
        if vocabs is None and self._word_vector_init is not None:
            raise ValueError('If word_vector_init is not None, build method '
                             'must be called with vocabs that are not None!')

        image_input, image_embedding = self._build_image_embedding()
        sentence_input, word_embedding = self._build_word_embedding(vocabs)
        # Prepend the image embedding to the word embeddings along the
        # timestep axis (axis=1).
        sequence_input = Concatenate(axis=1)([image_embedding, word_embedding])
        sequence_output = self._build_sequence_model(sequence_input)

        model = Model(inputs=[image_input, sentence_input],
                      outputs=sequence_output)
        # clipnorm bounds the gradient norm to guard against explosions.
        model.compile(optimizer=Adam(lr=self._learning_rate, clipnorm=5.0),
                      loss=categorical_crossentropy_from_logits,
                      metrics=[categorical_accuracy_with_variable_timestep])

        self._keras_model = model 
Example 4
Project: DenseNet-Cifar10   Author: Kexiii   File: DenseNet.py    License: MIT License 6 votes vote down vote up
def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1E-4):
    """Stack nb_layers conv_blocks, densely re-concatenating features.

    Each new conv_block output is appended to the running feature list
    and the whole list is concatenated to form the next block's input.

    Args:
        x: keras tensor
        nb_layers: number of conv_blocks to append
        nb_filter: current number of filters
        growth_rate: channels added per conv_block
        dropout_rate: dropout rate forwarded to conv_block
        weight_decay: weight decay factor forwarded to conv_block

    Returns:
        (keras tensor with nb_layers conv_blocks appended,
         updated filter count)
    """
    # Channels-first ("th") backends concatenate on axis 1, else last axis.
    axis = 1 if K.image_dim_ordering() == "th" else -1

    features = [x]
    for _ in range(nb_layers):
        new_features = conv_block(x, growth_rate, dropout_rate, weight_decay)
        features.append(new_features)
        # Dense connectivity: every previous output feeds the next block.
        x = Concatenate(axis=axis)(features)
        nb_filter += growth_rate

    return x, nb_filter
Example 5
Project: costar_plan   Author: jhu-lcsr   File: dense.py    License: Apache License 2.0 6 votes vote down vote up
def GetLSTMEncoder(xin, uin, dense_size, lstm_size, dense_layers=1,
        lstm_layers=1):
    '''
    Get LSTM encoder: dense preprocessing followed by stacked LSTMs.

    Parameters:
    -----------
    xin: input sequence tensor
    uin: optional second sequence concatenated onto x before each dense
         layer (pass None to skip)
    dense_size: units per time-distributed dense layer
    lstm_size: units per LSTM layer
    dense_layers: number of dense layers applied before the LSTMs
    lstm_layers: number of stacked LSTMs; only the last one collapses
                 the time dimension (return_sequences=False)
    '''
    x = xin
    # range() replaces Python 2-only xrange() so this also runs on Python 3.
    for _ in range(dense_layers):
        if uin is not None:
            x = Concatenate(axis=-1)([x, uin])
        x = TimeDistributed(Dense(dense_size))(x)
        x = TimeDistributed(Activation('relu'))(x)
    for i in range(lstm_layers):
        # Only the final LSTM returns a single vector instead of a sequence.
        sequence_out = i != lstm_layers - 1
        x = LSTM(lstm_size, return_sequences=sequence_out)(x)
        x = Activation('relu')(x)
    return x
Example 6
Project: costar_plan   Author: jhu-lcsr   File: dvrk.py    License: Apache License 2.0 6 votes vote down vote up
def MakeJigsawsMultiDecoder(model, decoder, num_images=4, h_dim=(12,16)):
    '''
    Make multiple images: apply a small conv stack plus the shared
    decoder num_images times to the hidden state, then stack the
    resulting image hypotheses along a new axis.

    Parameters:
    -----------
    model: provides encoder_channels and getOptimizer()
    decoder: decoder model applied to each hypothesis branch
    num_images: number of image hypotheses to produce
    h_dim: spatial size (rows, cols) of the 64-channel hidden input

    Returns:
    --------
    compiled Model mapping h -> stacked image hypotheses
    '''
    h = Input((h_dim[0], h_dim[1], 64),name="h_in")

    xs = []
    for i in range(num_images):
        xi = h
        # Per-hypothesis conv stack ending at the decoder's channel count.
        xi = AddConv2D(xi, 64, [5, 5], stride=1,
                dropout_rate=0.)
        xi = AddConv2D(xi, model.encoder_channels, [5, 5], stride=1,
                dropout_rate=0.)
        xi = decoder(xi)
        # Insert a new axis at position 1 so hypotheses can be stacked.
        img_x = Lambda(
            lambda y: K.expand_dims(y, 1),
            name="img_hypothesis_%d"%i)(xi)
        xs.append(img_x)
    img_out = Concatenate(axis=1)(xs)

    mm = Model(h, img_out, name="multi")
    mm.compile(loss="mae", optimizer=model.getOptimizer())

    return mm 
Example 7
Project: WeSTClass   Author: yumeng5   File: model.py    License: Apache License 2.0 6 votes vote down vote up
def ConvolutionLayer(input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False, vocab_sz=None,
                     embedding_matrix=None, word_embedding_dim=100, hidden_dim=20, act='relu', init='ones'):
    """Build a word-level CNN text classifier (one branch per kernel width).

    Each branch is a Conv1D followed by max-over-time pooling; branches
    are concatenated, passed through a ReLU dense layer, and softmaxed.
    Returns an uncompiled Model named 'classifier'.
    """
    x = Input(shape=(input_shape,), name='input')
    z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), name="embedding",
                  weights=[embedding_matrix], trainable=word_trainable)(x)

    def pooled_conv(width):
        # One parallel branch: 1-D convolution + global max pooling.
        feature_map = Convolution1D(filters=num_filters,
                                    kernel_size=width,
                                    padding="valid",
                                    activation=act,
                                    strides=1,
                                    kernel_initializer=init)(z)
        return GlobalMaxPooling1D()(feature_map)

    branches = [pooled_conv(sz) for sz in filter_sizes]
    # A single branch needs no concatenation.
    features = branches[0] if len(branches) == 1 else Concatenate()(branches)
    hidden = Dense(hidden_dim, activation="relu")(features)
    probs = Dense(n_classes, activation="softmax")(hidden)
    return Model(inputs=x, outputs=probs, name='classifier')
Example 8
Project: WeSHClass   Author: yumeng5   File: models.py    License: Apache License 2.0 6 votes vote down vote up
def ConvolutionLayer(x, input_shape, n_classes, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False,
                     vocab_sz=None,
                     embedding_matrix=None, word_embedding_dim=100, hidden_dim=100, act='relu', init='ones'):
    """Build a word-level CNN classifier on an existing input tensor x.

    One Conv1D + global-max-pool branch per filter size; the branches
    are concatenated, passed through a hidden dense layer, and softmaxed.

    Args:
        x: Keras input tensor of token ids.
        input_shape: sequence length.
        n_classes: number of output classes.
        filter_sizes: kernel widths, one conv branch each.
        num_filters: filters per branch.
        word_trainable: whether embedding weights are trainable.
        vocab_sz: embedding vocabulary size.
        embedding_matrix: optional pretrained embedding weights.
        word_embedding_dim: embedding dimensionality.
        hidden_dim: units in the pre-softmax dense layer.
        act: activation of the conv branches.
        init: kernel initializer of the conv branches.

    Returns:
        Uncompiled Model from x to class probabilities.
    """
    # Use pretrained embeddings when provided, else train from scratch.
    if embedding_matrix is not None:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,),
                      weights=[embedding_matrix], trainable=word_trainable)(x)
    else:
        z = Embedding(vocab_sz, word_embedding_dim, input_length=(input_shape,), trainable=word_trainable)(x)
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=sz,
                             padding="valid",
                             activation=act,
                             strides=1,
                             kernel_initializer=init)(z)
        # Max-over-time pooling reduces each branch to one vector.
        conv = GlobalMaxPooling1D()(conv)
        conv_blocks.append(conv)
    # A single branch needs no concatenation.
    z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    z = Dense(hidden_dim, activation="relu")(z)
    y = Dense(n_classes, activation="softmax")(z)
    return Model(inputs=x, outputs=y) 
Example 9
Project: RMDL   Author: kk7nc   File: BuildModel.py    License: GNU General Public License v3.0 6 votes vote down vote up
def to_multi_gpu(model, n_gpus=2):
    """
    Given a keras [model], return an equivalent model which parallelizes
    the computation over [n_gpus] GPUs.

    Each GPU gets a slice of the input batch, applies the model on that slice
    and later the outputs of the models are concatenated to a single tensor,
    hence the user sees a model that behaves the same as the original.
    """

    # The master input lives on the CPU; per-GPU slices are taken from it.
    with tf.device('/cpu:0'):
        x = Input(model.input_shape[1:], name="input1")

    towers = []
    for g in range(n_gpus):
        with tf.device('/gpu:' + str(g)):
            # slice_batch selects this GPU's share of the batch; the
            # identity shape function (lambda shape: shape) tells Keras
            # the per-sample shape is unchanged.
            slice_g = Lambda(slice_batch,
                             lambda shape: shape,
                             arguments={'n_gpus':n_gpus, 'part':g})(x)
            towers.append(model(slice_g))

    # Re-join the per-GPU outputs along the batch axis (axis=0) on CPU.
    with tf.device('/cpu:0'):
        merged = Concatenate(axis=0)(towers)

    return Model(inputs=[x], outputs=[merged]) 
Example 10
Project: DeepIV   Author: jhartford   File: demand_simulation_mnist.py    License: MIT License 6 votes vote down vote up
def conv_embedding(images, output, other_features = [], dropout_rate=0.1,
                   embedding_dropout=0.1, embedding_l2=0.05, constrain_norm=True):
    """Embed images with a convnet, then run a feed-forward head.

    Args:
        images: image input tensor.
        output: output layer/spec forwarded to feed_forward_net.
        other_features: extra tensors concatenated onto the 64-d image
            embedding. (NOTE: mutable default, but never mutated here.)
        dropout_rate: dropout for the feed-forward head.
        embedding_dropout: dropout inside the convnet embedding.
        embedding_l2: l2 regularization rate for the embedding.
        constrain_norm: forwarded to both sub-networks.

    Returns:
        Output tensor of the feed-forward head.
    """
    print("Building conv net")
    x_embedding = architectures.convnet(images, Dense(64, activation='linear'),
                        dropout_rate=embedding_dropout,
                        activations='relu',
                        l2_rate=embedding_l2, constrain_norm=constrain_norm)

    # Append auxiliary features (if any) to the image embedding.
    if len(other_features) > 0:
        embedd = Concatenate(axis=1)([x_embedding] + other_features)
    else:
        embedd = x_embedding
    out = architectures.feed_forward_net(embedd, output,
                        hidden_layers=[32],
                        dropout_rate=dropout_rate,
                        activations='relu', constrain_norm=constrain_norm)
    return out 
Example 11
Project: DEXTR-KerasTensorflow   Author: scaelles   File: classifiers.py    License: GNU General Public License v3.0 5 votes vote down vote up
def build_pyramid_pooling_module(res, input_shape, nb_classes, sigmoid=False, output_size=None):
    """Build the Pyramid Pooling Module.

    Pools res at pyramid levels 1/2/3/6, concatenates the branches with
    res, reduces channels with a 1x1 conv + BN + ReLU, and projects to
    nb_classes channels. Optionally upsamples and applies a sigmoid.
    """
    # ---PSPNet concat layers with Interpolation
    # Backbone output stride is 8 -> target grid is ceil(dim / 8).
    feature_map_size = tuple(int(ceil(input_dim / 8.0)) for input_dim in input_shape)
    # BatchNorm axis follows the backend's channel ordering.
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    print("PSP module will interpolate to a final feature map size of %s" %
          (feature_map_size, ))

    interp_block1 = psp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = psp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = psp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = psp_block(res, 6, feature_map_size, input_shape)

    # concat all these layers. resulted
    res = Concatenate()([interp_block1,
                         interp_block2,
                         interp_block3,
                         interp_block6,
                         res])
    # 1x1 conv reduces the concatenated channels before classification.
    x = Conv2D(512, (1, 1), strides=(1, 1), padding="same", name="class_psp_reduce_conv", use_bias=False)(res)
    x = resnet.BN(bn_axis, name="class_psp_reduce_bn")(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="class_psp_final_conv")(x)

    if output_size:
        x = Upsampling(output_size)(x)

    if sigmoid:
        x = Activation('sigmoid')(x)
    return x 
Example 12
Project: hfusion   Author: SenticNet   File: hfusion.py    License: MIT License 5 votes vote down vote up
def output_of_lambda2(input_shape):
    """Output-shape function for a Lambda layer: (batch, audio_dim).

    NOTE(review): audio_dim is a module-level global defined elsewhere
    in this file.
    """
    return (input_shape[0], audio_dim)







# ################################################################################
# #Level 2

# #concatenate level 1 output to be sent to hfusion
# fused_tensor=Concatenate(axis=2)([context_1_2,context_1_3,context_2_3]) 
Example 13
Project: V-GAN   Author: jaeminSon   File: model.py    License: MIT License 5 votes vote down vote up
def GAN(g,d,img_size,n_filters_g, n_filters_d, alpha_recip, init_lr, name='gan'):
    """
    GAN (that binds generator and discriminator)

    g: generator model mapping fundus image -> vessel segmentation
    d: discriminator model applied to (fundus, vessel) channel-concat pairs
    img_size: (height, width) of the inputs
    n_filters_g, n_filters_d: NOTE(review): accepted but unused in this body
    alpha_recip: weight of the adversarial term vs. the segmentation term
    init_lr: initial Adam learning rate
    name: model name
    """
    img_h, img_w=img_size[0], img_size[1]

    img_ch=3
    seg_ch=1
    
    fundus = Input((img_h, img_w, img_ch))
    vessel = Input((img_h, img_w, seg_ch))
    
    # Generator proposes a vessel map; discriminator judges the pair.
    fake_vessel=g(fundus)
    fake_pair=Concatenate(axis=3)([fundus, fake_vessel])
    
    gan=Model([fundus, vessel], d(fake_pair), name=name)

    # Combined loss closes over the graph tensors vessel/fake_vessel.
    def gan_loss(y_true, y_pred):
        y_true_flat = K.batch_flatten(y_true)
        y_pred_flat = K.batch_flatten(y_pred)

        # Adversarial term: discriminator output vs. labels.
        L_adv = objectives.binary_crossentropy(y_true_flat, y_pred_flat)
#         L_adv = objectives.mean_squared_error(y_true_flat, y_pred_flat)

        # Segmentation term: generated vessels vs. ground truth.
        vessel_flat = K.batch_flatten(vessel)
        fake_vessel_flat = K.batch_flatten(fake_vessel)
        L_seg = objectives.binary_crossentropy(vessel_flat, fake_vessel_flat)
#         L_seg = objectives.mean_absolute_error(vessel_flat, fake_vessel_flat)

        return alpha_recip*L_adv + L_seg
    
    
    gan.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=gan_loss, metrics=['accuracy'])
        
    return gan 
Example 14
Project: text-classifier   Author: shibing624   File: deep_model.py    License: Apache License 2.0 5 votes vote down vote up
def cnn_model(max_len=400,
              vocabulary_size=20000,
              embedding_dim=128,
              hidden_dim=128,
              num_filters=512,
              filter_sizes="3,4,5",
              num_classses=4,
              dropout=0.5):
    """Build and compile a TextCNN sentence classifier.

    Args:
        max_len: input sequence length (padded token ids).
        vocabulary_size: embedding vocabulary size.
        embedding_dim: embedding dimensionality.
        hidden_dim: units of the dense layer before the softmax.
        num_filters: filters per convolution branch.
        filter_sizes: kernel widths, either a comma-separated string
            (e.g. "3,4,5") or an iterable of ints.
        num_classses: number of output classes (parameter name kept for
            backward compatibility with existing callers).
        dropout: dropout rate after the concatenated conv features.

    Returns:
        Compiled Keras Model.
    """
    print("Creating text CNN Model...")
    # a tensor
    inputs = Input(shape=(max_len,), dtype='int32')
    # emb
    embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim,
                          input_length=max_len, name="embedding")(inputs)
    # Parse kernel widths. The previous check (`"," in filter_sizes`)
    # silently replaced any comma-free value such as "7" with [3, 4, 5];
    # splitting on "," handles single and multiple sizes uniformly, and
    # an already-parsed iterable is accepted as-is.
    if isinstance(filter_sizes, str):
        filter_sizes = [int(sz) for sz in filter_sizes.split(",") if sz.strip()]
    filter_sizes = list(filter_sizes) or [3, 4, 5]
    # convolution block
    conv_blocks = []
    for sz in filter_sizes:
        conv = Convolution1D(filters=num_filters,
                             kernel_size=int(sz),
                             strides=1,
                             padding='valid',
                             activation='relu')(embedding)
        conv = MaxPooling1D()(conv)
        conv = Flatten()(conv)
        conv_blocks.append(conv)
    # A single branch needs no concatenation.
    conv_concate = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    dropout_layer = Dropout(dropout)(conv_concate)
    output = Dense(hidden_dim, activation='relu')(dropout_layer)
    output = Dense(num_classses, activation='softmax')(output)
    # model
    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
Example 15
Project: vess2ret   Author: costapt   File: models.py    License: MIT License 5 votes vote down vote up
def concatenate_layers(inputs, concat_axis, mode='concat'):
    """Keras-version-agnostic concatenation wrapper.

    Under Keras 2 only 'concat' mode is supported (via Concatenate);
    under Keras 1 the call is forwarded to the functional merge().
    """
    if not KERAS_2:
        # Keras 1.x: merge() supports several modes, pass everything on.
        return merge(inputs=inputs, concat_axis=concat_axis, mode=mode)
    assert mode == 'concat', "Only concatenation is supported in this wrapper"
    return Concatenate(axis=concat_axis)(inputs)
Example 16
Project: Malware-GAN   Author: yanminglai   File: MalGAN_v1.py    License: GNU General Public License v3.0 5 votes vote down vote up
def build_generator(self):
        """Build the generator: maps (example, noise) -> perturbed example.

        Fixes three Keras API bugs present in the original:
        * ``Concatenate([example, noise], axis=1)`` passed the tensor
          list to the layer constructor (whose first positional arg is
          ``axis``); merge layers must be instantiated first and then
          called on the tensor list.
        * ``Activation(activation='Sigmoid')`` -- Keras activation names
          are lowercase; 'Sigmoid' is not a valid identifier.
        * ``Maximum([example, x])`` had the same construct-vs-call bug
          as Concatenate.
        The corrected forms match the sibling implementations in
        MalGAN_v2.py / MalGAN__v3.py / exp.py.
        """
        example = Input(shape=(self.apifeature_dims,))
        noise = Input(shape=(self.apifeature_dims,))
        # Instantiate the layer, then call it on the tensor list.
        x = Concatenate(axis=1)([example, noise])
        for dim in self.generator_layers[1:]:
            x = Dense(dim)(x)
            x = Activation(activation='sigmoid')(x)
        # Element-wise maximum keeps every feature already set in the input.
        x = Maximum()([example, x])
        generator = Model([example, noise], x, name='generator')
        generator.summary()
        return generator
Example 17
Project: Malware-GAN   Author: yanminglai   File: MalGAN_v2.py    License: GNU General Public License v3.0 5 votes vote down vote up
def build_generator(self):
        """Build the generator: maps (example, noise) -> perturbed example.

        NOTE(review): the sigmoid Activation sits *after* the Dense
        loop, so only the final Dense output is squashed and the
        intermediate Dense layers are linear; the sibling exp.py version
        applies the activation inside the loop -- confirm which is
        intended.
        """

        example = Input(shape=(self.apifeature_dims,))
        noise = Input(shape=(self.z_dims,))
        # Generator input is the example feature vector plus noise.
        x = Concatenate(axis=1)([example, noise])
        for dim in self.generator_layers[1:]:
            x = Dense(dim)(x)
        x = Activation(activation='sigmoid')(x)
        # Element-wise maximum keeps every feature already set in the input.
        x = Maximum()([example, x])
        generator = Model([example, noise], x, name='generator')
        generator.summary()
        return generator 
Example 18
Project: Malware-GAN   Author: yanminglai   File: exp.py    License: GNU General Public License v3.0 5 votes vote down vote up
def build_generator(self):
        """Build the generator: maps (example, noise) -> perturbed example.

        A sigmoid-activated dense stack is applied to the concatenated
        inputs, and an element-wise maximum with the original example
        guarantees no input feature is removed.
        """
        example = Input(shape=(self.apifeature_dims,))
        noise = Input(shape=(self.z_dims,))

        hidden = Concatenate(axis=1)([example, noise])
        # Dense + sigmoid for every width after the first entry of
        # generator_layers.
        for units in self.generator_layers[1:]:
            hidden = Dense(units)(hidden)
            hidden = Activation(activation='sigmoid')(hidden)
        out = Maximum()([example, hidden])

        generator = Model([example, noise], out, name='generator')
        generator.summary()
        return generator
Example 19
Project: Malware-GAN   Author: yanminglai   File: MalGAN__v3.py    License: GNU General Public License v3.0 5 votes vote down vote up
def build_generator(self):
        """Build the generator: maps (example, noise) -> perturbed example.

        NOTE(review): the sigmoid Activation is outside the Dense loop,
        so only the last Dense output is squashed (same layout as
        MalGAN_v2.py, unlike exp.py) -- confirm which is intended.
        """

        example = Input(shape=(self.apifeature_dims,))
        noise = Input(shape=(self.z_dims,))
        # Generator input is the example feature vector plus noise.
        x = Concatenate(axis=1)([example, noise])
        for dim in self.generator_layers[1:]:
            x = Dense(dim)(x)
        x = Activation(activation='sigmoid')(x)
        # Element-wise maximum keeps every feature already set in the input.
        x = Maximum()([example, x])
        generator = Model([example, noise], x, name='generator')
        generator.summary()
        return generator 
Example 20
Project: costar_plan   Author: jhu-lcsr   File: hypertree_model.py    License: Apache License 2.0 5 votes vote down vote up
def concat_images_with_tiled_vector_layer(images, vector, image_shape=None, vector_shape=None):
    """Tile a vector as if it were channels onto every pixel of an image.

    This version is designed to be used as layers within a Keras model.

    # Params
       images: a list of images to combine, must have equal dimensions
       vector: the 1D vector to tile onto every pixel.
       image_shape: Tuple with 3 entries defining the shape (batch, height, width)
           images should be expected to have, do not specify the number
           of batches.
       vector_shape: Tuple with 3 entries defining the shape (batch, height, width)
           images should be expected to have, do not specify the number
           of batches.
    """
    with K.name_scope('concat_images_with_tiled_vector_layer'):
        if not isinstance(images, list):
            images = [images]
        if vector_shape is None:
            # check if K.shape, K.int_shape, or vector.get_shape().as_list()[1:] is better
            # https://github.com/fchollet/keras/issues/5211
            # TODO(ahundt) ensure shape works in both google brain/cornell dataset input tensor and keras Input() aka numpy array cases
            vector_shape = K.int_shape(vector)[1:]
        if image_shape is None:
            # check if K.shape, K.int_shape, or image.get_shape().as_list()[1:] is better
            # https://github.com/fchollet/keras/issues/5211
            # TODO(ahundt) ensure shape works in both google brain/cornell dataset input tensor and keras Input() aka numpy array cases
            image_shape = K.int_shape(images[0])[1:]
        # Make the vector broadcastable: (batch, 1, 1, channels).
        vector = Reshape([1, 1, vector_shape[-1]])(vector)
        tile_shape = (int(1), int(image_shape[0]), int(image_shape[1]), int(1))
        # Repeat the vector across every spatial position of the image.
        tiled_vector = Lambda(lambda x: K.tile(x, tile_shape))(vector)
        # Channel-wise concatenation of all images plus the tiled vector.
        x = Concatenate(axis=-1)([] + images + [tiled_vector])
    return x 
Example 21
Project: costar_plan   Author: jhu-lcsr   File: dvrk.py    License: Apache License 2.0 5 votes vote down vote up
def MakeJigsawsImageClassifier(model, img_shape, trainable = True):
    """Build and compile a CNN that classifies (initial, current) image
    pairs into one of model.num_options classes.

    The compiled classifier is stored on model.classifier and returned.
    """
    img0 = Input(img_shape,name="img0_classifier_in")
    img = Input(img_shape,name="img_classifier_in")
    bn = model.use_batchnorm
    # disc=True selects leaky-ReLU activations in the conv stack.
    disc = True
    dr = 0. #model.dropout_rate
    x = img
    x0 = img0

    # Encode both images separately, then fuse along channels.
    x = AddConv2D(x, 32, [4,4], 2, dr, "same", lrelu=disc, bn=bn)
    x0 = AddConv2D(x0, 32, [4,4], 2, dr, "same", lrelu=disc, bn=bn)

    x = Concatenate()([x0, x])
    x = AddConv2D(x, 64, [4,4], 2, dr, "same", lrelu=disc, bn=bn)
    x = AddConv2D(x, 64, [4,4], 2, dr, "same", lrelu=disc, bn=bn)
    x = AddConv2D(x, 64, [4,4], 2, dr, "same", lrelu=disc, bn=bn)
    x = AddConv2D(x, 64, [4,4], 2, 0., "same", lrelu=disc, bn=bn)
    x = AddConv2D(x, 64, [4,4], 2, 0., "same", lrelu=disc, bn=bn)

    # Dense classification head over the flattened features.
    x = Flatten()(x)
    x = Dropout(0.5)(x)
    x = AddDense(x, 256, "lrelu", 0.5, output=True, bn=False, kr=0.)
    x = AddDense(x, model.num_options, "softmax", 0., output=True, bn=False)
    image_encoder = Model([img0, img], x, name="classifier")
    if not trainable:
        image_encoder.trainable = False
    image_encoder.compile(loss="categorical_crossentropy",
                          metrics=["accuracy"],
                          optimizer=model.getOptimizer())
    model.classifier = image_encoder
    return image_encoder 
Example 22
Project: costar_plan   Author: jhu-lcsr   File: multi_sampler.py    License: Apache License 2.0 5 votes vote down vote up
def _makeActorPolicy(self):
        '''
        Helper function: creates a model for the "actor" policy that will
        generate the controls to move towards a particular end effector pose.
        The job of this policy should be pretty simple.

        The actor policy is trained separately from the predictor/sampler
        policies, but using the same underlying representation.

        Returns:
        --------
        Model mapping (encoding, arm goal, gripper goal) to
        (arm command, gripper command).
        '''
        enc = Input((self.img_col_dim,))
        arm_goal = Input((self.num_arm_vars,),name="actor_arm_goal_in")
        gripper_goal = Input((1,),name="actor_gripper_goal_in")
        y = enc
        # Only the dense representation path is implemented; the conv
        # branch below is unreachable after the raise.
        if not self.dense_representation:
            raise RuntimeError('Not yet supported!')
            y = Conv2D(int(self.img_num_filters/4),
                    kernel_size=[5,5],
                    strides=(2, 2),
                    padding='same')(y)
            y = Dropout(self.dropout_rate)(y)
            y = LeakyReLU(0.2)(y)
            y = BatchNormalization(momentum=0.9)(y)
            y = Flatten()(y)
        else:
            # Fuse the encoding with the goals, then a small MLP stack.
            y = Concatenate()([y, arm_goal, gripper_goal])
            for _ in range(self.num_actor_policy_layers):
                y = Dense(self.combined_dense_size)(y)
                y = BatchNormalization(momentum=0.9)(y)
                y = LeakyReLU(0.2)(y)
                y = Dropout(self.dropout_rate)(y)
        # Each command head gets an extra time axis via expand_dims.
        arm_cmd_out = Lambda(lambda x: K.expand_dims(x, axis=1),name="arm_action")(
                Dense(self.arm_cmd_size)(y))
        gripper_cmd_out = Lambda(lambda x: K.expand_dims(x, axis=1),name="gripper_action")(
                Dense(self.gripper_cmd_size)(y))
        actor = Model([enc, arm_goal, gripper_goal], [arm_cmd_out,
            gripper_cmd_out], name="actor")
        return actor 
Example 23
Project: costar_plan   Author: jhu-lcsr   File: multi_sampler.py    License: Apache License 2.0 5 votes vote down vote up
def _makeStateEncoder(self, arm_size, gripper_size, disc=False):
        '''
        Encode arm state.

        Parameters:
        -----------
        arm_size: number of arm input variables
        gripper_size: number of gripper input variables
        disc: is this a discriminator? if so, use leaky relu

        Returns:
        --------
        compiled Model mapping (arm, gripper, option) to a 64-d encoding
        '''
        arm = Input((arm_size,))
        gripper = Input((gripper_size,))
        option = Input((1,))
        if disc:
            activation = "lrelu"
        else:
            activation = "relu"

        # Dropout is disabled here (rate multiplied by zero).
        dr = self.dropout_rate * 0.
        x = Concatenate()([arm,gripper])
        x = AddDense(x, 64, activation, dr)

        # One-hot encode the scalar option index, then flatten it.
        y = OneHot(self.num_options)(option)
        y = Flatten()(y)
        #y = AddDense(y, 32, activation, dr)

        if not self.disable_option_in_encoder:
            x = Concatenate()([x,y])

        x = AddDense(x, 64, activation, dr)

        state_encoder = Model([arm, gripper, option], x,
                name="state_encoder")
        state_encoder.compile(loss="mae", optimizer=self.getOptimizer())
        # Only keep a reference when building the real encoder, not a
        # discriminator copy.
        if not disc:
            self.state_encoder = state_encoder
        return state_encoder 
Example 24
Project: costar_plan   Author: jhu-lcsr   File: pretrain_image_jigsaws_gan.py    License: Apache License 2.0 5 votes vote down vote up
def _makeImageDiscriminator(self, img_shape):
        '''
        create image-only encoder to extract keypoints from the scene.

        Params:
        -------
        img_shape: shape of the image to encode

        Returns:
        --------
        compiled discriminator Model over (img, img0) pairs; also stored
        on self.image_discriminator
        '''
        img = Input(img_shape,name="img_encoder_in")
        img0 = Input(img_shape,name="img0_encoder_in")
        ins = [img, img0]
        dr = 0.3

        # Wasserstein GANs need a linear critic output; a standard GAN
        # uses sigmoid + binary cross-entropy.
        if self.use_wasserstein:
            loss = wasserstein_loss
            activation = "linear"
        else:
            loss = "binary_crossentropy"
            activation = "sigmoid"

        # Encode each image separately, then fuse by element-wise addition.
        x = AddConv2D(img, 32, [4,4], 1, 0, "same", lrelu=True, bn=False)
        x0 = AddConv2D(img0, 32, [4,4], 1, 0, "same", lrelu=True, bn=False)
        x = Add()([x, x0])
        #x = Concatenate(axis=-1)([img0, img])
        x = AddConv2D(x, 32, [4,4], 2, dr, "same", lrelu=True, bn=False)
        x = AddConv2D(x, 64, [4,4], 2, dr, "same", lrelu=True, bn=False)
        x = AddConv2D(x, 128, [4,4], 2, dr, "same", lrelu=True, bn=False)
        #x = AddConv2D(x, 256, [4,4], 2, dr, "same", lrelu=True, bn=False)
        # Collapse to a single score per image pair.
        x = AddConv2D(x, 1, [1,1], 1, dr, "same", activation=activation, bn=False)
        x = AveragePooling2D(pool_size=(12,16))(x)
        x = Flatten()(x)
        #x = Dense(1, activation="linear")(x)
        discrim = Model(ins, x, name="image_discriminator")
        discrim.compile(loss=loss,
                optimizer=self.getOptimizer())
        self.image_discriminator = discrim
        return discrim 
Example 25
Project: costar_plan   Author: jhu-lcsr   File: planner.py    License: Apache License 2.0 5 votes vote down vote up
def CombinePoseAndOption(pose_in, option_in, dim=64):
    """Fuse pose and option tensors into a dim-wide ReLU feature."""
    merged = Concatenate(axis=-1)([pose_in, option_in])
    return Dense(dim, activation="relu")(merged)
Example 26
Project: costar_plan   Author: jhu-lcsr   File: planner.py    License: Apache License 2.0 5 votes vote down vote up
def CombineArmAndGripperAndOption(arm_in, gripper_in, option_in, dim=64):
    """Fuse arm, gripper and option tensors into a dim-wide ReLU feature."""
    fused = Concatenate(axis=-1)([arm_in, gripper_in, option_in])
    return Dense(dim, activation="relu")(fused)
Example 27
Project: costar_plan   Author: jhu-lcsr   File: planner.py    License: Apache License 2.0 5 votes vote down vote up
def TileOnto(x,z,zlen,xsize,add=False):
    """Broadcast vector z over every spatial cell of feature map x.

    z is reshaped to (1, 1, zlen), tiled to the spatial size xsize, and
    then either concatenated onto x along channels (default) or added
    element-wise when add=True.
    """
    tiles = (int(1), int(xsize[0]), int(xsize[1]), 1)
    z = Reshape([1, 1, zlen])(z)
    z = Lambda(lambda t: K.tile(t, tiles))(z)
    if add:
        return Add()([x, z])
    return Concatenate(axis=-1)([x, z])
Example 28
Project: costar_plan   Author: jhu-lcsr   File: planner.py    License: Apache License 2.0 5 votes vote down vote up
def TilePose(x, pose_in, tile_width, tile_height,
        option=None, option_in=None,
        time_distributed=None, dim=64, concatenate=False):
    '''
    Embed the robot pose (optionally combined with a one-hot option) and
    tile it across every spatial position of the feature map x.

    Parameters:
    -----------
    x: feature map tensor to combine the tiled pose with
    tile_width, tile_height: spatial size of x
    option: number of options; must be given together with option_in
    option_in: one-hot option input tensor
    time_distributed: if > 0, treat the leading axis as time steps
    dim: width of the dense pose embedding
    concatenate: concatenate along channels instead of adding

    Returns:
    --------
    (x combined with the tiled pose, the tiled pose tensor)
    '''
    # handle error: options and grippers
    if option is None and option_in is not None \
        or option is not None and option_in is None:
            raise RuntimeError('must provide both #opts and input')

    # Embed the pose (plus option, if any) into a dim-wide vector.
    if option is None:
        robot = CombinePose(pose_in, dim=dim)
    else:
        robot = CombinePoseAndOption(pose_in, option_in, dim=dim)
    reshape_size = dim

    # Reshape to a broadcastable rank, with or without a time axis.
    if time_distributed is not None and time_distributed > 0:
        tile_shape = (1, 1, tile_width, tile_height, 1)
        robot = Reshape([time_distributed, 1, 1, reshape_size])(robot)
    else:
        tile_shape = (1, tile_width, tile_height, 1)
        robot = Reshape([1, 1, reshape_size])(robot)

    # Tile across the spatial grid, then merge with the feature map.
    robot = Lambda(lambda t: K.tile(t, tile_shape))(robot)
    if concatenate:
        x = Concatenate(axis=-1)([x, robot])
    else:
        # BUG FIX: Keras Add() accepts no axis argument; the original
        # Add(axis=-1) raised a TypeError whenever concatenate was False.
        x = Add()([x, robot])

    return x, robot
Example 29
Project: costar_plan   Author: jhu-lcsr   File: planner.py    License: Apache License 2.0 5 votes vote down vote up
def AddOptionTiling(x, option_length, option_in, height, width):
    """Tile a one-hot option vector over the spatial grid of x and
    concatenate it onto x along the channel axis."""
    reps = (1, width, height, 1)
    opt = Reshape([1, 1, option_length])(option_in)
    opt = Lambda(lambda t: K.tile(t, reps))(opt)
    return Concatenate(
            axis=-1,
            name="add_option_%dx%d"%(width,height),
        )([x, opt])
Example 30
Project: costar_plan   Author: jhu-lcsr   File: husky.py    License: Apache License 2.0 5 votes vote down vote up
def GetHuskyActorModel(x, num_options, pose_size,
        dropout_rate=0.5, batchnorm=True):
    '''
    Make an "actor" network that takes in an encoded image and an "option"
    label and produces the next command to execute.

    Parameters:
    -----------
    x: encoded feature tensor; used only to derive the input shapes
    num_options: width of the one-hot option input
    pose_size: dimensionality of both the pose input and the pose output
    dropout_rate: dropout through the conv/dense stack
    batchnorm: NOTE(review): accepted but unused -- bn is hard-coded to
        False below; confirm whether this is intentional.
    '''
    xin = Input([int(d) for d in x.shape[1:]], name="actor_h_in")
    x0in = Input([int(d) for d in x.shape[1:]], name="actor_h0_in")

    pose_in = Input((pose_size,), name="actor_pose_in")
    option_in = Input((num_options,), name="actor_o_in")
    x = xin
    x0 = x0in
    # bn is forced off here regardless of the batchnorm argument.
    dr, bn = dropout_rate, False
    use_lrelu = False

    # Fuse current and initial encodings along channels.
    x = Concatenate(axis=-1)([x, x0])
    x = AddConv2D(x, 32, [3,3], 1, dr, "same", lrelu=use_lrelu, bn=bn)

    # Add arm, gripper
    y = pose_in
    y = AddDense(y, 32, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y, 32, (8,8), add=False)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    # Add arm, gripper
    y2 = AddDense(option_in, 64, "relu", 0., output=True, constraint=3)
    x = TileOnto(x, y2, 64, (6,6), add=False)
    x = AddConv2D(x, 128, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)
    x = AddConv2D(x, 64, [3,3], 1, dr, "valid", lrelu=use_lrelu, bn=bn)

    x = Flatten()(x)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)
    x = AddDense(x, 512, "relu", dr, output=True, bn=bn)    # Same setup as the state decoders


    # Linear head emits the next pose command.
    pose = AddDense(x, pose_size, "linear", 0., output=True)
    actor = Model([x0in, xin, option_in, pose_in], [pose], name="actor")
    return actor