Python keras.layers.Flatten() Examples

The following code examples show how to use keras.layers.Flatten(). They are taken from open source Python projects. You can vote up the examples you like or vote down the ones you don't.

Example 1
Project: speed_estimation   Author: NeilNie   File: simple_conv.py    MIT License 13 votes vote down vote up
def commaai_model(self):
        """Build and compile the comma.ai-style steering regression net.

        Returns a compiled Sequential model: an input-normalizing Lambda,
        three strided convolutions with ELU activations, and a dense head
        ending in a single linear output, optimized with SGD on MSE.
        """
        net = Sequential()
        # Rescale pixel values from [0, 255] into [-1, 1].
        net.add(Lambda(
            lambda x: x / 127.5 - 1.,
            input_shape=(configs.IMG_HEIGHT, configs.IMG_WIDTH, 3),
            output_shape=(configs.IMG_HEIGHT, configs.IMG_WIDTH, 3)))
        # Convolutional feature extractor.
        net.add(Conv2D(16, (8, 8), strides=4, padding="same"))
        net.add(ELU())
        net.add(Conv2D(32, (5, 5), strides=2, padding="same"))
        net.add(ELU())
        net.add(Conv2D(64, (5, 5), strides=2, padding="same"))
        net.add(Flatten())
        net.add(Dropout(.2))
        net.add(ELU())
        # Dense regression head: one continuous output.
        net.add(Dense(512))
        net.add(Dropout(.5))
        net.add(ELU())
        net.add(Dense(1))

        optimizer = SGD(lr=0.00001, decay=1e-6, momentum=0.9, nesterov=True)
        net.compile(optimizer=optimizer, loss='mean_squared_error')
        return net
Example 2
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari.py    MIT License 7 votes vote down vote up
def deepMindAtariNet(nbClasses, inputShape, includeTop=True):
        """Build the three-conv-layer DeepMind Atari network.

        nbClasses: number of output classes.
        inputShape: input shape without the batch dimension.
        includeTop: when False, return only the convolutional trunk.
        """
        image = Input(shape=inputShape)
        conv = Conv2D(32, 8, 8, subsample=(4, 4), activation='relu',
                      border_mode='same', name='conv1')(image)
        conv = Conv2D(64, 4, 4, subsample=(2, 2), activation='relu',
                      border_mode='same', name='conv2')(conv)
        conv = Conv2D(64, 3, 3, activation='relu',
                      border_mode='same', name='conv3')(conv)
        if not includeTop:
            return Model(image, conv)
        flat = Flatten(name='flatten')(conv)
        hidden = Dense(512, activation='relu', name='dense1')(flat)
        probs = Dense(nbClasses, activation='softmax', name='output')(hidden)
        return Model(image, probs)
Example 3
Project: apex_dqn   Author: omurammm   File: learner.py    MIT License 6 votes vote down vote up
def build_network(self):
        """Construct the dueling Q-network.

        Returns a tuple of (state placeholder, Q-value tensor, keras model).
        """
        frames = Input(shape=(4, 84, 84))
        net = Conv2D(32, 8, strides=(4, 4), activation='relu',
                     data_format="channels_first")(frames)
        net = Conv2D(64, 4, strides=(2, 2), activation='relu',
                     data_format="channels_first")(net)
        net = Conv2D(64, 3, strides=(1, 1), activation='relu',
                     data_format="channels_first")(net)
        flat = Flatten()(net)
        # Dueling heads: scalar state value and per-action advantages.
        value = Dense(512, activation='relu', name="dense_v1")(flat)
        value = Dense(1, name="dense_v2")(value)
        advantage = Dense(512, activation='relu', name="dense_adv1")(flat)
        advantage = Dense(self.num_actions, name="dense_adv2")(advantage)
        merged = concatenate([value, advantage])
        # Q(s, a) = V(s) + A(s, a) - mean_a A(s, a); the mean is held
        # constant for gradients via stop_gradient.
        q_out = Lambda(
            lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:]
            - tf.stop_gradient(K.mean(a[:, 1:], keepdims=True)),
            output_shape=(self.num_actions,))(merged)
        model = Model(input=frames, output=q_out)

        state = tf.placeholder(
            tf.float32,
            [None, self.state_length, self.frame_width, self.frame_height])
        q_values = model(state)

        return state, q_values, model
Example 4
Project: apex_dqn   Author: omurammm   File: test_agent.py    MIT License 6 votes vote down vote up
def build_network(self):
        """Build the dueling Q-network used by the test agent.

        Returns (state placeholder, Q-value tensor, keras model).
        """
        observation = Input(shape=(4, 84, 84))
        h = Conv2D(32, 8, strides=(4, 4), activation='relu',
                   data_format="channels_first")(observation)
        h = Conv2D(64, 4, strides=(2, 2), activation='relu',
                   data_format="channels_first")(h)
        h = Conv2D(64, 3, strides=(1, 1), activation='relu',
                   data_format="channels_first")(h)
        features = Flatten()(h)
        # State-value stream.
        v_stream = Dense(512, activation='relu', name="dense_v1")(features)
        v_stream = Dense(1, name="dense_v2")(v_stream)
        # Advantage stream.
        a_stream = Dense(512, activation='relu', name="dense_adv1")(features)
        a_stream = Dense(self.num_actions, name="dense_adv2")(a_stream)
        combined = concatenate([v_stream, a_stream])
        # Combine streams: Q = V + A - mean(A), with mean(A) excluded
        # from the gradient via stop_gradient.
        q_out = Lambda(
            lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:]
            - tf.stop_gradient(K.mean(a[:, 1:], keepdims=True)),
            output_shape=(self.num_actions,))(combined)
        model = Model(input=observation, output=q_out)

        state = tf.placeholder(
            tf.float32,
            [None, self.state_length, self.frame_width, self.frame_height])
        q_values = model(state)

        return state, q_values, model
Example 5
Project: apex_dqn   Author: omurammm   File: actor.py    MIT License 6 votes vote down vote up
def build_network(self):
        """Build this actor's dueling Q-network.

        Dense layer names carry the actor index (self.num) so multiple
        actors can coexist in one graph. Returns (state placeholder,
        Q-value tensor, keras model).
        """
        suffix = str(self.num)
        frames = Input(shape=(4, 84, 84))
        h = Conv2D(32, 8, strides=(4, 4), activation='relu',
                   data_format="channels_first")(frames)
        h = Conv2D(64, 4, strides=(2, 2), activation='relu',
                   data_format="channels_first")(h)
        h = Conv2D(64, 3, strides=(1, 1), activation='relu',
                   data_format="channels_first")(h)
        features = Flatten()(h)
        value = Dense(512, activation='relu', name="dense_v1_" + suffix)(features)
        value = Dense(1, name="dense_v2_" + suffix)(value)
        advantage = Dense(512, activation='relu', name="dense_adv1_" + suffix)(features)
        advantage = Dense(self.num_actions, name="dense_adv2_" + suffix)(advantage)
        merged = concatenate([value, advantage])
        # Dueling aggregation with the advantage mean treated as constant.
        q_out = Lambda(
            lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:]
            - tf.stop_gradient(K.mean(a[:, 1:], keepdims=True)),
            output_shape=(self.num_actions,))(merged)
        model = Model(input=frames, output=q_out)

        state = tf.placeholder(
            tf.float32,
            [None, self.state_length, self.frame_width, self.frame_height])
        q_values = model(state)

        return state, q_values, model
Example 6
Project: cnn-levelset   Author: wiseodd   File: localizer.py    MIT License 6 votes vote down vote up
def __init__(self, model_path=None):
        """Load a saved model from model_path, or build the two-head localizer."""
        if model_path is not None:
            self.model = self.load_model(model_path)
            return

        # Input: VGG16 last conv features, 7x7x512.
        features_in = Input(shape=(7, 7, 512))
        reduced = Convolution2D(128, 1, 1)(features_in)
        flat = Flatten()(reduced)

        # Classification head: 20-way softmax.
        h_cls = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(flat)
        h_cls = Dropout(p=0.5)(h_cls)
        cls_head = Dense(20, activation='softmax', name='cls')(h_cls)

        # Regression head: 4 linear outputs.
        h_reg = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(flat)
        h_reg = Dropout(p=0.5)(h_reg)
        reg_head = Dense(4, activation='linear', name='reg')(h_reg)

        # Joint model producing both heads.
        self.model = Model(input=features_in, output=[cls_head, reg_head])
Example 7
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 6 votes vote down vote up
def cnn_2d(input_shape):
    """2-D CNN classifier: two conv/pool stages feeding a dense softmax head."""
    net = Sequential([
        Conv2D(100, (3, 3), padding='valid', activation='relu',
               input_shape=input_shape),
        MaxPooling2D(pool_size=pool_size),
        Conv2D(200, (3, 3), padding='valid', activation='relu'),
        MaxPooling2D(pool_size=pool_size),
        Flatten(),
        Dense(200, activation='relu'),
        Dropout(0.5),
        Dense(84, activation='relu'),
        Dense(nb_classes, activation='softmax'),
    ])

    net.compile(loss='categorical_crossentropy',
                optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                               epsilon=1e-08, decay=0.0),
                metrics=['accuracy'])
    return net
Example 8
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: zfnet_keras.py    MIT License 6 votes vote down vote up
def ZFNet():
    """ZFNet classifier over (img_height, img_width, 3) inputs."""
    inputs = Input((img_height, img_width, 3))
    net = Conv2D(96, (7, 7), padding='valid', strides=2, activation='relu',
                 name='conv1')(inputs)
    net = MaxPooling2D((3, 3), strides=2, padding='same')(net)
    net = Conv2D(256, (5, 5), padding='valid', strides=2, activation='relu',
                 name='conv2')(net)
    net = keras.layers.ZeroPadding2D(1)(net)
    net = MaxPooling2D((3, 3), strides=2, padding='same')(net)
    # Three 3x3 same-padded convs, then a final pool.
    net = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv3')(net)
    net = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv4')(net)
    net = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv5')(net)
    net = MaxPooling2D((3, 3), strides=2, padding='same')(net)

    # Classifier head: two FC+dropout layers and a softmax output.
    net = Flatten()(net)
    for idx in (1, 2):
        net = Dense(4096, name='dense%d' % idx, activation='relu')(net)
        net = Dropout(0.5)(net)
    net = Dense(num_classes, activation='softmax')(net)

    return Model(inputs=inputs, outputs=net, name='model')
Example 9
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: lenet_keras.py    MIT License 6 votes vote down vote up
def LeNet():
    """LeNet-style classifier with sigmoid activations applied after pooling."""
    inputs = Input((img_height, img_width, 3))
    net = inputs
    # Two conv -> pool -> sigmoid stages.
    for filters, conv_name in ((6, 'conv1'), (16, 'conv2')):
        net = Conv2D(filters, (5, 5), padding='valid', activation=None,
                     name=conv_name)(net)
        net = MaxPooling2D((2, 2), padding='same')(net)
        net = Activation('sigmoid')(net)

    # Dense head ending in a softmax over num_classes.
    net = Flatten()(net)
    net = Dense(120, name='dense1', activation=None)(net)
    net = Dense(64, name='dense2', activation=None)(net)
    net = Dense(num_classes, activation='softmax')(net)

    return Model(inputs=inputs, outputs=net, name='model')
Example 10
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: alexnet_keras.py    MIT License 6 votes vote down vote up
def AlexNet():
    """AlexNet classifier over (img_height, img_width, 3) inputs."""
    inputs = Input((img_height, img_width, 3))
    net = Conv2D(96, (11, 11), padding='valid', strides=4, activation='relu',
                 name='conv1')(inputs)
    net = MaxPooling2D((3, 3), strides=2, padding='same')(net)
    net = Conv2D(256, (5, 5), padding='valid', activation='relu',
                 name='conv2')(net)
    net = keras.layers.ZeroPadding2D(1)(net)
    net = MaxPooling2D((3, 3), strides=2, padding='same')(net)
    net = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv3')(net)
    net = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv4')(net)
    net = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv5')(net)

    # Classifier head: two FC+dropout layers and a softmax output.
    net = Flatten()(net)
    for idx in (1, 2):
        net = Dense(4096, name='dense%d' % idx, activation='relu')(net)
        net = Dropout(0.5)(net)
    net = Dense(num_classes, activation='softmax')(net)

    return Model(inputs=inputs, outputs=net, name='model')
Example 11
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: cgan_cifar10_keras.py    MIT License 6 votes vote down vote up
def D_model():
    """Conditional GAN discriminator: four strided convs then a sigmoid score.

    The input carries channel + num_classes channels (image plus
    label-conditioning channels).
    """
    base = 32

    def conv_block(tensor, filters, block_name):
        # 5x5 stride-2 conv (no bias) followed by LeakyReLU.
        tensor = Conv2D(filters, (5, 5), padding='same', strides=(2, 2),
                        name=block_name,
                        kernel_initializer=RN(mean=0.0, stddev=0.02),
                        use_bias=False)(tensor)
        return LeakyReLU(alpha=0.2)(tensor)

    inputs = Input([img_height, img_width, channel + num_classes])
    h = conv_block(inputs, base, 'd_conv1')
    h = conv_block(h, base * 2, 'd_conv2')
    h = conv_block(h, base * 4, 'd_conv3')
    h = conv_block(h, base * 8, 'd_conv4')
    h = Flatten()(h)
    h = Dense(1, activation='sigmoid', name='d_out',
              kernel_initializer=RN(mean=0.0, stddev=0.02),
              bias_initializer=Constant())(h)
    return Model(inputs=inputs, outputs=h, name='D')
Example 12
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: dcgan_keras.py    MIT License 6 votes vote down vote up
def D_model():
    """DCGAN discriminator: four strided convs then a sigmoid validity score."""
    base = 32

    def down(tensor, filters, layer_name):
        # 5x5 stride-2 conv (no bias) followed by LeakyReLU.
        tensor = Conv2D(filters, (5, 5), padding='same', strides=(2, 2),
                        name=layer_name,
                        kernel_initializer=RN(mean=0.0, stddev=0.02),
                        use_bias=False)(tensor)
        return LeakyReLU(alpha=0.2)(tensor)

    inputs = Input((img_height, img_width, channel))
    feat = down(inputs, base, 'd_conv1')
    feat = down(feat, base * 2, 'd_conv2')
    feat = down(feat, base * 4, 'd_conv3')
    feat = down(feat, base * 8, 'd_conv4')
    feat = Flatten()(feat)
    score = Dense(1, activation='sigmoid', name='d_out',
                  kernel_initializer=RN(mean=0.0, stddev=0.02),
                  bias_initializer=Constant())(feat)
    return Model(inputs=inputs, outputs=score, name='D')
Example 13
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: dcgan_cifar10_keras.py    MIT License 6 votes vote down vote up
def D_model():
    """DCGAN (CIFAR-10 variant) discriminator: strided conv stack + sigmoid."""
    base = 32

    def disc_conv(tensor, filters, layer_name):
        # 5x5 stride-2 conv (no bias) followed by LeakyReLU.
        tensor = Conv2D(filters, (5, 5), padding='same', strides=(2, 2),
                        name=layer_name,
                        kernel_initializer=RN(mean=0.0, stddev=0.02),
                        use_bias=False)(tensor)
        return LeakyReLU(alpha=0.2)(tensor)

    inputs = Input((img_height, img_width, channel))
    x = disc_conv(inputs, base, 'd_conv1')
    x = disc_conv(x, base * 2, 'd_conv2')
    x = disc_conv(x, base * 4, 'd_conv3')
    x = disc_conv(x, base * 8, 'd_conv4')
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='d_out',
              kernel_initializer=RN(mean=0.0, stddev=0.02),
              bias_initializer=Constant())(x)
    return Model(inputs=inputs, outputs=x, name='D')
Example 14
Project: Keras-GAN   Author: eriklindernoren   File: context_encoder.py    MIT License 6 votes vote down vote up
def build_discriminator(self):
        """Discriminator scoring the realism of the filled-in (missing) region."""
        net = Sequential([
            # Two downsampling conv stages plus one stride-1 stage.
            Conv2D(64, kernel_size=3, strides=2,
                   input_shape=self.missing_shape, padding="same"),
            LeakyReLU(alpha=0.2),
            BatchNormalization(momentum=0.8),
            Conv2D(128, kernel_size=3, strides=2, padding="same"),
            LeakyReLU(alpha=0.2),
            BatchNormalization(momentum=0.8),
            Conv2D(256, kernel_size=3, padding="same"),
            LeakyReLU(alpha=0.2),
            BatchNormalization(momentum=0.8),
            Flatten(),
            Dense(1, activation='sigmoid'),
        ])
        net.summary()

        img = Input(shape=self.missing_shape)
        return Model(img, net(img))
Example 15
Project: Keras-GAN   Author: eriklindernoren   File: ccgan.py    MIT License 6 votes vote down vote up
def build_discriminator(self):
        """Build the CCGAN discriminator with two outputs.

        Returns Model(img -> [validity, label]): `validity` is a per-location
        conv map, `label` is a softmax over num_classes + 1 (the extra class
        presumably marks generated samples — confirm against training code).
        """
        model = Sequential()
        model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape))
        model.add(LeakyReLU(alpha=0.8))
        model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())

        model.summary()

        # Fix: the original created an identical `img = Input(...)` before the
        # Sequential stack and then shadowed it here, leaving a dead, unused
        # graph input. Create the input tensor only once.
        img = Input(shape=self.img_shape)
        features = model(img)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)

        label = Flatten()(features)
        label = Dense(self.num_classes+1, activation="softmax")(label)

        return Model(img, [validity, label])
Example 16
Project: Keras-GAN   Author: eriklindernoren   File: bigan.py    MIT License 6 votes vote down vote up
def build_encoder(self):
        """BiGAN encoder E: maps an image to a latent vector of latent_dim."""
        net = Sequential([
            Flatten(input_shape=self.img_shape),
            Dense(512),
            LeakyReLU(alpha=0.2),
            BatchNormalization(momentum=0.8),
            Dense(512),
            LeakyReLU(alpha=0.2),
            BatchNormalization(momentum=0.8),
            Dense(self.latent_dim),
        ])
        net.summary()

        img = Input(shape=self.img_shape)
        return Model(img, net(img))
Example 17
Project: Keras-GAN   Author: eriklindernoren   File: bigan.py    MIT License 6 votes vote down vote up
def build_discriminator(self):
        """BiGAN discriminator over joint (z, img) pairs -> validity in [0, 1]."""
        z = Input(shape=(self.latent_dim, ))
        img = Input(shape=self.img_shape)
        # Concatenate latent code with the flattened image.
        joint = concatenate([z, Flatten()(img)])

        h = joint
        # Three identical dense blocks with dropout.
        for _ in range(3):
            h = Dense(1024)(h)
            h = LeakyReLU(alpha=0.2)(h)
            h = Dropout(0.5)(h)
        validity = Dense(1, activation="sigmoid")(h)

        return Model([z, img], validity)
Example 18
Project: Keras-GAN   Author: eriklindernoren   File: pixelda.py    MIT License 6 votes vote down vote up
def build_classifier(self):
        """Task classifier: five downsampling conv blocks, then a softmax head."""

        def clf_layer(layer_input, filters, f_size=4, normalization=True):
            """Strided conv -> LeakyReLU, optionally instance-normalized."""
            block = Conv2D(filters, kernel_size=f_size, strides=2,
                           padding='same')(layer_input)
            block = LeakyReLU(alpha=0.2)(block)
            return InstanceNormalization()(block) if normalization else block

        img = Input(shape=self.img_shape)

        # First block skips normalization; widths then grow 2x, 4x, 8x, 8x.
        feats = clf_layer(img, self.cf, normalization=False)
        for mult in (2, 4, 8, 8):
            feats = clf_layer(feats, self.cf * mult)

        class_pred = Dense(self.num_classes, activation='softmax')(Flatten()(feats))

        return Model(img, class_pred)
Example 19
Project: Keras-GAN   Author: eriklindernoren   File: cogan.py    MIT License 6 votes vote down vote up
def build_discriminators(self):
        """Build two discriminators sharing a common dense trunk.

        The Sequential trunk is applied to both inputs so its weights are
        shared; each domain gets its own unshared sigmoid head.
        """
        img1 = Input(shape=self.img_shape)
        img2 = Input(shape=self.img_shape)

        shared = Sequential([
            Flatten(input_shape=self.img_shape),
            Dense(512),
            LeakyReLU(alpha=0.2),
            Dense(256),
            LeakyReLU(alpha=0.2),
        ])

        emb1 = shared(img1)
        emb2 = shared(img2)

        validity1 = Dense(1, activation='sigmoid')(emb1)
        validity2 = Dense(1, activation='sigmoid')(emb2)

        return Model(img1, validity1), Model(img2, validity2)
Example 20
Project: Keras-GAN   Author: eriklindernoren   File: gan.py    MIT License 6 votes vote down vote up
def build_discriminator(self):
        """MLP discriminator: flattened image -> validity score in [0, 1]."""
        net = Sequential([
            Flatten(input_shape=self.img_shape),
            Dense(512),
            LeakyReLU(alpha=0.2),
            Dense(256),
            LeakyReLU(alpha=0.2),
            Dense(1, activation='sigmoid'),
        ])
        net.summary()

        img = Input(shape=self.img_shape)
        return Model(img, net(img))
Example 21
Project: Keras-GAN   Author: eriklindernoren   File: bgan.py    MIT License 6 votes vote down vote up
def build_discriminator(self):
        """Boundary-seeking GAN discriminator: a small MLP over the flat image."""
        stack = Sequential([
            Flatten(input_shape=self.img_shape),
            Dense(512),
            LeakyReLU(alpha=0.2),
            Dense(256),
            LeakyReLU(alpha=0.2),
            Dense(1, activation='sigmoid'),
        ])
        stack.summary()

        image = Input(shape=self.img_shape)
        score = stack(image)

        return Model(image, score)
Example 22
Project: rogueinabox   Author: rogueinabox   File: models.py    GNU General Public License v3.0 6 votes vote down vote up
def build_model(self):
        """Build and compile the DQN conv net.

        When self.padding is set, a ZeroPadding2D layer pads one row on
        each side before the first convolution.
        """
        initializer = initializers.random_normal(stddev=0.02)
        state_shape = (self.layers, self.rows, self.columns)

        net = Sequential()
        if self.padding:
            net.add(ZeroPadding2D(padding=(1, 0), data_format="channels_first",
                                  input_shape=state_shape))
        net.add(Conv2D(32, (8, 8), activation="relu", data_format="channels_first",
                       strides=(4, 4), kernel_initializer=initializer,
                       padding='same', input_shape=state_shape))
        net.add(Conv2D(64, (4, 4), activation="relu", data_format="channels_first",
                       strides=(2, 2), kernel_initializer=initializer,
                       padding='same'))
        net.add(Conv2D(64, (3, 3), activation="relu", data_format="channels_first",
                       strides=(1, 1), kernel_initializer=initializer,
                       padding='same'))
        net.add(Flatten())
        net.add(Dense(512, activation="relu", kernel_initializer=initializer))
        # Linear output: one Q-value per action.
        net.add(Dense(self.actions_num, kernel_initializer=initializer))

        net.compile(loss='mse', optimizer=Adam(lr=1e-6))
        return net
Example 23
Project: derplearning   Author: notkarol   File: line_train.py    MIT License 6 votes vote down vote up
def create_model(input_shape, n_output, n_blocks=2):
    """Conv net: a stem conv stage, n_blocks double-conv stages, dense head."""
    net = Sequential()
    net.add(Conv2D(96, (5, 5), padding='same', input_shape=input_shape))
    net.add(BatchNormalization())
    net.add(Activation('elu'))
    net.add(MaxPooling2D(pool_size=2))

    for _ in range(n_blocks):
        # Two 3x3 convs per block, then downsample.
        for _ in range(2):
            net.add(Conv2D(32, (3, 3), padding='same'))
            net.add(BatchNormalization())
            net.add(Activation('elu'))
        net.add(MaxPooling2D(pool_size=2))

    net.add(Flatten())
    net.add(Dense(100))
    net.add(Activation('elu'))
    net.add(Dense(n_output))

    return net
Example 24
Project: mtrl-auto-uav   Author: brunapearson   File: mtrl_network.py    MIT License 5 votes vote down vote up
def create_model():
    """Two-branch CNN over 224x224x3 images.

    One branch feeds the position outputs (x, y, z); the other, which adds
    dropout after its first dense layer, feeds the quaternion outputs
    (qw, qx, qy, qz). The branches share structure but not weights.
    """
    input_img = Input(shape=(224, 224, 3))

    def conv_trunk(tensor):
        # Three conv/pool stages, then flatten. Called twice, so each
        # branch gets its own independently-weighted copy.
        for filters in (16, 32, 64):
            tensor = Conv2D(filters, kernel_size=3, activation='relu')(tensor)
            tensor = MaxPooling2D(pool_size=(2, 2))(tensor)
        return Flatten()(tensor)

    # Quaternion branch (with dropout).
    q = conv_trunk(input_img)
    q = Dense(500, activation='relu')(q)
    q = Dropout(0.20)(q)
    q = Dense(100, activation='relu')(q)
    q = Dense(20, activation='relu')(q)

    # Position branch.
    p = conv_trunk(input_img)
    p = Dense(500, activation='relu')(p)
    p = Dense(100, activation='relu')(p)
    p = Dense(20, activation='relu')(p)

    outputs = [
        Dense(1, activation='linear', name='input_x')(p),
        Dense(1, activation='linear', name='input_y')(p),
        Dense(1, activation='linear', name='input_z')(p),
        Dense(1, activation='linear', name='input_qw')(q),
        Dense(1, activation='linear', name='input_qx')(q),
        Dense(1, activation='linear', name='input_qy')(q),
        Dense(1, activation='linear', name='input_qz')(q),
    ]

    return Model(inputs=input_img, outputs=outputs)
Example 25
Project: VisualNN   Author: angelhunt   File: layers_export.py    GNU General Public License v3.0 5 votes vote down vote up
def dense(layer, layer_in, layerId, tensor=True):
    """Export a Dense layer from the intermediate net description.

    Inserts a Flatten before the Dense when the input shape has more than
    one dimension. When `tensor` is True the layer is applied to layer_in
    and the resulting tensor is stored; otherwise the layer object itself.
    Returns a dict mapping layer ids to layers/tensors.
    """
    out = {}
    params = layer['params']
    if len(layer['shape']['input']) > 1:
        # Dense needs a flat vector; prepend an implicit Flatten layer.
        out[layerId + 'Flatten'] = Flatten()(*layer_in)
        layer_in = [out[layerId + 'Flatten']]

    def initializer(key):
        # Translate filler names through fillerMap when known; otherwise
        # pass the raw value straight through to keras.
        filler = params[key]
        return fillerMap[filler] if filler in fillerMap else filler

    # Regularizers/constraints default to None unless present in params.
    kwargs = {
        'units': params['num_output'],
        'kernel_initializer': initializer('weight_filler'),
        'bias_initializer': initializer('bias_filler'),
        'use_bias': params['use_bias'],
        'kernel_regularizer': None,
        'bias_regularizer': None,
        'activity_regularizer': None,
        'kernel_constraint': None,
        'bias_constraint': None,
    }
    if 'kernel_regularizer' in params:
        kwargs['kernel_regularizer'] = regularizerMap[params['kernel_regularizer']]
    if 'bias_regularizer' in params:
        kwargs['bias_regularizer'] = regularizerMap[params['bias_regularizer']]
    if 'activity_regularizer' in params:
        kwargs['activity_regularizer'] = regularizerMap[params['activity_regularizer']]
    if 'kernel_constraint' in params:
        kwargs['kernel_constraint'] = constraintMap[params['kernel_constraint']]
    if 'bias_constraint' in params:
        kwargs['bias_constraint'] = constraintMap[params['bias_constraint']]

    out[layerId] = Dense(**kwargs)
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out
Example 26
Project: VisualNN   Author: angelhunt   File: layers_export.py    GNU General Public License v3.0 5 votes vote down vote up
def flatten(layer, layer_in, layerId, tensor=True):
    """Export a Flatten layer; apply it to layer_in when `tensor` is True."""
    flat = Flatten()
    if tensor:
        flat = flat(*layer_in)
    return {layerId: flat}
Example 27
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """Round-trip a minimal Flatten-only model through the keras importer."""
        model = Sequential([Flatten(input_shape=(64, 10))])
        model.build()
        self.keras_type_test(model, 0, 'Flatten')
Example 28
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_export(self):
        """Export an Input->Flatten net and verify the resulting keras layer."""
        fixture = os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                               'keras_export_test.json')
        with open(fixture, 'r') as tests:
            response = json.load(tests)
        net = yaml.safe_load(json.dumps(response['net']))
        # Wire a minimal two-layer net: l0 (Input) feeding l1 (Flatten).
        net = {'l0': net['Input'], 'l1': net['Flatten']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = flatten(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Flatten')
Example 29
Project: MODS_ConvNet   Author: santiagolopezg   File: hipster_net.py    MIT License 5 votes vote down vote up
def cifar():
    """VGG-style two-class network for (1, 256, 192) Theano-ordered inputs."""
    # Theano dimension ordering: channels first.
    K.set_image_dim_ordering('th')
    img_input = Input(shape=(1, 256, 192))

    x = img_input
    # Per stage: (filter count, number of conv layers), pooled afterwards.
    for block, (filters, n_convs) in enumerate(
            [(64, 2), (128, 3), (256, 3), (512, 3)], start=1):
        for conv in range(1, n_convs + 1):
            x = Convolution2D(filters, 3, 3, activation='relu',
                              border_mode='same',
                              name='conv%d_%d' % (block, conv))(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='pool%d' % block)(x)

    # Dense head: two FC layers, then a two-way softmax.
    x = Flatten(name='flatten')(x)
    x = Dense(1000, activation='relu', name='fc1')(x)
    x = Dense(1000, activation='relu', name='fc2')(x)
    x = Dense(2, activation='softmax', name='pred')(x)

    return Model(img_input, x)
Example 30
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari_multi.py    MIT License 5 votes vote down vote up
def deepMindAtariNet(nbClasses, inputShape, includeTop=True):
        """Build the three-conv-layer DeepMind Atari network.

        nbClasses: number of output classes.
        inputShape: input shape without the batch dimension.
        includeTop: when False, return only the convolutional trunk.
        """
        frames = Input(shape=inputShape)
        trunk = Conv2D(32, 8, 8, subsample=(4, 4), activation='relu',
                       border_mode='same', name='conv1')(frames)
        trunk = Conv2D(64, 4, 4, subsample=(2, 2), activation='relu',
                       border_mode='same', name='conv2')(trunk)
        trunk = Conv2D(64, 3, 3, activation='relu',
                       border_mode='same', name='conv3')(trunk)
        if not includeTop:
            return Model(frames, trunk)
        head = Flatten(name='flatten')(trunk)
        head = Dense(512, activation='relu', name='dense1')(head)
        head = Dense(nbClasses, activation='softmax', name='output')(head)
        return Model(frames, head)
Example 31
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari_multi.py    MIT License 5 votes vote down vote up
def setupModel(self):
        '''Setup models:
        self.actionModel is the action predictions.
        self.valueModel is the prediction of the value function.
        self.model is the model with both outputs.
        '''
        inputShape = (self.D, self.D, self.nbImgInState)
        # Convolutional trunk without the top, shared by both heads.
        model = self.deepMindAtariNet(self.nbActionClasses, inputShape, includeTop=False)
        inp = Input(shape=inputShape)
        x = model(inp)
        x = Flatten()(x)
        x = Dense(512, activation='relu', name='dense1')(x)

        action = Dense(self.nbActionClasses, activation='softmax', name='action')(x)
        self.actionModel = Model(inp, action)
        # Should we compile model?

        value = Dense(1, activation='linear', name='value')(x)
        self.valueModel = Model(inp, value)
        # Should we compile model?

        self.model = Model(inp, [action, value])
        actionAndEntropyLoss = makeActionAndEntropyLossA3C(self.entropyBeta)
        loss = {'action': actionAndEntropyLoss, 'value': 'mse'}
        loss_weights = {'action': 1, 'value': self.mseBeta}
        optim = RMSprop(self.learningRate, self.decayRate)
        # Fix: loss_weights was computed but never passed to compile(), so the
        # value loss was not being scaled by mseBeta. Pass it explicitly.
        self.model.compile(optim, loss, loss_weights=loss_weights) # Need to make it possible to set other optimizers

        if self.resume:
            self.model.load_weights(self.modelFileName)
            return
Example 32
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari.py    MIT License 5 votes vote down vote up
def setupModel(self):
        '''Setup models:
        self.actionModel is the action predictions.
        self.valueModel is the prediction of the value function.
        self.model is the combined model with both outputs, compiled for
        training with the A3C action/entropy loss and an mse value loss.
        '''
        if self.resume:
            self.model = load_model(self.modelFileName)
            # Need the other models as well...
            return
        inputShape = (self.D, self.D, self.nbImgInState)
        # Convolutional trunk only (includeTop=False); the dense head is added here.
        model = self.deepMindAtariNet(self.nbClasses, inputShape, includeTop=False)
        inp = Input(shape=inputShape)
        x = model(inp)
        x = Flatten()(x)
        x = Dense(512, activation='relu', name='dense1')(x)

        action = Dense(self.nbClasses, activation='softmax', name='action')(x)
        self.actionModel = Model(inp, action)
        # actionModel/valueModel are used for prediction only, so they are not compiled.

        value = Dense(1, activation='linear', name='value')(x)
        self.valueModel = Model(inp, value)

        self.model = Model(inp, [action, value])
        actionAndEntropyLoss = makeActionAndEntropyLossA3C(self.entropyBeta)
        loss = {'action': actionAndEntropyLoss, 'value': 'mse'}
        loss_weights = {'action': 1, 'value': self.mseBeta}
        optim = RMSprop(self.learningRate, self.decayRate)
        # BUG FIX: loss_weights was built but never passed to compile(), so the
        # value loss was not scaled by mseBeta as intended.
        self.model.compile(optim, loss, loss_weights=loss_weights)  # Need to make it possible to set other optimizers
Example 33
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_a2c.py    MIT License 5 votes vote down vote up
def setupModel(self):
        '''Setup models:
        self.actionModel is the action predictions.
        self.valueModel is the prediction of the value function V.
        self.model is the combined model with both outputs, compiled for
        training with the A3C action/entropy loss and an mse value loss.
        '''
        inputShape = (self.D, self.D, self.nbImgInState)

        inp = Input(shape=inputShape)
        x = Conv2D(32, 8, 8, subsample=(4, 4), activation='relu', border_mode='same', name='conv1')(inp)
        x = Conv2D(64, 4, 4, subsample=(2, 2), activation='relu', border_mode='same', name='conv2')(x)
        x = Conv2D(64, 3, 3, activation='relu', border_mode='same', name='conv3')(x)
        x = Flatten(name='flatten')(x)
        x = Dense(512, activation='relu', name='dense1')(x)

        action = Dense(self.nbActionClasses, activation='softmax', name='action')(x)
        self.actionModel = Model(inp, action)
        # actionModel/valueModel are used for prediction only, so they are not compiled.

        value = Dense(1, activation='linear', name='value')(x)
        self.valueModel = Model(inp, value)

        self.model = Model(inp, [action, value])

        actionAndEntropyLoss = makeActionAndEntropyLossA3C(self.entropyBeta)
        loss = {'action': actionAndEntropyLoss, 'value': 'mse'}
        loss_weights = {'action': 1, 'value': self.mseBeta}

        optim = RMSprop(self.learningRate, self.decayRate)
        # BUG FIX: loss_weights was built but never passed to compile(), so the
        # value loss was not scaled by mseBeta as intended.
        self.model.compile(optim, loss, loss_weights=loss_weights)

        if self.resume:
            self.model.load_weights(self.modelFileName)
            return
Example 34
Project: oslodatascience-rl   Author: Froskekongen   File: erlenda_pong_parallel.py    MIT License 5 votes vote down vote up
def create_perc_model(input_dim,hidden_dim):
    """Build a single-hidden-layer perceptron over a flattened 80x80x1 frame.

    Returns the (input tensor, output tensor) pair for the caller to wrap in
    a Model. NOTE(review): input_dim is accepted but unused — the input shape
    is fixed at (80, 80, 1); confirm whether callers expect it to take effect.
    """
    frame_input = Input(shape=(80,80,1), dtype='float32', name='main_input')
    hidden = Flatten()(frame_input)
    hidden = Dense(hidden_dim, activation='relu')(hidden)
    probability = Dense(1, activation='sigmoid')(hidden)
    return frame_input, probability
Example 35
Project: oslodatascience-rl   Author: Froskekongen   File: erlenda_pong_parallel.py    MIT License 5 votes vote down vote up
def create_conv_model(input_dim):
    """Build a two-conv-layer net over an 80x80x1 frame with a sigmoid output.

    Returns the (input tensor, output tensor) pair for the caller to wrap in
    a Model. NOTE(review): input_dim is accepted but unused — the input shape
    is fixed at (80, 80, 1); confirm whether callers expect it to take effect.
    """
    frame_input = Input(shape=(80,80,1), dtype='float32', name='main_input')
    features = frame_input
    # Two identical 4x4 'same' conv layers with relu.
    for _ in range(2):
        features = Convolution2D(32, 4, 4, border_mode='same', activation='relu')(features)
    features = Flatten()(features)
    probability = Dense(1, activation='sigmoid')(features)
    return frame_input, probability
Example 36
Project: Jetson-RaceCar-AI   Author: ardamavi   File: get_model.py    Apache License 2.0 5 votes vote down vote up
def get_model():
    """Build and compile a two-input model.

    Inputs: a 500x500x1 camera image and a 3-component lidar reading.
    The image branch (three convs + pooling + dense) and the lidar branch
    (single dense) are concatenated and mapped to 2 sigmoid outputs.
    Compiled with mse loss and the adadelta optimizer.
    """
    img_inputs = Input(shape=(500, 500, 1))
    lidar_inputs = Input(shape=(3,))

    # Image branch.
    img_branch = Conv2D(32, (4,4), strides=(2,2))(img_inputs)
    img_branch = Conv2D(32, (4,4), strides=(2,2))(img_branch)
    img_branch = Conv2D(32, (3,3), strides=(1,1))(img_branch)
    img_branch = Activation('relu')(img_branch)
    img_branch = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(img_branch)
    img_branch = Flatten()(img_branch)
    img_branch = Dense(32)(img_branch)

    # Lidar branch: project the 3 readings to the same width as the image branch.
    lidar_branch = Dense(32)(lidar_inputs)

    # Fuse branches and map to the 2 outputs.
    fused = concatenate([img_branch, lidar_branch])
    fused = Dense(10)(fused)
    fused = Activation('relu')(fused)
    fused = Dropout(0.5)(fused)

    outputs = Dense(2)(fused)
    outputs = Activation('sigmoid')(outputs)

    model = Model(inputs=[img_inputs, lidar_inputs], outputs=[outputs])
    model.compile(loss='mse', optimizer='adadelta', metrics=['accuracy'])

    print(model.summary())

    return model
Example 37
Project: phoneticSimilarity   Author: ronggong   File: models.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def createModel_schluter_valid(input, num_filter, height_filter, width_filter, filter_density, pool_n_row,
                pool_n_col, dropout):
    """
    Schluter-style conv block: zero-pad, valid conv with relu, max-pool,
    dropout, flatten. Pool dimensions given as 'all' collapse that axis
    entirely (pool size equals the feature-map extent).
    :param input: input tensor (channels_first)
    :param num_filter: base number of conv filters
    :param height_filter: conv kernel height
    :param width_filter: conv kernel width
    :param filter_density: multiplier applied to num_filter
    :param pool_n_row: pool rows, or 'all' to pool over the full height
    :param pool_n_col: pool cols, or 'all' to pool over the full width
    :param dropout: dropout rate applied after pooling
    :return: flattened feature tensor
    """

    x = ZeroPadding2D(padding=(0, int(width_filter/2)),  data_format="channels_first")(input)

    x = Conv2D(int(num_filter * filter_density), (height_filter, width_filter), padding="valid",
                       data_format="channels_first",
                       activation='relu')(x)

    conv_shape = K.int_shape(x)

    # 'all' means "pool over the entire axis"; otherwise use the given size.
    pool_rows = conv_shape[2] if pool_n_row == 'all' else pool_n_row
    pool_cols = conv_shape[3] if pool_n_col == 'all' else pool_n_col
    x = MaxPooling2D(pool_size=(pool_rows, pool_cols), padding='same', data_format="channels_first")(x)

    x = Dropout(dropout)(x)
    x = Flatten()(x)

    return x
Example 38
Project: SSD_keras_restnet   Author: hzm8341   File: SSD_resnet.py    MIT License 5 votes vote down vote up
def resnet_34(width,height,channel,classes):
    """Build a ResNet-34 classifier.

    Stem conv + max-pool, then four stages of identity blocks with
    (64, 128, 256, 512) filters and (3, 4, 6, 3) blocks per stage; stages
    after the first open with a strided block that projects the shortcut.
    Ends with 7x7 average pooling and a softmax over `classes`.
    """
    inpt = Input(shape=(width, height, channel))
    x = ZeroPadding2D((3, 3))(inpt)

    # conv1: stem
    x = Conv2d_BN(x, nb_filter=64, kernel_size=(7, 7), strides=(2, 2), padding='valid')
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    # conv2_x .. conv5_x
    stage_config = [(64, 3), (128, 4), (256, 6), (512, 3)]
    for stage_idx, (nb_filter, n_blocks) in enumerate(stage_config):
        for block_idx in range(n_blocks):
            if stage_idx > 0 and block_idx == 0:
                # First block of stages 2-4 downsamples and projects the shortcut.
                x = identity_Block(x, nb_filter=nb_filter, kernel_size=(3, 3), strides=(2, 2), with_conv_shortcut=True)
            else:
                x = identity_Block(x, nb_filter=nb_filter, kernel_size=(3, 3))

    x = AveragePooling2D(pool_size=(7, 7))(x)
    x = Flatten()(x)
    x = Dense(classes, activation='softmax')(x)

    model = Model(inputs=inpt, outputs=x)
    return model
Example 39
Project: SSD_keras_restnet   Author: hzm8341   File: SSD_resnet.py    MIT License 5 votes vote down vote up
def resnet_50(input_shape, num_classes=21):
    """Build a ResNet-50 classifier.

    :param input_shape: shape of the input tensor, e.g. (224, 224, 3)
    :param num_classes: width of the final softmax layer
    :return: a keras Model mapping images to class probabilities
    """

    input_tensor = Input(shape=input_shape)

    x = ZeroPadding2D((3, 3))(input_tensor)
    x = Conv2d_BN(x, nb_filter=64, kernel_size=(7, 7), strides=(2, 2), padding='valid')
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    #conv2_x
    x = bottleneck_Block(x, nb_filters=[64,64,256],strides=(1,1),with_conv_shortcut=True)
    x = bottleneck_Block(x, nb_filters=[64,64,256])
    x = bottleneck_Block(x, nb_filters=[64,64,256])

    #conv3_x
    x = bottleneck_Block(x, nb_filters=[128, 128, 512],strides=(2,2),with_conv_shortcut=True)
    x = bottleneck_Block(x, nb_filters=[128, 128, 512])
    x = bottleneck_Block(x, nb_filters=[128, 128, 512])
    x = bottleneck_Block(x, nb_filters=[128, 128, 512])

    #conv4_x
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024],strides=(2,2),with_conv_shortcut=True)
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024])
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024])
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024])
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024])
    x = bottleneck_Block(x, nb_filters=[256, 256, 1024])

    #conv5_x
    x = bottleneck_Block(x, nb_filters=[512, 512, 2048], strides=(2, 2), with_conv_shortcut=True)
    x = bottleneck_Block(x, nb_filters=[512, 512, 2048])
    x = bottleneck_Block(x, nb_filters=[512, 512, 2048])

    x = AveragePooling2D(pool_size=(7, 7))(x)
    x = Flatten()(x)
    # BUG FIX: was Dense(classes, ...) — 'classes' is undefined in this
    # function; the parameter is named num_classes (NameError at runtime).
    x = Dense(num_classes, activation='softmax')(x)

    # BUG FIX: was Model(inputs=inpt, ...) — 'inpt' is undefined here; the
    # input layer is input_tensor.
    model = Model(inputs=input_tensor, outputs=x)
    return model
Example 40
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: vgg16_keras.py    MIT License 5 votes vote down vote up
def VGG16():
    """Build a VGG-16 network over (img_height, img_width, 3) inputs.

    Five conv blocks of (64, 128, 256, 512, 512) filters with (2, 2, 3, 3, 3)
    convolutions each, every block followed by 2x2 max pooling; then two
    4096-unit dense layers with dropout and a softmax over num_classes.
    Layer names match the standard convN_M convention.
    """
    inputs = Input((img_height, img_width, 3))
    x = inputs
    blocks = ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3))
    for block_no, (n_filters, n_convs) in enumerate(blocks, start=1):
        for conv_no in range(1, n_convs + 1):
            x = Conv2D(n_filters, (3, 3), padding='same', strides=1, activation='relu',
                       name='conv{}_{}'.format(block_no, conv_no))(x)
        x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)

    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=x, name='model')
    return model
Example 41
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: easy_keras.py    MIT License 5 votes vote down vote up
def Mynet():
    """Build a VGG-like network over (img_height, img_width, 3) inputs.

    Five conv blocks ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3))
    each followed by 2x2 max pooling, then two 4096-unit dense layers
    with dropout and a softmax over num_classes.
    """
    inputs = Input((img_height, img_width, 3))
    x = inputs
    # block conv1
    for i in range(2):
        x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv2
    for i in range(2):
        x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv3
    for i in range(3):
        x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv4
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv5
    # BUG FIX: layer names used format(i) (conv5_0..conv5_2), inconsistent
    # with the 1-based conv{N}_{M} naming of every other block.
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_{}'.format(i+1))(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model
Example 42
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: main_keras.py    MIT License 5 votes vote down vote up
def Mynet():
    """Build a small CNN over (img_height, img_width, 3) inputs.

    Four blocks of two (conv + batch-norm) pairs with (32, 64, 128, 256)
    filters, each block followed by 2x2 max pooling; then two 1024-unit
    dense layers with dropout and a softmax over num_classes.
    """
    inputs = Input((img_height, img_width, 3))
    x = inputs
    for block_no, n_filters in enumerate((32, 64, 128, 256), start=1):
        for conv_no in (1, 2):
            x = Conv2D(n_filters, (3, 3), padding='same', activation='relu',
                       name='conv{}_{}'.format(block_no, conv_no))(x)
            x = BatchNormalization()(x)
        x = MaxPooling2D((2,2), padding='same')(x)

    x = Flatten()(x)
    x = Dense(1024, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model
Example 43
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: gan_keras.py    MIT License 5 votes vote down vote up
def D_model():
    """GAN discriminator: flatten the image, two LeakyReLU dense layers
    (512 then 256 units), sigmoid real/fake output."""
    inputs = Input((img_height, img_width, channel))
    h = Flatten()(inputs)
    for units, layer_name in ((512, 'd_dense1'), (256, 'd_dense2')):
        h = Dense(units, name=layer_name)(h)
        h = LeakyReLU(alpha=0.2)(h)
    h = Dense(1, activation='sigmoid', name='d_out')(h)
    return Model(inputs, h, name='D')
Example 44
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: gan_cifar10_keras.py    MIT License 5 votes vote down vote up
def D_model():
    """GAN discriminator: flatten the image, two LeakyReLU dense layers
    (base*2 then base units), sigmoid real/fake output."""
    inputs = Input((img_height, img_width, channel))
    base = 512
    h = Flatten()(inputs)
    for units, layer_name in ((base * 2, 'd_dense1'), (base, 'd_dense2')):
        h = Dense(units, name=layer_name)(h)
        h = LeakyReLU(alpha=0.2)(h)
    h = Dense(1, activation='sigmoid', name='d_out')(h)
    return Model(inputs, h, name='D')
Example 45
Project: DQN_Agent   Author: Fritz449   File: QNeuralNetwork.py    Apache License 2.0 5 votes vote down vote up
def create_model(self):
        """Build the Q-network as self.model: three conv layers over the state
        input, then either a dueling head (separate advantage/value streams)
        or a single 512-unit dense head producing one Q-value per action.

        Uses the old Keras 1 Convolution2D API (init/subsample/border_mode).
        """
        my_init = 'glorot_uniform'
        # This is the place where neural network model initialized
        self.state_in = Input(self.state_dim)
        # self.state_inp = BatchNormalization()(self.state_in)
        # Conv stack: 32x8x8/4 -> 64x4x4/2 -> 64x3x3/1, all relu, 'same' padding.
        self.l1 = Convolution2D(32, 8, 8, activation='relu', init=my_init, subsample=(4, 4), border_mode='same')(
            self.state_in)
        # self.l1bn = BatchNormalization()(self.l1)
        self.l2 = Convolution2D(64, 4, 4, activation='relu', init=my_init, subsample=(2, 2), border_mode='same')(
            self.l1)
        # self.l2bn = BatchNormalization()(self.l2)
        self.l3 = Convolution2D(64, 3, 3, activation='relu', init=my_init, subsample=(1, 1), border_mode='same')(
            self.l2)
        # self.l3bn = BatchNormalization()(self.l3)
        self.h = Flatten()(self.l3)

        if self.DUELING_ARCHITECTURE:
            # Separate 256-unit streams for advantage (a) and state value (v).
            self.hida = Dense(256, init=my_init, activation='relu')(self.h)
            self.hidv = Dense(256, init=my_init, activation='relu')(self.h)
            self.v = Dense(1)(self.hidv)
            self.a = Dense(self.action_dim)(self.hida)
            # NOTE(review): mode='concat' yields a vector of action_dim + 1
            # values rather than the usual dueling aggregation
            # Q = V + A - mean(A); presumably the combination happens in the
            # loss/target code — confirm against the training loop.
            self.q = merge([self.a, self.v], mode='concat')
        else:
            self.hid = Dense(512, init=my_init, activation='relu')(self.h)
            self.q = Dense(self.action_dim, init=my_init)(self.hid)
        self.model = Model(self.state_in, self.q)

    # def create_model(self):
    #     # This is the place where neural network model initialized
    #     self.state_in = Input(self.state_dim)  # This layer is required for any network.
    #     if self.DUELING_ARCHITECTURE:
    #         self.hida = Dense(10, activation='relu')(self.state_in)
    #         self.hidv = Dense(10, activation='relu')(self.state_in)
    #         self.v = Dense(1)(self.hidv)
    #         self.a = Dense(self.action_dim)(self.hida)
    #         self.q = merge([self.a, self.v], mode='concat')
    #     else:
    #         self.hid = Dense(64, activation='relu', init='lecun_uniform')(self.state_in)
    #         self.q = Dense(self.action_dim, init='lecun_uniform')(self.hid)
    #     self.model = Model(self.state_in, self.q)  # Complete the model 
Example 46
Project: Keras-GAN   Author: eriklindernoren   File: sgan.py    MIT License 5 votes vote down vote up
def build_discriminator(self):
        """Build the SGAN discriminator.

        A shared convolutional trunk maps an image to a feature vector;
        two dense heads predict (a) real/fake validity and (b) a class
        label over num_classes + 1 (the extra class covers fakes).
        """
        model = Sequential()
        trunk = [
            Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            Conv2D(64, kernel_size=3, strides=2, padding="same"),
            ZeroPadding2D(padding=((0,1),(0,1))),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            BatchNormalization(momentum=0.8),
            Conv2D(128, kernel_size=3, strides=2, padding="same"),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            BatchNormalization(momentum=0.8),
            Conv2D(256, kernel_size=3, strides=1, padding="same"),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            Flatten(),
        ]
        for layer in trunk:
            model.add(layer)

        model.summary()

        img = Input(shape=self.img_shape)
        features = model(img)

        valid = Dense(1, activation="sigmoid")(features)
        label = Dense(self.num_classes+1, activation="softmax")(features)

        return Model(img, [valid, label])
Example 47
Project: Keras-GAN   Author: eriklindernoren   File: cgan.py    MIT License 5 votes vote down vote up
def build_discriminator(self):
        """Build the CGAN discriminator.

        The flattened image is multiplied elementwise with an embedding of
        its class label, then scored real/fake by an MLP of three 512-unit
        LeakyReLU layers (dropout on the last two) and a sigmoid output.
        """
        model = Sequential()
        model.add(Dense(512, input_dim=np.prod(self.img_shape)))
        model.add(LeakyReLU(alpha=0.2))
        for _ in range(2):
            model.add(Dense(512))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.4))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()

        img = Input(shape=self.img_shape)
        label = Input(shape=(1,), dtype='int32')

        # Embed the label to the flattened-image size so the two can be multiplied.
        label_embedding = Flatten()(Embedding(self.num_classes, np.prod(self.img_shape))(label))
        flat_img = Flatten()(img)
        model_input = multiply([flat_img, label_embedding])

        validity = model(model_input)

        return Model([img, label], validity)
Example 48
Project: Keras-GAN   Author: eriklindernoren   File: infogan.py    MIT License 5 votes vote down vote up
def build_disk_and_q_net(self):
        """Build the InfoGAN discriminator and recognition (Q) networks.

        Both share one convolutional trunk over the image; the discriminator
        head is a single sigmoid unit, the Q head is a 128-unit relu layer
        followed by a softmax over num_classes.
        Returns (discriminator_model, q_model).
        """
        img = Input(shape=self.img_shape)

        # Shared convolutional trunk.
        model = Sequential()
        model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        # Two further strided conv blocks with 256 and 512 filters.
        for n_filters in (256, 512):
            model.add(Conv2D(n_filters, kernel_size=3, strides=2, padding="same"))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.25))
            model.add(BatchNormalization(momentum=0.8))
        model.add(Flatten())

        img_embedding = model(img)

        # Discriminator head.
        validity = Dense(1, activation='sigmoid')(img_embedding)

        # Recognition (Q) head.
        q_hidden = Dense(128, activation='relu')(img_embedding)
        label = Dense(self.num_classes, activation='softmax')(q_hidden)

        return Model(img, validity), Model(img, label)
Example 49
Project: Keras-GAN   Author: eriklindernoren   File: wgan.py    MIT License 5 votes vote down vote up
def build_critic(self):
        """Build the WGAN critic: a strided conv stack over the image ending
        in a single unbounded linear score."""
        model = Sequential()

        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        # Final two conv blocks: 64 filters stride 2, then 128 filters stride 1.
        for n_filters, stride in ((64, 2), (128, 1)):
            model.add(Conv2D(n_filters, kernel_size=3, strides=stride, padding="same"))
            model.add(BatchNormalization(momentum=0.8))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1))

        model.summary()

        critic_input = Input(shape=self.img_shape)
        score = model(critic_input)

        return Model(critic_input, score)
Example 50
Project: Keras-GAN   Author: eriklindernoren   File: wgan_gp.py    MIT License 5 votes vote down vote up
def build_critic(self):
        """Build the WGAN-GP critic: a strided conv stack over the image
        ending in a single unbounded linear score."""
        model = Sequential()
        layer_stack = [
            Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            Conv2D(32, kernel_size=3, strides=2, padding="same"),
            ZeroPadding2D(padding=((0,1),(0,1))),
            BatchNormalization(momentum=0.8),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            Conv2D(64, kernel_size=3, strides=2, padding="same"),
            BatchNormalization(momentum=0.8),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            Conv2D(128, kernel_size=3, strides=1, padding="same"),
            BatchNormalization(momentum=0.8),
            LeakyReLU(alpha=0.2),
            Dropout(0.25),
            Flatten(),
            Dense(1),
        ]
        for layer in layer_stack:
            model.add(layer)

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
Example 51
Project: Keras-GAN   Author: eriklindernoren   File: acgan.py    MIT License 5 votes vote down vote up
def build_generator(self):
        """Build the ACGAN generator.

        The latent noise vector is multiplied elementwise with an embedding
        of the class label, projected to a 7x7x128 tensor, upsampled twice
        through conv blocks, and mapped to a tanh image.
        """
        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        # Two upsampling conv blocks (128 then 64 filters).
        for n_filters in (128, 64):
            model.add(UpSampling2D())
            model.add(Conv2D(n_filters, kernel_size=3, padding="same"))
            model.add(Activation("relu"))
            model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))

        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img)
Example 52
Project: Keras-GAN   Author: eriklindernoren   File: acgan.py    MIT License 5 votes vote down vote up
def build_discriminator(self):
        """Build the ACGAN discriminator.

        A convolutional trunk extracts image features; two dense heads
        predict real/fake validity (sigmoid) and the class label (softmax).
        """
        model = Sequential()

        model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        # Remaining conv blocks, each preceded by batch norm:
        # 64 filters stride 2, then 128 filters stride 1.
        for n_filters, stride in ((64, 2), (128, 1)):
            model.add(BatchNormalization(momentum=0.8))
            model.add(Conv2D(n_filters, kernel_size=3, strides=stride, padding="same"))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.25))

        model.add(Flatten())
        model.summary()

        img = Input(shape=self.img_shape)

        # Extract feature representation
        features = model(img)

        # Determine validity and label of the image
        validity = Dense(1, activation="sigmoid")(features)
        label = Dense(self.num_classes, activation="softmax")(features)

        return Model(img, [validity, label])
Example 53
Project: Keras-GAN   Author: eriklindernoren   File: dcgan.py    MIT License 5 votes vote down vote up
def build_discriminator(self):
        """Build the DCGAN discriminator: four conv blocks (32, 64, 128, 256
        filters) followed by a single sigmoid real/fake output."""
        model = Sequential()

        model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        # Final two conv blocks: 128 filters stride 2, then 256 filters stride 1.
        for n_filters, stride in ((128, 2), (256, 1)):
            model.add(Conv2D(n_filters, kernel_size=3, strides=stride, padding="same"))
            model.add(BatchNormalization(momentum=0.8))
            model.add(LeakyReLU(alpha=0.2))
            model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
Example 54
Project: rogueinabox   Author: rogueinabox   File: models.py    GNU General Public License v3.0 5 votes vote down vote up
def build_model(self):
        """Three-tower conv net over a (layers, 22, 80) rogue map producing 5
        outputs, compiled with mse loss and RMSprop."""
        initializer = initializers.random_normal(stddev=0.02)

        input_img = Input(shape=(self.layers, 22, 80))
        # Towers 2 and 3 see every channel except the first (map) channel.
        input_2 = Lambda(lambda x: x[:, 1:, :, :], output_shape=lambda x: (None, self.layers - 1, 22, 80))(input_img)

        def conv_stack(t):
            # self.depth relu conv layers, 32 filters each, 'same' padding.
            for _ in range(self.depth):
                t = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same", activation='relu')(t)
            return t

        # Tower 1: full map, two convs, pooled down to 1x1.
        tower_1 = Conv2D(64, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same")(input_img)
        tower_1 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same")(tower_1)
        tower_1 = MaxPooling2D(pool_size=(22, 80), data_format="channels_first")(tower_1)

        # Tower 2: 2x2 pooled view, conv stack, pooled to 1x1.
        tower_2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_first")(input_2)
        tower_2 = conv_stack(tower_2)
        tower_2 = MaxPooling2D(pool_size=(11, 40), data_format="channels_first")(tower_2)

        # Tower 3: 3x6 pooled view, conv stack, pooled to 1x1.
        tower_3 = MaxPooling2D(pool_size=(3, 6), data_format="channels_first", padding='same')(input_2)
        tower_3 = conv_stack(tower_3)
        tower_3 = MaxPooling2D(pool_size=(8, 14), data_format="channels_first", padding='same')(tower_3)

        merged_layers = concatenate([tower_1, tower_2, tower_3], axis=1)
        flat_layer = Flatten()(merged_layers)
        predictions = Dense(5, kernel_initializer=initializer)(flat_layer)

        model = Model(inputs=input_img, outputs=predictions)
        rmsprop = RMSprop(lr=0.00025)
        model.compile(loss='mse', optimizer=rmsprop)
        return model
Example 55
Project: rogueinabox   Author: rogueinabox   File: models.py    GNU General Public License v3.0 5 votes vote down vote up
def build_model(self):
        """Three-tower conv net over the first two channels of a
        (layers, 22, 80) rogue map producing 4 outputs, compiled with mse
        loss and Adam."""
        initializer = initializers.random_normal(stddev=0.02)

        input_img = Input(shape=(self.layers, 22, 80))
        # All towers see only the first two channels.
        input_2 = Lambda(lambda x: x[:, :2, :, :], output_shape=lambda x: (None, 2, 22, 80))(input_img)

        # Tower 1: tall 10x1 kernels striding down the map.
        tower_1 = ZeroPadding2D(padding=(1, 0), data_format="channels_first")(input_2)
        tower_1 = Conv2D(32, (10, 1), data_format="channels_first", strides=(7, 1), kernel_initializer=initializer, padding="valid")(tower_1)
        tower_1 = Flatten()(tower_1)

        # Tower 2: wide 1x10 kernels striding across the map.
        tower_2 = Conv2D(32, (1, 10), data_format="channels_first", strides=(1, 7), kernel_initializer=initializer, padding="valid")(input_2)
        tower_2 = Flatten()(tower_2)

        # Tower 3: 3x3 convolution pooled down to a single position.
        tower_3 = Conv2D(32, (3, 3), data_format="channels_first", strides=(1, 1), kernel_initializer=initializer, padding="same")(input_2)
        tower_3 = MaxPooling2D(pool_size=(22, 80), data_format="channels_first")(tower_3)
        tower_3 = Flatten()(tower_3)

        merged_layers = concatenate([tower_1, tower_2, tower_3], axis=1)
        predictions = Dense(4, kernel_initializer=initializer)(merged_layers)

        model = Model(inputs=input_img, outputs=predictions)
        adam = Adam(lr=1e-6)
        model.compile(loss='mse', optimizer=adam)
        return model
Example 56
Project: TaiwanTrainVerificationCode2text   Author: linsamtw   File: load_model.py    Apache License 2.0 5 votes vote down vote up
def load_model():
    """Recreate the 6-digit captcha CNN and load its pre-trained weights.

    Four conv stages (32/64/128/256 filters) over a 60x200 RGB image feed a
    shared flattened feature into six parallel 37-way softmax heads, one per
    captcha character.  Weights are loaded from
    cnn_weight/verificatioin_code.h5 (sic — the typo matches the shipped
    weight file's actual name, so it must not be "fixed").
    """
    from keras.models import Model
    from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D

    tensor_in = Input((60, 200, 3))

    x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(tensor_in)
    x = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu')(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Conv2D(filters=256, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    x = Flatten()(x)
    x = Dropout(0.5)(x)

    # One independent 37-class softmax per captcha position.
    digits = [Dense(37, name='digit1', activation='softmax')(x),
              Dense(37, name='digit2', activation='softmax')(x),
              Dense(37, name='digit3', activation='softmax')(x),
              Dense(37, name='digit4', activation='softmax')(x),
              Dense(37, name='digit5', activation='softmax')(x),
              Dense(37, name='digit6', activation='softmax')(x)]

    model = Model(inputs=tensor_in, outputs=digits)
    model.compile(loss='categorical_crossentropy', optimizer='Adamax', metrics=['accuracy'])

    # Weight path only differs in separator style between platforms.
    if 'Windows' in platform.platform():
        model.load_weights('{}\\cnn_weight\\verificatioin_code.h5'.format(PATH))
    else:
        model.load_weights('{}/cnn_weight/verificatioin_code.h5'.format(PATH))

    return model
Example 57
Project: EUSIPCO2017 — Author: Veleslavia — File: singlelayer.py — License: GNU Affero General Public License v3.0
def build_model(n_classes):
    """Multi-scale mel-spectrogram CNN with one conv layer per branch.

    Six parallel branches convolve the input with every (m, n) kernel pair
    from m in {50, 70} (mel-band extent) and n in {1, 3, 5} (time extent),
    each followed by batch-norm, ELU, a pooling step that collapses the
    feature map, and a flatten.  The concatenated branch outputs feed a
    dropout layer and a softmax over `n_classes`.

    Written against the Keras 1.x API (positional Convolution2D kernel
    args, `border_mode`, `init`, `W_regularizer`, `merge`).  Relies on the
    module-level constants N_MEL_BANDS and SEGMENT_DUR.
    """

    # Input layout and channel axis depend on the backend dim ordering.
    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    m_sizes = [50, 70]          # kernel extents along the mel-band axis
    n_sizes = [1, 3, 5]         # kernel extents along the time axis
    n_filters = [128, 64, 32]   # filters per branch, indexed like n_sizes
    maxpool_const = 4

    layers = list()

    for m_i in m_sizes:
        for i, n_i in enumerate(n_sizes):
            x = Convolution2D(n_filters[i], m_i, n_i,
                              border_mode='same',
                              init='he_normal',
                              W_regularizer=l2(1e-5),
                              name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
            x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
            x = ELU()(x)
            # NOTE(review): SEGMENT_DUR/maxpool_const is true division under
            # Python 3 and could yield a float pool size — confirm SEGMENT_DUR
            # is a multiple of 4 or that this runs under Python 2.
            x = MaxPooling2D(pool_size=(N_MEL_BANDS, SEGMENT_DUR/maxpool_const), name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)
            x = Flatten(name=str(n_i)+'_'+str(m_i)+'_'+'flatten')(x)
            layers.append(x)

    # NOTE(review): the branches are already flattened to 2-D tensors here,
    # so concat_axis=3 (taken on the 'tf' path) looks suspicious — axis 1 is
    # the feature axis in both orderings; verify against the original project.
    x = merge(layers, mode='concat', concat_axis=channel_axis)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
    model = Model(melgram_input, x)

    return model
Example 58
Project: DeepCCS — Author: plpla — File: DeepCCS.py — License: GNU General Public License v3.0
def create_model(self):
    """Build and compile the DeepCCS regression network.

    A stack of seven 1-D convolutions over the one-hot encoded SMILES
    string is flattened and concatenated with the one-hot adduct vector,
    then passed through two 384-unit dense layers to a single linear CCS
    output.  The compiled model is stored on `self.model`; nothing is
    returned.

    Raises:
        ValueError: if either encoder has not been fit yet.
    """
    if len(self.smiles_encoder.converter) == 0 or len(self.adduct_encoder.converter) == 0:
        raise ValueError("Encoders must be fit before creating a model.")

    # SMILES branch: 1 + 6 Conv1D layers, each 64 filters of width 4.
    smile_input_layer = Input(shape=(250, len(self.smiles_encoder.converter)), name="smile")
    previous = Conv1D(64, kernel_size=4, activation='relu', kernel_initializer='normal')(smile_input_layer)
    for i in range(6):
        conv = Conv1D(64, kernel_size=4, activation='relu', kernel_initializer='normal')(previous)
        # Only the final pooling halves the sequence; earlier ones use stride 1.
        previous = MaxPooling1D(pool_size=2, strides=2 if i == 5 else 1)(conv)

    flat = Flatten()(previous)

    # Adduct branch: concatenated directly with the flattened SMILES features.
    adduct_input_layer = Input(shape=(len(self.adduct_encoder.converter),), name="adduct")
    remix_layer = keras.layers.concatenate([flat, adduct_input_layer], axis=-1)

    previous = remix_layer
    for _ in range(2):
        previous = Dense(384, activation="relu", kernel_initializer='normal')(previous)

    output = Dense(1, activation="linear")(previous)

    # BUGFIX: the original called Model(input=[...], outputs=...), mixing the
    # deprecated Keras 1 `input` keyword with the Keras 2 `outputs` keyword;
    # use the Keras 2 `inputs=` keyword consistently.
    model = Model(inputs=[smile_input_layer, adduct_input_layer], outputs=output)
    # Direct constructor call replaces the needless getattr(..., 'adam') lookup.
    model.compile(optimizer=keras.optimizers.Adam(lr=0.0001), loss='mean_squared_error')

    self.model = model
Example 59
Project: ocr_svc — Author: daveshap — File: keras_alphanumeric_model.py — License: MIT License
def instantiate_model():
    """Build and compile the alphanumeric OCR convnet.

    Two 3x3 conv layers, max-pool and dropout feed a 128-unit dense layer
    and a softmax head.  Uses the module-level `input_shape` and
    `num_classes`.  Returns the compiled model.
    """
    print('COMPILING MODEL')
    layer_stack = [
        Convolution2D(filters=32, kernel_size=(3, 3), strides=(1, 1),
                      activation='relu', input_shape=input_shape),
        Convolution2D(filters=32, kernel_size=(3, 3), strides=(1, 1),
                      activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax'),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    return model
Example 60
Project: keras_tfrecord — Author: indraforyou — File: mnist_tfrecord.py — License: MIT License
def arch(inp):
    """Small convnet head for MNIST: two strided 3x3 convs, flatten,
    128-unit dense, then a softmax over `nb_classes` (module-level).
    Returns the output tensor built on top of `inp` (Keras 1.x API).
    """
    x = Convolution2D(32, 3, 3, border_mode='valid', activation='relu', subsample=(2, 2))(inp)
    x = Convolution2D(32, 3, 3, activation='relu', subsample=(2, 2))(x)
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    out = Dense(nb_classes, activation='softmax')(x)
    return out
Example 61
Project: MODS_ConvNet — Author: santiagolopezg — File: little_foo3.py — License: MIT License
def foo():
    """MODS ConvNet (little_foo3 variant): two conv stages whose kernel
    sizes shrink 8 -> 6 -> 4 -> 2, then a 220-unit dense layer and a 2-unit
    sigmoid head.

    Uses the module-level `weight_init` and `dropout`; forces channels-first
    ('th') ordering on Keras versions newer than 1.0.3.
    """
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    def conv_bn_relu(filters, size, name, **kwargs):
        # Convolution -> BatchNorm -> ReLU with the shared initializer.
        model.add(Convolution2D(filters, size, size, init=weight_init, name=name, **kwargs))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Stage 1: 32 feature maps, kernels 8/6/4/2.
    conv_bn_relu(32, 8, 'conv1_1', input_shape=input_shape)
    conv_bn_relu(32, 6, 'conv1_2')
    conv_bn_relu(32, 4, 'conv1_3')
    conv_bn_relu(32, 2, 'conv1_4')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 208, out 104
    model.add(Dropout(dropout))

    # Stage 2: 64 feature maps, same kernel schedule.
    conv_bn_relu(64, 8, 'conv2_1')
    conv_bn_relu(64, 6, 'conv2_2')
    conv_bn_relu(64, 4, 'conv2_3')
    conv_bn_relu(64, 2, 'conv2_4')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 88, out 44
    model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(220, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 62
Project: MODS_ConvNet — Author: santiagolopezg — File: foo_three.py — License: MIT License
def foo():
    """MODS ConvNet (foo_three variant): four conv stages (32/64/128/512
    filters, all 3x3) on a 1x224x224 input, ending in a 120-unit dense
    layer and a 2-unit sigmoid head.

    Uses the module-level `weight_init` and `dropout`; forces channels-first
    ('th') ordering on Keras versions newer than 1.0.3.
    """
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    def conv_bn_relu(filters, name, **kwargs):
        # 3x3 Convolution -> BatchNorm -> ReLU with the shared initializer.
        model.add(Convolution2D(filters, 3, 3, init=weight_init, name=name, **kwargs))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Stage 1: 2 x 32 filters.
    conv_bn_relu(32, 'conv1_1', input_shape=input_shape)
    conv_bn_relu(32, 'conv1_2')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 2: 3 x 64 filters.
    conv_bn_relu(64, 'conv2_1')
    conv_bn_relu(64, 'conv2_2')
    conv_bn_relu(64, 'conv2_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 3: 2 x 128 filters.
    conv_bn_relu(128, 'conv3_1')
    conv_bn_relu(128, 'conv3_2')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 4: 2 x 512 filters with 'same' padding.
    conv_bn_relu(512, 'conv4_1', border_mode='same')
    conv_bn_relu(512, 'conv4_2', border_mode='same')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    # NOTE(review): dropout is applied twice in a row here, mirroring the
    # original code; the combined keep probability is (1 - dropout)^2.
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 63
Project: MODS_ConvNet — Author: santiagolopezg — File: little_foo.py — License: MIT License
def foo():
    """MODS ConvNet (little_foo variant): three conv stages (32/64/128
    filters) on a 1x224x224 input, ending in a 10-unit dense layer and a
    2-unit sigmoid head.

    Uses the module-level `weight_init` and `dropout`; forces channels-first
    ('th') ordering on Keras versions newer than 1.0.3.
    """
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    def conv_bn_relu(filters, size, name, **kwargs):
        # Convolution -> BatchNorm -> ReLU with the shared initializer.
        model.add(Convolution2D(filters, size, size, init=weight_init, name=name, **kwargs))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Stage 1: 32 feature maps, 5x5 then 3x3 kernels.
    conv_bn_relu(32, 5, 'conv1_1', input_shape=input_shape)
    conv_bn_relu(32, 3, 'conv1_2')
    conv_bn_relu(32, 3, 'conv1_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 116, out 58
    model.add(Dropout(dropout))

    # Stage 2: 3 x 64 filters, 3x3.
    conv_bn_relu(64, 3, 'conv2_1')
    conv_bn_relu(64, 3, 'conv2_2')
    conv_bn_relu(64, 3, 'conv2_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 52, out 26
    model.add(Dropout(dropout))

    # Stage 3: 3 x 128 filters, 3x3.
    conv_bn_relu(128, 3, 'conv3_1')
    conv_bn_relu(128, 3, 'conv3_2')
    conv_bn_relu(128, 3, 'conv3_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 20, out 10
    model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(10, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 64
Project: MODS_ConvNet — Author: santiagolopezg — File: foo_two.py — License: MIT License
def foo():
    """MODS ConvNet (foo_two variant): four conv stages (16/32/64/128
    filters, all 3x3) on a 1x224x224 input, ending in a 120-unit dense
    layer and a 2-unit sigmoid head.

    Uses the module-level `weight_init` and `dropout`; forces channels-first
    ('th') ordering on Keras versions newer than 1.0.3.
    """
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    def conv_bn_relu(filters, name, **kwargs):
        # 3x3 Convolution -> BatchNorm -> ReLU with the shared initializer.
        model.add(Convolution2D(filters, 3, 3, init=weight_init, name=name, **kwargs))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Stage 1: 2 x 16 filters.
    conv_bn_relu(16, 'conv1_1', input_shape=input_shape)
    conv_bn_relu(16, 'conv1_2')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 2: 3 x 32 filters.
    conv_bn_relu(32, 'conv2_1')
    conv_bn_relu(32, 'conv2_2')
    conv_bn_relu(32, 'conv2_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 3: 2 x 64 filters.
    conv_bn_relu(64, 'conv3_1')
    conv_bn_relu(64, 'conv3_2')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 4: 2 x 128 filters with 'same' padding.
    conv_bn_relu(128, 'conv4_1', border_mode='same')
    conv_bn_relu(128, 'conv4_2', border_mode='same')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    # NOTE(review): dropout is applied twice in a row here, mirroring the
    # original code; the combined keep probability is (1 - dropout)^2.
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 65
Project: MODS_ConvNet — Author: santiagolopezg — File: foo_three.py — License: MIT License
def foo():
    """MODS ConvNet (foo_three variant): four conv stages (32/64/128/512
    filters, all 3x3) on a 1x224x224 input, ending in a 120-unit dense
    layer and a 2-unit sigmoid head.

    Uses the module-level `weight_init` and `dropout`; forces channels-first
    ('th') ordering on Keras versions newer than 1.0.3.
    """
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    def conv_bn_relu(filters, name, **kwargs):
        # 3x3 Convolution -> BatchNorm -> ReLU with the shared initializer.
        model.add(Convolution2D(filters, 3, 3, init=weight_init, name=name, **kwargs))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Stage 1: 2 x 32 filters.
    conv_bn_relu(32, 'conv1_1', input_shape=input_shape)
    conv_bn_relu(32, 'conv1_2')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 2: 3 x 64 filters.
    conv_bn_relu(64, 'conv2_1')
    conv_bn_relu(64, 'conv2_2')
    conv_bn_relu(64, 'conv2_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 3: 2 x 128 filters.
    conv_bn_relu(128, 'conv3_1')
    conv_bn_relu(128, 'conv3_2')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 4: 2 x 512 filters with 'same' padding.
    conv_bn_relu(512, 'conv4_1', border_mode='same')
    conv_bn_relu(512, 'conv4_2', border_mode='same')
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    # NOTE(review): dropout is applied twice in a row here, mirroring the
    # original code; the combined keep probability is (1 - dropout)^2.
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 66
Project: MODS_ConvNet — Author: santiagolopezg — File: little_foo2.py — License: MIT License
def foo():
    """MODS ConvNet (little_foo2 variant): two conv stages whose kernel
    sizes shrink 7 -> 5 -> 3, then a 220-unit dense layer and a 2-unit
    sigmoid head.

    Uses the module-level `weight_init` and `dropout`; forces channels-first
    ('th') ordering on Keras versions newer than 1.0.3.
    """
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    def conv_bn_relu(filters, size, name, **kwargs):
        # Convolution -> BatchNorm -> ReLU with the shared initializer.
        model.add(Convolution2D(filters, size, size, init=weight_init, name=name, **kwargs))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Stage 1: 32 feature maps, kernels 7/5/3.
    conv_bn_relu(32, 7, 'conv1_1', input_shape=input_shape)
    conv_bn_relu(32, 5, 'conv1_2')
    conv_bn_relu(32, 3, 'conv1_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 212, out 106
    model.add(Dropout(dropout))

    # Stage 2: 64 feature maps, same kernel schedule.
    conv_bn_relu(64, 7, 'conv2_1')
    conv_bn_relu(64, 5, 'conv2_2')
    conv_bn_relu(64, 3, 'conv2_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 94, out 47
    model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(220, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 67
Project: Scene-Understanding — Author: foamliu — File: vgg16.py — License: MIT License
def vgg16_model(img_rows, img_cols, channel=3):
    """VGG-16 with the original fully connected head.

    Builds the standard five-stage VGG-16 encoder (64/128/256/512/512
    filters), two 4096-unit dense layers and a 1000-way softmax, then loads
    pre-trained ImageNet weights from
    models/vgg16_weights_tf_dim_ordering_tf_kernels.h5.
    """
    # (stage index, filter count, number of conv layers) per VGG stage.
    stages = [(1, 64, 2), (2, 128, 2), (3, 256, 3), (4, 512, 3), (5, 512, 3)]

    model = Sequential()
    # The very first padding layer fixes the input shape and is named 'input'.
    model.add(ZeroPadding2D((1, 1), input_shape=(img_rows, img_cols, channel), name='input'))

    first_conv = True
    for stage, filters, depth in stages:
        for idx in range(1, depth + 1):
            # Every conv is preceded by 1-pixel zero padding; the first one
            # already got it from the named input layer above.
            if not first_conv:
                model.add(ZeroPadding2D((1, 1)))
            first_conv = False
            model.add(Conv2D(filters, (3, 3), activation='relu',
                             name='conv{}_{}'.format(stage, idx)))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Fully connected head.
    model.add(Flatten(name='flatten'))
    model.add(Dense(4096, activation='relu', name='dense1'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu', name='dense2'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax', name='softmax'))

    # Load ImageNet pre-trained weights.
    weights_path = 'models/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
    model.load_weights(weights_path)

    return model
Example 68
Project: phoneticSimilarity — Author: ronggong — File: models.py — License: GNU Affero General Public License v3.0
def jan_original(filter_density, dropout, input_shape, batchNorm=False, dense_activation='relu', channel=1):
    """Jan Schlueter-style onset-detection CNN.

    Two conv+pool stages scaled by `filter_density`, a 256-unit dense layer
    and a sigmoid output, compiled for binary cross-entropy with Adam.
    When `channel == 1` the input is reshaped to channels-first
    (1, input_shape[0], input_shape[1]); otherwise `input_shape` is used
    as-is with channels-last ordering.  Returns the compiled model.
    """
    single_channel = (channel == 1)
    reshape_dim = (1, input_shape[0], input_shape[1]) if single_channel else input_shape
    channel_order = 'channels_first' if single_channel else 'channels_last'

    net = Sequential()

    if batchNorm:
        net.add(BatchNormalization(axis=1, input_shape=reshape_dim))

    # Stage 1: wide 3x7 kernels, pooled only along the first spatial axis.
    net.add(Conv2D(int(10 * filter_density), (3, 7), padding="valid",
                   input_shape=reshape_dim,
                   data_format=channel_order, activation='relu'))
    net.add(MaxPooling2D(pool_size=(3, 1), padding='valid', data_format=channel_order))

    # Stage 2: 3x3 kernels, same pooling scheme.
    net.add(Conv2D(int(20 * filter_density), (3, 3), padding="valid",
                   data_format=channel_order, activation='relu'))
    net.add(MaxPooling2D(pool_size=(3, 1), padding='valid', data_format=channel_order))

    if dropout:
        # Used for the Schluter dataset; disabled (dropout=0) for jingju.
        net.add(Dropout(dropout))

    net.add(Flatten())
    net.add(Dense(units=256, activation=dense_activation))

    if dropout:
        net.add(Dropout(dropout))

    net.add(Dense(1, activation='sigmoid'))

    net.compile(loss='binary_crossentropy',
                optimizer=Adam(),
                metrics=['accuracy'])

    print(net.summary())

    return net
Example 69
Project: ndparse — Author: neurodata — File: nddl.py — License: Apache License 2.0
def ciresan_n3(n=65, nOutput=2):
    """An approximation of the N3 network from [1].

    A few small modifications were made along the way (Theano -> caffe ->
    tensorflow/keras), and no serious attempt has been made to optimize the
    hyperparameters or structure of this network.

    Parameters:
       n : The tile size (diameter) to use in the sliding window.
           Tiles are assumed to be square, hence only one parameter.
       nOutput : Number of output units (2 for binary classification).

    [1] Ciresan et al 'Deep neural networks segment neuronal membranes in
        electron microscopy images,' NIPS 2012.
    """
    from keras.optimizers import SGD
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, MaxPooling2D
    from keras.layers.normalization import BatchNormalization

    model = Sequential()

    # Stage 1: 48 5x5 filters over the (1, n, n) single-channel input tile.
    model.add(Convolution2D(48, 5, 5, border_mode='valid', dim_ordering='th',
                            input_shape=(1, n, n)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(BatchNormalization())  # the original network used LRN here

    # Stage 2.
    model.add(Convolution2D(48, 5, 5))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(BatchNormalization())  # the original network used LRN here

    # Stage 3.
    model.add(Convolution2D(48, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Classifier head; Keras infers the flattened shape automatically.
    model.add(Flatten())
    model.add(Dense(200))
    model.add(Activation('relu'))

    model.add(Dense(nOutput))
    model.add(Activation('softmax'))

    return model


#-------------------------------------------------------------------------------
#  Code for training a deep learning network
#------------------------------------------------------------------------------- 
Example 70
Project: Deep-Learning-for-HSI-classification — Author: luozm — File: cnn.py — License: MIT License
def cnn_3d(input_shape):
    """3-D CNN for hyperspectral image classification.

    Three conv stages (16/32/64 filters, each L2-regularized by the
    module-level `REG_lambda`) with 3-D max-pooling, a 128-unit dense layer
    and a softmax over the module-level `nb_classes`.  Returns the compiled
    model (categorical cross-entropy, Adam lr=0.001).
    """
    model = Sequential()

    def conv_relu(filters, kernel, strides=(1, 1, 1), padding='same', **kwargs):
        # Regularized Conv3D followed by ReLU.
        model.add(Conv3D(filters, kernel_size=kernel, strides=strides, padding=padding,
                         kernel_regularizer=l2(REG_lambda), **kwargs))
        model.add(Activation(activation='relu'))

    # Stage 1: the first kernel spans 20 spectral bands with stride 10.
    conv_relu(16, (3, 3, 20), strides=(1, 1, 10), padding='valid', input_shape=input_shape)
    conv_relu(16, (3, 3, 3))
    model.add(MaxPooling3D(pool_size=(2, 2, 3)))

    # Stage 2.
    conv_relu(32, (3, 3, 3))
    conv_relu(32, (3, 3, 3))
    model.add(MaxPooling3D(pool_size=(2, 2, 3)))

    # Stage 3.
    conv_relu(64, (2, 2, 2))
    conv_relu(64, (2, 2, 2))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation(activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model


# 2D-CNN model 
Example 71
Project: DeepLearningMugenKnock — Author: yoyoyo-yo — File: res18_keras.py — License: MIT License
def Res18():
    """ResNet-18-style classifier built with the Keras functional API.

    Four stages of two 3x3 residual blocks (64/128/256/512 channels) follow
    a 7x7/2 stem and 3x3/2 max-pool; a 7x7 average pool and a softmax over
    `num_classes` finish the network.  Relies on the module-level
    `img_height`, `img_width`, `channel` and `num_classes`.
    """

    def residual(x, in_ch, out_ch, stride=1, name="res"):
        # Main branch: two 3x3 convs (the first may be strided).
        branch = Conv2D(out_ch, [3, 3], strides=stride, padding='same', activation=None, name=name + "_conv1")(x)
        branch = BatchNormalization(name=name + "_bn1")(branch)
        branch = Activation("relu")(branch)

        branch = Conv2D(out_ch, [3, 3], strides=1, padding='same', activation=None, name=name + "_conv2")(branch)
        branch = BatchNormalization(name=name + "_bn2")(branch)
        branch = Activation("relu")(branch)

        if in_ch != out_ch:
            # 1x1 projection so the skip path matches the output channels.
            x = Conv2D(out_ch, [1, 1], strides=1, padding="same", activation=None, name=name + "_conv_sc")(x)
            x = BatchNormalization(name=name + "_bn_sc")(x)
            x = Activation("relu")(x)

        if stride == 2:
            # Downsample the skip path to match the strided main branch.
            x = MaxPooling2D([2, 2], strides=2, padding="same")(x)

        x = Add()([branch, x])
        return Activation("relu")(x)

    inputs = Input((img_height, img_width, channel))

    # Stem: 7x7/2 conv then 3x3/2 max-pool.
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None, name="conv1")(inputs)
    x = BatchNormalization(name="bn1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    # Four stages of two blocks; stages 3-5 downsample in their first block.
    in_ch = 64
    for stage, out_ch in enumerate([64, 128, 256, 512], start=2):
        for block_idx in (1, 2):
            stride = 2 if (block_idx == 1 and stage > 2) else 1
            x = residual(x, in_ch, out_ch, stride=stride,
                         name="res{}_{}".format(stage, block_idx))
            in_ch = out_ch

    x = AveragePooling2D([7, 7], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name="fc")(x)

    return Model(inputs=inputs, outputs=x)
Example 72
Project: DeepLearningMugenKnock — Author: yoyoyo-yo — File: res101_keras.py — License: MIT License
def Res101():
    """ResNet-101-style classifier built with the Keras functional API.

    Bottleneck stages of [3, 4, 23, 3] blocks follow a 7x7/2 stem and a
    3x3/2 max-pool; global average pooling and a softmax over `num_classes`
    finish the network.  Relies on the module-level `img_height`,
    `img_width`, `channel` and `num_classes`.
    """

    def ResBlock(x, in_f, f_1, out_f, stride=1, name="res"):
        """Bottleneck block: 1x1 (f_1) -> 3x3 (f_1) -> 1x1 (out_f) + skip."""
        res_x = Conv2D(f_1, [1, 1], strides=stride, padding='same', activation=None, name=name+"_conv1")(x)
        res_x = BatchNormalization(name=name+"_bn1")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(f_1, [3, 3], strides=1, padding='same', activation=None, name=name+"_conv2")(res_x)
        res_x = BatchNormalization(name=name+"_bn2")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(out_f, [1, 1], strides=1, padding='same', activation=None, name=name+"_conv3")(res_x)
        res_x = BatchNormalization(name=name+"_bn3")(res_x)
        res_x = Activation("relu")(res_x)

        if in_f != out_f:
            # 1x1 projection so the skip path matches the block's output channels.
            x = Conv2D(out_f, [1, 1], strides=1, padding="same", activation=None, name=name+"_conv_sc")(x)
            x = BatchNormalization(name=name+"_bn_sc")(x)
            x = Activation("relu")(x)

        if stride == 2:
            # Downsample the skip path to match the strided residual branch.
            x = MaxPooling2D([2, 2], strides=2, padding="same")(x)

        x = Add()([res_x, x])
        x = Activation("relu")(x)

        return x

    inputs = Input((img_height, img_width, channel))
    x = inputs

    # Stem.
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None, name="conv1")(x)
    x = BatchNormalization(name="bn1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    # conv2_x: 3 blocks, bottleneck width 64.
    x = ResBlock(x, 64, 64, 256, name="res2_1")
    x = ResBlock(x, 256, 64, 256, name="res2_2")
    x = ResBlock(x, 256, 64, 256, name="res2_3")

    # conv3_x: 4 blocks, bottleneck width 128.
    x = ResBlock(x, 256, 128, 512, stride=2, name="res3_1")
    x = ResBlock(x, 512, 128, 512, name="res3_2")
    x = ResBlock(x, 512, 128, 512, name="res3_3")
    x = ResBlock(x, 512, 128, 512, name="res3_4")

    # conv4_x: 23 blocks, bottleneck width 256.
    x = ResBlock(x, 512, 256, 1024, stride=2, name="res4_1")
    for i in range(22):
        x = ResBlock(x, 1024, 256, 1024, name="res4_{}".format(i+2))

    # conv5_x: 3 blocks, bottleneck width 512.
    x = ResBlock(x, 1024, 512, 2048, stride=2, name="res5_1")
    # BUGFIX: res5_2 and res5_3 previously used a 256-wide bottleneck,
    # inconsistent with res5_1 above and with the standard ResNet-101
    # conv5_x bottleneck width of 512.
    x = ResBlock(x, 2048, 512, 2048, name="res5_2")
    x = ResBlock(x, 2048, 512, 2048, name="res5_3")

    x = AveragePooling2D([img_height // 32, img_width // 32], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name="fc")(x)

    model = Model(inputs=inputs, outputs=x)

    return model
Example 73
Project: DeepLearningMugenKnock — Author: yoyoyo-yo — File: res34_keras.py — License: MIT License
def Res34():
    """Build a ResNet-34-style classifier as a keras Model.

    Relies on the module-level globals img_height, img_width, channel and
    num_classes. Returns an uncompiled Model with a softmax head.
    """

    def _basic_block(inp, in_ch, out_ch, stride=1, name="res"):
        # Residual branch: two 3x3 convs, each followed by BN + ReLU.
        branch = Conv2D(out_ch, [3, 3], strides=stride, padding='same', activation=None, name=name+"_conv1")(inp)
        branch = BatchNormalization(name=name+"_bn1")(branch)
        branch = Activation("relu")(branch)

        branch = Conv2D(out_ch, [3, 3], strides=1, padding='same', activation=None, name=name+"_conv2")(branch)
        branch = BatchNormalization(name=name+"_bn2")(branch)
        branch = Activation("relu")(branch)

        shortcut = inp
        if in_ch != out_ch:
            # 1x1 projection so the shortcut matches the branch's channels.
            shortcut = Conv2D(out_ch, [1, 1], strides=1, padding="same", activation=None, name=name+"_conv_sc")(shortcut)
            shortcut = BatchNormalization(name=name+"_bn_sc")(shortcut)
            shortcut = Activation("relu")(shortcut)

        if stride == 2:
            # Down-sample the shortcut to match the strided branch.
            shortcut = MaxPooling2D([2, 2], strides=2, padding="same")(shortcut)

        merged = Add()([branch, shortcut])
        return Activation("relu")(merged)

    inputs = Input((img_height, img_width, channel))

    # Stem: 7x7 conv + BN + ReLU, then a 3x3 max-pool (overall stride 4).
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None, name="conv1")(inputs)
    x = BatchNormalization(name="bn1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    # Stage 2: three 64-channel blocks (res2_1 .. res2_3).
    for i in range(3):
        x = _basic_block(x, 64, 64, name="res2_{}".format(i + 1))

    # Stage 3: down-sample to 128 channels, then three more blocks.
    x = _basic_block(x, 64, 128, stride=2, name="res3_1")
    for i in range(3):
        x = _basic_block(x, 128, 128, name="res3_{}".format(i + 2))

    # Stage 4: down-sample to 256 channels, then five more blocks.
    x = _basic_block(x, 128, 256, stride=2, name="res4_1")
    for i in range(5):
        x = _basic_block(x, 256, 256, name="res4_{}".format(i + 2))

    # Stage 5: down-sample to 512 channels, then two more blocks.
    x = _basic_block(x, 256, 512, stride=2, name="res5_1")
    for i in range(2):
        x = _basic_block(x, 512, 512, name="res5_{}".format(i + 2))

    # Global average pool over the remaining spatial map, then softmax head.
    x = AveragePooling2D([img_height // 32, img_width // 32], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name="fc")(x)

    return Model(inputs=inputs, outputs=x)
Example 74
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: bn_keras.py    MIT License 4 votes vote down vote up
def VGG16():
    """Build a VGG16-style classifier with BatchNormalization after each conv.

    Relies on the module-level globals img_height, img_width and num_classes.
    Returns an uncompiled keras Model with a softmax head.
    """
    inputs = Input((img_height, img_width, 3))
    x = inputs
    # block conv1
    for i in range(2):
        x = Conv2D(64, (3, 3), padding='same', strides=1, activation='relu', name='conv1_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv2
    for i in range(2):
        x = Conv2D(128, (3, 3), padding='same', strides=1, activation='relu', name='conv2_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv3
    for i in range(3):
        x = Conv2D(256, (3, 3), padding='same', strides=1, activation='relu', name='conv3_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv4
    for i in range(3):
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv4_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # block conv5
    for i in range(3):
        # BUGFIX: use i+1 so names run conv5_1..conv5_3, consistent with the
        # other blocks (the original used i, producing conv5_0..conv5_2).
        x = Conv2D(512, (3, 3), padding='same', strides=1, activation='relu', name='conv5_{}'.format(i+1))(x)
        x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2), strides=2,  padding='same')(x)
    
    # Classifier head: two FC-4096 layers with dropout, then softmax.
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model
Example 75
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: res152_keras.py    MIT License 4 votes vote down vote up
def Res152():
    """Build a ResNet-152-style classifier as a keras Model.

    Relies on the module-level globals img_height, img_width, channel and
    num_classes. Returns an uncompiled Model with a softmax head.
    """

    def ResBlock(x, in_f, f_1, out_f, stride=1, name="res"):
        # Bottleneck branch: 1x1 reduce -> 3x3 -> 1x1 expand, BN+ReLU each.
        res_x = Conv2D(f_1, [1, 1], strides=stride, padding='same', activation=None, name=name+"_conv1")(x)
        res_x = BatchNormalization(name=name+"_bn1")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(f_1, [3, 3], strides=1, padding='same', activation=None, name=name+"_conv2")(res_x)
        res_x = BatchNormalization(name=name+"_bn2")(res_x)
        res_x = Activation("relu")(res_x)

        res_x = Conv2D(out_f, [1, 1], strides=1, padding='same', activation=None, name=name+"_conv3")(res_x)
        res_x = BatchNormalization(name=name+"_bn3")(res_x)
        res_x = Activation("relu")(res_x)

        if in_f != out_f:
            # 1x1 projection so the shortcut matches the branch's channels.
            x = Conv2D(out_f, [1, 1], strides=1, padding="same", activation=None, name=name+"_conv_sc")(x)
            x = BatchNormalization(name=name+"_bn_sc")(x)
            x = Activation("relu")(x)

        if stride == 2:
            # Down-sample the shortcut to match the strided branch.
            x = MaxPooling2D([2, 2], strides=2, padding="same")(x)
        
        x = Add()([res_x, x])
        x = Activation("relu")(x)

        return x
        
    
    inputs = Input((img_height, img_width, channel))
    x = inputs
    
    # Stem: 7x7 conv + BN + ReLU, then a 3x3 max-pool (overall stride 4).
    x = Conv2D(64, [7, 7], strides=2, padding='same', activation=None, name="conv1")(x)
    x = BatchNormalization(name="bn1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D([3, 3], strides=2, padding='same')(x)

    # Stage 2: 3 bottleneck blocks (width 64, output 256).
    x = ResBlock(x, 64, 64, 256, name="res2_1")
    x = ResBlock(x, 256, 64, 256, name="res2_2")
    x = ResBlock(x, 256, 64, 256, name="res2_3")

    # Stage 3: 8 bottleneck blocks (width 128, output 512).
    x = ResBlock(x, 256, 128, 512, stride=2, name="res3_1")
    for i in range(7):
        x = ResBlock(x, 512, 128, 512, name="res3_{}".format(i+2))

    # Stage 4: 36 bottleneck blocks (width 256, output 1024).
    x = ResBlock(x, 512, 256, 1024, stride=2, name="res4_1")
    for i in range(35):
        x = ResBlock(x, 1024, 256, 1024, name="res4_{}".format(i+2))

    # Stage 5: 3 bottleneck blocks (width 512, output 2048).
    # BUGFIX: res5_2 and res5_3 used a 256-wide bottleneck, inconsistent with
    # res5_1 (512) and with ResNet-152's stage-5 configuration.
    x = ResBlock(x, 1024, 512, 2048, stride=2, name="res5_1")
    x = ResBlock(x, 2048, 512, 2048, name="res5_2")
    x = ResBlock(x, 2048, 512, 2048, name="res5_3")

    # Global average pool over the remaining spatial map, then softmax head.
    x = AveragePooling2D([img_height // 32, img_width // 32], strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name="fc")(x)

    model = Model(inputs=inputs, outputs=x)

    return model
Example 76
Project: 3DGCN   Author: blackmints   File: model.py    MIT License 4 votes vote down vote up
def model_3DGCN(hyper):
    """Build and compile a 3D graph-convolutional network from a hyper dict.

    hyper supplies atom/feature counts, layer widths, the task type
    ("regression" / "binary" / "classification"), the loss, and pooling mode.
    Returns a compiled keras Model over [atoms, adjms, dists] inputs.
    Raises ValueError for any other task string.
    """
    # Kipf adjacency, neighborhood mixing
    num_atoms = hyper["num_atoms"]
    num_features = hyper["num_features"]
    units_conv = hyper["units_conv"]
    units_dense = hyper["units_dense"]
    num_layers = hyper["num_layers"]
    std = hyper["data_std"]
    loss = hyper["loss"]
    task = hyper["task"]
    pooling = hyper["pooling"]
    outputs = hyper["outputs"]

    atoms = Input(name='atom_inputs', shape=(num_atoms, num_features))
    adjms = Input(name='adjm_inputs', shape=(num_atoms, num_atoms))
    dists = Input(name='coor_inputs', shape=(num_atoms, num_atoms, 3))

    # Split the input into scalar and vector feature streams.
    sc, vc = GraphEmbed()([atoms, dists])

    # Stack num_layers rounds of scalar<->vector message passing.
    for _ in range(num_layers):
        s_from_s = GraphSToS(units_conv, activation='relu')(sc)
        s_from_v = GraphVToS(units_conv, activation='relu')([vc, dists])

        v_from_s = GraphSToV(units_conv, activation='tanh')([sc, dists])
        v_from_v = GraphVToV(units_conv, activation='tanh')(vc)

        sc = GraphConvS(units_conv, pooling='sum', activation='relu')([s_from_s, s_from_v, adjms])
        vc = GraphConvV(units_conv, pooling='sum', activation='tanh')([v_from_s, v_from_v, adjms])

    # Pool the graph, then run both streams through two dense layers each.
    sc, vc = GraphGather(pooling=pooling)([sc, vc])
    sc_out = Dense(units_dense, activation='relu', kernel_regularizer=l2(0.005))(sc)
    sc_out = Dense(units_dense, activation='relu', kernel_regularizer=l2(0.005))(sc_out)

    vc_out = TimeDistributed(Dense(units_dense, activation='relu', kernel_regularizer=l2(0.005)))(vc)
    vc_out = TimeDistributed(Dense(units_dense, activation='relu', kernel_regularizer=l2(0.005)))(vc_out)
    vc_out = Flatten()(vc_out)

    out = Concatenate(axis=-1)([sc_out, vc_out])

    # Output activation is determined by the task type.
    task_activation = {"regression": "linear", "binary": "sigmoid", "classification": "softmax"}
    if task not in task_activation:
        raise ValueError("Unsupported task on model generation.")

    out = Dense(outputs, activation=task_activation[task], name='output')(out)
    model = Model(inputs=[atoms, adjms, dists], outputs=out)

    if task == "regression":
        # Regression additionally reports de-standardized MAE / RMSE.
        model.compile(optimizer=Adam(lr=0.001), loss=loss, metrics=[std_mae(std=std), std_rmse(std=std)])
    else:
        model.compile(optimizer=Adam(lr=0.001), loss=loss)

    return model
Example 77
Project: chartAnalyst   Author: huima58   File: chart_analyst.py    Apache License 2.0 4 votes vote down vote up
def predict(train_imgs, train_labels, test_imgs, test_labels, x_pix_num=x_pix_num_default, y_pix_num=y_pix_num_default,
            use_saved_weights=False, weights_file_name=''):
    """Train (or load) a small VGG-style CNN and predict class probabilities.

    Images are expected channels-first with shape (1, y_pix_num, x_pix_num).
    When use_saved_weights is True, weights are loaded from weights_file_name;
    otherwise the model is trained with checkpointing/early stopping on
    val_acc and the best weights are reloaded. Returns the predicted
    probabilities for test_imgs. test_labels is accepted but unused here.
    """
    model = Sequential()
    # use partial VGG16 model
    model.add(ZeroPadding2D((1, 1), input_shape=(1, y_pix_num, x_pix_num)))
    
    base_filter_num = 64
    model.add(Convolution2D(base_filter_num, 3, 3, activation='relu', name='conv1_1'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))  # added this layer to reduce the input size
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(base_filter_num, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(base_filter_num * 2, 3, 3, activation='relu', name='conv2_1'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(base_filter_num *2, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Classifier head: 3-way softmax over flattened conv features.
    model.add(Flatten())
    model.add(Dense(128, init='uniform', activation='tanh'))
    model.add(Dropout(0.25))
    model.add(Dense(64, init='uniform', activation='tanh'))
    model.add(Dense(3, init='uniform', activation='softmax'))
    
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    
    if use_saved_weights:
        model.load_weights(weights_file_name)  #need to install h5py
    else:
        start_time = datetime.today()
        # Keep only the best-val_acc weights and stop after 10 stale epochs.
        checkpointer = ModelCheckpoint(filepath=weights_file_name, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
        earlyStopping = EarlyStopping(monitor='val_acc', patience=10, verbose=1, mode='max')
        model.fit(train_imgs, train_labels,
                        nb_epoch=30,
                        verbose=1,
                        batch_size=70,
                        validation_split=0.1,
                        callbacks=[checkpointer, earlyStopping])
        model.load_weights(weights_file_name)
        end_time = datetime.today()
        # BUGFIX: call print as a function (the original Python-2-only print
        # statement is a SyntaxError on Python 3; this form works on both).
        print("----trained time is from " + str(start_time) + " to " + str(end_time))
        
    predict_rst = model.predict_proba(test_imgs, verbose=0)
    return predict_rst
Example 78
Project: TaiwanTrainVerificationCode2text   Author: linsamtw   File: build_verification_code_cnn_model.py    Apache License 2.0 4 votes vote down vote up
def train_verification_model(self):
    """Build, compile and fit the 6-digit captcha CNN, storing results on self.

    Reads self.train_data/self.train_labels and self.test_data/self.test_labels;
    sets self.model, self.history and the score attributes computed by
    self.compare_val_train_error().
    """

    def build_cnn_model():
        """Return the uncompiled CNN: 4 conv stages, then 6 softmax heads."""
        tensor_in = Input((60, 200, 3))
        tensor_out = tensor_in
        tensor_out = Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(tensor_out)
        tensor_out = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(tensor_out)
        tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
        tensor_out = Dropout(0.25)(tensor_out)

        tensor_out = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(tensor_out)
        tensor_out = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(tensor_out)
        tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
        tensor_out = Dropout(0.25)(tensor_out)

        tensor_out = Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu')(tensor_out)
        tensor_out = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(tensor_out)
        tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)
        tensor_out = Dropout(0.25)(tensor_out)

        tensor_out = Conv2D(filters=256, kernel_size=(3, 3), activation='relu')(tensor_out)
        tensor_out = MaxPooling2D(pool_size=(2, 2))(tensor_out)

        # BUGFIX: removed a dead `Dense(1024, activation="relu")` statement
        # that constructed a layer without ever connecting it to the graph —
        # it was a no-op, so removing it leaves the model unchanged.

        tensor_out = Flatten()(tensor_out)
        tensor_out = Dropout(0.5)(tensor_out)
        # One 37-way softmax head per captcha character position.
        tensor_out = [Dense(37, name='digit1', activation='softmax')(tensor_out),\
            Dense(37, name='digit2', activation='softmax')(tensor_out),\
            Dense(37, name='digit3', activation='softmax')(tensor_out),\
            Dense(37, name='digit4', activation='softmax')(tensor_out),\
            Dense(37, name='digit5', activation='softmax')(tensor_out),\
            Dense(37, name='digit6', activation='softmax')(tensor_out)]
        model = Model(inputs=tensor_in, outputs=tensor_out)

        return model

    model = build_cnn_model()
    #===============================================================
    optimizer = RMSprop(lr=1e-3, rho=0.8, epsilon=1e-08, decay=0.0)
    # Adamax
    # Define the optimizer
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    # model.summary()

    history = model.fit(self.train_data,self.train_labels, 
                        batch_size = 512, epochs=20, verbose=1, 
                        validation_data=(self.test_data,self.test_labels) )

    self.model = model
    self.history = history
    ( self.train_correct3 , self.test_correct3, 
      self.train_final_score, self.test_final_score ) = self.compare_val_train_error()
#-------------------------------------------------------------------
Example 79
Project: EUSIPCO2017   Author: Veleslavia   File: multilayer.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def build_model(n_classes):
    """Build a multi-filter-size spectrogram CNN returning an uncompiled Model.

    Six parallel conv branches (all (m, n) combinations of filter sizes) are
    concatenated, followed by two conv stages and a dense softmax head over
    n_classes outputs. Input layout follows K.image_dim_ordering().
    """
    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    maxpool_const = 4
    m_sizes = [5, 80]
    n_sizes = [1, 3, 5]
    n_filters = [128, 64, 32]

    # One branch per (m, n) filter-size combination, all fed from the input.
    branches = []
    for m in m_sizes:
        for n, filters in zip(n_sizes, n_filters):
            tag = '{}_{}_'.format(n, m)
            branch = Convolution2D(filters, m, n,
                                   border_mode='same',
                                   init='he_normal',
                                   W_regularizer=l2(1e-5),
                                   name=tag + 'conv')(melgram_input)
            branch = BatchNormalization(axis=channel_axis, mode=0, name=tag + 'bn')(branch)
            branch = ELU()(branch)
            branch = MaxPooling2D(pool_size=(N_MEL_BANDS/maxpool_const, SEGMENT_DUR/maxpool_const),
                                  name=tag + 'pool')(branch)
            branches.append(branch)

    x = merge(branches, mode='concat', concat_axis=channel_axis)

    # Two identical conv stages (conv2/conv3) after the concatenation.
    for stage in ('2', '3'):
        x = Dropout(0.25)(x)
        x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5),
                          border_mode='same', name='conv' + stage)(x)
        x = BatchNormalization(axis=channel_axis, mode=0, name='bn' + stage)(x)
        x = ELU()(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool' + stage)(x)

    # Dense classifier head with heavy dropout.
    x = Flatten(name='flatten')(x)
    x = Dropout(0.5)(x)
    x = Dense(256, init='he_normal', W_regularizer=l2(1e-5), name='fc1')(x)
    x = ELU()(x)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)

    return Model(melgram_input, x)
Example 80
Project: dsl-char-cnn   Author: boknilev   File: cnn_multifilter_cv.py    MIT License 4 votes vote down vote up
def make_model(maxlen, alphabet_size, embedding_dims, embedding_droupout,
               nb_filters, filter_lengths, hidden_dims, fc_dropout, 
               num_classes):
    """Build and compile a multi-filter character-CNN text classifier.

    nb_filters and filter_lengths are parallel lists: one Convolution1D
    branch is created per (filter count, filter length) pair, max-pooled,
    flattened and concatenated before the dense layers. Returns the
    compiled keras Model.
    """
    print('Build model...')
    main_input = Input(shape=(maxlen,))
    
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    embedding_layer = Embedding(alphabet_size,
                        embedding_dims,
                        input_length=maxlen,
                        dropout=embedding_droupout)
    embedded = embedding_layer(main_input)
    
    # we add a Convolution1D for each filter length, which will learn
    # the corresponding number of word-group filters of that size.
    # BUGFIX: iterate the two parallel lists with zip instead of
    # Python-2-only xrange indexing (works on both Python 2 and 3;
    # assumes, as the original did, that the lists have equal length).
    convs = []
    for n_filter, filter_length in zip(nb_filters, filter_lengths):
        conv_layer = Convolution1D(nb_filter=n_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1)
        conv_out = conv_layer(embedded)
        # we use max pooling:
        conv_out = MaxPooling1D(pool_length=conv_layer.output_shape[1])(conv_out)
        # We flatten the output of the conv layer,
        # so that we can concat all conv outpus and add a vanilla dense layer:
        conv_out = Flatten()(conv_out)
        convs.append(conv_out)
    
    # concat all conv outputs
    x = merge(convs, mode='concat') if len(convs) > 1 else convs[0]
    #concat = BatchNormalization()(concat)
    
    # We add a vanilla hidden layer:
    x = Dense(hidden_dims)(x)
    x = Dropout(fc_dropout)(x)
    x = Activation('relu')(x)
    
    # We project onto number of classes output layer, and squash it with a softmax:
    main_output = Dense(num_classes, activation='softmax')(x)
    
    # finally, define the model 
    model = Model(input=main_input, output=main_output)
    model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
    return model