Python keras.models.Sequential() Examples

The following are code examples for showing how to use keras.models.Sequential(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: speed_estimation   Author: NeilNie   File: simple_conv.py    MIT License 13 votes vote down vote up
def commaai_model(self):
        """Build and compile the comma.ai-style steering/speed regression CNN.

        Returns a compiled Sequential model mapping an RGB frame of shape
        (configs.IMG_HEIGHT, configs.IMG_WIDTH, 3) to a single scalar output.
        """
        net = Sequential()
        # Scale raw pixel values into [-1, 1] before the convolutional stack.
        net.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(configs.IMG_HEIGHT, configs.IMG_WIDTH, 3), output_shape=(configs.IMG_HEIGHT, configs.IMG_WIDTH, 3)))
        # Two strided conv + ELU blocks, then a final conv before the dense head.
        for n_filters, kernel, stride in ((16, (8, 8), 4), (32, (5, 5), 2)):
            net.add(Conv2D(n_filters, kernel, strides=stride, padding="same"))
            net.add(ELU())
        net.add(Conv2D(64, (5, 5), strides=2, padding="same"))
        net.add(Flatten())
        net.add(Dropout(.2))
        net.add(ELU())
        net.add(Dense(512))
        net.add(Dropout(.5))
        net.add(ELU())
        net.add(Dense(1))

        optimizer = SGD(lr=0.00001, decay=1e-6, momentum=0.9, nesterov=True)
        net.compile(optimizer=optimizer, loss='mean_squared_error')
        return net
Example 2
Project: keras-anomaly-detection   Author: chen0040   File: recurrent.py    MIT License 9 votes vote down vote up
def create_model(time_window_size, metric):
        """Build a Conv1D + LSTM regressor over a univariate time window.

        Parameters
        ----------
        time_window_size : int
            Number of timesteps per input window (input shape is (window, 1)).
        metric : str or callable
            Keras metric to track during training.

        Returns
        -------
        Compiled Sequential model predicting one value per timestep (linear head).
        """
        # Fix: removed two commented-out duplicate compile() lines (dead code).
        model = Sequential()

        model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu',
                         input_shape=(time_window_size, 1)))
        model.add(MaxPooling1D(pool_size=4))

        model.add(LSTM(64))

        model.add(Dense(units=time_window_size, activation='linear'))

        model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])

        print(model.summary())
        return model
Example 3
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 7 votes vote down vote up
def test_keras_import(self):
        """Zero-padding layers (1D/2D/3D) should import with pad_w == 2."""
        cases = [
            (ZeroPadding1D, Conv1D, (224, 3)),
            (ZeroPadding2D, Conv2D, (224, 224, 3)),
            (ZeroPadding3D, Conv3D, (224, 224, 224, 3)),
        ]
        for pad_layer, conv_layer, shape in cases:
            model = Sequential()
            model.add(pad_layer(2, input_shape=shape))
            model.add(conv_layer(32, 7, strides=2))
            model.build()
            self.pad_test(model, 'pad_w', 2)


# ********** Export json tests **********

# ********** Data Layers Test ********** 
Example 4
Project: Anamoly-Detection   Author: msmsk05   File: gaal_base.py    BSD 2-Clause "Simplified" License 7 votes vote down vote up
def create_generator(latent_size):  # pragma: no cover
    """Create the generator of the GAN for a given latent size.

    Parameters
    ----------
    latent_size : int
        The size of the latent space of the generator

    Returns
    -------
    D : Keras model() object
        Returns a model() object.
    """
    # Both dense layers start as (near-)identity maps of the latent space.
    identity_init = keras.initializers.Identity(gain=1.0)
    network = Sequential()
    network.add(Dense(latent_size, input_dim=latent_size, activation='relu',
                      kernel_initializer=identity_init))
    network.add(Dense(latent_size, activation='relu',
                      kernel_initializer=identity_init))
    latent_input = Input(shape=(latent_size,))
    generated = network(latent_input)
    return Model(latent_input, generated)
Example 5
Project: sanctuary   Author: bzamecnik   File: mnist.py    MIT License 6 votes vote down vote up
def create_fc_model(input_shape, nb_classes, nb_layers, layer_width, dropout, activation):
    """Fully-connected classifier: flatten the input, stack (nb_layers - 1)
    hidden blocks of Dense/activation (plus optional dropout), softmax head."""
    model = Sequential()
    flat_dim = input_shape[0] * input_shape[1]
    model.add(Reshape((flat_dim,), input_shape=input_shape))
    for _ in range(nb_layers - 1):
        model.add(Dense(layer_width))
        model.add(Activation(activation))
        if dropout > 0:
            model.add(Dropout(dropout))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.summary()

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example 6
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 6 votes vote down vote up
def test_keras_import(self):
        """Conv layers (1D/2D/3D) with regularizers and constraints should import.

        Each case builds a one-layer model and checks it via keras_param_test;
        the expected parameter counts (9/13/17) grow with dimensionality.
        """
        # Conv 1D
        model = Sequential()
        model.add(Conv1D(32, 10, kernel_regularizer=regularizers.l2(0.01),
                         bias_regularizer=regularizers.l2(0.01),
                         activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                         bias_constraint='max_norm', activation='relu', input_shape=(10, 1)))
        model.build()
        self.keras_param_test(model, 1, 9)
        # Conv 2D
        model = Sequential()
        model.add(Conv2D(32, (3, 3), kernel_regularizer=regularizers.l2(0.01),
                         bias_regularizer=regularizers.l2(0.01),
                         activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                         bias_constraint='max_norm', activation='relu', input_shape=(16, 16, 1)))
        model.build()
        self.keras_param_test(model, 1, 13)
        # Conv 3D
        model = Sequential()
        model.add(Conv3D(32, (3, 3, 3), kernel_regularizer=regularizers.l2(0.01),
                         bias_regularizer=regularizers.l2(0.01),
                         activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                         bias_constraint='max_norm', activation='relu', input_shape=(16, 16, 16, 1)))
        model.build()
        self.keras_param_test(model, 1, 17)
Example 7
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 6 votes vote down vote up
def test_keras_import(self):
        """LocallyConnected layers (1D/2D) with regularizers/constraints should import."""
        # LocallyConnected 1D (comment fixed: these are locally-connected, not Conv, layers)
        model = Sequential()
        model.add(LocallyConnected1D(32, 3, kernel_regularizer=regularizers.l2(0.01),
                                     bias_regularizer=regularizers.l2(0.01),
                                     activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                                     bias_constraint='max_norm', activation='relu', input_shape=(16, 10)))
        model.build()
        self.keras_param_test(model, 1, 12)
        # LocallyConnected 2D
        model = Sequential()
        model.add(LocallyConnected2D(32, (3, 3), kernel_regularizer=regularizers.l2(0.01),
                                     bias_regularizer=regularizers.l2(0.01),
                                     activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                                     bias_constraint='max_norm', activation='relu', input_shape=(16, 16, 10)))
        model.build()
        self.keras_param_test(model, 1, 14)


# ********** Recurrent Layers ********** 
Example 8
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 6 votes vote down vote up
def test_keras_import(self):
        """Round-trip a stacked LSTM/SimpleRNN/GRU model through the keras-import view.

        Serializes the model to media/test.json, posts it to the importer, and
        checks the recurrent layers each expose at least 7 parameters.
        """
        model = Sequential()
        model.add(LSTM(64, return_sequences=True, input_shape=(10, 64)))
        model.add(SimpleRNN(32, return_sequences=True))
        model.add(GRU(10, kernel_regularizer=regularizers.l2(0.01),
                      bias_regularizer=regularizers.l2(0.01), recurrent_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                      bias_constraint='max_norm', recurrent_constraint='max_norm'))
        model.build()
        json_string = Model.to_json(model)
        json_path = os.path.join(settings.BASE_DIR, 'media', 'test.json')
        with open(json_path, 'w') as out:
            json.dump(json.loads(json_string), out, indent=4)
        # Fix: the original opened the upload handle and never closed it (leak);
        # a context manager closes it deterministically after the POST.
        with open(json_path, 'r') as sample_file:
            response = self.client.post(reverse('keras-import'), {'file': sample_file})
        response = json.loads(response.content)
        layerId = sorted(response['net'].keys())
        self.assertEqual(response['result'], 'success')
        self.assertGreaterEqual(len(response['net'][layerId[1]]['params']), 7)
        self.assertGreaterEqual(len(response['net'][layerId[3]]['params']), 7)
        self.assertGreaterEqual(len(response['net'][layerId[6]]['params']), 7)


# ********** Embedding Layers ********** 
Example 9
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 6 votes vote down vote up
def test_keras_import(self):
        """Round-trip a BatchNormalization model through the keras-import view.

        Expects the importer to split the layer into a 'Scale' followed by a
        'BatchNorm' node in the returned net.
        """
        model = Sequential()
        model.add(BatchNormalization(center=True, scale=True, beta_regularizer=regularizers.l2(0.01),
                                     gamma_regularizer=regularizers.l2(0.01),
                                     beta_constraint='max_norm', gamma_constraint='max_norm',
                                     input_shape=(16, 10)))
        model.build()
        json_string = Model.to_json(model)
        json_path = os.path.join(settings.BASE_DIR, 'media', 'test.json')
        with open(json_path, 'w') as out:
            json.dump(json.loads(json_string), out, indent=4)
        # Fix: the original opened the upload handle and never closed it (leak);
        # a context manager closes it deterministically after the POST.
        with open(json_path, 'r') as sample_file:
            response = self.client.post(reverse('keras-import'), {'file': sample_file})
        response = json.loads(response.content)
        layerId = sorted(response['net'].keys())
        self.assertEqual(response['result'], 'success')
        self.assertEqual(response['net'][layerId[0]]['info']['type'], 'Scale')
        self.assertEqual(response['net'][layerId[1]]['info']['type'], 'BatchNorm')


# ********** Noise Layers ********** 
Example 10
Project: oslodatascience-rl   Author: Froskekongen   File: erlenda_pong_parallel.py    MIT License 6 votes vote down vote up
def buildmodel(opt):
    """Build the Pong policy network: three conv/relu blocks and a dense head,
    compiled with binary cross-entropy against the supplied optimizer."""
    print("Now we build the model")
    model = Sequential()
    conv_specs = [
        (32, 8, 8, (4, 4), {'input_shape': (80, 80, 1)}),
        (64, 4, 4, (2, 2), {}),
        (64, 3, 3, (1, 1), {}),
    ]
    for n_filters, n_rows, n_cols, stride, extra in conv_specs:
        model.add(Convolution2D(n_filters, n_rows, n_cols, subsample=stride,
                                border_mode='same', **extra))
        model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dense(1))

    model.compile(loss='binary_crossentropy', optimizer=opt)
    print("We finish building the model")
    return model
Example 11
Project: deep-nn-car   Author: scope-lab-vu   File: train.py    MIT License 6 votes vote down vote up
def createModel():
    """Build the dual-input steering network (image CNN branch + speed branch).

    Returns
    -------
    keras Model mapping [image (66, 200, 3), speed (1,)] to one tanh-bounded
    steering value.
    """
    # Fix: removed a dead `model = Sequential()` — the network is built entirely
    # with the functional API and that object was never used.
    input1 = Input(shape=(66, 200, 3), name='image')
    input2 = Input(shape=(1,), name='speed')
    # Normalize the raw image before the convolutional stack.
    steer_inp = BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99)(input1)
    layer1 = Conv2D(24, (5, 5), padding="valid", strides=(2, 2), activation="relu")(steer_inp)
    layer2 = Conv2D(36, (5, 5), padding="valid", strides=(2, 2), activation="relu")(layer1)
    layer3 = Conv2D(48, (5, 5), padding="valid", strides=(2, 2), activation="relu")(layer2)
    layer4 = Conv2D(64, (3, 3), padding="valid", strides=(1, 1), activation="relu")(layer3)
    layer5 = Conv2D(64, (3, 3), padding="valid", strides=(1, 1), activation="relu")(layer4)
    layer6 = Flatten()(layer5)
    layer7 = Dense(1164, activation='relu')(layer6)
    layer8 = Dense(100, activation='relu')(layer7)
    # Speed branch: a single dense layer over the scalar speed input, merged by
    # element-wise addition with the image branch.
    layer9 = Dense(100, activation='relu')(input2)
    merged = add([layer8, layer9])
    layer10 = Dense(50, activation='relu')(merged)
    layer11 = Dense(50, activation='relu')(layer10)
    layer12 = Dense(10, activation='relu')(layer11)
    steer_out = Dense(1, activation='tanh')(layer12)
    model = Model(inputs=[input1, input2], outputs=steer_out)
    return model
Example 12
Project: ANN   Author: waynezv   File: ANN_v0.1.py    MIT License 6 votes vote down vote up
def train(in_dim, out_dim, X_train, Y_train, X_test, Y_test):
    """Train a two-hidden-layer MLP classifier and evaluate it on the test set.

    Fix: the original computed the training history, test metrics, predicted
    classes and probabilities but returned nothing, discarding all results.
    They are now returned (callers that ignored the old implicit None are
    unaffected).

    Returns
    -------
    tuple
        (model, hist, loss_and_metrics, classes, proba)
    """
    model = Sequential()
    model.add(Dense(100000, input_dim=in_dim, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(100000, init='uniform'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(out_dim, init='uniform'))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])

    hist = model.fit(X_train, Y_train, nb_epoch=5, batch_size=32,
                     validation_split=0.1, shuffle=True)
    print(hist.history)

    loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)
    classes = model.predict_classes(X_test, batch_size=32)
    proba = model.predict_proba(X_test, batch_size=32)
    return model, hist, loss_and_metrics, classes, proba
Example 13
Project: Mnist-practise   Author: hsmyy   File: mnist.py    MIT License 6 votes vote down vote up
def CNN2(X_train, Y_train, X_test, Y_test, activation='relu'):
    """Train a three-conv-layer CNN on MNIST (legacy Keras 0.x API) and print
    the test score.

    Expects the raw 60000/10000-sample MNIST arrays; labels are one-hot
    encoded here.
    """
    batch_size = 64
    nb_classes = 10
    nb_epoch = 20  # NOTE(review): unused — fit() below hard-codes nb_epoch=10; confirm intended
    np.random.seed(1337)
    X_train = X_train.reshape(60000, 1, 28, 28)
    X_test = X_test.reshape(10000, 1, 28, 28)
    X_train = X_train.astype("float32")
    # Fix: astype() returns a new array; the original discarded the result,
    # leaving X_test with its original dtype.
    X_test = X_test.astype("float32")
    #X_train /= 255
    #X_test /= 255
    print(X_train.shape, 'train samples')
    print(Y_train.shape, 'train labels')
    print(X_test.shape, 'test smaples')

    Y_train = np_utils.to_categorical(Y_train, nb_classes)
    Y_test = np_utils.to_categorical(Y_test, nb_classes)

    model = Sequential()
    model.add(Convolution2D(4, 1, 5, 5, border_mode='valid'))
    model.add(Activation(activation))
    model.add(Convolution2D(8, 4, 3, 3, border_mode='valid'))
    model.add(Activation(activation))
    model.add(MaxPooling2D(poolsize=(2, 2)))
    model.add(Convolution2D(16, 8, 3, 3, border_mode='valid'))
    model.add(Activation(activation))
    model.add(MaxPooling2D(poolsize=(2, 2)))
    model.add(Flatten())
    model.add(Dense(16 * 4 * 4, 128, init='normal'))
    model.add(Activation(activation))
    model.add(Dense(128, nb_classes, init='normal'))
    model.add(Activation('softmax'))
    sgd = SGD(l2=0.0, lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=10, shuffle=True, verbose=1, show_accuracy=True, validation_split=0.2)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size)
    print('Test score:', score)
Example 14
Project: Mnist-practise   Author: hsmyy   File: mnist.py    MIT License 6 votes vote down vote up
def CNN(X_train, Y_train, X_test, Y_test):
    """Train a two-conv-layer CNN on MNIST (legacy Keras 0.x API) and print
    the test score.

    Expects the raw 60000/10000-sample MNIST arrays; labels are one-hot
    encoded here.
    """
    batch_size = 64
    nb_classes = 10
    nb_epoch = 20  # NOTE(review): unused — fit() below hard-codes nb_epoch=30; confirm intended
    np.random.seed(1337)
    X_train = X_train.reshape(60000, 1, 28, 28)
    X_test = X_test.reshape(10000, 1, 28, 28)
    X_train = X_train.astype("float32")
    # Fix: astype() returns a new array; the original discarded the result,
    # leaving X_test with its original dtype.
    X_test = X_test.astype("float32")
    #X_train /= 255
    #X_test /= 255
    print(X_train.shape, 'train samples')
    print(Y_train.shape, 'train labels')
    print(X_test.shape, 'test smaples')

    Y_train = np_utils.to_categorical(Y_train, nb_classes)
    Y_test = np_utils.to_categorical(Y_test, nb_classes)

    model = Sequential()
    model.add(Convolution2D(20, 1, 4, 4))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(MaxPooling2D(poolsize=(2, 2), ignore_border=False))
    model.add(Convolution2D(40, 20, 5, 5))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(MaxPooling2D(poolsize=(3, 3)))
    model.add(Flatten())
    model.add(Dense(40 * 3 * 3, 150, init='normal'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(150, nb_classes, init='normal'))
    model.add(Activation('softmax'))
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=30)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size)
    print('Test score:', score)
Example 15
Project: automlk   Author: pierre-chaville   File: keras_wrapper.py    MIT License 6 votes vote down vote up
def keras_create_model(params, problem_type):
    """Assemble a dense network from a params dict and hand it to
    keras_compile_model for compilation.

    The first layer receives params['input_dim']; each of
    params['number_layers'] blocks is Dense -> activation -> optional
    BatchNormalization -> Dropout. Classification adds a sigmoid output.
    """
    log.info('creating NN structure')
    model = Sequential()
    n_layers = int(params['number_layers'])
    for layer_index in range(n_layers):
        if layer_index == 0:
            model.add(Dense(units=params['units'], input_dim=params['input_dim']))
        else:
            model.add(Dense(units=params['units']))
        model.add(Activation(params['activation']))
        if params['batch_normalization']:
            model.add(BatchNormalization())
        model.add(Dropout(params['dropout']))

    model.add(Dense(params['output_dim']))

    if problem_type == 'classification':
        model.add(Activation('sigmoid'))

    keras_compile_model(model, params, problem_type)
    return model
Example 16
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 6 votes vote down vote up
def cnn_2d(input_shape):
    """2-D CNN classifier: two conv/pool stages, two dense layers, softmax over
    nb_classes (module-level constant)."""
    net = Sequential()
    net.add(Conv2D(100, (3, 3), padding='valid', activation='relu', input_shape=input_shape))
    net.add(MaxPooling2D(pool_size=pool_size))
    net.add(Conv2D(200, (3, 3), padding='valid', activation='relu'))
    net.add(MaxPooling2D(pool_size=pool_size))
    net.add(Flatten())
    net.add(Dense(200, activation='relu'))
    net.add(Dropout(0.5))
    net.add(Dense(84, activation='relu'))
    net.add(Dense(nb_classes, activation='softmax'))

    # Adam with its settings spelled out explicitly (same values as defaults).
    optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    net.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
    return net
Example 17
Project: Keras-GAN   Author: eriklindernoren   File: sgan.py    MIT License 6 votes vote down vote up
def build_generator(self):
        """Map a latent vector to a single-channel image: dense projection to a
        7x7x128 tensor, then two upsample/conv blocks, tanh output."""
        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        # Two upsample -> conv -> relu -> batchnorm blocks (128 then 64 filters).
        for n_filters in (128, 64):
            model.add(UpSampling2D())
            model.add(Conv2D(n_filters, kernel_size=3, padding="same"))
            model.add(Activation("relu"))
            model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(1, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        latent = Input(shape=(self.latent_dim,))
        generated_img = model(latent)

        return Model(latent, generated_img)
Example 18
Project: Keras-GAN   Author: eriklindernoren   File: context_encoder.py    MIT License 6 votes vote down vote up
def build_discriminator(self):
        """Discriminator over the inpainted (missing) region: three conv blocks
        with LeakyReLU and batch norm, then a sigmoid validity score."""
        net = Sequential()

        net.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.missing_shape, padding="same"))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
        net.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
        net.add(Conv2D(256, kernel_size=3, padding="same"))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
        net.add(Flatten())
        net.add(Dense(1, activation='sigmoid'))
        net.summary()

        patch = Input(shape=self.missing_shape)
        validity = net(patch)

        return Model(patch, validity)
Example 19
Project: Keras-GAN   Author: eriklindernoren   File: ccgan.py    MIT License 6 votes vote down vote up
def build_discriminator(self):
        """Build the CCGAN discriminator.

        Returns
        -------
        keras Model mapping an image to [per-patch validity map, class label
        over num_classes + 1 (the extra slot for 'fake')].
        """
        # Fix: removed a dead `img = Input(...)` that was created here and then
        # immediately shadowed by the identical Input below.
        model = Sequential()
        model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape))
        model.add(LeakyReLU(alpha=0.8))
        model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())

        model.summary()

        img = Input(shape=self.img_shape)
        features = model(img)

        # Validity head stays convolutional (one score per spatial patch).
        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)

        label = Flatten()(features)
        label = Dense(self.num_classes+1, activation="softmax")(label)

        return Model(img, [validity, label])
Example 20
Project: Keras-GAN   Author: eriklindernoren   File: bigan.py    MIT License 6 votes vote down vote up
def build_generator(self):
        """Map a latent vector to an image through two 512-unit dense blocks,
        a tanh projection, and a reshape to img_shape."""
        net = Sequential()

        net.add(Dense(512, input_dim=self.latent_dim))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
        net.add(Dense(512))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
        # Project to a flat image in [-1, 1], then restore the spatial shape.
        net.add(Dense(np.prod(self.img_shape), activation='tanh'))
        net.add(Reshape(self.img_shape))

        net.summary()

        latent = Input(shape=(self.latent_dim,))
        generated = net(latent)

        return Model(latent, generated)
Example 21
Project: sanctuary   Author: bzamecnik   File: mnist.py    MIT License 5 votes vote down vote up
def create_conv_model(input_shape, nb_classes, nb_conv_blocks,
        nb_filters, pool_size, kernel_size, dropout, activation,
        fc_layer_width):
    """Convolutional classifier: repeated conv-conv-pool blocks, one dense
    layer of fc_layer_width units, then a softmax head over nb_classes."""
    net = Sequential()
    nb_rows, nb_cols = input_shape
    # Append a single channel dimension so the conv filters have an input depth.
    net.add(Reshape((nb_rows, nb_cols, 1), input_shape=input_shape))
    for _ in range(nb_conv_blocks):
        net.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                              border_mode='valid'))
        net.add(Activation(activation))
        net.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
        net.add(Activation(activation))
        net.add(MaxPooling2D(pool_size=pool_size))
        if dropout > 0:
            net.add(Dropout(dropout))
    net.add(Flatten())
    net.add(Dense(fc_layer_width))
    net.add(Activation(activation))
    net.add(Dropout(dropout))
    net.add(Dense(nb_classes))
    net.add(Activation('softmax'))

    net.summary()

    net.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    return net
Example 22
Project: sklearn2docker   Author: KhaledSharif   File: keras_classifier_test.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def create_binary_classification_model():
        """Binary classifier over 30 features: two 64-unit relu layers with
        dropout, sigmoid output, binary cross-entropy loss."""
        from keras.models import Sequential
        from keras.layers import Dense, Dropout

        net = Sequential()
        net.add(Dense(64, input_shape=(30,), activation='relu'))
        net.add(Dense(64, activation='relu'))
        net.add(Dropout(0.5))
        net.add(Dense(1, activation='sigmoid'))
        net.compile(loss='binary_crossentropy', optimizer='adam')
        return net
Example 23
Project: sklearn2docker   Author: KhaledSharif   File: keras_classifier_test.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def create_categorical_classification_model():
        """Three-class classifier over 30 features: two 64-unit relu layers
        with dropout, softmax output, categorical cross-entropy loss.

        Fix: the output activation was 'sigmoid', which does not produce a
        normalized distribution over the 3 classes; categorical_crossentropy
        expects softmax outputs, so the head now uses 'softmax'.
        """
        from keras.models import Sequential
        from keras.layers import Dense, Dropout

        model = Sequential()
        model.add(Dense(64, input_shape=(30,), activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(3, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adam')
        return model
Example 24
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A Dense layer carrying regularizers and constraints should import.

        (Expected arguments 1 and 3 are whatever keras_param_test defines —
        presumably a layer index and parameter count; see that helper.)
        """
        model = Sequential()
        model.add(Dense(100, kernel_regularizer=regularizers.l2(0.01), bias_regularizer=regularizers.l2(0.01),
                        activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                        bias_constraint='max_norm', activation='relu', input_shape=(16,)))
        model.build()
        self.keras_param_test(model, 1, 3)
Example 25
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A lone Dropout layer should import as type 'Dropout'."""
        net = Sequential()
        net.add(Dropout(0.5, input_shape=(64, 10)))
        net.build()
        self.keras_type_test(net, 0, 'Dropout')
Example 26
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A lone Flatten layer should import as type 'Flatten'."""
        net = Sequential()
        net.add(Flatten(input_shape=(64, 10)))
        net.build()
        self.keras_type_test(net, 0, 'Flatten')
Example 27
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A lone Reshape layer should import as type 'Reshape'."""
        net = Sequential()
        net.add(Reshape((5, 2), input_shape=(10,)))
        net.build()
        self.keras_type_test(net, 0, 'Reshape')
Example 28
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A lone Permute layer should import as type 'Permute'."""
        net = Sequential()
        net.add(Permute((2, 1), input_shape=(64, 10)))
        net.build()
        self.keras_type_test(net, 0, 'Permute')
Example 29
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """An ActivityRegularization layer should import as type 'Regularization'."""
        net = Sequential()
        net.add(ActivityRegularization(l1=2, input_shape=(10,)))
        net.build()
        self.keras_type_test(net, 0, 'Regularization')
Example 30
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A lone Masking layer should import as type 'Masking'."""
        net = Sequential()
        net.add(Masking(mask_value=0., input_shape=(100, 5)))
        net.build()
        self.keras_type_test(net, 0, 'Masking')


# ********** Convolutional Layers ********** 
Example 31
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A SeparableConv2D layer with per-part regularizers/constraints should import."""
        model = Sequential()
        model.add(SeparableConv2D(32, 3, depthwise_regularizer=regularizers.l2(0.01),
                                  pointwise_regularizer=regularizers.l2(0.01),
                                  bias_regularizer=regularizers.l2(0.01),
                                  activity_regularizer=regularizers.l2(0.01), depthwise_constraint='max_norm',
                                  bias_constraint='max_norm', pointwise_constraint='max_norm',
                                  activation='relu', input_shape=(16, 16, 1)))
        # NOTE(review): unlike the sibling tests, there is no model.build() here
        # before keras_param_test — confirm whether the omission is intentional.
        self.keras_param_test(model, 1, 12)
Example 32
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A Conv2DTranspose layer with regularizers and constraints should import."""
        model = Sequential()
        model.add(Conv2DTranspose(32, (3, 3), kernel_regularizer=regularizers.l2(0.01),
                                  bias_regularizer=regularizers.l2(0.01),
                                  activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                                  bias_constraint='max_norm', activation='relu', input_shape=(16, 16, 1)))
        model.build()
        self.keras_param_test(model, 1, 13)
Example 33
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """Global and windowed max-pooling layers (1D/2D/3D) should import,
        each with its expected parameter count."""
        pooling_cases = [
            (GlobalMaxPooling1D(input_shape=(16, 1)), 5),
            (GlobalMaxPooling2D(input_shape=(16, 16, 1)), 8),
            (MaxPooling1D(pool_size=2, strides=2, padding='same', input_shape=(16, 1)), 5),
            (MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', input_shape=(16, 16, 1)), 8),
            (MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same',
                          input_shape=(16, 16, 16, 1)), 11),
        ]
        for pooling_layer, expected_params in pooling_cases:
            net = Sequential()
            net.add(pooling_layer)
            net.build()
            self.keras_param_test(net, 0, expected_params)


# ********** Locally-connected Layers ********** 
Example 34
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """An Embedding layer with a regularizer and a constraint should import."""
        net = Sequential()
        net.add(Embedding(1000, 64, input_length=10, embeddings_regularizer=regularizers.l2(0.01),
                          embeddings_constraint='max_norm'))
        net.build()
        self.keras_param_test(net, 0, 7)


# ********** Merge Layers ********** 
Example 35
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A lone GaussianDropout layer should import with one parameter."""
        net = Sequential()
        net.add(GaussianDropout(rate=0.5, input_shape=(16, 1)))
        net.build()
        self.keras_param_test(net, 0, 1)
Example 36
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """A seeded AlphaDropout layer should import with one parameter."""
        net = Sequential()
        net.add(AlphaDropout(rate=0.5, seed=5, input_shape=(16, 1)))
        net.build()
        self.keras_param_test(net, 0, 1)


# ********** Utility Layers ********** 
Example 37
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet.py    MIT License 5 votes vote down vote up
def CapsNet(input_shape, n_class, num_routing):
    """
    A Capsule Network on MNIST.
    :param input_shape: data shape, 3d, [width, height, channels]
    :param n_class: number of classes
    :param num_routing: number of routing iterations
    :return: Two Keras Models, the first one used for training, and the second one for evaluation.
            `eval_model` can also be used for training.
    """
    x = layers.Input(shape=input_shape)

    # Layer 1: Just a conventional Conv2D layer
    conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)

    # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
    # (PrimaryCap is a project helper — see its definition for the exact reshape.)
    primarycaps = PrimaryCap(conv1, dim_capsule=8, n_channels=32, kernel_size=9, strides=2, padding='valid')

    # Layer 3: Capsule layer. Routing algorithm works here.
    digitcaps = CapsuleLayer(num_capsule=n_class, dim_capsule=16, num_routing=num_routing,
                             name='digitcaps')(primarycaps)

    # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
    # If using tensorflow, this will not be necessary. :)
    out_caps = Length(name='capsnet')(digitcaps)

    # Decoder network.
    y = layers.Input(shape=(n_class,))
    masked_by_y = Mask()([digitcaps, y])  # The true label is used to mask the output of capsule layer. For training
    masked = Mask()(digitcaps)  # Mask using the capsule with maximal length. For prediction

    # Shared Decoder model in training and prediction
    # (input_dim = 16 * n_class: the flattened, masked digit-capsule vectors.)
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(512, activation='relu', input_dim=16*n_class))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))

    # Models for training and evaluation (prediction)
    train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
    eval_model = models.Model(x, [out_caps, decoder(masked)])
    return train_model, eval_model
Example 38
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_keras.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def setUp(self):
        """Create a tiny 100->20->10 softmax Keras model and a TF session for the tests."""
        from keras.models import Sequential
        from keras.layers import Dense, Activation
        import tensorflow as tf

        def dummy_model():
            # Same architecture as before, built layer by layer.
            net = Sequential()
            net.add(Dense(20, name='l1', input_shape=(100,)))
            net.add(Dense(10, name='l2'))
            net.add(Activation('softmax', name='softmax'))
            return net

        self.sess = tf.Session()
        self.sess.as_default()
        self.model = dummy_model()
Example 39
Project: deep-learning-note   Author: wdxtub   File: 3_price_prediction.py    MIT License 5 votes vote down vote up
def build_model():
    """Compile a two-hidden-layer MLP regressor (RMSprop, MSE loss, MAE metric).

    NOTE: the input width is taken from the module-level `train_data` array.
    """
    net = models.Sequential()
    net.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1], )))
    net.add(layers.Dense(64, activation='relu'))
    # Single linear unit: scalar regression output.
    net.add(layers.Dense(1))
    net.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return net
Example 40
Project: keras-anomaly-detection   Author: chen0040   File: convolutional.py    MIT License 5 votes vote down vote up
def create_model(time_window_size, metric):
        """Conv1D + global max-pool regressor over a (time_window_size, 1) series.

        Outputs the whole window via a linear Dense layer; compiled with
        Adam and MSE loss plus the caller-supplied metric.
        """
        stack = [
            Conv1D(filters=256, kernel_size=5, padding='same', activation='relu',
                   input_shape=(time_window_size, 1)),
            GlobalMaxPool1D(),
            Dense(units=time_window_size, activation='linear'),
        ]
        model = Sequential()
        for layer in stack:
            model.add(layer)
        model.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
        print(model.summary())
        return model
Example 41
Project: keras-anomaly-detection   Author: chen0040   File: recurrent.py    MIT License 5 votes vote down vote up
def create_model(time_window_size, metric):
        """Single-LSTM regressor: encode a (time_window_size, 1) series into a
        128-dim state, then predict the whole window with a linear layer."""
        network = Sequential()
        network.add(LSTM(units=128, input_shape=(time_window_size, 1), return_sequences=False))
        network.add(Dense(units=time_window_size, activation='linear'))
        network.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
        print(network.summary())
        return network
Example 42
Project: keras-anomaly-detection   Author: chen0040   File: recurrent.py    MIT License 5 votes vote down vote up
def create_model(time_window_size, metric):
        """Bidirectional-LSTM regressor over a (time_window_size, 1) series.

        Uses dropout/recurrent-dropout of 0.2 inside the LSTM and a linear
        Dense head sized to the window; compiled with Adam + MSE.
        """
        network = Sequential()
        network.add(Bidirectional(LSTM(units=64, dropout=0.2, recurrent_dropout=0.2),
                                  input_shape=(time_window_size, 1)))
        network.add(Dense(units=time_window_size, activation='linear'))
        network.compile(optimizer='adam', loss='mean_squared_error', metrics=[metric])
        print(network.summary())
        return network
Example 43
Project: Handwritten-Digit-Recognition-using-Deep-Learning   Author: anujdutt9   File: neural_network.py    MIT License 5 votes vote down vote up
def build(width, height, depth, total_classes, Saved_Weights_Path=None):
        """LeNet-style CNN: three conv->ReLU->pool stages (20/50/100 filters),
        a 500-unit FC layer, and a softmax over total_classes.

        Uses Theano ('th') channel ordering: input is (depth, height, width).
        Optionally loads pre-trained weights from Saved_Weights_Path.
        """
        model = Sequential()

        # Three 5x5 "same" conv stages; only the first declares the input shape.
        for stage, n_filters in enumerate((20, 50, 100)):
            if stage == 0:
                model.add(Conv2D(n_filters, 5, 5, border_mode="same",
                                 input_shape=(depth, height, width)))
            else:
                model.add(Conv2D(n_filters, 5, 5, border_mode="same"))
            model.add(Activation("relu"))
            model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering="th"))

        # Fully connected layer with ReLU.
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))

        # Softmax classifier head.
        model.add(Dense(total_classes))
        model.add(Activation("softmax"))

        # Load pre-trained weights when a checkpoint path is provided.
        if Saved_Weights_Path is not None:
            model.load_weights(Saved_Weights_Path)
        return model
# --------------------------------- EOC ------------------------------------ 
Example 44
Project: Mnist-practise   Author: hsmyy   File: mnist.py    MIT License 5 votes vote down vote up
def DNN(X_train, Y_train, X_test, Y_test):
    """Train and evaluate a 784-128-128-10 MLP on MNIST-shaped data.

    Expects X_train/X_test reshapeable to (60000, 784) / (10000, 784) and
    integer class labels; prints test score and accuracy.
    Uses the old Keras 0.x API (Dense(input_dim, output_dim), nb_epoch, ...),
    matching the rest of this example.
    """
    batch_size = 64
    nb_classes = 10
    nb_epoch = 20
    np.random.seed(1337)  # reproducibility
    X_train = X_train.reshape(60000,784)
    X_test = X_test.reshape(10000,784)
    X_train = X_train.astype("float32")
    # BUG FIX: astype() returns a NEW array; the original discarded the result,
    # so X_test stayed integer-typed and the in-place `/= 255` below would
    # fail (or truncate) on it.
    X_test = X_test.astype("float32")
    X_train /= 255
    X_test /= 255
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')  # fixed 'smaples' typo

    # One-hot encode the labels for categorical cross-entropy.
    Y_train = np_utils.to_categorical(Y_train, nb_classes)
    Y_test = np_utils.to_categorical(Y_test, nb_classes)

    model = Sequential()
    model.add(Dense(784, 128))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(128,128))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(128,10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms)

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
Example 45
Project: dac   Author: KBNLresearch   File: models.py    GNU General Public License v3.0 5 votes vote down vote up
def create_model(self):
        '''
        Build a one-hidden-layer binary classifier sized to self.data's
        feature width; also sets self.class_weight to favour the positive class.
        '''
        self.class_weight = {0: 0.25, 1: 0.75}

        n_features = self.data.shape[1]
        net = Sequential()
        net.add(Dense(n_features, input_dim=n_features, activation='relu',
                      kernel_constraint=maxnorm(3)))
        net.add(Dropout(0.5))
        net.add(Dense(1, activation='sigmoid'))
        net.compile(optimizer='RMSprop', loss='binary_crossentropy',
                    metrics=['accuracy'])
        return net
Example 46
Project: wrangle   Author: autonomio   File: create_synth_model.py    MIT License 5 votes vote down vote up
def _base_for_model(mode, n=50, neurons=50):
    """Generate synthetic (x, y) data for `mode` and a single-hidden-layer
    Sequential base model sized to the data's feature count."""
    from keras.models import Sequential
    from keras.layers import Dense

    from .create_synth_data import create_synth_data

    x, y = create_synth_data(mode, n=n)

    base = Sequential()
    base.add(Dense(neurons, input_dim=x.shape[1], activation='relu'))

    return x, y, base
Example 47
Project: RFMLS-NEU   Author: neu-spiral   File: BaselineModel2D.py    MIT License 5 votes vote down vote up
def getBaselineModel2D(slice_size=64, classes=1000, cnn_stacks=3, fc_stacks=1, channels=128, dropout_flag=True,
                        fc1=256, fc2=128, batchnorm=False,
                        loss='categorical_crossentropy'):
    """2-D baseline: three identical conv stacks of (7x2)+(5x2) filters with
    (2,1) max-pooling, two dropout-regularised FC layers, softmax output.

    NOTE: cnn_stacks/fc_stacks/dropout_flag/batchnorm/loss are accepted for
    signature compatibility but not used by this 2-D variant.
    """
    model = models.Sequential()
    # First stack declares the (slice_size, 2, 1) input; the rest infer shape.
    model.add(Conv2D(channels, (7, 2), activation='relu', padding='same',
                     input_shape=(slice_size, 2, 1)))
    model.add(Conv2D(channels, (5, 2), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 1)))
    for _ in range(2):
        model.add(Conv2D(channels, (7, 2), activation='relu', padding='same'))
        model.add(Conv2D(channels, (5, 2), activation='relu', padding='same'))
        model.add(MaxPooling2D(pool_size=(2, 1)))

    model.add(Flatten())
    for width in (fc1, fc2):
        model.add(Dense(width, activation='relu'))
        model.add(Dropout(0.5))
    model.add(Dense(classes, activation='softmax'))

    model.summary()

    return model
Example 48
Project: RFMLS-NEU   Author: neu-spiral   File: HomegrownModel.py    MIT License 5 votes vote down vote up
def getHomegrownModel(slice_size=1024, classes=1000, cnn_stacks=3, fc_stacks=1, channels=128, dropout_flag=True,
                        flt=[50, 50, 256, 80], k1=[1, 7], k2=[2, 7], batchnorm=False, dr=0.5,
                        loss='categorical_crossentropy'):
    """Original Homegrown model: two zero-padded convs over a (2, slice_size)
    input, two dense blocks with dropout, and an L2-regularised softmax head.

    `flt` holds [conv1, conv2, dense1, dense2] widths; `k1`/`k2` the conv
    kernel shapes. Batch-norm layers bn_1..bn_4 are inserted when requested.
    """
    in_shp = [2, slice_size]
    net = models.Sequential()

    def add_bn(tag):
        # Optional, individually-named batch normalisation.
        if batchnorm:
            net.add(keras.layers.BatchNormalization(momentum=0.9, name=tag))

    net.add(Reshape([1] + in_shp, input_shape=in_shp))
    net.add(ZeroPadding2D((0, 2)))
    net.add(Conv2D(flt[0], (k1[0], k1[1]), padding="valid", kernel_initializer="glorot_uniform", name="conv1"))
    add_bn('bn_1')
    net.add(Activation('relu'))
    net.add(ZeroPadding2D((0, 2)))
    net.add(Conv2D(flt[1], (k2[0], k2[1]), padding="valid", kernel_initializer="glorot_uniform", name="conv2"))
    add_bn('bn_2')
    net.add(Activation('relu'))

    net.add(Flatten())
    net.add(Dense(flt[2], kernel_initializer='he_normal', name="dense1"))
    add_bn('bn_3')
    net.add(Activation('relu'))
    net.add(Dropout(dr))
    net.add(Dense(flt[3], kernel_initializer='he_normal', name="dense2"))
    add_bn('bn_4')
    net.add(Activation('relu'))
    net.add(Dropout(dr))

    net.add(Dense(classes, kernel_initializer='he_normal', kernel_regularizer=l2(0.0001), name="dense3"))
    net.add(Activation('softmax'))

    net.summary()

    return net
Example 49
Project: RFMLS-NEU   Author: neu-spiral   File: BaselineModel.py    MIT License 5 votes vote down vote up
def getBaselineModel(slice_size=64, classes=1000, cnn_stacks=3, fc_stacks=1, channels=128, dropout_flag=True,
                        fc1=256, fc2=128, batchnorm=False,
                        loss='categorical_crossentropy'):
    """A dummy model to test the functionalities of the Data Generator"""
    # cnn_stacks conv stacks of Conv1D(7)+Conv1D(5) with optional batch-norm,
    # then (fc_stacks - 1) fc1-wide layers, one fc2 layer, and softmax output.
    model = models.Sequential()

    # Stack 1 declares the (slice_size, 2) input shape.
    model.add(Conv1D(channels, 7, activation='relu', padding='same', input_shape=(slice_size, 2)))
    model.add(Conv1D(channels, 5, padding='same'))
    if batchnorm:
        model.add(keras.layers.BatchNormalization(momentum=0.9, name='bn_1'))
    model.add(Activation('relu'))
    model.add(MaxPooling1D())

    # Remaining conv stacks (bn_2, bn_3, ... when batchnorm is on).
    for i in range(1, cnn_stacks):
        model.add(Conv1D(channels, 7, activation='relu', padding='same'))
        model.add(Conv1D(channels, 5, padding='same'))
        if batchnorm:
            model.add(keras.layers.BatchNormalization(momentum=0.9, name='bn_' + str(i + 1)))
        model.add(Activation('relu'))
        model.add(MaxPooling1D())

    model.add(Flatten())
    for _ in range(1, fc_stacks):
        model.add(Dense(fc1, activation='relu'))
        if dropout_flag:
            model.add(Dropout(0.5))
    model.add(Dense(fc2, activation='relu'))
    if dropout_flag:
        model.add(Dropout(0.5))
    model.add(Dense(classes, activation='softmax'))

    # Compilation is intentionally left to the caller.
    return model
Example 50
Project: RFMLS-NEU   Author: neu-spiral   File: evaluate_model.py    MIT License 5 votes vote down vote up
def getModel(slice_size, classes):
        """A dummy conv net to exercise the Data Generator: three Conv1D pairs
        with pooling, two dropout-regularised FC layers, softmax output.

        NOTE: reads the filter count from the module-level `args`.
        """
        model = models.Sequential()
        model.add(Conv1D(args.num_filters, 7, activation='relu', padding='same',
                         input_shape=(slice_size, 2)))
        model.add(Conv1D(args.num_filters, 5, activation='relu', padding='same'))
        model.add(MaxPooling1D())
        for _ in range(2):
            model.add(Conv1D(args.num_filters, 7, activation='relu', padding='same'))
            model.add(Conv1D(args.num_filters, 5, activation='relu', padding='same'))
            model.add(MaxPooling1D())

        model.add(Flatten())
        for width in (256, 128):
            model.add(Dense(width, activation='relu'))
            model.add(Dropout(0.5))
        model.add(Dense(classes, activation='softmax'))

        optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        model.summary()

        return model

    # Take one random 1k dataset just to test the Data Generator 
Example 51
Project: Stock_Market_Forecast   Author: cuevas1208   File: keras_model.py    MIT License 5 votes vote down vote up
def __init__(self):
        """Build and store the Keras model immediately on construction."""
        self.get_model()
Example 52
Project: Stock_Market_Forecast   Author: cuevas1208   File: keras_model.py    MIT License 5 votes vote down vote up
def get_model(self):
        """Build and compile self.model: a Lambda-normalised 32x32x1 input,
        three small conv/avg-pool blocks, then FC layers sized by the number
        of classes (softmax/cross-entropy if classes > 1, else MSE regression).
        """
        self.model = Sequential()
        # Preprocess incoming data: scale roughly into [-1, 1] around zero.
        self.model.add(Lambda(lambda x: (x/6) - 1., input_shape=(32, 32, 1), output_shape=(32, 32, 1)))
        print(self.model.output_shape)
        self.model.add(Convolution2D(4, 2, 2))
        print(self.model.output_shape)
        self.model.add(AveragePooling2D((2, 2)))
        self.model.add(Activation('relu'))
        print(self.model.output_shape)

        self.model.add(Convolution2D(8, 1, 1))
        self.model.add(Activation('relu'))
        print(self.model.output_shape)

        self.model.add(Convolution2D(16, 2, 2))
        print(self.model.output_shape)
        self.model.add(AveragePooling2D((2, 2)))
        self.model.add(Activation('relu'))
        print(self.model.output_shape)

        self.model.add(Flatten())
        self.model.add(Activation('relu'))
        print(self.model.output_shape)

        self.model.add(Dense(200))
        self.model.add(Activation('relu'))
        classes = getclassesTotal()
        self.model.add(Dense(classes))
        if (classes > 1):
            # BUG FIX: the original called `model.add(...)` on an undefined
            # local name (NameError at runtime); it must target self.model.
            # NOTE(review): this stacks a second Dense(classes) softmax on top
            # of the Dense(classes) just added above — confirm that is intended.
            self.model.add(Dense(classes, activation='softmax'))
            self.model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
        else:
            self.model.compile(optimizer="adam", loss="mse", metrics=['accuracy'])
        print(self.model.output_shape)
Example 53
Project: ppi_lstm_rnn_keras   Author: ylhsieh   File: train_keras.py    MIT License 5 votes vote down vote up
def main():
    """Run 10-fold cross-validation of a BiLSTM sentence classifier.

    For each fold: registers a fresh TF session (GPU memory capped at 50%
    per process), builds a fresh model, trains with an F-score logging
    callback, then prints precision/recall/F-score on the held-out fold.
    """

    def build_model():
        # Pre-trained embeddings -> dropout -> BiLSTM -> dropout -> 2-way softmax.
        model = Sequential()
        model.add(Embedding(len(train_vocab), hidden_size, weights=[embedding_array],\
                            input_length=max_sequence_length))
        model.add(Dropout(dropout_rate))
        model.add(Bidirectional(LSTM(rnn_hidden_size)))
        model.add(Dropout(dropout_rate))
        model.add(Dense(2, activation='softmax'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        return model

    # Vocabulary and embedding matrix are shared across all folds.
    train_vocab = load_vocab_from(opt.data + '.vocab')
    embedding_array = load_pretrained_embeddings(train_vocab, pretrained_embeddings_file)
    for fold_id in range(10):
        # Register the session BEFORE building the model so the Keras layers
        # attach to this fold's session.
        tfsession = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5)))
        K.set_session(tfsession)
        train_file = 'corpus/{}_f{}_train.txt'.format(opt.data, fold_id)
        test_file = 'corpus/{}_f{}_test.txt'.format(opt.data, fold_id)
        log_file = '{}_f{}.log'.format(opt.data, fold_id)
        x_train, x_test, y_train, y_test, _ = read_corpus(train_file, test_file, train_vocab)
        fscore_cb = FscoreLogCallback(log_file)
        model = build_model()
        print("Fold {}".format(fold_id))
        model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, \
                  callbacks=[fscore_cb], verbose=2)
        # Labels come back one-hot; argmax converts both sides to class ids.
        predicted = np.argmax(model.predict(x_test), axis=1)
        y_test_to_label = np.argmax(y_test, axis=1)
        prec, reca, fscore, sup = precision_recall_fscore_support(y_test_to_label, predicted, average='binary')
        print("Final Precision:{:2.2f}% Recall:{:2.2f}% Fscore:{:2.2f}%".format(prec*100, reca*100, fscore*100))
Example 54
Project: Anamoly-Detection   Author: msmsk05   File: auto_encoder.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _build_model(self):
        """Assemble and compile the autoencoder: an input layer sized to
        self.n_features_, every width in self.hidden_neurons_ (note: the
        first width appears twice — once as the input layer, once again in
        the loop, mirroring the original construction), and an output layer
        restoring n_features_. Each Dense layer carries L2 activity
        regularisation; each hidden layer is followed by dropout.
        """
        net = Sequential()

        # Input layer: n_features_ -> first hidden width.
        net.add(Dense(self.hidden_neurons_[0],
                      activation=self.hidden_activation,
                      input_shape=(self.n_features_,),
                      activity_regularizer=l2(self.l2_regularizer)))
        net.add(Dropout(self.dropout_rate))

        # All configured hidden widths (including index 0 again).
        for width in self.hidden_neurons_:
            net.add(Dense(width,
                          activation=self.hidden_activation,
                          activity_regularizer=l2(self.l2_regularizer)))
            net.add(Dropout(self.dropout_rate))

        # Reconstruction layer.
        net.add(Dense(self.n_features_,
                      activation=self.output_activation,
                      activity_regularizer=l2(self.l2_regularizer)))

        net.compile(loss=self.loss, optimizer=self.optimizer)
        print(net.summary())
        return net

    # noinspection PyUnresolvedReferences 
Example 55
Project: Anamoly-Detection   Author: msmsk05   File: gaal_base.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def create_discriminator(latent_size, data_size):  # pragma: no cover
    """Create the discriminator of the GAN for a given latent size.

    Parameters
    ----------
    latent_size : int
        The size of the latent space of the generator.

    data_size : int
        Size of the input data.

    Returns
    -------
    D : Keras model() object
        Returns a model() object.
    """

    def fan_in_normal():
        # A fresh VarianceScaling(fan_in, normal) initializer per layer,
        # matching the original's repeated inline construction.
        return keras.initializers.VarianceScaling(scale=1.0, mode='fan_in',
                                                  distribution='normal',
                                                  seed=None)

    hidden_units = int(math.ceil(math.sqrt(data_size)))

    dis = Sequential()
    dis.add(Dense(hidden_units, input_dim=latent_size, activation='relu',
                  kernel_initializer=fan_in_normal()))
    dis.add(Dense(1, activation='sigmoid',
                  kernel_initializer=fan_in_normal()))

    data = Input(shape=(latent_size,))
    return Model(data, dis(data))
Example 56
Project: AIGame   Author: chenghongkuan   File: RL_brain_Keras.py    GNU General Public License v3.0 5 votes vote down vote up
def _build_net(self):
        """Build the evaluation network and the (untrained) target network.

        Both nets map n_features inputs to one Q-value per action; only the
        evaluation net is compiled, since the target net is never trained
        directly (its weights are copied from the evaluation net elsewhere).
        """
        # ------------------ build the evaluation network ------------------
        # The network here is only used to output Q-values for each action;
        # the final decision is made from those values (Q-table style).
        # So it is effectively a plain value-mapping network rather than a
        # fixed-class classifier; as in the previous example we use a small
        # two-layer network.
        self.model_eval = Sequential([
            # input layer, with an activation on each unit
            Dense(self.first_layer_neurno, input_dim=self.n_features, activation='relu'),
            # Activation('relu'),
            # Dense(1, activation='tanh'),
            # output layer: one Q-value per action
            Dense(self.n_actions),
        ])
        # RMSprop optimizer with the configured learning rate
        rmsprop = RMSprop(lr=self.lr, rho=0.9, epsilon=1e-08, decay=0.0)
        self.model_eval.compile(loss='mse',
                            optimizer=rmsprop,
                            metrics=['accuracy'])

        # ------------------ build the target network ------------------
        # The target network must have the same architecture as the
        # evaluation network, but needs no loss function / compilation.
        self.model_target = Sequential([
            # input layer, with an activation on each unit
            Dense(self.first_layer_neurno, input_dim=self.n_features, activation='relu'),
            # Activation('relu'),
            # Dense(1, activation='tanh'),
            # output layer: one Q-value per action
            Dense(self.n_actions),
        ]) 
Example 57
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: gan_keras.py    MIT License 5 votes vote down vote up
def Combined_model(g, d):
    """Stack generator `g` and discriminator `d` into one end-to-end model."""
    stacked = Sequential()
    for sub_model in (g, d):
        stacked.add(sub_model)
    return stacked
Example 58
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: cgan_cifar10_keras.py    MIT License 5 votes vote down vote up
def Combined_model(g, d):
    """Chain conditional generator `g` into discriminator `d`.

    Inputs are the 100-dim noise vector plus the class condition both as a
    one-hot vector and as a per-pixel condition map; output is d's score.
    """
    noise_in = Input([100, ], name="x")
    cond_in = Input([num_classes, ], name="con_x")
    cond_map_in = Input([img_height, img_width, num_classes], name="con_x2")

    generated = g(inputs=[noise_in, cond_in, cond_map_in])
    score = d(generated)

    return Model(inputs=[noise_in, cond_in, cond_map_in], outputs=[score])
Example 59
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: dcgan_keras.py    MIT License 5 votes vote down vote up
def Combined_model(g, d):
    """Compose generator `g` followed by discriminator `d` as one Sequential."""
    combined = Sequential()
    combined.add(g)
    combined.add(d)
    return combined
Example 60
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: gan_cifar10_keras.py    MIT License 5 votes vote down vote up
def Combined_model(g, d):
    """Wire the generator into the discriminator as a single trainable stack."""
    gan = Sequential()
    for part in (g, d):
        gan.add(part)
    return gan
Example 61
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: cgan_mnist_keras.py    MIT License 5 votes vote down vote up
def Combined_model(g, d):
    """Conditional GAN stack: noise + one-hot condition + condition map go
    through generator `g`, and its output through discriminator `d`."""
    z = Input([100, ], name="x")
    label_vec = Input([num_classes, ], name="con_x")
    label_map = Input([img_height, img_width, num_classes], name="con_x2")

    fake_img = g(inputs=[z, label_vec, label_map])
    judgement = d(fake_img)

    return Model(inputs=[z, label_vec, label_map], outputs=[judgement])
Example 62
Project: Keras-GAN   Author: eriklindernoren   File: sgan.py    MIT License 5 votes vote down vote up
def build_discriminator(self):
        """Shared conv feature extractor feeding two heads: a sigmoid
        real/fake validity score and a softmax over num_classes + 1 labels
        (the extra class marks generated samples)."""
        body = Sequential()

        body.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        body.add(LeakyReLU(alpha=0.2))
        body.add(Dropout(0.25))

        body.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        # Pad bottom/right by one so the following strided conv divides evenly.
        body.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        body.add(LeakyReLU(alpha=0.2))
        body.add(Dropout(0.25))
        body.add(BatchNormalization(momentum=0.8))

        body.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        body.add(LeakyReLU(alpha=0.2))
        body.add(Dropout(0.25))
        body.add(BatchNormalization(momentum=0.8))

        body.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        body.add(LeakyReLU(alpha=0.2))
        body.add(Dropout(0.25))
        body.add(Flatten())

        body.summary()

        img = Input(shape=self.img_shape)
        features = body(img)

        validity_head = Dense(1, activation="sigmoid")(features)
        label_head = Dense(self.num_classes+1, activation="softmax")(features)

        return Model(img, [validity_head, label_head])
Example 63
Project: Keras-GAN   Author: eriklindernoren   File: context_encoder.py    MIT License 5 votes vote down vote up
def build_generator(self):
        """Conv encoder/decoder that maps a masked image to the missing region."""
        net = Sequential()

        # Encoder: stride-2 convs, 32 -> 64 -> 128 channels.
        net.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
        for depth in (64, 128):
            net.add(Conv2D(depth, kernel_size=3, strides=2, padding="same"))
            net.add(LeakyReLU(alpha=0.2))
            net.add(BatchNormalization(momentum=0.8))

        # Bottleneck: 1x1 stride-2 conv with heavy dropout.
        net.add(Conv2D(512, kernel_size=1, strides=2, padding="same"))
        net.add(LeakyReLU(alpha=0.2))
        net.add(Dropout(0.5))

        # Decoder: two upsample+conv stages, then project to image channels.
        for depth in (128, 64):
            net.add(UpSampling2D())
            net.add(Conv2D(depth, kernel_size=3, padding="same"))
            net.add(Activation('relu'))
            net.add(BatchNormalization(momentum=0.8))
        net.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        net.add(Activation('tanh'))

        net.summary()

        masked_img = Input(shape=self.img_shape)
        gen_missing = net(masked_img)

        return Model(masked_img, gen_missing)
Example 64
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo3.py    MIT License 4 votes vote down vote up
def foo():
	"""Two-stage CNN for 1x224x224 inputs (Theano dim ordering): stages of
	32 then 64 filters, each with kernels 8/6/4/2, followed by a 220-unit
	FC layer and a 2-way sigmoid output."""
	# Determine proper input shape / dim ordering.
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	model = Sequential()

	# Conv stages: layers are named conv<stage>_<index>; only the very
	# first conv declares the input shape.
	for stage, n_filters in enumerate((32, 64), start=1):
		for idx, k in enumerate((8, 6, 4, 2), start=1):
			layer_name = 'conv%d_%d' % (stage, idx)
			if stage == 1 and idx == 1:
				model.add(Convolution2D(n_filters, k, k, input_shape=input_shape, init=weight_init, name=layer_name))
			else:
				model.add(Convolution2D(n_filters, k, k, init=weight_init, name=layer_name))
			model.add(BatchNormalization())
			model.add(Activation('relu'))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(dropout))

	# Classifier head.
	model.add(Flatten())
	model.add(Dense(220, init=weight_init))
	model.add(Activation('relu'))
	model.add(Dropout(dropout))

	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model
Example 65
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_three.py    MIT License 4 votes vote down vote up
def foo():
	"""VGG-like CNN for 1x224x224 inputs (Theano dim ordering): conv stages
	of 32, 64 (x3), 128, and 512 'same'-padded filters with batch-norm and
	dropout, a 120-unit FC layer, and a 2-way sigmoid output."""
	# Determine proper input shape / dim ordering.
	if keras.__version__ > '1.0.3':
		K.set_image_dim_ordering('th')
	input_shape = (1, 224, 224)

	model = Sequential()

	model.add(Convolution2D(32, 3, 3,
			        input_shape=input_shape,init=weight_init, name='conv1_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(32, 3, 3,init=weight_init, name='conv1_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(dropout))

	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(64, 3, 3,init=weight_init, name='conv2_3'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(dropout))

	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(128, 3, 3,init=weight_init, name='conv3_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(dropout))

	model.add(Convolution2D(512, 3,3,init=weight_init, border_mode='same', name='conv4_1'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(Convolution2D(512, 3,3,init=weight_init, border_mode='same', name='conv4_2'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))
	model.add(Dropout(dropout))

	model.add(Flatten())
	model.add(Dense(120, init=weight_init))
	model.add(Activation('relu'))
	# BUG FIX: the original added Dropout(dropout) twice back-to-back here
	# (copy-paste), compounding the drop probability; one dropout is intended.
	model.add(Dropout(dropout))

	model.add(Dense(2))
	model.add(Activation('sigmoid'))

	return model
Example 66
Project: MODS_ConvNet   Author: santiagolopezg   File: convnet_keras_1.py    MIT License 4 votes vote down vote up
def network(regl1, regl2, weight_init, dropout, optimize):
    """Five conv/BN/ReLU/pool/dropout blocks with L1L2 weight regularisation,
    a 50-unit FC layer, and a sigmoid output; compiled with binary
    cross-entropy and the supplied optimizer."""
    model = Sequential()

    # First block: 5x5 conv declaring the (1, 256, 192) input.
    model.add(Convolution2D(16, 5, 5, input_shape=(1, 256, 192),
                            W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Four more 3x3 conv blocks: 32 filters, then 64 three times.
    for n_filters in (32, 64, 64, 64):
        model.add(Convolution2D(n_filters, 3, 3,
                                W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(50, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(output_dim=1))
    model.add(Activation('sigmoid'))

    model.compile(optimizer=optimize, loss='binary_crossentropy', metrics=['accuracy'])

    return model
Example 67
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo.py    MIT License 4 votes vote down vote up
def foo():
    """Build (without compiling) a three-stage CNN for 1x224x224 input.

    Stages use 32, 64 and 128 filters respectively; each conv is followed
    by batch normalization and ReLU, each stage by 2x2 max pooling and
    dropout. The head is Dense(10) -> ReLU -> Dropout -> Dense(2) -> sigmoid.

    NOTE(review): relies on module-level `weight_init` and `dropout` being
    defined elsewhere in this file -- confirm before reuse.
    """
    # Force Theano-style (channels-first) ordering on newer Keras versions.
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    def conv_bn_relu(n_filters, k, layer_name, **extra):
        # Conv -> BatchNorm -> ReLU triplet shared by all stages.
        model.add(Convolution2D(n_filters, k, k, init=weight_init,
                                name=layer_name, **extra))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Stage 1: 32 filters, first conv 5x5, rest 3x3.
    conv_bn_relu(32, 5, 'conv1_1', input_shape=input_shape)
    conv_bn_relu(32, 3, 'conv1_2')
    conv_bn_relu(32, 3, 'conv1_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 116, out 58
    model.add(Dropout(dropout))

    # Stage 2: 64 filters, all 3x3.
    conv_bn_relu(64, 3, 'conv2_1')
    conv_bn_relu(64, 3, 'conv2_2')
    conv_bn_relu(64, 3, 'conv2_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in is 52, out is 26
    model.add(Dropout(dropout))

    # Stage 3: 128 filters, all 3x3.
    conv_bn_relu(128, 3, 'conv3_1')
    conv_bn_relu(128, 3, 'conv3_2')
    conv_bn_relu(128, 3, 'conv3_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in is 20, out is 10
    model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(10, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 68
Project: MODS_ConvNet   Author: santiagolopezg   File: convnet_keras_2.py    MIT License 4 votes vote down vote up
def network(regl1, regl2, weight_init, dropout, optimize):
    """Assemble and compile the binary CNN classifier for 1x256x192 input.

    Architecture: five Conv/BatchNorm/ReLU/MaxPool/Dropout stages, then a
    50-unit ReLU dense layer and a single sigmoid unit. Compiled with
    binary cross-entropy and the given optimizer.

    regl1, regl2 -- L1/L2 penalty factors for all weighted layers.
    weight_init  -- initializer name for layer weights.
    dropout      -- dropout rate after every pooling stage.
    optimize     -- optimizer handed to model.compile.
    """
    def fresh_reg():
        # One regularizer instance per layer, as in the original code.
        return l1l2(l1=regl1, l2=regl2)

    model = Sequential()

    # Opening stage fixes the input shape and uses a 5x5 kernel.
    model.add(Convolution2D(16, 5, 5, input_shape=(1, 256, 192),
                            W_regularizer=fresh_reg(), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Remaining stages all use 3x3 kernels.
    for n_filters in (32, 64, 64, 64):
        model.add(Convolution2D(n_filters, 3, 3,
                                W_regularizer=fresh_reg(), init=weight_init))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(50, W_regularizer=fresh_reg(), init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(output_dim=1))
    model.add(Activation('sigmoid'))

    model.compile(optimizer=optimize, loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
Example 69
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_two.py    MIT License 4 votes vote down vote up
def foo():
    """Build (without compiling) a four-stage CNN for 1x224x224 input.

    Stages use 16, 32, 64 and 128 filters; stage 4 convs use 'same'
    border mode. Head: Dense(120) -> ReLU -> Dropout -> Dense(2) -> sigmoid.

    Fix: the original applied Dropout twice back-to-back after the dense
    layer (L: Dropout(dropout); Dropout(dropout)) -- almost certainly a
    copy-paste slip that compounded the drop rate; dropout is now applied
    once.

    NOTE(review): relies on module-level `weight_init` and `dropout` being
    defined elsewhere in this file -- confirm before reuse.
    """
    # Force Theano-style (channels-first) ordering on newer Keras versions.
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    # Stage 1: 16 filters.
    model.add(Convolution2D(16, 3, 3,
                            input_shape=input_shape, init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(16, 3, 3, init=weight_init, name='conv1_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 2: 32 filters.
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv2_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv2_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv2_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 3: 64 filters.
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv3_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv3_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 4: 128 filters, 'same' border mode preserves spatial size.
    model.add(Convolution2D(128, 3, 3, init=weight_init, border_mode='same', name='conv4_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3, init=weight_init, border_mode='same', name='conv4_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Classifier head; single dropout (was duplicated in the original).
    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 70
Project: MODS_ConvNet   Author: santiagolopezg   File: foo_three.py    MIT License 4 votes vote down vote up
def foo():
    """Build (without compiling) a four-stage CNN for 1x224x224 input.

    Stages use 32, 64, 128 and 512 filters; stage 4 convs use 'same'
    border mode. Head: Dense(120) -> ReLU -> Dropout -> Dense(2) -> sigmoid.

    Fix: the original applied Dropout twice back-to-back after the dense
    layer -- almost certainly a copy-paste slip that compounded the drop
    rate; dropout is now applied once.

    NOTE(review): relies on module-level `weight_init` and `dropout` being
    defined elsewhere in this file -- confirm before reuse.
    """
    # Force Theano-style (channels-first) ordering on newer Keras versions.
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    # Stage 1: 32 filters.
    model.add(Convolution2D(32, 3, 3,
                            input_shape=input_shape, init=weight_init, name='conv1_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, init=weight_init, name='conv1_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 2: 64 filters.
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, init=weight_init, name='conv2_3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 3: 128 filters.
    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3, init=weight_init, name='conv3_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Stage 4: 512 filters, 'same' border mode preserves spatial size.
    model.add(Convolution2D(512, 3, 3, init=weight_init, border_mode='same', name='conv4_1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution2D(512, 3, 3, init=weight_init, border_mode='same', name='conv4_2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))

    # Classifier head; single dropout (was duplicated in the original).
    model.add(Flatten())
    model.add(Dense(120, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 71
Project: MODS_ConvNet   Author: santiagolopezg   File: little_foo2.py    MIT License 4 votes vote down vote up
def foo():
    """Build (without compiling) a two-stage CNN for 1x224x224 input.

    Each stage stacks three conv/BN/ReLU units with shrinking kernels
    (7 -> 5 -> 3), then max-pools and applies dropout. The head is
    Dense(220) -> ReLU -> Dropout -> Dense(2) -> sigmoid.

    NOTE(review): relies on module-level `weight_init` and `dropout` being
    defined elsewhere in this file -- confirm before reuse.
    """
    # Force Theano-style (channels-first) ordering on newer Keras versions.
    if keras.__version__ > '1.0.3':
        K.set_image_dim_ordering('th')
    input_shape = (1, 224, 224)

    model = Sequential()

    def conv_unit(n_filters, k, layer_name, **extra):
        # Conv -> BatchNorm -> ReLU triplet.
        model.add(Convolution2D(n_filters, k, k, init=weight_init,
                                name=layer_name, **extra))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # Stage 1: 32 filters, kernels 7/5/3.
    conv_unit(32, 7, 'conv1_1', input_shape=input_shape)
    conv_unit(32, 5, 'conv1_2')
    conv_unit(32, 3, 'conv1_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in 212, out 106
    model.add(Dropout(dropout))

    # Stage 2: 64 filters, same kernel schedule.
    conv_unit(64, 7, 'conv2_1')
    conv_unit(64, 5, 'conv2_2')
    conv_unit(64, 3, 'conv2_3')
    model.add(MaxPooling2D(pool_size=(2, 2)))  # in is 94, out is 47
    model.add(Dropout(dropout))

    # Classifier head.
    model.add(Flatten())
    model.add(Dense(220, init=weight_init))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))

    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
Example 72
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
              channels=1, nb_filters=64, nb_classes=10):
    """
    Defines a CNN model using Keras sequential model
    :param logits: If set to False, returns a Keras model, otherwise will also
                    return logits tensor
    :param input_ph: The TensorFlow tensor for the input
                    (needed if returning logits)
                    ("ph" stands for placeholder but it need not actually be a
                    placeholder)
    :param img_rows: number of row in the image
    :param img_cols: number of columns in the image
    :param channels: number of color channels (e.g., 1 for MNIST)
    :param nb_filters: number of convolutional filters per layer
    :param nb_classes: the number of output classes
    :return:
    """
    # Channel axis placement depends on the active Keras image ordering.
    if keras.backend.image_dim_ordering() == 'th':
        input_shape = (channels, img_rows, img_cols)
    else:
        input_shape = (img_rows, img_cols, channels)

    model = Sequential()
    model.add(conv_2d(nb_filters, (8, 8), (2, 2), "same",
                      input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(conv_2d(nb_filters * 2, (6, 6), (2, 2), "valid"))
    model.add(Activation('relu'))
    model.add(conv_2d(nb_filters * 2, (5, 5), (1, 1), "valid"))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(nb_classes))

    # Capture the pre-softmax output tensor BEFORE appending the softmax
    # layer, so the returned tensor really is the logits.
    logits_tensor = model(input_ph) if logits else None

    model.add(Activation('softmax'))

    if logits:
        return model, logits_tensor
    return model
Example 73
Project: Scene-Understanding   Author: foamliu   File: vgg16.py    MIT License 4 votes vote down vote up
def vgg16_model(img_rows, img_cols, channel=3):
    """Build VGG16 (channels-last) and load ImageNet-pretrained weights.

    img_rows, img_cols -- spatial input size.
    channel            -- number of input channels (default RGB).

    Weights are loaded from
    'models/vgg16_weights_tf_dim_ordering_tf_kernels.h5'.
    """
    model = Sequential()

    # (conv count, filters, layer-name prefix) per VGG block.
    vgg_blocks = [(2, 64, 'conv1'), (2, 128, 'conv2'), (3, 256, 'conv3'),
                  (3, 512, 'conv4'), (3, 512, 'conv5')]

    is_first_layer = True
    for n_convs, n_filters, prefix in vgg_blocks:
        for i in range(1, n_convs + 1):
            if is_first_layer:
                # The very first padding layer also declares the input shape.
                model.add(ZeroPadding2D((1, 1),
                                        input_shape=(img_rows, img_cols, channel),
                                        name='input'))
                is_first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Conv2D(n_filters, (3, 3), activation='relu',
                             name='%s_%d' % (prefix, i)))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Fully connected classifier head.
    model.add(Flatten(name='flatten'))
    model.add(Dense(4096, activation='relu', name='dense1'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu', name='dense2'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax', name='softmax'))

    # Loads ImageNet pre-trained data.
    weights_path = 'models/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
    model.load_weights(weights_path)

    return model
Example 74
Project: ANN   Author: waynezv   File: ANN.py    MIT License 4 votes vote down vote up
def train_mlp(self, input, output):
        """Train a 3-layer MLP on the real parts of the given fields.

        input, output -- objects whose ``.data`` dict holds 'real' and
            'imag' 3-D arrays; only the real parts are used for training.

        Side effects: stores the arrays on ``self``; writes the trained
        architecture to model_architecture.json / model_architecture.yaml
        and the weights to weights.h5.

        Fixes vs. original:
        - reshape(1, dim) instead of reshape(dim, 1): the model declares
          input_dim=in_dim, so a sample must have in_dim features; the old
          shape made Keras reject the data at fit time.
        - removed the bare ``model.train_on_batch()`` call, which raised
          TypeError (no arguments supplied).
        - files are now closed deterministically via ``with``.
        """
        self.in_real = input.data['real']
        self.in_imag = input.data['imag']
        self.out_real = output.data['real']
        self.out_imag = output.data['imag']

        (i_dim_x, i_dim_y, i_dim_z) = self.in_real.shape
        in_dim = i_dim_x * i_dim_y * i_dim_z
        # One sample of in_dim features, matching Dense(input_dim=in_dim).
        input_data = self.in_real.reshape(1, in_dim)

        (o_dim_x, o_dim_y, o_dim_z) = self.out_real.shape
        out_dim = o_dim_x * o_dim_y * o_dim_z
        output_data = self.out_real.reshape(1, out_dim)

        model = Sequential()
        model.add(Dense(200, input_dim=in_dim, init='uniform'))
        model.add(Activation('relu'))

        model.add(Dense(200))
        model.add(Activation('relu'))

        model.add(Dense(out_dim))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='sgd',
                      metrics=['accuracy'])

        # Stop early once validation loss plateaus.
        early_stop = EarlyStopping(monitor='val_loss', patience=2)
        hist = model.fit(input_data, output_data, nb_epoch=50,
                         batch_size=64, validation_split=0.2,
                         shuffle=True, callbacks=[early_stop])
        print(hist.history)
        # TODO: implement real mini-batch training; the original called
        # model.train_on_batch() with no arguments, which always raised.

        # Save model architecture and weights.
        with open('model_architecture.json', 'w') as json_file:
            json_file.write(model.to_json())
        with open('model_architecture.yaml', 'w') as yaml_file:
            yaml_file.write(model.to_yaml())
        model.save_weights('weights.h5')
Example 75
Project: ANN   Author: waynezv   File: ANN_large_v22.py    MIT License 4 votes vote down vote up
def train_mlp(self, input, output):
        """Train a 3-layer MLP on the real parts of the given fields.

        input, output -- objects whose ``.data`` dict holds 'real' and
            'imag' 3-D arrays; only the real parts are used for training.

        Side effects: stores the arrays on ``self``; writes the trained
        architecture to model_architecture.json / model_architecture.yaml
        and the weights to weights.h5.

        Fixes vs. original:
        - reshape(1, dim) instead of reshape(dim, 1): the model declares
          input_dim=in_dim, so a sample must have in_dim features; the old
          shape made Keras reject the data at fit time.
        - removed the bare ``model.train_on_batch()`` call, which raised
          TypeError (no arguments supplied).
        - files are now closed deterministically via ``with``.
        """
        self.in_real = input.data['real']
        self.in_imag = input.data['imag']
        self.out_real = output.data['real']
        self.out_imag = output.data['imag']

        (i_dim_x, i_dim_y, i_dim_z) = self.in_real.shape
        in_dim = i_dim_x * i_dim_y * i_dim_z
        # One sample of in_dim features, matching Dense(input_dim=in_dim).
        input_data = self.in_real.reshape(1, in_dim)

        (o_dim_x, o_dim_y, o_dim_z) = self.out_real.shape
        out_dim = o_dim_x * o_dim_y * o_dim_z
        output_data = self.out_real.reshape(1, out_dim)

        model = Sequential()
        model.add(Dense(200, input_dim=in_dim, init='uniform'))
        model.add(Activation('relu'))

        model.add(Dense(200))
        model.add(Activation('relu'))

        model.add(Dense(out_dim))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='sgd',
                      metrics=['accuracy'])

        # Stop early once validation loss plateaus.
        early_stop = EarlyStopping(monitor='val_loss', patience=2)
        hist = model.fit(input_data, output_data, nb_epoch=50,
                         batch_size=64, validation_split=0.2,
                         shuffle=True, callbacks=[early_stop])
        print(hist.history)
        # TODO: implement real mini-batch training; the original called
        # model.train_on_batch() with no arguments, which always raised.

        # Save model architecture and weights.
        with open('model_architecture.json', 'w') as json_file:
            json_file.write(model.to_json())
        with open('model_architecture.yaml', 'w') as yaml_file:
            yaml_file.write(model.to_yaml())
        model.save_weights('weights.h5')
Example 76
Project: ANN   Author: waynezv   File: ANN_large_v3.py    MIT License 4 votes vote down vote up
def train_mlp(self, input, output):
        """Train a 3-layer MLP on the real parts of the given fields.

        input, output -- objects whose ``.data`` dict holds 'real' and
            'imag' 3-D arrays; only the real parts are used for training.

        Side effects: stores the arrays on ``self``; writes the trained
        architecture to model_architecture.json / model_architecture.yaml
        and the weights to weights.h5.

        Fixes vs. original:
        - reshape(1, dim) instead of reshape(dim, 1): the model declares
          input_dim=in_dim, so a sample must have in_dim features; the old
          shape made Keras reject the data at fit time.
        - removed the bare ``model.train_on_batch()`` call, which raised
          TypeError (no arguments supplied).
        - files are now closed deterministically via ``with``.
        """
        self.in_real = input.data['real']
        self.in_imag = input.data['imag']
        self.out_real = output.data['real']
        self.out_imag = output.data['imag']

        (i_dim_x, i_dim_y, i_dim_z) = self.in_real.shape
        in_dim = i_dim_x * i_dim_y * i_dim_z
        # One sample of in_dim features, matching Dense(input_dim=in_dim).
        input_data = self.in_real.reshape(1, in_dim)

        (o_dim_x, o_dim_y, o_dim_z) = self.out_real.shape
        out_dim = o_dim_x * o_dim_y * o_dim_z
        output_data = self.out_real.reshape(1, out_dim)

        model = Sequential()
        model.add(Dense(200, input_dim=in_dim, init='uniform'))
        model.add(Activation('relu'))

        model.add(Dense(200))
        model.add(Activation('relu'))

        model.add(Dense(out_dim))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='sgd',
                      metrics=['accuracy'])

        # Stop early once validation loss plateaus.
        early_stop = EarlyStopping(monitor='val_loss', patience=2)
        hist = model.fit(input_data, output_data, nb_epoch=50,
                         batch_size=64, validation_split=0.2,
                         shuffle=True, callbacks=[early_stop])
        print(hist.history)
        # TODO: implement real mini-batch training; the original called
        # model.train_on_batch() with no arguments, which always raised.

        # Save model architecture and weights.
        with open('model_architecture.json', 'w') as json_file:
            json_file.write(model.to_json())
        with open('model_architecture.yaml', 'w') as yaml_file:
            yaml_file.write(model.to_yaml())
        model.save_weights('weights.h5')
Example 77
Project: ANN   Author: waynezv   File: ANN_large_v2.py    MIT License 4 votes vote down vote up
def train_mlp(self, input, output):
        """Train a 3-layer MLP on the real parts of the given fields.

        input, output -- objects whose ``.data`` dict holds 'real' and
            'imag' 3-D arrays; only the real parts are used for training.

        Side effects: stores the arrays on ``self``; writes the trained
        architecture to model_architecture.json / model_architecture.yaml
        and the weights to weights.h5.

        Fixes vs. original:
        - reshape(1, dim) instead of reshape(dim, 1): the model declares
          input_dim=in_dim, so a sample must have in_dim features; the old
          shape made Keras reject the data at fit time.
        - removed the bare ``model.train_on_batch()`` call, which raised
          TypeError (no arguments supplied).
        - files are now closed deterministically via ``with``.
        """
        self.in_real = input.data['real']
        self.in_imag = input.data['imag']
        self.out_real = output.data['real']
        self.out_imag = output.data['imag']

        (i_dim_x, i_dim_y, i_dim_z) = self.in_real.shape
        in_dim = i_dim_x * i_dim_y * i_dim_z
        # One sample of in_dim features, matching Dense(input_dim=in_dim).
        input_data = self.in_real.reshape(1, in_dim)

        (o_dim_x, o_dim_y, o_dim_z) = self.out_real.shape
        out_dim = o_dim_x * o_dim_y * o_dim_z
        output_data = self.out_real.reshape(1, out_dim)

        model = Sequential()
        model.add(Dense(200, input_dim=in_dim, init='uniform'))
        model.add(Activation('relu'))

        model.add(Dense(200))
        model.add(Activation('relu'))

        model.add(Dense(out_dim))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='sgd',
                      metrics=['accuracy'])

        # Stop early once validation loss plateaus.
        early_stop = EarlyStopping(monitor='val_loss', patience=2)
        hist = model.fit(input_data, output_data, nb_epoch=50,
                         batch_size=64, validation_split=0.2,
                         shuffle=True, callbacks=[early_stop])
        print(hist.history)
        # TODO: implement real mini-batch training; the original called
        # model.train_on_batch() with no arguments, which always raised.

        # Save model architecture and weights.
        with open('model_architecture.json', 'w') as json_file:
            json_file.write(model.to_json())
        with open('model_architecture.yaml', 'w') as yaml_file:
            yaml_file.write(model.to_yaml())
        model.save_weights('weights.h5')
Example 78
Project: phoneticSimilarity   Author: ronggong   File: models.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def jan_original(filter_density, dropout, input_shape, batchNorm=False, dense_activation='relu', channel=1):
    """Build and compile a small 2-conv CNN with a sigmoid output.

    filter_density   -- multiplier on the base filter counts (10 and 20).
    dropout          -- dropout rate; falsy disables both dropout layers.
    input_shape      -- (rows, cols) when channel == 1, else a full shape.
    batchNorm        -- if True, prepend a BatchNormalization layer.
    dense_activation -- activation of the 256-unit dense layer.
    channel          -- 1 selects channels-first with an implicit channel
                        axis; anything else uses input_shape as-is,
                        channels-last.

    Returns the compiled model (binary cross-entropy, Adam).
    """
    if channel == 1:
        # Prepend the channel axis for single-channel, channels-first input.
        reshape_dim = (1, input_shape[0], input_shape[1])
        channel_order = 'channels_first'
    else:
        reshape_dim = input_shape
        channel_order = 'channels_last'

    net = Sequential()

    if batchNorm:
        net.add(BatchNormalization(axis=1, input_shape=reshape_dim))

    # Two conv+pool stages; pooling only along the frequency axis.
    net.add(Conv2D(int(10 * filter_density), (3, 7), padding="valid",
                   input_shape=reshape_dim,
                   data_format=channel_order, activation='relu'))
    net.add(MaxPooling2D(pool_size=(3, 1), padding='valid', data_format=channel_order))

    net.add(Conv2D(int(20 * filter_density), (3, 3), padding="valid",
                   data_format=channel_order, activation='relu'))
    net.add(MaxPooling2D(pool_size=(3, 1), padding='valid', data_format=channel_order))

    if dropout:
        net.add(Dropout(dropout))  # test Schluter dataset, comment in jingju dataset

    net.add(Flatten())
    net.add(Dense(units=256, activation=dense_activation))

    if dropout:
        net.add(Dropout(dropout))

    net.add(Dense(1, activation='sigmoid'))

    net.compile(loss='binary_crossentropy',
                optimizer=Adam(),
                metrics=['accuracy'])

    print(net.summary())

    return net
Example 79
Project: ndparse   Author: neurodata   File: nddl.py    Apache License 2.0 4 votes vote down vote up
def ciresan_n3(n=65, nOutput=2):
    """An approximation of the N3 network from [1].
    Note that we also made a few small modifications along the way
    (from Theano to caffe and now to tensorflow/keras).

    As of this writing, no serious attempt has been made to optimize
    hyperparameters or structure of this network.

    Parameters:
       n : The tile size (diameter) to use in the sliding window.
           Tiles are assumed to be square, hence only one parameter.

    [1] Ciresan et al 'Deep neural networks segment neuronal membranes in
        electron microscopy images,' NIPS 2012.
    """

    from keras.optimizers import SGD
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, MaxPooling2D
    from keras.layers.normalization import BatchNormalization

    model = Sequential()

    # Stage 1: 48 5x5 filters over (1, n, n) channels-first tiles;
    # BatchNormalization stands in for the LRN used previously.
    model.add(Convolution2D(48, 5, 5, border_mode='valid', dim_ordering='th',
                            input_shape=(1, n, n)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(BatchNormalization())

    # Stage 2: same filter bank, default border mode.
    model.add(Convolution2D(48, 5, 5))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(BatchNormalization())

    # Stage 3: no normalization afterwards.
    model.add(Convolution2D(48, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Dense head; Keras infers the flattened shape automatically.
    model.add(Flatten())
    model.add(Dense(200))
    model.add(Activation('relu'))

    model.add(Dense(nOutput))  # use 2 for binary classification
    model.add(Activation('softmax'))

    return model


#-------------------------------------------------------------------------------
#  Code for training a deep learning network
#------------------------------------------------------------------------------- 
Example 80
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 4 votes vote down vote up
def cnn_3d(input_shape):
    """Build and compile a 3-D CNN for volumetric (e.g. hyperspectral) input.

    input_shape -- full Keras input shape for the first Conv3D layer.

    NOTE(review): uses module-level `REG_lambda` and `nb_classes` defined
    elsewhere in this file -- confirm before reuse.
    """
    model = Sequential()

    # Opening conv collapses the deep (spectral) axis with a long kernel
    # and large stride along that axis.
    model.add(Conv3D(16, kernel_size=(3, 3, 20), strides=(1, 1, 10),
                     padding='valid', kernel_regularizer=l2(REG_lambda),
                     input_shape=input_shape))
    model.add(Activation(activation='relu'))
    model.add(Conv3D(16, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     padding='same', kernel_regularizer=l2(REG_lambda)))
    model.add(Activation(activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 3)))

    # Two further stages: (filters, kernel, pool size) each containing a
    # pair of same-padded convs.
    for n_filters, kernel, pool in ((32, (3, 3, 3), (2, 2, 3)),
                                    (64, (2, 2, 2), (2, 2, 2))):
        for _ in range(2):
            model.add(Conv3D(n_filters, kernel_size=kernel, strides=(1, 1, 1),
                             padding='same', kernel_regularizer=l2(REG_lambda)))
            model.add(Activation(activation='relu'))
        model.add(MaxPooling3D(pool_size=pool))

    # Dense classifier head.
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation(activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                                 epsilon=1e-08, decay=0.0),
                  metrics=['accuracy'])
    return model


# 2D-CNN model