Python keras.Sequential() Examples

The following are 25 code examples showing how to use keras.Sequential(). These examples are extracted from open source projects; the originating project, author, source file, and license are listed above each example.
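
Before diving into the project examples, here is a minimal, self-contained sketch of the typical Sequential workflow (build, compile, fit, predict) that the examples below follow. It assumes the standalone keras package with a TensorFlow backend; the layer sizes, input shape, and randomly generated data are illustrative assumptions rather than code taken from any of the listed projects.

import numpy as np
from keras import Sequential
from keras.layers import Dense

# Build: stack layers in order; only the first layer needs an input shape.
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(20,)))
model.add(Dense(3, activation='softmax'))

# Compile: pick an optimizer, a loss, and the metrics to track.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Fit on small random data (100 samples, 20 features, 3 one-hot classes).
X = np.random.rand(100, 20).astype('float32')
y = np.eye(3, dtype='float32')[np.random.randint(0, 3, size=100)]
model.fit(X, y, epochs=2, batch_size=16, verbose=0)

# Predict class probabilities for new samples.
probs = model.predict(np.random.rand(5, 20).astype('float32'))
print(probs.shape)  # (5, 3)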



Example 1
Project: speech-emotion-recognition   Author: harry-7   File: dnn.py    License: MIT License
def __init__(self, input_shape, num_classes, **params):
        """
        Constructor to initialize the deep neural network model. Takes the input
        shape and number of classes and other parameters required for the
        abstract class `Model` as parameters.

        Args:
            input_shape (tuple): shape of the input
            num_classes (int): number of different classes ( labels ) in the data.
            **params: Additional parameters required by the underlying abstract
                class `Model`.

        """
        super(DNN, self).__init__(**params)
        self.input_shape = input_shape
        self.model = Sequential()
        self.make_default_model()
        self.model.add(Dense(num_classes, activation='softmax'))
        self.model.compile(loss='binary_crossentropy', optimizer='adam',
                           metrics=['accuracy'])
        print(self.model.summary(), file=sys.stderr)
        self.save_path = self.save_path or self.name + '_best_model.h5' 
Example 2
Project: CCKS2019-Chinese-Clinical-NER   Author: fordai   File: model.py    License: MIT License
def __build_model(self):
        model = Sequential()

        embedding_layer = Embedding(input_dim=len(self.vocab) + 1,
                                    output_dim=self.embedding_dim,
                                    weights=[self.embedding_mat],
                                    trainable=False)
        model.add(embedding_layer)

        bilstm_layer = Bidirectional(LSTM(units=256, return_sequences=True))
        model.add(bilstm_layer)

        model.add(TimeDistributed(Dense(256, activation="relu")))

        crf_layer = CRF(units=len(self.tags), sparse_target=True)
        model.add(crf_layer)

        model.compile(optimizer="adam", loss=crf_loss, metrics=[crf_viterbi_accuracy])
        model.summary()

        return model 
Example 3
Project: MatchZoo   Author: NTMC-Community   File: diin.py    License: Apache License 2.0
def _make_char_embedding_layer(self) -> keras.layers.Layer:
        """
        Apply embedding, conv and maxpooling operation over time dimension
        for each token to obtain a vector.

        :return: Wrapper Keras 'Layer' as character embedding feature
            extractor.
        """

        return keras.layers.TimeDistributed(keras.Sequential([
            keras.layers.Embedding(
                input_dim=self._params['char_embedding_input_dim'],
                output_dim=self._params['char_embedding_output_dim'],
                input_length=self._params['input_shapes'][2][-1]),
            keras.layers.Conv1D(
                filters=self._params['char_conv_filters'],
                kernel_size=self._params['char_conv_kernel_size']),
            keras.layers.GlobalMaxPooling1D()])) 
Example 4
Project: Voice-based-gender-recognition   Author: SuperKogito   File: GenderIdentifier.py    License: MIT License
def __init__(self, females_files_path, males_files_path, females_model_path, males_model_path):
        self.females_training_path = females_files_path
        self.males_training_path   = males_files_path
        self.error                 = 0
        self.total_sample          = 0
        self.features_extractor    = FeaturesExtractor()
        # load models
        self.females_gmm = pickle.load(open(females_model_path, 'rb'))
        self.males_gmm   = pickle.load(open(males_model_path, 'rb'))
        
        # svm
        self.X_train = np.vstack((self.females_gmm, self.males_gmm))
        self.y_train = np.hstack(( 0 * np.ones(self.females_gmm.shape[0]), np.ones(self.males_gmm.shape[0])))
        print(self.X_train.shape, self.y_train.shape)
        # define the keras model
        self.model = keras.Sequential()
        self.model.add(keras.layers.Dense(39, input_dim=39, activation='relu'))
        self.model.add(keras.layers.Dense(13, activation='relu'))
        self.model.add(keras.layers.Dense( 2, activation='sigmoid'))
        
        self.model.compile(optimizer = 'adam',
                           loss      = 'binary_crossentropy',
                           metrics   = ['accuracy'])
        self.model.fit(self.X_train, keras.utils.to_categorical(self.y_train), epochs = 5) 
Example 5
Project: Generative-Adversarial-Networks-Projects   Author: PacktPublishing   File: run.py    License: MIT License
def build_generator():
    gen_model = Sequential()

    gen_model.add(Dense(units=2048, input_dim=100))
    gen_model.add(ReLU())

    gen_model.add(Dense(256 * 8 * 8))
    gen_model.add(BatchNormalization())
    gen_model.add(ReLU())
    gen_model.add(Reshape((8, 8, 256), input_shape=(256 * 8 * 8,)))
    gen_model.add(UpSampling2D(size=(2, 2)))

    gen_model.add(Conv2D(128, (5, 5), padding='same'))
    gen_model.add(ReLU())

    gen_model.add(UpSampling2D(size=(2, 2)))

    gen_model.add(Conv2D(64, (5, 5), padding='same'))
    gen_model.add(ReLU())

    gen_model.add(UpSampling2D(size=(2, 2)))

    gen_model.add(Conv2D(3, (5, 5), padding='same'))
    gen_model.add(Activation('tanh'))
    return gen_model 
Example 6
Project: DeepConcolic   Author: TrustAI   File: utils.py    License: BSD 3-Clause "New" or "Revised" License
def eval_batch(o, ims, allow_input_layer = False):
  layer_functions, has_input_layer = (
    get_layer_functions (o) if isinstance (o, (keras.Sequential, keras.Model))
    # TODO: Check it's sequential? --------------------------------------^
    else o)
  having_input_layer = allow_input_layer and has_input_layer
  activations = []
  for l, func in enumerate(layer_functions):
    if not having_input_layer:
      if l==0:
        activations.append(func([ims])[0])
      else:
        activations.append(func([activations[l-1]])[0])
    else:
      if l==0:
        activations.append([]) #activations.append(func([ims])[0])
      elif l==1:
        activations.append(func([ims])[0])
      else:
        activations.append(func([activations[l-1]])[0])
  return activations 
Example 7
Project: neuron_poker   Author: dickreuter   File: agent_custom_q1.py    License: MIT License
def initiate_agent(self, nb_actions):
        """initiate a deep Q agent"""

        self.model = Sequential()
        self.model.add(Dense(512, activation='relu', input_shape=env.observation_space))  # pylint: disable=no-member
        self.model.add(Dropout(0.2))
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dropout(0.2))
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dropout(0.2))
        self.model.add(Dense(nb_actions, activation='linear'))

        # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
        # even the metrics!
        memory = SequentialMemory(limit=memory_limit, window_length=window_length)  # pylint: disable=unused-variable
        policy = TrumpPolicy()  # pylint: disable=unused-variable 
Example 8
Project: Python-Application   Author: xflywind   File: weixin_audio.py    License: GNU General Public License v3.0
def main():
    x_train, x_test, y_train, y_test = get_train_test()
    x_train = x_train.reshape(-1, 220)
    x_test = x_test.reshape(-1, 220)
    y_train_hot = to_categorical(y_train)
    y_test_hot = to_categorical(y_test)
    model = Sequential()
    model.add(Dense(64, activation='relu', input_shape=(220,)))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(6, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.RMSprop(),
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train_hot, batch_size=100, epochs=20, verbose=1,
                        validation_data=(x_test, y_test_hot))
    plot_history(history) 
Example 9
Project: seqeval   Author: chakki-works   File: test_metrics.py    License: MIT License
def test_keras_callback(self):
        expected_score = f1_score(self.y_true, self.y_pred)
        tokenizer = Tokenizer(lower=False)
        tokenizer.fit_on_texts(self.y_true)
        maxlen = max((len(row) for row in self.y_true))

        def prepare(y, padding):
            indexes = tokenizer.texts_to_sequences(y)
            padded = pad_sequences(indexes, maxlen=maxlen, padding=padding, truncating=padding)
            categorical = to_categorical(padded)
            return categorical

        for padding in ('pre', 'post'):
            callback = F1Metrics(id2label=tokenizer.index_word)
            y_true_cat = prepare(self.y_true, padding)
            y_pred_cat = prepare(self.y_pred, padding)

            input_shape = (1,)
            layer = Lambda(lambda _: constant(y_pred_cat), input_shape=input_shape)
            fake_model = Sequential(layers=[layer])
            callback.set_model(fake_model)

            X = numpy.zeros((y_true_cat.shape[0], 1))

            # Verify that the callback translates sequences correctly by itself
            y_true_cb, y_pred_cb = callback.predict(X, y_true_cat)
            self.assertEqual(y_pred_cb, self.y_pred)
            self.assertEqual(y_true_cb, self.y_true)

            # Verify that the callback stores the correct number in logs
            fake_model.compile(optimizer='adam', loss='categorical_crossentropy')
            history = fake_model.fit(x=X, batch_size=y_true_cat.shape[0], y=y_true_cat,
                                     validation_data=(X, y_true_cat),
                                     callbacks=[callback])
            actual_score = history.history['f1'][0]
            self.assertAlmostEqual(actual_score, expected_score) 
Example 10
Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: cudnn_recurrent_test.py    License: MIT License
def test_preprocess_weights_for_loading_gru_incompatible():
    """
    Loading weights between incompatible layers should fail fast with an exception.
    """
    def gru(cudnn=False, **kwargs):
        layer_class = keras.layers.CuDNNGRU if cudnn else keras.layers.GRU
        return layer_class(2, input_shape=[3, 5], **kwargs)

    def initialize_weights(layer):
        # A model is needed to initialize weights.
        _ = keras.models.Sequential([layer])
        return layer

    def assert_not_compatible(src, dest, message):
        with pytest.raises(ValueError) as ex:
            keras.engine.topology.preprocess_weights_for_loading(
                dest, initialize_weights(src).get_weights())
        assert message in ex.value.message

    assert_not_compatible(gru(), gru(cudnn=True),
                          'GRU(reset_after=False) is not compatible with CuDNNGRU')
    assert_not_compatible(gru(cudnn=True), gru(),
                          'CuDNNGRU is not compatible with GRU(reset_after=False)')
    assert_not_compatible(gru(), gru(reset_after=True),
                          'GRU(reset_after=False) is not compatible with GRU(reset_after=True)')
    assert_not_compatible(gru(reset_after=True), gru(),
                          'GRU(reset_after=True) is not compatible with GRU(reset_after=False)') 
Example 11
Project: Machine-Learning-with-Python   Author: devAmoghS   File: hparams_grid_search_keras_nn.py    License: MIT License
def create_model():
    model = Sequential()
    model.add(Dense(12, input_dim=5, kernel_initializer='uniform', activation='relu'))
    model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
    model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    return model 
Example 12
Project: Generative-Adversarial-Networks-Projects   Author: PacktPublishing   File: run.py    License: MIT License
def build_discriminator():
    dis_model = Sequential()
    dis_model.add(
        Conv2D(128, (5, 5),
               padding='same',
               input_shape=(64, 64, 3))
    )
    dis_model.add(LeakyReLU(alpha=0.2))
    dis_model.add(MaxPooling2D(pool_size=(2, 2)))

    dis_model.add(Conv2D(256, (3, 3)))
    dis_model.add(LeakyReLU(alpha=0.2))
    dis_model.add(MaxPooling2D(pool_size=(2, 2)))

    dis_model.add(Conv2D(512, (3, 3)))
    dis_model.add(LeakyReLU(alpha=0.2))
    dis_model.add(MaxPooling2D(pool_size=(2, 2)))

    dis_model.add(Flatten())
    dis_model.add(Dense(1024))
    dis_model.add(LeakyReLU(alpha=0.2))

    dis_model.add(Dense(1))
    dis_model.add(Activation('sigmoid'))

    return dis_model 
Example 13
Project: Generative-Adversarial-Networks-Projects   Author: PacktPublishing   File: run.py    License: MIT License
def build_adversarial_model(gen_model, dis_model):
    model = Sequential()
    model.add(gen_model)
    dis_model.trainable = False
    model.add(dis_model)
    return model 
Example 14
Project: AiLearning   Author: apachecn   File: text_PoetryModel.py    License: GNU General Public License v3.0
def build_model(self):
        '''Build the model.'''
        model = keras.Sequential()
        model.add(Embedding(len(self.num2word) + 2, 300, input_length=self.config.max_len))
        model.add(Bidirectional(GRU(128, return_sequences=True)))
        model.add(Dropout(0.6))
        model.add(Flatten())
        model.add(Dense(len(self.words), activation='softmax'))
        # Set up the optimizer
        optimizer = Adam(lr=self.config.learning_rate)
        model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
        self.model = model 
Example 15
Project: keras-video-classifier   Author: chen0040   File: convolutional.py    License: MIT License
def create_model(self, input_shape, nb_classes):
        model = Sequential()
        model.add(Conv2D(filters=32, input_shape=input_shape, padding='same', kernel_size=(3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(filters=32, padding='same', kernel_size=(3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Dropout(rate=0.25))

        model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Conv2D(filters=64, padding='same', kernel_size=(3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Dropout(rate=0.25))

        model.add(Flatten())
        model.add(Dense(units=512))
        model.add(Activation('relu'))
        model.add(Dropout(rate=0.5))
        model.add(Dense(units=nb_classes))
        model.add(Activation('softmax'))

        model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

        return model 
Example 16
Project: optuna   Author: optuna   File: test_keras.py    License: MIT License
def test_keras_pruning_callback(interval: int, epochs: int) -> None:
    def objective(trial: optuna.trial.Trial) -> float:

        model = Sequential()
        model.add(Dense(1, activation="sigmoid", input_dim=20))
        model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
        model.fit(
            np.zeros((16, 20), np.float32),
            np.zeros((16,), np.int32),
            batch_size=1,
            epochs=epochs,
            callbacks=[KerasPruningCallback(trial, "accuracy", interval=interval)],
            verbose=0,
        )

        return 1.0

    study = optuna.create_study(pruner=DeterministicPruner(True))
    study.optimize(objective, n_trials=1)
    if interval <= epochs:
        assert study.trials[0].state == optuna.trial.TrialState.PRUNED
    else:
        assert study.trials[0].state == optuna.trial.TrialState.COMPLETE

    study = optuna.create_study(pruner=DeterministicPruner(False))
    study.optimize(objective, n_trials=1)
    assert study.trials[0].state == optuna.trial.TrialState.COMPLETE
    assert study.trials[0].value == 1.0 
Example 17
Project: neuron_poker   Author: dickreuter   File: agent_keras_rl_dqn.py    License: MIT License
def initiate_agent(self, env):
        """initiate a deep Q agent"""
        tf.compat.v1.disable_eager_execution()
        self.env = env

        nb_actions = self.env.action_space.n

        self.model = Sequential()
        self.model.add(Dense(512, activation='relu', input_shape=env.observation_space))
        self.model.add(Dropout(0.2))
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dropout(0.2))
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dropout(0.2))
        self.model.add(Dense(nb_actions, activation='linear'))

        # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
        # even the metrics!
        memory = SequentialMemory(limit=memory_limit, window_length=window_length)
        policy = TrumpPolicy()

        nb_actions = env.action_space.n

        self.dqn = DQNAgent(model=self.model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=nb_steps_warmup,
                            target_model_update=1e-2, policy=policy,
                            processor=CustomProcessor(),
                            batch_size=batch_size, train_interval=train_interval, enable_double_dqn=enable_double_dqn)
        self.dqn.compile(tf.keras.optimizers.Adam(lr=1e-3), metrics=['mae']) 
Example 18
Project: Orbit   Author: shivaverma   File: agent.py    License: MIT License
def build_model(self):

        model = Sequential()
        model.add(Dense(64, input_shape=(self.state_space,), activation='relu'))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(self.action_space, activation='linear'))
        model.compile(loss='mse', optimizer=adam(lr=self.learning_rate))
        return model 
Example 19
Project: cs-ranking   Author: kiudee   File: test_callbacks.py    License: Apache License 2.0
def create_model():
    lr = 0.015
    model = Sequential()
    model.add(Dense(10, activation="relu"))
    model.add(Dense(5, activation="relu"))
    model.add(Dense(1, activation="sigmoid"))
    model.compile(optimizer=SGD(lr=lr), loss="binary_crossentropy")
    return model, lr 
Example 20
Project: keras-malicious-url-detector   Author: chen0040   File: lstm.py    License: MIT License
def make_lstm_model(num_input_tokens):
    model = Sequential()
    model.add(LSTM(NB_LSTM_CELLS, input_shape=(None, num_input_tokens), return_sequences=False, return_state=False, dropout=0.2))
    model.add(Dense(NB_DENSE_CELLS))
    model.add(Dropout(0.3))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model 
Example 21
Project: keras-malicious-url-detector   Author: chen0040   File: cnn_lstm.py    License: MIT License
def make_cnn_lstm_model(num_input_tokens, max_len):
    model = Sequential()
    model.add(Embedding(input_dim=num_input_tokens, input_length=max_len, output_dim=EMBEDDING_SIZE))
    model.add(SpatialDropout1D(0.2))
    model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=4))
    model.add(LSTM(NB_LSTM_CELLS))
    model.add(Dense(units=2, activation='softmax'))

    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model 
Example 22
Project: keras-malicious-url-detector   Author: chen0040   File: lstm_embed.py    License: MIT License
def make_lstm_embed_model(num_input_tokens, max_len):
    model = Sequential()
    model.add(Embedding(input_dim=num_input_tokens, input_length=max_len, output_dim=EMBEDDING_SIZE))
    model.add(LSTM(NB_LSTM_CELLS, return_sequences=False, return_state=False, dropout=0.2))
    model.add(Dense(2, activation='softmax'))

    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model 
Example 23
Project: livelossplot   Author: stared   File: external_test_keras.py    License: MIT License
def test_keras():
    callback = PlotLossesKeras(outputs=(CheckOutput(), ))
    model = Sequential()
    model.add(LSTM(5, input_shape=(1, NUM_OF_GENERATED)))
    model.add(Dense(NUM_OF_GENERATED, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    X_train, Y_train = generate_data()
    X_test, Y_test = generate_data()
    model.fit(X_train, Y_train, epochs=2, validation_data=(X_test, Y_test), callbacks=[callback], verbose=False) 
Example 24
Project: cactus-maml   Author: kylehkhsu   File: baselines.py    License: MIT License
def embedding_mlp(num_classes=FLAGS.way, num_shots=FLAGS.shot, num_tasks=FLAGS.num_tasks,
                  num_encoding_dims=FLAGS.num_encoding_dims, test_set=FLAGS.test_set, dataset=FLAGS.dataset,
                  units=FLAGS.units, dropout=FLAGS.dropout):
    import keras
    from keras.layers import Dense, Dropout
    from keras.losses import categorical_crossentropy
    from keras.callbacks import EarlyStopping
    from keras import backend as K

    if dataset != 'celeba':
        _, _, _, X_test, Y_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
        task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
        partition = task_generator.get_partition_from_labels(Y_test)
        partitions = [partition]
    else:
        _, _, _, X_test, attributes_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
        task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
        partitions = task_generator.get_celeba_task_pool(attributes_test)
    tasks = task_generator.get_tasks(num_tasks=num_tasks, partitions=partitions)

    train_accuracies, test_accuracies = [], []

    start = time.time()
    for i_task, task in enumerate(tqdm(tasks)):
        if (i_task + 1) % (num_tasks // 10) == 0:
            tqdm.write('test {}, accuracy {:.5}'.format(i_task + 1, np.mean(test_accuracies)))
        ind_train_few, Y_train_few, ind_test_few, Y_test_few = task
        Z_train_few, Z_test_few = Z_test[ind_train_few], Z_test[ind_test_few]
        Y_train_few, Y_test_few = keras.utils.to_categorical(Y_train_few, num_classes=num_classes), keras.utils.to_categorical(Y_test_few, num_classes=num_classes)

        model = keras.Sequential()
        model.add(Dense(units=units, activation='relu', input_dim=Z_train_few.shape[1]))
        model.add(Dropout(rate=dropout))
        model.add(Dense(units=num_classes, activation='softmax'))
        model.compile(loss=categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
        early_stopping = EarlyStopping(monitor='val_loss', patience=2)
        model.fit(Z_train_few, Y_train_few, batch_size=Z_train_few.shape[0], epochs=500, verbose=0, validation_data=(Z_test_few, Y_test_few), callbacks=[early_stopping])
        train_score = model.evaluate(Z_train_few, Y_train_few, verbose=0)
        train_accuracies.append(train_score[1])
        test_score = model.evaluate(Z_test_few, Y_test_few, verbose=0)
        test_accuracies.append(test_score[1])
        K.clear_session()

    print('units={}, dropout={}'.format(units, dropout))
    print('{}-way {}-shot embedding mlp: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(test_accuracies), 1.96*np.std(test_accuracies)/np.sqrt(num_tasks), num_tasks))
    print('Mean training accuracy: {:.5}; standard deviation: {:.5}'.format(np.mean(train_accuracies), np.std(train_accuracies)))
    print('{} few-shot classification tasks: {:.5} seconds.'.format(num_tasks, time.time() - start)) 
Example 25
Project: MMFinder   Author: nladuo   File: feature_extraction.py    License: MIT License
def build_model():
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Convolution2D(4096, (7, 7), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(4096, (1, 1), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(2622, (1, 1)))
    model.add(Flatten())
    model.add(Activation('softmax'))

    return model