Python tensorflow.python.keras.models.Sequential() Examples

The following are 11 code examples of tensorflow.python.keras.models.Sequential(), collected from open-source projects. Each example notes its original project, source file, and license. You may also want to check out the other available functions and classes of the module tensorflow.python.keras.models.
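For orientation, a minimal stand-alone sketch of the API these examples build on (the layer sizes here are arbitrary):

from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense

# Layers are stacked in the order they are added.
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(10,)))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])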
Example #1
Source File: model.py    From cloudml-samples with Apache License 2.0
import tensorflow as tf
from tensorflow.python.keras import models
from tensorflow.python.keras.layers import Dense, Embedding, GlobalAveragePooling1D


def keras_estimator(model_dir, config, learning_rate, vocab_size):
  """Creates a Keras Sequential model and converts it to an Estimator.

  Args:
    model_dir: (str) file path where training files will be written.
    config: (tf.estimator.RunConfig) Configuration options to save model.
    learning_rate: (float) Learning rate.
    vocab_size: (int) Size of the vocabulary in number of words.

  Returns:
      A tf.estimator.Estimator.
  """
  model = models.Sequential()
  model.add(Embedding(vocab_size, 16))
  model.add(GlobalAveragePooling1D())
  model.add(Dense(16, activation=tf.nn.relu))
  model.add(Dense(1, activation=tf.nn.sigmoid))

  # Compile model with learning parameters.
  optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
  model.compile(
      optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
  estimator = tf.keras.estimator.model_to_estimator(
      keras_model=model, model_dir=model_dir, config=config)
  return estimator 
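For context, a hypothetical invocation; the model directory and RunConfig settings below are illustrative, not from the original sample:

config = tf.estimator.RunConfig(model_dir='/tmp/imdb_model', save_summary_steps=100)
estimator = keras_estimator(model_dir='/tmp/imdb_model', config=config,
                            learning_rate=0.001, vocab_size=10000)
# estimator.train(...) and estimator.evaluate(...) then take TF1-style input_fns
# whose features are keyed by the Keras model's input layer name.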
Example #2
Source File: plot_segment_rep.py    From seglearn with BSD 3-Clause "New" or "Revised" License
from tensorflow.python.keras.layers import Conv1D, Dense, LSTM
from tensorflow.python.keras.models import Sequential


def crnn_model(width=100, n_vars=6, n_classes=7, conv_kernel_size=5,
               conv_filters=3, lstm_units=3):
    input_shape = (width, n_vars)
    model = Sequential()
    model.add(Conv1D(filters=conv_filters, kernel_size=conv_kernel_size,
                     padding='valid', activation='relu', input_shape=input_shape))
    model.add(Conv1D(filters=conv_filters, kernel_size=conv_kernel_size,
                     padding='valid', activation='relu'))
    model.add(LSTM(units=lstm_units, dropout=0.1, recurrent_dropout=0.1))
    model.add(Dense(n_classes, activation="softmax"))

    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])

    return model


# load the data 
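The gallery script is truncated at the loading step; a minimal sketch, assuming seglearn's bundled watch dataset (which matches the function defaults of six variables and seven classes):

from seglearn.datasets import load_watch

data = load_watch()          # 6-axis inertial time series, 7 activity classes
X, y = data['X'], data['y']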
Example #3
Source File: plot_model_selection2.py    From seglearn with BSD 3-Clause "New" or "Revised" License
def crnn_model(width=100, n_vars=6, n_classes=7, conv_kernel_size=5,
               conv_filters=2, lstm_units=2):
    # create a CRNN model in Keras with one CNN layer and one RNN layer
    input_shape = (width, n_vars)
    model = Sequential()
    model.add(Conv1D(filters=conv_filters, kernel_size=conv_kernel_size,
                     padding='valid', activation='relu', input_shape=input_shape))
    model.add(LSTM(units=lstm_units, dropout=0.1, recurrent_dropout=0.1))
    model.add(Dense(n_classes, activation="softmax"))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    return model


# load the data 
Example #4
Source File: plot_nn_training_curves.py    From seglearn with BSD 3-Clause "New" or "Revised" License
def crnn_model(width=100, n_vars=6, n_classes=7, conv_kernel_size=5,
               conv_filters=3, lstm_units=3):
    input_shape = (width, n_vars)
    model = Sequential()
    model.add(Conv1D(filters=conv_filters, kernel_size=conv_kernel_size,
                     padding='valid', activation='relu', input_shape=input_shape))
    model.add(LSTM(units=lstm_units, dropout=0.1, recurrent_dropout=0.1))
    model.add(Dense(n_classes, activation="softmax"))

    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])

    return model


##############################################
# Setup
##############################################

# load the data 
Example #5
Source File: prepare_model.py    From camera-trap-classifier with MIT License
import logging

from tensorflow.python.keras.models import Sequential


def set_last_layer_to_random(model_trained, model_random):
    """ Replace the last layer of model_trained with the corresponding
        randomly initialized layer from model_random """

    logging.info("Replacing layers of model with random layers")

    layer_names = [x.name for x in model_trained.layers]
    layer = layer_names[-1]

    # find layers which have to be kept unchanged
    id_to_set_random = layer_names.index(layer)

    # combine old, trained layers with new random layers
    comb_layers = model_trained.layers[0:id_to_set_random]
    new_layers = model_random.layers[id_to_set_random:]
    comb_layers.extend(new_layers)

    # define new model
    new_model = Sequential(comb_layers)

    # log the layers of the new model
    for i, layer in enumerate(new_model.layers):
        logging.info("New model - layer %s: %s" % (i, layer.name))

    return new_model 
Example #6
Source File: model.py    From cloudml-samples with Apache License 2.0
def keras_estimator(model_dir, config, params):
	"""Creates a Keras Sequential model with layers.

	Mean Squared Error (MSE) is a common loss function used for regression.
	A common regression metric is Mean Absolute Error (MAE).

	Args:
		model_dir: (str) file path where training files will be written.
		config: (tf.estimator.RunConfig) Configuration options to save model.
		params: (dict) Hyperparameters, including 'num_features' and 'learning_rate'.

	Returns:
		A tf.estimator.Estimator.
	"""
	model = models.Sequential()
	model.add(
		Dense(64, activation=tf.nn.relu, input_shape=(params['num_features'],)))
	model.add(Dense(64, activation=tf.nn.relu))
	model.add(Dense(1))

	# Compile model with learning parameters.
	optimizer = tf.train.RMSPropOptimizer(learning_rate=params['learning_rate'])
	model.compile(optimizer=optimizer, loss='mse', metrics=['mae'])

	return tf.keras.estimator.model_to_estimator(
		keras_model=model, model_dir=model_dir, config=config) 
Example #7
Source File: model.py    From cloudml-samples with Apache License 2.0
def keras_estimator(model_dir, config, learning_rate):
  """Creates a Keras Sequential model with layers.

  Args:
    model_dir: (str) file path where training files will be written.
    config: (tf.estimator.RunConfig) Configuration options to save model.
    learning_rate: (float) Learning rate.

  Returns:
    A tf.estimator.Estimator.
  """
  model = models.Sequential()
  model.add(Flatten(input_shape=(28, 28)))
  model.add(Dense(128, activation=tf.nn.relu))
  model.add(Dense(10, activation=tf.nn.softmax))

  # Compile model with learning parameters.
  optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
  model.compile(
      optimizer=optimizer,
      loss='sparse_categorical_crossentropy',
      metrics=['accuracy'])

  estimator = tf.keras.estimator.model_to_estimator(
      keras_model=model, model_dir=model_dir, config=config)
  return estimator 
Example #8
Source File: prepare_model.py    From camera-trap-classifier with MIT License
def set_specific_layers_to_random(model_trained, model_random, layer):
    """ Set all layers with and after layer_name to random """

    logging.info("Replacing layers of model with random layers")

    layer_names = [x.name for x in model_trained.layers]

    # check if target layer is in model
    if layer not in layer_names:
        logging.error("Layer %s not in model.layers" % layer)
        logging.error("Available Layers %s" % layer_names)
        raise IOError("Layer %s not in model.layers" % layer)

    # find layers which have to be kept unchanged
    id_to_set_random = layer_names.index(layer)

    # combine old, trained layers with new random layers
    comb_layers = model_trained.layers[0:id_to_set_random]
    new_layers = model_random.layers[id_to_set_random:]
    comb_layers.extend(new_layers)

    # define new model
    new_model = Sequential(comb_layers)

    # log the layers of the new model (avoid shadowing the `layer` argument)
    for i, model_layer in enumerate(new_model.layers):
        logging.debug("New model - layer %s: %s" % (i, model_layer.name))

    return new_model 
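A hypothetical usage, assuming two structurally identical Sequential models; the checkpoint path, builder function, and layer name below are illustrative, not from the project:

trained = tf.keras.models.load_model('trained.h5')  # hypothetical trained checkpoint
fresh = build_architecture()                        # hypothetical builder, random weights
new_model = set_specific_layers_to_random(trained, fresh, layer='dense_1')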
Example #9
Source File: keras.py    From estimator with Apache License 2.0
import tensorflow as tf
from tensorflow.python.keras import models


def _assert_valid_model(model, custom_objects=None):
  is_subclass = (not model._is_graph_network and
                 not isinstance(model, models.Sequential))
  if is_subclass:
    try:
      custom_objects = custom_objects or {}
      with tf.keras.utils.CustomObjectScope(custom_objects):
        model.__class__.from_config(model.get_config())
    except NotImplementedError:
      raise ValueError(
          'Subclassed `Model`s passed to `model_to_estimator` must '
          'implement `Model.get_config` and `Model.from_config`.') 
Example #10
Source File: tsne_grid.py    From tsne-grid with MIT License
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.layers import Flatten
from tensorflow.python.keras.models import Model, Sequential


def build_model():
    base_model = VGG16(weights='imagenet')
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    return Model(inputs=base_model.input, outputs=top_model(base_model.output)) 
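A hypothetical usage for feature extraction; the random array below stands in for real images (which would normally be run through VGG16's preprocess_input):

import numpy as np

model = build_model()
# VGG16 expects 224x224 RGB inputs; two random images as stand-ins
images = np.random.rand(2, 224, 224, 3).astype('float32')
features = model.predict(images)  # shape (2, 1000): flattened VGG16 outputs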
Example #11
Source File: conv_network.py    From mnist_digits_classification with MIT License
def model(train_x, train_y, test_x, test_y, epoch):
    '''
    :param train_x: train features
    :param train_y: train labels (one-hot encoded)
    :param test_x: test features
    :param test_y: test labels (one-hot encoded)
    :param epoch: no. of epochs
    :return: the trained model and a dict of train/test accuracies
    '''
    conv_model = Sequential()
    # first layer with input shape (img_rows, img_cols, 1) and 12 filters
    conv_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu',
                          input_shape=(img_rows, img_cols, 1)))
    # second layer with 12 filters
    conv_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu'))
    # third layer with 12 filters
    conv_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu'))
    # flatten layer
    conv_model.add(Flatten())
    # adding a Dense layer
    conv_model.add(Dense(100, activation='relu'))
    # adding the final Dense layer with softmax
    conv_model.add(Dense(num_classes, activation='softmax'))

    # compile the model
    conv_model.compile(optimizer=keras.optimizers.Adadelta(),
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
    print("\n Training the Convolution Neural Network on MNIST data\n")
    # fit the model
    conv_model.fit(train_x, train_y, batch_size=128, epochs=epoch,
                   validation_split=0.1, verbose=2)
    predicted_train_y = conv_model.predict(train_x)
    train_accuracy = (sum(np.argmax(predicted_train_y, axis=1)
                          == np.argmax(train_y, axis=1))/(float(len(train_y))))
    print('Train accuracy : ', train_accuracy)
    predicted_test_y = conv_model.predict(test_x)
    test_accuracy = (sum(np.argmax(predicted_test_y, axis=1)
                         == np.argmax(test_y, axis=1))/(float(len(test_y))))
    print('Test accuracy : ', test_accuracy)
    CNN_accuracy = {'train_accuracy': train_accuracy,
                    'test_accuracy': test_accuracy, 'epoch': epoch}
    return conv_model, CNN_accuracy
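The snippet relies on module-level names (img_rows, img_cols, num_classes, keras, np) defined elsewhere in conv_network.py; a minimal setup sketch, assuming the standard Keras MNIST loader:

import numpy as np
import keras
from keras.datasets import mnist
from keras.utils import to_categorical

img_rows, img_cols, num_classes = 28, 28, 10

# load, reshape to (n, 28, 28, 1), scale to [0, 1], and one-hot encode labels
(train_x, train_y), (test_x, test_y) = mnist.load_data()
train_x = train_x.reshape(-1, img_rows, img_cols, 1).astype('float32') / 255.0
test_x = test_x.reshape(-1, img_rows, img_cols, 1).astype('float32') / 255.0
train_y = to_categorical(train_y, num_classes)
test_y = to_categorical(test_y, num_classes)

conv_model, cnn_accuracy = model(train_x, train_y, test_x, test_y, epoch=5)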