Python tensorflow.keras.Sequential() Examples

The following are 30 code examples of tensorflow.keras.Sequential(), collected from open-source projects. The project, source file, and license for each are noted above the example. You may also want to check out all the other available functions and classes of the tensorflow.keras module.
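As a quick refresher before the examples: tf.keras.Sequential stacks layers in order, and a model can be built either from a list of layers or incrementally with add(). A minimal sketch (the layer sizes here are arbitrary, chosen only for illustration):

import tensorflow as tf

# Build from a list of layers...
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(16,)),
    tf.keras.layers.Dense(1),
])

# ...or incrementally; the two forms are equivalent.
model2 = tf.keras.Sequential()
model2.add(tf.keras.layers.Dense(32, activation='relu', input_shape=(16,)))
model2.add(tf.keras.layers.Dense(1))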
Example #1
Source File: model.py    From DexiNed with MIT License
def __init__(self, out_features,**kwargs):
        super(_DenseLayer, self).__init__(**kwargs)
        # w_decay and weight_init are defined at module level in the original source file
        k_reg = None if w_decay is None else l2(w_decay)
        self.layers = []
        self.layers.append(tf.keras.Sequential(
            [
                layers.ReLU(),
                layers.Conv2D(
                    filters=out_features, kernel_size=(3,3), strides=(1,1), padding='same',
                    use_bias=True, kernel_initializer=weight_init,
                    kernel_regularizer=k_reg),
                layers.BatchNormalization(),
                layers.ReLU(),
                layers.Conv2D(
                    filters=out_features, kernel_size=(3,3), strides=(1,1), padding='same',
                    use_bias=True, kernel_initializer=weight_init,
                    kernel_regularizer=k_reg),
                layers.BatchNormalization(),
            ]))  # the first ReLU may be unnecessary
Example #2
Source File: layers.py    From RLs with Apache License 2.0
def ConvLayer(conv_function=Conv2D,
              filters=[32, 64, 64],
              kernels=[[8, 8], [4, 4], [3, 3]],
              strides=[[4, 4], [2, 2], [1, 1]],
              padding='valid',
              activation='relu'):
    '''
    Params:
        conv_function: the convolution layer class to use
        filters: list of filter counts for the hidden conv layers
        kernels: list of kernel sizes for the hidden conv layers
        strides: list of strides for the hidden conv layers
        padding: padding mode
        activation: activation function
    Return:
        A Sequential model of stacked convolution layers followed by a Flatten layer.
    '''
    layers = Sequential([
        conv_function(filters=f, kernel_size=k, strides=s,
                      padding=padding, activation=activation)
        for f, k, s in zip(filters, kernels, strides)
    ])
    layers.add(Flatten())
    return layers
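For context, a hypothetical usage sketch (not part of the RLs source): with the default arguments this is the classic DQN convolution stack, so a batch of 84x84x4 frames flattens to 3136 features.

import numpy as np

conv = ConvLayer()  # defaults: three conv layers, then Flatten
features = conv(np.zeros((1, 84, 84, 4), dtype=np.float32))
print(features.shape)  # (1, 3136): 84x84 -> 20x20 -> 9x9 -> 7x7, and 7*7*64 = 3136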
Example #3
Source File: tf_example.py    From ray with Apache License 2.0
def create_keras_model():
    from tensorflow import keras
    from tensorflow.keras import layers
    model = keras.Sequential()
    # Adds a densely-connected layer with 64 units to the model:
    model.add(layers.Dense(64, activation="relu", input_shape=(32, )))
    # Add another:
    model.add(layers.Dense(64, activation="relu"))
    # Add a softmax layer with 10 output units:
    model.add(layers.Dense(10, activation="softmax"))

    model.compile(
        optimizer=keras.optimizers.RMSprop(0.01),
        loss=keras.losses.categorical_crossentropy,
        metrics=[keras.metrics.categorical_accuracy])
    return model
Example #4
Source File: macro.py    From nni with MIT License
def __init__(self, num_layers=12, filters=24, num_classes=10, dropout_rate=0.0):
        super().__init__()
        self.num_layers = num_layers

        self.stem = Sequential([
            Conv2D(filters, kernel_size=3, padding='same', use_bias=False),
            BatchNormalization()
        ])

        labels = ['layer_{}'.format(i) for i in range(num_layers)]
        self.enas_layers = []
        for i in range(num_layers):
            layer = ENASLayer(labels[i], labels[:i], filters)
            self.enas_layers.append(layer)

        pool_num = 2
        self.pool_distance = num_layers // (pool_num + 1)
        self.pool_layers = [FactorizedReduce(filters) for _ in range(pool_num)]

        self.gap = GlobalAveragePooling2D()
        self.dropout = Dropout(dropout_rate)
        self.dense = Dense(num_classes) 
Example #5
Source File: persist_load_test.py    From keract with MIT License
def test_load_persist(self):
        # define the model.
        model = Sequential()
        model.add(Dense(16, input_shape=(10,)))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))
        model.compile(optimizer='adam', loss='categorical_crossentropy')

        # fetch activations.
        x = np.ones((2, 10))
        activations = get_activations(model, x)

        # persist the activations to the disk.
        output = 'activations.json'
        persist_to_json_file(activations, output)

        # read them from the disk.
        activations2 = load_activations_from_json_file(output)

        for a1, a2 in zip(list(activations.values()), list(activations2.values())):
            np.testing.assert_almost_equal(a1, a2) 
Example #6
Source File: utils.py    From DeepConcolic with BSD 3-Clause "New" or "Revised" License
def eval_batch(o, ims, allow_input_layer = False):
  layer_functions, has_input_layer = (
    get_layer_functions (o) if isinstance (o, (keras.Sequential, keras.Model))
    # TODO: Check it's sequential? --------------------------------------^
    else o)
  having_input_layer = allow_input_layer and has_input_layer
  activations = []
  for l, func in enumerate(layer_functions):
    if not having_input_layer:
      if l==0:
        activations.append(func([ims])[0])
      else:
        activations.append(func([activations[l-1]])[0])
    else:
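      # An explicit input layer is present and allowed: store an empty
      # placeholder for it, and feed the raw images to the first real layer.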
      if l==0:
        activations.append([]) #activations.append(func([ims])[0])
      elif l==1:
        activations.append(func([ims])[0])
      else:
        activations.append(func([activations[l-1]])[0])
  return activations 
Example #7
Source File: model.py    From pipelines with Apache License 2.0
def create_keras_model(input_dim, learning_rate, window_size):
    """Creates Keras model for regression.

    Args:
      input_dim: How many features the input has
      learning_rate: Learning rate for training
      window_size: Number of time steps in each input window

    Returns:
      The compiled Keras model (still needs to be trained)
    """

    model = keras.Sequential([
        layers.LSTM(4, dropout=0.2, input_shape=(input_dim, window_size)),
        layers.Dense(1)
    ])

    model.compile(loss='mean_squared_error',
                  optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate))

    return model 
Example #8
Source File: TfFeedForward.py    From rafiki with Apache License 2.0
def _build_model(self, num_classes, image_size):
        units = self._knobs['hidden_layer_units']
        layers = self._knobs['hidden_layer_count']
        lr = self._knobs['learning_rate']
         
        model = keras.Sequential()
        model.add(keras.layers.Flatten(input_shape=(image_size, image_size, 3)))
        model.add(keras.layers.BatchNormalization())

        for _ in range(layers):
            model.add(keras.layers.Dense(units, activation=tf.nn.relu))

        model.add(keras.layers.Dense(
            num_classes, 
            activation=tf.nn.softmax
        ))
        
        model.compile(
            optimizer=keras.optimizers.Adam(lr=lr),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy']
        )
        return model 
Example #9
Source File: helloworld.py    From keras-tuner with Apache License 2.0
def build_model(hp):
    model = keras.Sequential()
    model.add(layers.Flatten(input_shape=(28, 28)))
    min_layers = 2
    max_layers = 5
    for i in range(hp.Int('num_layers', min_layers, max_layers)):
        model.add(layers.Dense(units=hp.Int('units_' + str(i),
                                            32,
                                            256,
                                            32,
                                            parent_name='num_layers',
                                            parent_values=list(range(i + 1, max_layers + 1))),
                               activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(
        optimizer=keras.optimizers.Adam(1e-4),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model 
Example #10
Source File: tuner_correctness_test.py    From keras-tuner with Apache License 2.0
def test_checkpoint_removal(tmp_dir):
    def build_model(hp):
        model = keras.Sequential([
            keras.layers.Dense(hp.Int('size', 5, 10)),
            keras.layers.Dense(1)])
        model.compile('sgd', 'mse', metrics=['accuracy'])
        return model

    tuner = kerastuner.Tuner(
        oracle=kerastuner.tuners.randomsearch.RandomSearchOracle(
            objective='val_accuracy',
            max_trials=1,
            seed=1337),
        hypermodel=build_model,
        directory=tmp_dir,
    )
    x, y = np.ones((1, 5)), np.ones((1, 1))
    tuner.search(x,
                 y,
                 validation_data=(x, y),
                 epochs=21)
    trial = list(tuner.oracle.trials.values())[0]
    trial_id = trial.trial_id
    assert tf.io.gfile.exists(tuner._get_checkpoint_fname(trial_id, 20))
    assert not tf.io.gfile.exists(tuner._get_checkpoint_fname(trial_id, 10)) 
Example #11
Source File: main.py    From tf-encrypted with Apache License 2.0
def build_model(input_shape):
    """Build a logistic regression model with tf.keras."""
    model = keras.Sequential(
        [
            layers.Dense(
                1, use_bias=False, activation="sigmoid", input_shape=[input_shape]
            ),
        ]
    )

    model.compile(
        loss="binary_crossentropy",
        optimizer=tf.train.AdamOptimizer(),
        metrics=["accuracy"],
    )

    return model 
Example #12
Source File: nn.py    From FATE with Apache License 2.0
def build_nn_model(input_shape, nn_define, loss, optimizer, metrics,
                   is_supported_layer=has_builder,
                   default_layer=None) -> KerasNNModel:
    model = Sequential()
    is_first_layer = True
    for layer_config in nn_define:
        layer = layer_config.get("layer", default_layer)
        if layer and is_supported_layer(layer):
            del layer_config["layer"]
            if is_first_layer:
                layer_config["input_shape"] = input_shape
                is_first_layer = False
            builder = get_builder(layer)
            model.add(builder(**layer_config))
        else:
            raise ValueError(f"dnn does not support layer {layer}")

    return from_keras_sequential_model(model=model,
                                       loss=loss,
                                       optimizer=optimizer,
                                       metrics=metrics) 
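For reference, a hypothetical nn_define value in the format the loop above expects: a list of dicts, each naming a layer builder plus its keyword arguments (key names inferred from this code, not from FATE documentation).

nn_define = [
    {"layer": "Dense", "units": 32, "activation": "relu"},
    {"layer": "Dense", "units": 1, "activation": "sigmoid"},
]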
Example #13
Source File: train_variational_autoencoder_tensorflow.py    From variational-autoencoder with MIT License
def generative_network(z, hidden_size):
  """Build a generative network parametrizing the likelihood of the data

  Args:
    z: Samples of latent variables
    hidden_size: Size of the hidden state of the neural net

  Returns:
    bernoulli_logits: logits for the Bernoulli likelihood of the data
  """
  generative_net = tfk.Sequential([
    tfkl.Dense(hidden_size, activation=tf.nn.relu),
    tfkl.Dense(hidden_size, activation=tf.nn.relu),
    tfkl.Dense(28 * 28, activation=None)
    ])
  bernoulli_logits = generative_net(z)
  return tf.reshape(bernoulli_logits, [-1, 28, 28, 1]) 
Example #14
Source File: layers.py    From astroNN with MIT License
def __call__(self, model):
        """
        :param model: Keras model to be accelerated
        :type model: Union[keras.Model, keras.Sequential]
        :return: Accelerated Keras model
        :rtype: Union[keras.Model, keras.Sequential]
        """
        if isinstance(model, tfk.Model) or isinstance(model, tfk.Sequential):
            self.model = model
        else:
            raise TypeError(f'FastMCInference expects tensorflow.keras Model, you gave {type(model)}')
        new_input = tfk.layers.Input(shape=(self.model.input_shape[1:]), name='input')
        mc_model = tfk.models.Model(inputs=self.model.inputs, outputs=self.model.outputs)

        mc = FastMCInferenceMeanVar()(tfk.layers.TimeDistributed(mc_model)(FastMCRepeat(self.n)(new_input)))
        new_mc_model = tfk.models.Model(inputs=new_input, outputs=mc)

        return new_mc_model 
Example #15
Source File: train_variational_autoencoder_tensorflow.py    From variational-autoencoder with MIT License
def inference_network(x, latent_dim, hidden_size):
  """Construct an inference network parametrizing a Gaussian.

  Args:
    x: A batch of MNIST digits.
    latent_dim: The latent dimensionality.
    hidden_size: The size of the neural net hidden layers.

  Returns:
    mu: Mean parameters for the variational family Normal
    sigma: Standard deviation parameters for the variational family Normal
  """
  inference_net = tfk.Sequential([
    tfkl.Flatten(),
    tfkl.Dense(hidden_size, activation=tf.nn.relu),
    tfkl.Dense(hidden_size, activation=tf.nn.relu),
    tfkl.Dense(latent_dim * 2, activation=None)
    ])
  gaussian_params = inference_net(x)
  # The mean parameter is unconstrained
  mu = gaussian_params[:, :latent_dim]
  # The standard deviation must be positive. Parametrize with a softplus
  sigma = tf.nn.softplus(gaussian_params[:, latent_dim:])
  return mu, sigma 
Example #16
Source File: Unet_family.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, filters):
        super(Attention_block, self).__init__()

        self.W_g = Sequential([
            Conv2D(filters, kernel_size=1, strides=1, padding='same'),
            BatchNormalization()
        ])

        self.W_x = Sequential([
            Conv2D(filters, kernel_size=1, strides=1, padding='same'),
            BatchNormalization()
        ])

        self.psi = Sequential([
            Conv2D(filters, kernel_size=1, strides=1, padding='same'),
            BatchNormalization(),
            Activation('sigmoid')
        ])

        self.relu = Activation('relu') 
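The snippet above shows only __init__. For orientation, here is a sketch of the standard attention-gate forward pass from Attention U-Net, which these sub-networks implement (the actual call() in this source file may differ in detail):

def call(self, g, x):
        # project the gating signal and the skip-connection features
        g1 = self.W_g(g)
        x1 = self.W_x(x)
        # attention coefficients in [0, 1], via the sigmoid in self.psi
        psi = self.psi(self.relu(g1 + x1))
        # reweight the skip-connection features
        return x * psi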
Example #17
Source File: mincut_pool.py    From spektral with MIT License
def build(self, input_shape):
        assert isinstance(input_shape, list)
        layer_kwargs = dict(
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint
        )
        mlp_layers = []
        for i, channels in enumerate(self.mlp_hidden):
            mlp_layers.append(
                Dense(channels, self.mlp_activation, **layer_kwargs)
            )
        mlp_layers.append(
            Dense(self.k, 'softmax', **layer_kwargs)
        )
        self.mlp = Sequential(mlp_layers)

        super().build(input_shape) 
Example #18
Source File: helloworld.py    From keras-tuner with Apache License 2.0
def build_model(hp):
    model = keras.Sequential()
    model.add(layers.Flatten(input_shape=(28, 28)))
    for i in range(hp.get('num_layers')):
        model.add(layers.Dense(32,
                               activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(
        optimizer=keras.optimizers.Adam(hp.get('learning_rate')),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model 
Example #19
Source File: helloworld.py    From keras-tuner with Apache License 2.0
def build(self, hp):
        model = keras.Sequential()
        model.add(layers.Flatten(input_shape=self.img_size))
        for i in range(hp.Int('num_layers', 2, 20)):
            model.add(layers.Dense(units=hp.Int('units_' + str(i), 32, 512, 32),
                                   activation='relu'))
        model.add(layers.Dense(self.classes, activation='softmax'))
        model.compile(
            optimizer=keras.optimizers.Adam(
                hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])
        return model 
Example #20
Source File: helloworld.py    From keras-tuner with Apache License 2.0
def build_model(hp):
    model = keras.Sequential()
    model.add(layers.Flatten(input_shape=(28, 28)))
    for i in range(hp.Int('num_layers', 2, 20)):
        model.add(layers.Dense(units=hp.Int('units_' + str(i), 32, 512, 32),
                               activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(
        optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model 
Example #21
Source File: text_regression.py    From AiLearning with GNU General Public License v3.0
def build_model():
  model = keras.Sequential([
    layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
    layers.Dense(64, activation='relu'),
    layers.Dense(1)
  ])

  optimizer = tf.keras.optimizers.RMSprop(0.001)

  model.compile(loss='mse',
                optimizer=optimizer,
                metrics=['mae', 'mse'])
  return model 
Example #22
Source File: model.py    From learn2branch with MIT License
def __init__(self, emb_size, activation, initializer, right_to_left=False):
        super().__init__()
        self.emb_size = emb_size
        self.activation = activation
        self.initializer = initializer
        self.right_to_left = right_to_left

        # feature layers
        self.feature_module_left = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, use_bias=True, kernel_initializer=self.initializer)
        ])
        self.feature_module_edge = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, use_bias=False, kernel_initializer=self.initializer)
        ])
        self.feature_module_right = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, use_bias=False, kernel_initializer=self.initializer)
        ])
        self.feature_module_final = K.Sequential([
            PreNormLayer(1, shift=False),  # normalize after summation trick
            K.layers.Activation(self.activation),
            K.layers.Dense(units=self.emb_size, activation=None, kernel_initializer=self.initializer)
        ])

        # output_layers
        self.output_module = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, kernel_initializer=self.initializer),
            K.layers.Activation(self.activation),
            K.layers.Dense(units=self.emb_size, activation=None, kernel_initializer=self.initializer),
        ]) 
Example #23
Source File: Unet_family.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, out_ch, t=2):
        super(RRCNN_block, self).__init__()

        self.RCNN = Sequential([
            Recurrent_block(out_ch, t=t),
            Recurrent_block(out_ch, t=t)
        ])
        self.Conv = Conv2D(out_ch, kernel_size=(1, 1), strides=1, padding='same') 
Example #24
Source File: model.py    From learn2branch with MIT License
def __init__(self, emb_size, activation, initializer, right_to_left=False):
        super().__init__()
        self.emb_size = emb_size
        self.activation = activation
        self.initializer = initializer
        self.right_to_left = right_to_left

        # feature layers
        self.feature_module_left = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, use_bias=True, kernel_initializer=self.initializer)
        ])
        self.feature_module_edge = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, use_bias=False, kernel_initializer=self.initializer)
        ])
        self.feature_module_right = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, use_bias=False, kernel_initializer=self.initializer)
        ])
        self.feature_module_final = K.Sequential([
            PreNormLayer(1, shift=False),  # normalize after summation trick
            K.layers.Activation(self.activation),
            K.layers.Dense(units=self.emb_size, activation=None, kernel_initializer=self.initializer)
        ])

        # output_layers
        self.output_module = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, kernel_initializer=self.initializer),
            K.layers.Activation(self.activation),
            K.layers.Dense(units=self.emb_size, activation=None, kernel_initializer=self.initializer),
        ]) 
Example #25
Source File: model.py    From learn2branch with MIT License
def __init__(self, emb_size, activation, initializer, right_to_left=False):
        super().__init__()
        self.emb_size = emb_size
        self.activation = activation
        self.initializer = initializer
        self.right_to_left = right_to_left

        # feature layers
        self.feature_module_left = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, use_bias=True, kernel_initializer=self.initializer)
        ])
        self.feature_module_edge = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, use_bias=False, kernel_initializer=self.initializer)
        ])
        self.feature_module_right = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, use_bias=False, kernel_initializer=self.initializer)
        ])
        self.feature_module_final = K.Sequential([
            PreNormLayer(1, shift=False),  # normalize after summation trick
            K.layers.Activation(self.activation),
            K.layers.Dense(units=self.emb_size, activation=None, kernel_initializer=self.initializer)
        ])

        self.post_conv_module = K.Sequential([
            PreNormLayer(1, shift=False),  # normalize after convolution
        ])

        # output_layers
        self.output_module = K.Sequential([
            K.layers.Dense(units=self.emb_size, activation=None, kernel_initializer=self.initializer),
            K.layers.Activation(self.activation),
            K.layers.Dense(units=self.emb_size, activation=None, kernel_initializer=self.initializer),
        ]) 
Example #26
Source File: macro.py    From nni with MIT License
def build_conv(filters, kernel_size, name=None):
    return Sequential([
        Conv2D(filters, kernel_size=1, use_bias=False),
        BatchNormalization(trainable=False),
        ReLU(),
        Conv2D(filters, kernel_size, padding='same'),
        BatchNormalization(trainable=False),
        ReLU(),
    ], name) 
Example #27
Source File: helloworld.py    From keras-tuner with Apache License 2.0
def build_model(hp):
    model = keras.Sequential()
    model.add(layers.Flatten(input_shape=(28, 28)))
    min_layers = 2
    max_layers = 5
    for i in range(hp.Int('num_layers', min_layers, max_layers)):
        with hp.conditional_scope('num_layers', list(range(i + 1, max_layers + 1))):
            model.add(layers.Dense(units=hp.Int('units_' + str(i), 32, 256, 32),
                                   activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(
        optimizer=keras.optimizers.Adam(1e-4),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model 
Example #28
Source File: Unet_family.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, out_ch, t=2):
        super(Recurrent_block, self).__init__()

        self.t = t
        self.out_ch = out_ch
        self.conv = Sequential([
            Conv2D(out_ch, kernel_size=(3, 3), strides=1, padding='same'),
            BatchNormalization(),
            Activation('relu')
        ]) 
Example #29
Source File: multi_layer_perceptron.py    From openrec with Apache License 2.0
def MLP(units_list, use_bias=True, activation='relu', out_activation=None):

    mlp = Sequential()

    for units in units_list[:-1]:
        mlp.add(Dense(units,
                      activation=activation,
                      use_bias=use_bias))

    mlp.add(Dense(units_list[-1],
                  activation=out_activation,
                  use_bias=use_bias))

    return mlp
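A hypothetical usage sketch (not part of the openrec source): a three-layer scoring head whose output is squashed to a probability.

import numpy as np

mlp = MLP([128, 64, 1], out_activation='sigmoid')
scores = mlp(np.zeros((4, 256), dtype=np.float32))  # shape: (4, 1)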
Example #30
Source File: imdb_utils_native_keras.py    From tfx with Apache License 2.0
def _build_keras_model() -> keras.Model:
  """Creates a LSTM Keras model for classifying imdb data.

  Reference: https://www.tensorflow.org/tutorials/text/text_classification_rnn

  Returns:
    A Keras Model.
  """
  # The model below is built with the Sequential API; please refer to
  # https://www.tensorflow.org/guide/keras/sequential_model
  model = keras.Sequential([
      keras.layers.Embedding(
          _VOCAB_SIZE + 2,
          _EMBEDDING_UNITS,
          name=_transformed_name(_FEATURE_KEY)),
      keras.layers.Bidirectional(
          keras.layers.LSTM(_LSTM_UNITS, dropout=_DROPOUT_RATE)),
      keras.layers.Dense(_HIDDEN_UNITS, activation='relu'),
      keras.layers.Dense(1)
  ])

  model.compile(
      loss=keras.losses.BinaryCrossentropy(from_logits=True),
      optimizer=keras.optimizers.Adam(_LEARNING_RATE),
      metrics=['accuracy'])

  model.summary(print_fn=absl.logging.info)
  return model