Python tensorflow.keras.layers.Flatten() Examples

The following are 30 code examples of tensorflow.keras.layers.Flatten(), drawn from open-source projects. The source file and originating project are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.keras.layers.
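As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of what Flatten() does: it collapses every axis except the batch axis, so a (batch, 2, 3) tensor becomes (batch, 6).

import tensorflow as tf
from tensorflow.keras.layers import Flatten

x = tf.zeros((4, 2, 3))   # a batch of 4 samples with feature shape (2, 3)
y = Flatten()(x)          # all non-batch axes are collapsed into one
print(y.shape)            # (4, 6)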
Example #1
Source File: seqtoseq.py    From deepchem with MIT License
def _create_encoder(self, n_layers, dropout):
    """Create the encoder as a tf.keras.Model."""
    input = self._create_features()
    gather_indices = Input(shape=(2,), dtype=tf.int32)
    prev_layer = input
    for i in range(len(self._filter_sizes)):
      filter_size = self._filter_sizes[i]
      kernel_size = self._kernel_sizes[i]
      if dropout > 0.0:
        prev_layer = Dropout(rate=dropout)(prev_layer)
      prev_layer = Conv1D(
          filters=filter_size, kernel_size=kernel_size,
          activation=tf.nn.relu)(prev_layer)
    prev_layer = Flatten()(prev_layer)
    prev_layer = Dense(
        self._decoder_dimension, activation=tf.nn.relu)(prev_layer)
    prev_layer = BatchNormalization()(prev_layer)
    return tf.keras.Model(inputs=[input, gather_indices], outputs=prev_layer) 
Example #2
Source File: layers.py    From attention-mechanisms with MIT License
def call(self, inputs):  # (B, S, H)
        # Expand weights to include batch size through implicit broadcasting
        W1, W2 = self.W1[None, :, :], self.W2[None, :, :]
        hidden_states_transposed = Permute(dims=(2, 1))(inputs)                                     # (B, H, S)
        attention_score = tf.matmul(W1, hidden_states_transposed)                                   # (B, size, S)
        attention_score = Activation('tanh')(attention_score)                                       # (B, size, S)
        attention_weights = tf.matmul(W2, attention_score)                                          # (B, num_hops, S)
        attention_weights = Activation('softmax')(attention_weights)                                # (B, num_hops, S)
        embedding_matrix = tf.matmul(attention_weights, inputs)                                     # (B, num_hops, H)
        embedding_matrix_flattened = Flatten()(embedding_matrix)                                    # (B, num_hops*H)

        if self.use_penalization:
            attention_weights_transposed = Permute(dims=(2, 1))(attention_weights)                  # (B, S, num_hops)
            product = tf.matmul(attention_weights, attention_weights_transposed)                    # (B, num_hops, num_hops)
            identity = tf.eye(self.num_hops, batch_shape=(inputs.shape[0],))                        # (B, num_hops, num_hops)
            frobenius_norm = tf.sqrt(tf.reduce_sum(tf.square(product - identity)))  # distance
            self.add_loss(self.penalty_coefficient * frobenius_norm)  # loss

        if self.model_api == 'functional':
            return embedding_matrix_flattened, attention_weights
        elif self.model_api == 'sequential':
            return embedding_matrix_flattened 
Example #3
Source File: run.py    From polyaxon with Apache License 2.0
def get_model(args):
    model = models.Sequential()
    model.add(
        layers.Conv2D(args.conv1_size, (3, 3), activation=args.conv_activation, input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation=args.conv_activation))
    model.add(layers.Dropout(args.dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(args.hidden1_size, activation=args.dense_activation))
    model.add(layers.Dense(10, activation='softmax'))

    model.summary()

    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])

    return model 
Example #4
Source File: CNN.py    From nn_builder with MIT License
def process_hidden_layers(self, x, training):
        """Puts the data x through all the hidden layers"""
        flattened=False
        training = training or training is None
        valid_batch_norm_layer_ix = 0
        for layer_ix, layer in enumerate(self.hidden_layers):
            if type(layer) in self.valid_layer_types_with_no_parameters:
                x = layer(x)
            else:
                if type(layer) == Dense and not flattened:
                    x = Flatten()(x)
                    flattened = True
                x = layer(x)
                if self.batch_norm:
                    x = self.batch_norm_layers[valid_batch_norm_layer_ix](x, training=False)
                    valid_batch_norm_layer_ix += 1
                if self.dropout != 0.0 and training: x = self.dropout_layer(x)
        if not flattened: x = Flatten()(x)
        return x 
Example #5
Source File: test_conv_layer.py    From TensorNetwork with Apache License 2.0
def make_model(dummy_data):
  # pylint: disable=redefined-outer-name
  data, _ = dummy_data
  model = Sequential()
  model.add(
      Conv2DMPO(filters=4,
                kernel_size=3,
                num_nodes=2,
                bond_dim=10,
                padding='same',
                input_shape=data.shape[1:],
                name=LAYER_NAME)
      )
  model.add(Flatten())
  model.add(Dense(1, activation='sigmoid'))
  return model 
Example #6
Source File: helloworld.py    From keras-tuner with Apache License 2.0
def build_model(hp):
    model = keras.Sequential()
    model.add(layers.Flatten(input_shape=(28, 28)))
    min_layers = 2
    max_layers = 5
    for i in range(hp.Int('num_layers', min_layers, max_layers)):
        model.add(layers.Dense(units=hp.Int('units_' + str(i),
                                            32,
                                            256,
                                            32,
                                            parent_name='num_layers',
                                            parent_values=list(range(i + 1, max_layers + 1))),
                               activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(
        optimizer=keras.optimizers.Adam(1e-4),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model 
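As a usage note (not part of the original file): build_model is meant to be handed to a tuner object, which samples the hyperparameters. A minimal sketch, assuming the keras_tuner package (older releases import as kerastuner) and hypothetical training arrays:

import keras_tuner as kt

tuner = kt.RandomSearch(build_model,
                        objective='val_accuracy',
                        max_trials=5,
                        directory='tuner_logs')
# tuner.search(x_train, y_train, epochs=3, validation_data=(x_val, y_val))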
Example #7
Source File: reduction.py    From autokeras with MIT License
def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        if len(inputs) == 1:
            return inputs

        merge_type = self.merge_type or hp.Choice('merge_type',
                                                  ['add', 'concatenate'],
                                                  default='add')

        if not all([shape_compatible(input_node.shape, inputs[0].shape) for
                    input_node in inputs]):
            new_inputs = []
            for input_node in inputs:
                new_inputs.append(Flatten().build(hp, input_node))
            inputs = new_inputs

        # TODO: Even if the inputs have different shape[-1], they can still be
        #  added after another layer. Check whether the inputs all have the
        #  same shape.
        if all([input_node.shape == inputs[0].shape for input_node in inputs]):
            if merge_type == 'add':
                return layers.Add()(inputs)

        return layers.Concatenate()(inputs) 
Example #8
Source File: test_continuous.py    From keras-rl2 with MIT License
def test_ddpg():
    # TODO: replace this with a simpler environment where we can actually test if it finds a solution
    env = gym.make('Pendulum-v0')
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.shape[0]

    actor = Sequential()
    actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(nb_actions))
    actor.add(Activation('linear'))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
    flattened_observation = Flatten()(observation_input)
    x = Concatenate()([action_input, flattened_observation])
    x = Dense(16)(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)
    
    memory = SequentialMemory(limit=1000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3)
    agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                      memory=memory, nb_steps_warmup_critic=50, nb_steps_warmup_actor=50,
                      random_process=random_process, gamma=.99, target_model_update=1e-3)
    agent.compile([Adam(lr=1e-3), Adam(lr=1e-3)])

    agent.fit(env, nb_steps=400, visualize=False, verbose=0, nb_max_episode_steps=100)
    h = agent.test(env, nb_episodes=2, visualize=False, nb_max_episode_steps=100)
    # TODO: evaluate history 
Example #9
Source File: reduction.py    From autokeras with MIT License
def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        # No need to reduce.
        if len(output_node.shape) <= 2:
            return output_node

        reduction_type = self.reduction_type or hp.Choice('reduction_type',
                                                          ['flatten',
                                                           'global_max',
                                                           'global_avg'],
                                                          default='global_avg')
        if reduction_type == 'flatten':
            output_node = Flatten().build(hp, output_node)
        elif reduction_type == 'global_max':
            output_node = layer_utils.get_global_max_pooling(
                output_node.shape)()(output_node)
        elif reduction_type == 'global_avg':
            output_node = layer_utils.get_global_average_pooling(
                output_node.shape)()(output_node)
        return output_node 
Example #10
Source File: test_dqn.py    From keras-rl2 with MIT License
def test_single_continuous_dqn_input():
    nb_actions = 2

    V_model = Sequential()
    V_model.add(Flatten(input_shape=(2, 3)))
    V_model.add(Dense(1))

    mu_model = Sequential()
    mu_model.add(Flatten(input_shape=(2, 3)))
    mu_model.add(Dense(nb_actions))

    L_input = Input(shape=(2, 3))
    L_input_action = Input(shape=(nb_actions,))
    x = Concatenate()([Flatten()(L_input), L_input_action])
    x = Dense(((nb_actions * nb_actions + nb_actions) // 2))(x)
    L_model = Model(inputs=[L_input_action, L_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
                     memory=memory, nb_steps_warmup=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10) 
Example #11
Source File: cnn_tf2.py    From ecg-classification with MIT License
def define_model(self):

         inputs = tf.keras.Input(shape=(n_inputs, 1), name='input')

         # 64 filters, 10 kernel size
         x = Conv1D(64, 10, activation='relu')(inputs)
         x = MaxPool1D()(x)
         x = BatchNormalization()(x)

         x = Conv1D(128, 10, activation='relu')(x)
         x = MaxPool1D()(x)
         x = BatchNormalization()(x)

         x = Conv1D(128, 10, activation='relu')(x)
         x = MaxPool1D()(x)
         x = BatchNormalization()(x)

         x = Conv1D(256, 10, activation='relu')(x)
         x = MaxPool1D()(x)
         x = BatchNormalization()(x)

         x = Flatten()(x)
         x = Dense(1024, activation='relu', name='dense_1')(x)
         x = BatchNormalization()(x)
         x = Dropout(dropout)(x)

         x = Dense(2048, activation='relu', name='dense_2')(x)
         x = BatchNormalization()(x)
         x = Dropout(dropout)(x)

         outputs = Dense(n_classes, activation='softmax', name='predictions')(x)

         self.cnn_model = tf.keras.Model(inputs=inputs, outputs=outputs)
         optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
         accuracy = CategoricalAccuracy()
         self.cnn_model.compile(optimizer=optimizer, loss='categorical_crossentropy',
                                metrics=[accuracy]) 
Example #12
Source File: utils.py    From CVPR2019-DeepTreeLearningForZeroShotFaceAntispoofing with MIT License
def __init__(self, filters, size=3, apply_batchnorm=True):
        super(SFL, self).__init__()
        self.apply_batchnorm = apply_batchnorm
        # depth map
        self.cru1 = CRU(filters, size, stride=1)
        self.conv1 = Conv(2, size, activation=False, apply_batchnorm=False)

        # class
        self.conv2 = Downsample(filters*1, size)
        self.conv3 = Downsample(filters*1, size)
        self.conv4 = Downsample(filters*2, size)
        self.conv5 = Downsample(filters*4, 4, padding='VALID')
        self.flatten = layers.Flatten()
        self.fc1 = Dense(256)
        self.fc2 = Dense(1, activation=False, apply_batchnorm=False)

        self.dropout = tf.keras.layers.Dropout(0.3) 
Example #13
Source File: layers.py    From RLs with Apache License 2.0
def ConvLayer(conv_function=Conv2D,
              filters=[32, 64, 64],
              kernels=[[8, 8], [4, 4], [3, 3]],
              strides=[[4, 4], [2, 2], [1, 1]],
              padding='valid',
              activation='relu'):
    '''
    Params:
        conv_function: the convolution layer class to use
        filters: list of filter counts, one per hidden conv layer
        kernels: list of kernel sizes, one per hidden conv layer
        strides: list of strides, one per hidden conv layer
        padding: padding mode
        activation: activation function
    Return:
        A Sequential model of convolution layers followed by a Flatten layer.
    '''
    layers = Sequential([conv_function(filters=f, kernel_size=k, strides=s, padding=padding, activation=activation) for f, k, s in zip(filters, kernels, strides)])
    layers.add(Flatten())
    return layers 
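A usage sketch for ConvLayer (the batch size and Atari-style frame-stack shape below are illustrative, not part of the original file):

import numpy as np

visual_net = ConvLayer()                              # default three-layer conv stack
frames = np.zeros((8, 84, 84, 4), dtype=np.float32)  # (batch, height, width, stacked frames)
features = visual_net(frames)                         # -> shape (8, 3136) after Flatten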
Example #14
Source File: atari_model.py    From tf2rl with MIT License
def __init__(self, state_shape, action_dim, units=None,
                 name="AtariCategoricalActorCritic"):
        tf.keras.Model.__init__(self, name=name)
        self.dist = Categorical(dim=action_dim)
        self.action_dim = action_dim

        self.conv1 = Conv2D(32, kernel_size=(8, 8), strides=(4, 4),
                            padding='valid', activation='relu')
        self.conv2 = Conv2D(64, kernel_size=(4, 4), strides=(2, 2),
                            padding='valid', activation='relu')
        self.conv3 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1),
                            padding='valid', activation='relu')
        self.flat = Flatten()
        self.fc1 = Dense(512, activation='relu')
        self.prob = Dense(action_dim, activation='softmax')
        self.v = Dense(1, activation="linear")

        self(tf.constant(
            np.zeros(shape=(1,)+state_shape, dtype=np.float32))) 
Example #15
Source File: run.py    From polyaxon-examples with Apache License 2.0
def get_model(args):
    model = models.Sequential()
    model.add(
        layers.Conv2D(args.conv1_size, (3, 3), activation=args.conv_activation, input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation=args.conv_activation))
    model.add(layers.Dropout(args.dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(args.hidden1_size, activation=args.dense_activation))
    model.add(layers.Dense(10, activation='softmax'))

    model.summary()

    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])

    return model 
Example #16
Source File: iic-13.5.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def build_model(self):
        """Build the n_heads of the IIC model
        """
        inputs = Input(shape=self.train_gen.input_shape, name='x')
        x = self.backbone(inputs)
        x = Flatten()(x)
        # number of output heads
        outputs = []
        for i in range(self.args.heads):
            name = "z_head%d" % i
            outputs.append(Dense(self.n_labels,
                                 activation='softmax',
                                 name=name)(x))
        self._model = Model(inputs, outputs, name='encoder')
        optimizer = Adam(lr=1e-3)
        self._model.compile(optimizer=optimizer, loss=self.mi_loss)
        self._model.summary() 
Example #17
Source File: test_ddpg.py    From keras-rl2 with MIT License
def test_single_ddpg_input():
    nb_actions = 2

    actor = Sequential()
    actor.add(Flatten(input_shape=(2, 3)))
    actor.add(Dense(nb_actions))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(2, 3), name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    x = Dense(1)(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = DDPGAgent(actor=actor, critic=critic, critic_action_input=action_input, memory=memory,
                      nb_actions=2, nb_steps_warmup_critic=5, nb_steps_warmup_actor=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10) 
Example #18
Source File: train.py    From reloading with MIT License
def __init__(self):
    super(MyModel, self).__init__()
    self.conv1 = Conv2D(32, 3, activation='relu')
    self.flatten = Flatten()
    self.d1 = Dense(128, activation='relu')
    self.d2 = Dense(10) 
Example #19
Source File: tf_mnist_example.py    From ray with Apache License 2.0
def __init__(self, hiddens=128):
        super(MyModel, self).__init__()
        self.conv1 = Conv2D(32, 3, activation="relu")
        self.flatten = Flatten()
        self.d1 = Dense(hiddens, activation="relu")
        self.d2 = Dense(10, activation="softmax") 
Example #20
Source File: cifar_tf_example.py    From ray with Apache License 2.0
def create_model(config):
    import tensorflow as tf
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation("softmax"))

    # instantiate the RMSprop optimizer
    opt = tf.keras.optimizers.RMSprop(lr=0.001, decay=1e-6)

    # train the model using RMSprop
    model.compile(
        loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model 
Example #21
Source File: wide_resnet.py    From image_recognition with MIT License
def __call__(self):
        logging.debug("Creating model...")

        assert ((self._depth - 4) % 6 == 0)
        n = (self._depth - 4) // 6  # integer division so the block count is an int

        inputs = Input(shape=self._input_shape)

        n_stages = [16, 16 * self._k, 32 * self._k, 64 * self._k]

        conv1 = Convolution2D(filters=n_stages[0], kernel_size=(3, 3),
                              strides=(1, 1),
                              padding="same",
                              kernel_initializer=self._weight_init,
                              kernel_regularizer=l2(self._weight_decay),
                              use_bias=self._use_bias)(inputs)  # "One conv at the beginning (spatial size: 32x32)"

        # Add wide residual blocks
        block_fn = self._wide_basic
        conv2 = self._layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1, 1))(conv1)
        conv3 = self._layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2, 2))(conv2)
        conv4 = self._layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2, 2))(conv3)
        batch_norm = BatchNormalization(axis=self._channel_axis)(conv4)
        relu = Activation("relu")(batch_norm)

        # Classifier block
        pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding="same")(relu)
        flatten = Flatten()(pool)
        predictions_g = Dense(units=2, kernel_initializer=self._weight_init, use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay), activation="softmax",
                              name="pred_gender")(flatten)
        predictions_a = Dense(units=101, kernel_initializer=self._weight_init, use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay), activation="softmax",
                              name="pred_age")(flatten)
        model = Model(inputs=inputs, outputs=[predictions_g, predictions_a])

        return model 
Example #22
Source File: generate_test_models.py    From tfjs-to-tf with MIT License
def deepmind_atari_net(num_classes: int = 10,
                       input_shape: Iterable[int] = (128, 128, 3)) -> Model:
    """Generate optimisable test model

        Test model features multiple convolution layers with activation.
        This translates to sub-graphs [Conv2D, BiasAdd, Activation] per
        convolution-layer.

        Grappler should convert variables to constants and get rid of the
        BiasAdd.

        The two dense layers at the bottom translate to
        [MatMul, BiasAdd, Relu|Softmax]. Here, grappler should be able to
        remove the BiasAdd just like with the convolution layers.
    """
    inp = Input(shape=input_shape)
    x = Conv2D(32, kernel_size=(8, 8), strides=(4, 4), padding='same',
               activation='relu', name='conv1')(inp)
    x = Conv2D(64, kernel_size=(4, 4), strides=(2, 2), padding='same',
               activation='relu', name='conv2')(x)
    x = Conv2D(64, kernel_size=(3, 3), padding='same',
               activation='relu', name='conv3')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(512, activation='relu', name='dense1')(x)
    out = Dense(num_classes, activation='softmax', name='output')(x)
    return Model(inp, out) 
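A quick usage sketch (the argument values are illustrative):

model = deepmind_atari_net(num_classes=4, input_shape=(84, 84, 4))
model.summary()  # conv1 -> conv2 -> conv3 -> flatten -> dense1 -> output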
Example #23
Source File: model.py    From cloudml-samples with Apache License 2.0
def keras_estimator(model_dir, config, learning_rate):
  """Creates a Keras Sequential model with layers.

  Args:
    model_dir: (str) file path where training files will be written.
    config: (tf.estimator.RunConfig) Configuration options to save model.
    learning_rate: (int) Learning rate.

  Returns:
    A keras.Model
  """
  model = models.Sequential()
  model.add(Flatten(input_shape=(28, 28)))
  model.add(Dense(128, activation=tf.nn.relu))
  model.add(Dense(10, activation=tf.nn.softmax))

  # Compile model with learning parameters.
  optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
  model.compile(
      optimizer=optimizer,
      loss='sparse_categorical_crossentropy',
      metrics=['accuracy'])

  estimator = tf.keras.estimator.model_to_estimator(
      keras_model=model, model_dir=model_dir, config=config)
  return estimator 
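A usage sketch (the model directory and values are illustrative); the returned object trains through the usual Estimator API:

config = tf.estimator.RunConfig(save_checkpoints_steps=500)
estimator = keras_estimator(model_dir='/tmp/mnist_model',
                            config=config,
                            learning_rate=0.001)
# estimator.train(input_fn=train_input_fn, steps=1000)  # train_input_fn supplied elsewhere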
Example #24
Source File: train_keras_model.py    From gym-2048 with MIT License
def build_model(board_size=4, board_layers=16, outputs=4, filters=64, residual_blocks=4):
  # Functional API model
  inputs = layers.Input(shape=(board_size * board_size * board_layers,))
  x = layers.Reshape((board_size, board_size, board_layers))(inputs)

  # Initial convolutional block
  x = layers.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(x)
  x = layers.BatchNormalization()(x)
  x = layers.Activation('relu')(x)

  # residual blocks
  for i in range(residual_blocks):
    # x at the start of a block
    temp_x = layers.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(x)
    temp_x = layers.BatchNormalization()(temp_x)
    temp_x = layers.Activation('relu')(temp_x)
    temp_x = layers.Conv2D(filters=filters, kernel_size=(3, 3), padding='same')(temp_x)
    temp_x = layers.BatchNormalization()(temp_x)
    x = layers.add([x, temp_x])
    x = layers.Activation('relu')(x)

  # policy head
  x = layers.Conv2D(filters=2, kernel_size=(1, 1), padding='same')(x)
  x = layers.BatchNormalization()(x)
  x = layers.Activation('relu')(x)
  x = layers.Flatten()(x)
  predictions = layers.Dense(outputs, activation='softmax')(x)

  # Create model
  return models.Model(inputs=inputs, outputs=predictions) 
Example #25
Source File: data_interface.py    From kryptoflow with GNU General Public License v3.0
def keras_model():  # pragma: no cover
    from tensorflow.keras.layers import Flatten, Dense, Dropout, Input

    inputs = Input(shape=(4, 1, 1))
    x = Dense(512, activation='relu')(inputs)  # apply the layer to the inputs tensor
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    outputs = Dense(_NUM_CLASSES, activation='softmax')(x)

    return tf.keras.Model(inputs, outputs) 
Example #26
Source File: model.py    From DeepMusicClassification with MIT License
def create_model(input_shape):
    model = Sequential()

    model.add(Conv2D(64, (3,3), input_shape=input_shape))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Conv2D(128, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Conv2D(256, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Conv2D(512, (3,3), kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)))
    model.add(Activation('elu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Flatten())

    model.add(Dense(1024))
    model.add(Activation('elu'))
    model.add(Dropout(0.5))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    # compile model
    model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics = ['accuracy'])

    return model 
Example #27
Source File: factory.py    From mtcnn with MIT License
def build_rnet(self, input_shape=None):
        if input_shape is None:
            input_shape = (24, 24, 3)

        r_inp = Input(input_shape)

        r_layer = Conv2D(28, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_inp)
        r_layer = PReLU(shared_axes=[1, 2])(r_layer)
        r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(r_layer)

        r_layer = Conv2D(48, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_layer)
        r_layer = PReLU(shared_axes=[1, 2])(r_layer)
        r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(r_layer)

        r_layer = Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding="valid")(r_layer)
        r_layer = PReLU(shared_axes=[1, 2])(r_layer)
        r_layer = Flatten()(r_layer)
        r_layer = Dense(128)(r_layer)
        r_layer = PReLU()(r_layer)

        r_layer_out1 = Dense(2)(r_layer)
        r_layer_out1 = Softmax(axis=1)(r_layer_out1)

        r_layer_out2 = Dense(4)(r_layer)

        r_net = Model(r_inp, [r_layer_out2, r_layer_out1])

        return r_net 
Example #28
Source File: factory.py    From mtcnn with MIT License
def build_onet(self, input_shape=None):
        if input_shape is None:
            input_shape = (48, 48, 3)

        o_inp = Input(input_shape)
        o_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_inp)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)
        o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(o_layer)

        o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)
        o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(o_layer)

        o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)
        o_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(o_layer)

        o_layer = Conv2D(128, kernel_size=(2, 2), strides=(1, 1), padding="valid")(o_layer)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)

        o_layer = Flatten()(o_layer)
        o_layer = Dense(256)(o_layer)
        o_layer = PReLU()(o_layer)

        o_layer_out1 = Dense(2)(o_layer)
        o_layer_out1 = Softmax(axis=1)(o_layer_out1)
        o_layer_out2 = Dense(4)(o_layer)
        o_layer_out3 = Dense(10)(o_layer)

        o_net = Model(o_inp, [o_layer_out2, o_layer_out3, o_layer_out1])
        return o_net 
Example #29
Source File: reduction.py    From autokeras with MIT License
def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        if len(input_node.shape) > 2:
            return layers.Flatten()(input_node)
        return input_node 
Example #30
Source File: reduction.py    From autokeras with MIT License
def build(self, hp, inputs=None):
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        input_node = inputs[0]
        output_node = input_node

        # No need to reduce.
        if len(output_node.shape) <= 2:
            return output_node

        reduction_type = self.reduction_type or hp.Choice('reduction_type',
                                                          ['flatten',
                                                           'global_max',
                                                           'global_avg'],
                                                          default='global_avg')

        if reduction_type == 'flatten':
            output_node = Flatten().build(hp, output_node)
        elif reduction_type == 'global_max':
            output_node = tf.math.reduce_max(output_node, axis=-2)
        elif reduction_type == 'global_avg':
            output_node = tf.math.reduce_mean(output_node, axis=-2)
        elif reduction_type == 'global_min':
            output_node = tf.math.reduce_min(output_node, axis=-2)

        return output_node
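For rank-3 inputs, the reduce ops above behave like Keras' global pooling layers. A quick sanity-check sketch (not part of autokeras):

import tensorflow as tf

x = tf.random.normal((2, 5, 8))
reduced = tf.math.reduce_mean(x, axis=-2)             # (2, 8)
pooled = tf.keras.layers.GlobalAveragePooling1D()(x)  # (2, 8)
print(bool(tf.reduce_all(tf.abs(reduced - pooled) < 1e-5)))  # True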