Python tensorflow.python.keras.layers.Flatten() Examples

The following are 12 code examples of tensorflow.python.keras.layers.Flatten(), collected from open-source projects. The source file, project, and license are noted above each example.
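
As a primer, here is a minimal standalone sketch of what Flatten() does (assuming TensorFlow 2.x, where the tensorflow.python.keras path is still importable): it collapses every dimension except the batch axis, so a (batch, 4, 4, 3) input becomes (batch, 48).

import numpy as np
from tensorflow.python.keras import layers

x = np.zeros((2, 4, 4, 3), dtype="float32")  # batch of 2 inputs, each 4x4x3
y = layers.Flatten()(x)
print(y.shape)  # (2, 48): all non-batch dimensions collapsed into one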
Example #1
Source File: baisc.py    From FATE with Apache License 2.0
def _build_flatten(data_format=None, **kwargs):
    return layers.Flatten(data_format=data_format, **kwargs) 
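
A note on the data_format argument forwarded above: per the Keras documentation, Flatten(data_format='channels_first') permutes the input to channels-last before collapsing it, which preserves weight ordering when switching a model between data formats. A hedged call of this helper (the name kwarg is just an example of what **kwargs forwards):

flat_layer = _build_flatten(data_format='channels_first', name='flat')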
Example #2
Source File: test_explanation_model.py    From cxplain with MIT License
def test_mnist_unet_with_shape_valid(self):
        num_subsamples = 100
        (x_train, y_train), (x_test, y_test) = TestUtil.get_mnist(flattened=False, num_subsamples=num_subsamples)

        explained_model_builder = MLPModelBuilder(num_layers=2, num_units=64, activation="relu", p_dropout=0.2,
                                                  verbose=0, batch_size=256, learning_rate=0.001, num_epochs=2,
                                                  early_stopping_patience=128)
        input_shape = x_train.shape[1:]
        input_layer = Input(shape=input_shape)
        last_layer = Flatten()(input_layer)
        last_layer = explained_model_builder.build(last_layer)
        last_layer = Dense(y_train.shape[-1], activation="softmax")(last_layer)
        explained_model = Model(input_layer, last_layer)
        explained_model.compile(loss="categorical_crossentropy",
                                optimizer="adam")
        explained_model.fit(x_train, y_train)
        masking_operation = ZeroMasking()
        loss = categorical_crossentropy

        downsample_factors = [(2, 2), (4, 4), (4, 7), (7, 4), (7, 7)]
        with_bns = [i % 2 == 0 for i in range(len(downsample_factors))]
        for downsample_factor, with_bn in zip(downsample_factors, with_bns):
            model_builder = UNetModelBuilder(downsample_factor, num_layers=2, num_units=64, activation="relu",
                                             p_dropout=0.2, verbose=0, batch_size=256, learning_rate=0.001,
                                             num_epochs=2, early_stopping_patience=128, with_bn=with_bn)

            explainer = CXPlain(explained_model, model_builder, masking_operation, loss,
                                downsample_factors=downsample_factor)

            explainer.fit(x_train, y_train)
            eval_score = explainer.score(x_test, y_test)
            train_score = explainer.get_last_fit_score()
            median = explainer.predict(x_test)
            self.assertTrue(median.shape == x_test.shape) 
Example #3
Source File: small_cnn.py    From camera-trap-classifier with MIT License
def architecture(inputs):
    """ Architecture of model """

    conv1 = Conv2D(32, kernel_size=(3, 3),
                   activation='relu')(inputs)
    max1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(32, (3, 3), activation='relu')(max1)
    max2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(64, (3, 3), activation='relu')(max2)
    max3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    flat1 = Flatten()(max3)
    dense1 = Dense(64, activation='relu')(flat1)
    drop1 = Dropout(0.5)(dense1)

    return drop1 
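
The function above returns the penultimate tensor rather than a finished model. A hedged sketch of wiring it into a full classifier; the input size and class count are invented for illustration:

from tensorflow.python.keras.layers import Dense, Input
from tensorflow.python.keras.models import Model

inputs = Input(shape=(64, 64, 3))  # assumed image size
features = architecture(inputs)
outputs = Dense(10, activation='softmax')(features)  # assumed 10 classes
model = Model(inputs=inputs, outputs=outputs)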
Example #4
Source File: utils.py    From DeepCTR with Apache License 2.0
def combined_dnn_input(sparse_embedding_list, dense_value_list):
    if len(sparse_embedding_list) > 0 and len(dense_value_list) > 0:
        sparse_dnn_input = Flatten()(concat_func(sparse_embedding_list))
        dense_dnn_input = Flatten()(concat_func(dense_value_list))
        return concat_func([sparse_dnn_input, dense_dnn_input])
    elif len(sparse_embedding_list) > 0:
        return Flatten()(concat_func(sparse_embedding_list))
    elif len(dense_value_list) > 0:
        return Flatten()(concat_func(dense_value_list))
    else:
        raise NotImplementedError("dnn_feature_columns can not be empty list") 
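
concat_func is DeepCTR's internal concatenation helper. A minimal stand-in with the same contract, shown only to make the example self-contained (this is an illustrative assumption, not DeepCTR's actual implementation):

import tensorflow as tf

def concat_func(tensors, axis=-1):
    # A single tensor passes through unchanged; several are concatenated.
    if len(tensors) == 1:
        return tensors[0]
    return tf.keras.layers.Concatenate(axis=axis)(tensors)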
Example #5
Source File: RTSNNet.py    From alpha-zero-general with MIT License
def __init__(self, game, encoder):
        """
        NNet model, copied from Othello NNet, with reduced fully connected layers fc1 and fc2 and reduced nnet_args.num_channels
        :param game: game configuration
        :param encoder: Encoder, used to encode game boards
        """
        from rts.src.config_class import CONFIG

        # game params
        self.board_x, self.board_y, num_encoders = game.getBoardSize()
        self.action_size = game.getActionSize()

        """
        num_encoders = CONFIG.nnet_args.encoder.num_encoders
        """
        num_encoders = encoder.num_encoders

        # Neural Net
        self.input_boards = Input(shape=(self.board_x, self.board_y, num_encoders))  # s: batch_size x board_x x board_y x num_encoders

        x_image = Reshape((self.board_x, self.board_y, num_encoders))(self.input_boards)  # batch_size  x board_x x board_y x num_encoders
        h_conv1 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(x_image)))  # batch_size  x board_x x board_y x num_channels
        h_conv2 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(h_conv1)))  # batch_size  x board_x x board_y x num_channels
        h_conv3 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv2)))  # batch_size  x (board_x-2) x (board_y-2) x num_channels
        h_conv4 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv3)))  # batch_size  x (board_x-4) x (board_y-4) x num_channels
        h_conv4_flat = Flatten()(h_conv4)
        s_fc1 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(256, use_bias=False)(h_conv4_flat))))  # batch_size x 256
        s_fc2 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(128, use_bias=False)(s_fc1))))  # batch_size x 128
        self.pi = Dense(self.action_size, activation='softmax', name='pi')(s_fc2)  # batch_size x self.action_size
        self.v = Dense(1, activation='tanh', name='v')(s_fc2)  # batch_size x 1

        self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])
        self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'], optimizer=Adam(CONFIG.nnet_args.lr)) 
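
A hedged sketch of training the two-headed network compiled above, assuming nnet is an instance of this class; boards, target_pis, and target_vs are placeholder arrays of encoded board states, action distributions, and scalar outcomes in [-1, 1]:

nnet.model.fit(x=boards, y=[target_pis, target_vs], batch_size=64, epochs=10)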
Example #6
Source File: tsne_grid.py    From tsne-grid with MIT License
def build_model():
    base_model = VGG16(weights='imagenet')
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    return Model(inputs=base_model.input, outputs=top_model(base_model.output)) 
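
A hedged usage sketch: with include_top left at its default, VGG16 expects 224x224 RGB inputs and emits a 1000-way prediction vector, which the added Flatten leaves as one row per image:

import numpy as np

model = build_model()
features = model.predict(np.zeros((1, 224, 224, 3)))  # dummy image batch
print(features.shape)  # (1, 1000)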
Example #7
Source File: backend.py    From FATE with Apache License 2.0
def build(self, lambda_u=0.0001, lambda_v=0.0001, optimizer='rmsprop',
              loss='mse', metrics='mse', initializer='uniform'):
        """
        Init session and create model architecture.
        :param lambda_u: lambda value of l2 norm for user embeddings.
        :param lambda_v: lambda value of l2 norm for item embeddings.
        :param optimizer: optimizer type.
        :param loss: loss type.
        :param metrics: evaluation metrics.
        :param initializer: initializer of embedding
        :return:
        """
        # init session on first time ref
        sess = self.session
        # user embedding
        user_input_layer = Input(shape=(1,), dtype='int32', name='user_input')
        user_embedding_layer = Embedding(
            input_dim=self.user_num,
            output_dim=self.embedding_dim,
            input_length=1,
            name='user_embedding',
            embeddings_regularizer=l2(lambda_u),
            embeddings_initializer=initializer)(user_input_layer)
        user_embedding_layer = Flatten(name='user_flatten')(user_embedding_layer)

        # item embedding
        item_input_layer = Input(shape=(1,), dtype='int32', name='item_input')
        item_embedding_layer = Embedding(
            input_dim=self.item_num,
            output_dim=self.embedding_dim,
            input_length=1,
            name='item_embedding',
            embeddings_regularizer=l2(lambda_v),
            embeddings_initializer=initializer)(item_input_layer)
        item_embedding_layer = Flatten(name='item_flatten')(item_embedding_layer)

        # rating prediction
        dot_layer = Dot(axes=-1,
                        name='dot_layer')([user_embedding_layer,
                                           item_embedding_layer])
        self._model = Model(
            inputs=[user_input_layer, item_input_layer], outputs=[dot_layer])

        # compile model
        optimizer_instance = getattr(
            tf.keras.optimizers, optimizer.optimizer)(**optimizer.kwargs)
        losses = getattr(tf.keras.losses, loss)
        self._model.compile(optimizer=optimizer_instance,
                            loss=losses, metrics=metrics)
        # pick user_embedding for aggregating
        self._trainable_weights = {v.name.split(
            "/")[0]: v for v in self._model.trainable_weights}
        self._aggregate_weights = {
            "user_embedding": self._trainable_weights["user_embedding"]} 
Example #8
Source File: backend.py    From FATE with Apache License 2.0
def _build(self, lambda_u=0.0001, lambda_v=0.0001, optimizer='rmsprop',
               loss='mse', metrics='mse', initializer='uniform'):
        # init session on first time ref
        sess = self.session

        # user embedding
        user_InputLayer = Input(shape=(1,), dtype='int32', name='user_input')
        user_EmbeddingLayer = Embedding(input_dim=self.user_num,
                                        output_dim=self.embedding_dim,
                                        input_length=1,
                                        name='user_embedding',
                                        embeddings_regularizer=l2(lambda_u),
                                        embeddings_initializer=initializer)(user_InputLayer)
        user_EmbeddingLayer = Flatten(name='user_flatten')(user_EmbeddingLayer)

        # user bias
        user_BiasLayer = Embedding(input_dim=self.user_num, output_dim=1, input_length=1,
                                   name='user_bias', embeddings_regularizer=l2(lambda_u),
                                   embeddings_initializer=Zeros())(user_InputLayer)
        user_BiasLayer = Flatten(name='user_bias_flatten')(user_BiasLayer)

        # item embedding
        item_InputLayer = Input(shape=(1,), dtype='int32', name='item_input')
        item_EmbeddingLayer = Embedding(input_dim=self.item_num,
                                        output_dim=self.embedding_dim,
                                        input_length=1,
                                        name='item_embedding',
                                        embeddings_regularizer=l2(lambda_v),
                                        embeddings_initializer=initializer)(item_InputLayer)
        item_EmbeddingLayer = Flatten(name='item_flatten')(item_EmbeddingLayer)

        # item bias
        item_BiasLayer = Embedding(input_dim=self.item_num, output_dim=1, input_length=1,
                                   name='item_bias', embeddings_regularizer=l2(lambda_v),
                                   embeddings_initializer=Zeros())(item_InputLayer)
        item_BiasLayer = Flatten(name='item_bias_flatten')(item_BiasLayer)

        # rating prediction
        dotLayer = Dot(axes=-1, name='dot_layer')([user_EmbeddingLayer, item_EmbeddingLayer])

        # add mu, user bias and item bias
        dotLayer = ConstantLayer(mu=self.mu)(dotLayer)
        dotLayer = Add()([dotLayer, user_BiasLayer])
        dotLayer = Add()([dotLayer, item_BiasLayer])

        # create model
        self._model = Model(inputs=[user_InputLayer, item_InputLayer], outputs=[dotLayer])

        # compile model
        optimizer_instance = getattr(tf.keras.optimizers, optimizer.optimizer)(**optimizer.kwargs)
        losses = getattr(tf.keras.losses, loss)
        self._model.compile(optimizer=optimizer_instance,
                            loss=losses, metrics=metrics)
        # pick user_embedding and user_bias for aggregating
        self._trainable_weights = {v.name.split("/")[0]: v for v in self._model.trainable_weights}
        LOGGER.debug(f"trainable weights {self._trainable_weights}")
        self._aggregate_weights = {"user_embedding": self._trainable_weights["user_embedding"],
                                   "user_bias": self._trainable_weights["user_bias"]} 
Example #9
Source File: pbt_tune_cifar10_with_keras.py    From ray with Apache License 2.0
def _build_model(self, input_shape):
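        # NOTE: the input_shape argument is not used; the input size is
        # hardcoded to CIFAR-10's 32x32x3 images.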
        x = Input(shape=(32, 32, 3))
        y = x
        y = Convolution2D(
            filters=64,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = Convolution2D(
            filters=64,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

        y = Convolution2D(
            filters=128,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = Convolution2D(
            filters=128,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

        y = Convolution2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = Convolution2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

        y = Flatten()(y)
        y = Dropout(self.config.get("dropout", 0.5))(y)
        y = Dense(
            units=10, activation="softmax", kernel_initializer="he_normal")(y)

        model = Model(inputs=x, outputs=y, name="model1")
        return model 
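
The builder returns an uncompiled model. A hedged sketch of compiling it for CIFAR-10; trainer is a hypothetical instance, and the optimizer and loss are assumptions rather than this trainable's actual settings:

model = trainer._build_model(input_shape=(32, 32, 3))
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])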
Example #10
Source File: model.py    From speaker-recognition-3d-cnn with MIT License
def _3d_cnn_model(input_shape, num_classes):
    # Define Model
    inputs = Input(shape=input_shape, name="input-layer")

    # Conv 1
    X = Conv3D(filters=16, kernel_size=(3, 1, 5), strides=(1, 1, 1), name="conv1-1")(inputs)
    X = PReLU(name="activation1-1")(X)
    X = Conv3D(filters=16, kernel_size=(3, 9, 1), strides=(1, 2, 1), name="conv1-2")(X)
    X = PReLU(name="activation1-2")(X)
    X = MaxPool3D(pool_size=(1, 1, 2), strides=(1, 1, 2), padding="valid", name="pool-1")(X)
    # X = Dropout(0.2)(X)

    # Conv 2
    X = Conv3D(filters=32, kernel_size=(3, 1, 4), strides=(1, 1, 1), name="conv2-1")(X)
    X = PReLU(name="activation2-1")(X)
    X = Conv3D(filters=32, kernel_size=(3, 8, 1), strides=(1, 2, 1), name="conv2-2")(X)
    X = PReLU(name="activation2-2")(X)
    X = MaxPool3D(pool_size=(1, 1, 2), strides=(1, 1, 2), padding="valid", name="pool-2")(X)
    # X = Dropout(0.2)(X)

    # Conv 3
    X = Conv3D(filters=64, kernel_size=(3, 1, 3), strides=(1, 1, 1), name="conv3-1")(X)
    X = PReLU(name="activation3-1")(X)
    X = Conv3D(filters=64, kernel_size=(3, 7, 1), strides=(1, 1, 1), name="conv3-2")(X)
    X = PReLU(name="activation3-2")(X)
    # X = Dropout(0.2)(X)

    # Conv 4
    X = Conv3D(filters=128, kernel_size=(3, 1, 3), strides=(1, 1, 1), name="conv4-1")(X)
    X = PReLU(name="activation4-1")(X)
    X = Conv3D(filters=128, kernel_size=(3, 7, 1), strides=(1, 1, 1), name="conv4-2")(X)
    X = PReLU(name="activation4-2")(X)
    # X = Dropout(0.2)(X)

    # Flatten
    X = Flatten()(X)

    # FC
    X = Dense(units=128, name="fc", activation='relu')(X)

    # Final Activation
    X = Dense(units=num_classes, activation='softmax', name="ac_softmax")(X)
    model = Model(inputs=inputs, outputs=X)

    return model 
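
A hedged instantiation sketch. The shapes are assumptions chosen to satisfy the valid-padding convolutions (20 temporal frames of 80x40 spectral features, one channel); the real project may use different dimensions:

model = _3d_cnn_model(input_shape=(20, 80, 40, 1), num_classes=100)
model.summary()  # the final 4x3x3x128 feature map flattens to 4608 units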
Example #11
Source File: conv_network.py    From mnist_digits_classification with MIT License
def model(train_x, train_y, test_x, test_y, epoch):
    '''
    Train a small CNN on MNIST.

    :param train_x: train features
    :param train_y: train labels
    :param test_x: test features
    :param test_y: test labels
    :param epoch: number of epochs
    :return: the trained model and a dict of train/test accuracy
    '''
    conv_model = Sequential()
    # first layer with input shape (img_rows, img_cols, 1) and 12 filters
    conv_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu',
                          input_shape=(img_rows, img_cols, 1)))
    # second layer with 12 filters
    conv_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu'))
    # third layer with 12 filters
    conv_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu'))
    # flatten layer
    conv_model.add(Flatten())
    # adding a Dense layer
    conv_model.add(Dense(100, activation='relu'))
    # adding the final Dense layer with softmax
    conv_model.add(Dense(num_classes, activation='softmax'))

    # compile the model
    conv_model.compile(optimizer=keras.optimizers.Adadelta(),
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
    print("\n Training the Convolution Neural Network on MNIST data\n")
    # fit the model
    conv_model.fit(train_x, train_y, batch_size=128, epochs=epoch,
                   validation_split=0.1, verbose=2)
    predicted_train_y = conv_model.predict(train_x)
    train_accuracy = (sum(np.argmax(predicted_train_y, axis=1)
                          == np.argmax(train_y, axis=1))/(float(len(train_y))))
    print('Train accuracy : ', train_accuracy)
    predicted_test_y = conv_model.predict(test_x)
    test_accuracy = (sum(np.argmax(predicted_test_y, axis=1)
                         == np.argmax(test_y, axis=1))/(float(len(test_y))))
    print('Test accuracy : ', test_accuracy)
    CNN_accuracy = {'train_accuracy': train_accuracy,
                    'test_accuracy': test_accuracy, 'epoch': epoch}
    return conv_model, CNN_accuracy 
Example #12
Source File: fibinet.py    From DeepCTR with Apache License 2.0
def FiBiNET(linear_feature_columns, dnn_feature_columns, bilinear_type='interaction', reduction_ratio=3,
            dnn_hidden_units=(128, 128), l2_reg_linear=1e-5,
            l2_reg_embedding=1e-5, l2_reg_dnn=0, seed=1024, dnn_dropout=0, dnn_activation='relu',
            task='binary'):
    """Instantiates the Feature Importance and Bilinear feature Interaction NETwork architecture.

    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param bilinear_type: str, bilinear function type used in the Bilinear Interaction Layer; can be ``'all'``, ``'each'`` or ``'interaction'``
    :param reduction_ratio: integer in [1,inf), reduction ratio used in SENET Layer
    :param dnn_hidden_units: list, list of positive integers or empty list, the layer number and units in each layer of DNN
    :param l2_reg_linear: float. L2 regularizer strength applied to wide part
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """

    features = build_input_features(linear_feature_columns + dnn_feature_columns)

    inputs_list = list(features.values())

    linear_logit = get_linear_logit(features, linear_feature_columns, seed=seed, prefix='linear',
                                    l2_reg=l2_reg_linear)

    sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
                                                                         l2_reg_embedding, seed)

    senet_embedding_list = SENETLayer(
        reduction_ratio, seed)(sparse_embedding_list)

    senet_bilinear_out = BilinearInteraction(
        bilinear_type=bilinear_type, seed=seed)(senet_embedding_list)
    bilinear_out = BilinearInteraction(
        bilinear_type=bilinear_type, seed=seed)(sparse_embedding_list)

    dnn_input = combined_dnn_input(
        [Flatten()(concat_func([senet_bilinear_out, bilinear_out]))], dense_value_list)
    dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
                  False, seed)(dnn_input)
    dnn_logit = Dense(
        1, use_bias=False, activation=None)(dnn_out)

    final_logit = add_func([linear_logit, dnn_logit])
    output = PredictionLayer(task)(final_logit)

    model = Model(inputs=inputs_list, outputs=output)
    return model
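
A hedged usage sketch; the feature names and vocabulary sizes are invented, and the SparseFeat/DenseFeat import path varies across DeepCTR versions (deepctr.feature_column in recent releases, deepctr.inputs in older ones):

from deepctr.feature_column import SparseFeat, DenseFeat  # path varies by version

feature_columns = [SparseFeat('user_id', vocabulary_size=1000, embedding_dim=8),
                   SparseFeat('item_id', vocabulary_size=5000, embedding_dim=8),
                   DenseFeat('price', 1)]
model = FiBiNET(feature_columns, feature_columns, task='binary')
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['AUC'])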