Python tensorflow.keras.Input() Examples

The following are 30 code examples of tensorflow.keras.Input(), drawn from open-source projects; the source file and project are noted above each example. You may also want to check out all available functions and classes of the tensorflow.keras module, or try the search function.
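Before diving into the project examples, here is a minimal illustrative sketch of the functional API that tf.keras.Input anchors (the layer sizes are arbitrary and not taken from any project below):

import tensorflow as tf

# Input() returns a symbolic tensor describing the shape of one sample;
# the batch dimension is implicit and omitted from `shape`.
inputs = tf.keras.Input(shape=(10,), name='features')
x = tf.keras.layers.Dense(32, activation='relu')(inputs)
outputs = tf.keras.layers.Dense(1)(x)

# A Model is defined by the path from the Input tensor(s) to the output(s).
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='mse')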
Example #1
Source File: test_convolutional.py    From spektral with MIT License
def _test_single_mode(layer, **kwargs):
    sparse = kwargs.pop('sparse', False)
    A_in = Input(shape=(None,), sparse=sparse)
    X_in = Input(shape=(F,))
    inputs = [X_in, A_in]
    if sparse:
        input_data = [X, sp_matrix_to_sp_tensor(A)]
    else:
        input_data = [X, A]

    if kwargs.pop('edges', None):
        E_in = Input(shape=(S,))
        inputs.append(E_in)
        input_data.append(E_single)

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    assert output.shape == (N, kwargs['channels']) 
Example #2
Source File: test_convolutional.py    From spektral with MIT License
def _test_batch_mode(layer, **kwargs):
    A_batch = np.stack([A] * batch_size)
    X_batch = np.stack([X] * batch_size)

    A_in = Input(shape=(N, N))
    X_in = Input(shape=(N, F))
    inputs = [X_in, A_in]
    input_data = [X_batch, A_batch]

    if kwargs.pop('edges', None):
        E_batch = np.stack([E] * batch_size)
        E_in = Input(shape=(N, N, S))
        inputs.append(E_in)
        input_data.append(E_batch)

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    assert output.shape == (batch_size, N, kwargs['channels']) 
Example #3
Source File: test_convolutional.py    From spektral with MIT License
def _test_mixed_mode(layer, **kwargs):
    sparse = kwargs.pop('sparse', False)
    X_batch = np.stack([X] * batch_size)
    A_in = Input(shape=(N,), sparse=sparse)
    X_in = Input(shape=(N, F))
    inputs = [X_in, A_in]
    if sparse:
        input_data = [X_batch, sp_matrix_to_sp_tensor(A)]
    else:
        input_data = [X_batch, A]

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    assert output.shape == (batch_size, N, kwargs['channels']) 
Example #4
Source File: get_activations_test.py    From keract with MIT License
def test_shape_1(self):
        # model definition
        i1 = Input(shape=(10,), name='i1')
        i2 = Input(shape=(10,), name='i2')

        a = Dense(1, name='fc1')(i1)
        b = Dense(1, name='fc2')(i2)

        c = concatenate([a, b], name='concat')
        d = Dense(1, name='out')(c)
        model = Model(inputs=[i1, i2], outputs=[d])

        # inputs to the model
        x = [np.random.uniform(size=(32, 10)),
             np.random.uniform(size=(32, 10))]

        # call to fetch the activations of the model.
        activations = get_activations(model, x, auto_compile=True)

        # activations is an OrderedDict, so .values() keeps layer order:
        # i1, i2, fc1, fc2, concat, out
        self.assertListEqual([a.shape for a in activations.values()],
                             [(32, 10), (32, 10), (32, 1), (32, 1), (32, 2), (32, 1)]) 
Example #5
Source File: get_activations_test.py    From keract with MIT License
def test_inputs_order(self):
        i10 = Input(shape=(10,), name='i1')
        i40 = Input(shape=(40,), name='i4')
        i30 = Input(shape=(30,), name='i3')
        i20 = Input(shape=(20,), name='i2')

        a = Dense(1, name='fc1')(concatenate([i10, i40, i30, i20], name='concat'))
        model = Model(inputs=[i40, i30, i20, i10], outputs=[a])
        x = [
            np.random.uniform(size=(1, 40)),
            np.random.uniform(size=(1, 30)),
            np.random.uniform(size=(1, 20)),
            np.random.uniform(size=(1, 10))
        ]

        acts = get_activations(model, x)
        self.assertListEqual(list(acts['i1'].shape), [1, 10])
        self.assertListEqual(list(acts['i2'].shape), [1, 20])
        self.assertListEqual(list(acts['i3'].shape), [1, 30])
        self.assertListEqual(list(acts['i4'].shape), [1, 40]) 
Example #6
Source File: end_to_end_test.py    From keras-tuner with Apache License 2.0
def build_model(hp):
    inputs = keras.Input(shape=(28, 28))
    x = keras.layers.Reshape((28 * 28,))(inputs)
    for i in range(hp.Int('num_layers', 1, 4)):
        x = keras.layers.Dense(
            units=hp.Int('units_' + str(i), 128, 512, 32, default=256),
            activation='relu')(x)
    x = keras.layers.Dropout(hp.Float('dp', 0., 0.6, 0.1, default=0.5))(x)
    outputs = keras.layers.Dense(10, activation='softmax')(x)
    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 2e-3, 5e-4])),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model 
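A hedged sketch of how a build function like this is consumed: it is passed to a tuner, which calls it once per trial with fresh hyperparameters (the directory name and trial count here are illustrative; Example #24 below shows the same pattern from the project's own tests):

import kerastuner

tuner = kerastuner.tuners.RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=4,
    directory='tuner_dir')
# tuner.search(x=x_train, y=y_train, epochs=2,
#              validation_data=(x_val, y_val))  # x_train etc. are hypothetical arrays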
Example #7
Source File: module.py    From DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2 with MIT License
def ConvDiscriminator(input_shape=(64, 64, 3),
                      dim=64,
                      n_downsamplings=4,
                      norm='batch_norm',
                      name='ConvDiscriminator'):
    Norm = _get_norm_layer(norm)

    # 0
    h = inputs = keras.Input(shape=input_shape)

    # 1: downsamplings, ... -> 16x16 -> 8x8 -> 4x4
    h = keras.layers.Conv2D(dim, 4, strides=2, padding='same')(h)
    h = tf.nn.leaky_relu(h, alpha=0.2)  # or h = keras.layers.LeakyReLU(alpha=0.2)(h)

    for i in range(n_downsamplings - 1):
        d = min(dim * 2 ** (i + 1), dim * 8)
        h = keras.layers.Conv2D(d, 4, strides=2, padding='same', use_bias=False)(h)
        h = Norm()(h)
        h = tf.nn.leaky_relu(h, alpha=0.2)  # or h = keras.layers.LeakyReLU(alpha=0.2)(h)

    # 2: logit
    h = keras.layers.Conv2D(1, 4, strides=1, padding='valid')(h)

    return keras.Model(inputs=inputs, outputs=h, name=name) 
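A quick usage sketch (not from the original repository): with the default 64x64 RGB input and four stride-2 downsamplings, the final valid 4x4 convolution reduces each image to a single logit:

import numpy as np

disc = ConvDiscriminator()
fake_images = np.random.rand(8, 64, 64, 3).astype('float32')
logits = disc(fake_images)
print(logits.shape)  # (8, 1, 1, 1)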
Example #8
Source File: networks.py    From RLs with Apache License 2.0
def __init__(self, vector_dim, visual_dim=[], visual_feature=128, encoder_type='nature'):
        super().__init__()
        self.camera_num = visual_dim[0]
        self.nets = MultiCameraCNN(n=self.camera_num, feature_dim=visual_feature, activation_fn=default_activation, encoder_type=encoder_type)
        self.hdim = vector_dim + (visual_feature * self.camera_num) * (self.camera_num > 0)
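        # Note: `I` here is tensorflow.keras.Input, imported under an alias in
        # the source file; calling self(...) on symbolic Input tensors builds
        # the network's weights by tracing one forward pass.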
        self(I(shape=vector_dim), I(shape=visual_dim)) 
Example #9
Source File: bprmf.py    From DeepRec with GNU General Public License v3.0
def __init__(self, num_user, num_item, learning_rate=0.001, reg_rate=0.1, epoch=500, batch_size=1024,
                 verbose=False, t=5, display_step=1000):
        self.num_user = num_user
        self.num_item = num_item
        self.learning_rate = learning_rate
        self.reg_rate = reg_rate
        self.epochs = epoch
        self.batch_size = batch_size
        self.verbose = verbose
        self.T = t
        self.display_step = display_step

        self.user_id = Input(shape=(1,), dtype=tf.int32, name='user_id')
        self.item_id = Input(shape=(1,), dtype=tf.int32, name='item_id')
        self.neg_item_id = Input(shape=(1,), dtype=tf.int32, name='neg_item_id')
        self.P = None
        self.Q = None
        self.pred_y = None
        self.pred_y_neg = None
        self.loss = None
        self.loss_estimator = tf.keras.metrics.Mean(name='train_loss')
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)

        self.test_data = None
        self.user = None
        self.item = None
        self.neg_items = None
        self.test_users = None

        self.num_training = None
        self.total_batch = None
        print("You are running BPRMF.") 
Example #10
Source File: conftest.py    From snn_toolbox with MIT License
def _model_2(_dataset):
    x_train, y_train, x_test, y_test = _dataset

    axis = 1 if keras.backend.image_data_format() == 'channels_first' else -1

    input_shape = x_train.shape[1:]
    input_layer = Input(input_shape)

    layer = Conv2D(filters=16,
                   kernel_size=(5, 5),
                   strides=(2, 2))(input_layer)
    layer = BatchNormalization(axis=axis)(layer)
    layer = Activation('relu')(layer)
    layer = AveragePooling2D()(layer)
    branch1 = Conv2D(filters=32,
                     kernel_size=(3, 3),
                     padding='same',
                     activation='relu')(layer)
    branch2 = Conv2D(filters=8,
                     kernel_size=(1, 1),
                     activation='relu')(layer)
    layer = Concatenate(axis=axis)([branch1, branch2])
    layer = Conv2D(filters=10,
                   kernel_size=(3, 3),
                   activation='relu')(layer)
    layer = Flatten()(layer)
    layer = Dropout(1e-5)(layer)
    layer = Dense(units=10,
                  activation='softmax')(layer)

    model = Model(input_layer, layer)

    model.compile('adam', 'categorical_crossentropy', ['accuracy'])

    # Train model with backprop.
    history = model.fit(x_train, y_train, batch_size=64, epochs=1, verbose=2,
                        validation_data=(x_test, y_test))

    assert history.history['val_accuracy'][-1] > 0.96

    return model 
Example #11
Source File: conftest.py    From snn_toolbox with MIT License
def _model_1(_dataset):

    x_train, y_train, x_test, y_test = _dataset

    input_shape = x_train.shape[1:]
    input_layer = Input(input_shape)

    layer = Conv2D(filters=16,
                   kernel_size=(5, 5),
                   strides=(2, 2),
                   activation='relu',
                   use_bias=False)(input_layer)
    layer = Conv2D(filters=32,
                   kernel_size=(3, 3),
                   activation='relu',
                   use_bias=False)(layer)
    layer = AveragePooling2D()(layer)
    layer = Conv2D(filters=8,
                   kernel_size=(3, 3),
                   padding='same',
                   activation='relu',
                   use_bias=False)(layer)
    layer = Flatten()(layer)
    layer = Dropout(0.01)(layer)
    layer = Dense(units=10,
                  activation='softmax',
                  use_bias=False)(layer)

    model = Model(input_layer, layer)

    model.compile('adam', 'categorical_crossentropy', ['accuracy'])

    history = model.fit(x_train, y_train, batch_size=64, epochs=1, verbose=2,
                        validation_data=(x_test, y_test))

    assert history.history['val_accuracy'][-1] > 0.95

    return model 
Example #12
Source File: classic_unet.py    From stacks-usecase with Apache License 2.0
def model():
    """generate Unet model and return instance of Model."""
    input_layer = Input((None, None, 1))
    output_layer = unet_model(input_layer, 64, test=False)
    return Model(inputs=[input_layer], outputs=[output_layer]) 
Example #13
Source File: module.py    From CycleGAN-Tensorflow-2 with MIT License
def ConvDiscriminator(input_shape=(256, 256, 3),
                      dim=64,
                      n_downsamplings=3,
                      norm='instance_norm'):
    dim_ = dim
    Norm = _get_norm_layer(norm)

    # 0
    h = inputs = keras.Input(shape=input_shape)

    # 1
    h = keras.layers.Conv2D(dim, 4, strides=2, padding='same')(h)
    h = tf.nn.leaky_relu(h, alpha=0.2)

    for _ in range(n_downsamplings - 1):
        dim = min(dim * 2, dim_ * 8)
        h = keras.layers.Conv2D(dim, 4, strides=2, padding='same', use_bias=False)(h)
        h = Norm()(h)
        h = tf.nn.leaky_relu(h, alpha=0.2)

    # 2
    dim = min(dim * 2, dim_ * 8)
    h = keras.layers.Conv2D(dim, 4, strides=1, padding='same', use_bias=False)(h)
    h = Norm()(h)
    h = tf.nn.leaky_relu(h, alpha=0.2)

    # 3
    h = keras.layers.Conv2D(1, 4, strides=1, padding='same')(h)

    return keras.Model(inputs=inputs, outputs=h)
Example #14
Source File: model.py    From Fast-SRGAN with MIT License
def build_discriminator(self):
        """Builds a discriminator network based on the SRGAN design."""

        def d_block(layer_input, filters, strides=1, bn=True):
            """Discriminator layer block.
            Args:
                layer_input: Input feature map for the convolutional block.
                filters: Number of filters in the convolution.
                strides: The stride of the convolution.
                bn: Whether to use batch norm or not.
            """
            d = keras.layers.Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)
            if bn:
                d = keras.layers.BatchNormalization(momentum=0.8)(d)
            d = keras.layers.LeakyReLU(alpha=0.2)(d)
                
            return d

        # Input img
        d0 = keras.layers.Input(shape=self.hr_shape)

        d1 = d_block(d0, self.df, bn=False)
        d2 = d_block(d1, self.df, strides=2)
        d3 = d_block(d2, self.df)
        d4 = d_block(d3, self.df, strides=2)
        d5 = d_block(d4, self.df * 2)
        d6 = d_block(d5, self.df * 2, strides=2)
        d7 = d_block(d6, self.df * 2)
        d8 = d_block(d7, self.df * 2, strides=2)

        validity = keras.layers.Conv2D(1, kernel_size=1, strides=1, activation='sigmoid', padding='same')(d8)

        return keras.models.Model(d0, validity) 
Example #15
Source File: networks.py    From RLs with Apache License 2.0
def __init__(self, dim, hidden_units):
        super().__init__()
        self.rnn_type = 'lstm'
        # self.masking = tf.keras.layers.Masking(mask_value=0.)

        # ValueError: Tried to convert 'tensor' to a tensor and failed. Error: None values not supported.
        # https://github.com/tensorflow/tensorflow/issues/31998
        cell = tf.keras.layers.LSTMCell(hidden_units)
        self.lstm_net = tf.keras.layers.RNN(cell, return_state=True, return_sequences=True)
        self(I(shape=(None, dim)), I(shape=(hidden_units,)), I(shape=(hidden_units,))) 
Example #16
Source File: tf2nn.py    From RLs with Apache License 2.0
def __init__(self, vector_dim, action_dim, quantiles_idx, hidden_units):
        super().__init__()
        self.action_dim = action_dim
        self.q_net_head = mlp(hidden_units['q_net'], out_layer=False)   # [B, vector_dim]
        self.quantile_net = mlp(hidden_units['quantile'], out_layer=False)  # [N*B, quantiles_idx]
        self.q_net_tile = mlp(hidden_units['tile'], output_shape=action_dim, out_activation=None)   # [N*B, hidden_units['quantile'][-1]]
        self(I(shape=vector_dim), I(shape=quantiles_idx)) 
Example #17
Source File: tf2nn.py    From RLs with Apache License 2.0
def __init__(self, vector_dim, action_dim, atoms, hidden_units):
        super().__init__()
        self.action_dim = action_dim
        self.atoms = atoms
        self.share = mlp(hidden_units['share'], layer=Noisy, out_layer=False)
        self.v = mlp(hidden_units['v'], layer=Noisy, output_shape=atoms, out_activation=None)
        self.adv = mlp(hidden_units['adv'], layer=Noisy, output_shape=action_dim * atoms, out_activation=None)
        self(I(shape=vector_dim)) 
Example #18
Source File: tf2nn.py    From RLs with Apache License 2.0
def __init__(self, vector_dim, action_dim, nums, hidden_units):
        super().__init__()
        self.action_dim = action_dim
        self.nums = nums
        self.net = mlp(hidden_units, output_shape=nums * action_dim, out_activation=None)
        self(I(shape=vector_dim)) 
Example #19
Source File: biasedmf.py    From lkpy with MIT License
def _build_model(self, n_users, n_items):
        n_features = self.features
        _log.info('configuring TensorFlow model for %d features from %d users and %d items',
                  n_features, n_users, n_items)

        init_tf_rng(self.rng_spec)

        # User input layer
        u_input = k.Input(shape=(1,), dtype='int32', name='user')
        # User embedding layer.
        u_reg = k.regularizers.l2(self.reg / n_users)
        u_embed = k.layers.Embedding(input_dim=n_users, output_dim=n_features, input_length=1,
                                     embeddings_regularizer=u_reg,
                                     embeddings_initializer='random_normal',
                                     name='user-embed')(u_input)
        # The embedding layer produces an extra dimension. Remove it.
        u_flat = k.layers.Flatten(name='user-vector')(u_embed)

        # Do the same thing for items
        i_input = k.Input(shape=(1,), dtype='int32', name='item')
        i_reg = k.regularizers.l2(self.reg / n_items)
        i_embed = k.layers.Embedding(input_dim=n_items, output_dim=n_features, input_length=1,
                                     embeddings_regularizer=i_reg,
                                     embeddings_initializer='random_normal',
                                     name='item-embed')(i_input)
        i_flat = k.layers.Flatten(name='item-vector')(i_embed)

        # Predict ratings using a dot product of users and items
        prod = k.layers.Dot(name='score', axes=1)([u_flat, i_flat])

        # Assemble the model and configure to optimize
        model = k.Model([u_input, i_input], prod, name='classic-mf')
        model.compile('adam', 'mean_squared_error')

        return model 
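A hedged sketch of feeding the resulting two-input model; the (batch, 1) int32 arrays below are hypothetical and sized to match the two Input(shape=(1,), dtype='int32') layers above:

import numpy as np

users = np.array([[0], [1], [2]], dtype='int32')
items = np.array([[4], [2], [7]], dtype='int32')
ratings = np.array([4.0, 3.5, 5.0])
# algo is a hypothetical instance of the surrounding class:
# model = algo._build_model(n_users=10, n_items=10)
# model.fit([users, items], ratings, epochs=1)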
Example #20
Source File: display_activations_test.py    From keract with MIT License
def dummy_model_and_inputs():
    i1 = Input(shape=(10,), name='i1')
    a = Dense(1, name='fc1')(i1)
    model = Model(inputs=[i1], outputs=[a])
    x = np.random.uniform(size=(1, 10))
    return model, x 
Example #21
Source File: get_activations_test.py    From keract with MIT License
def dummy_model_and_inputs(**kwargs):
    i1 = Input(shape=(10,), name='i1')
    a = NestedModel(name='model')(i1)
    b = NestedLayer(name='block')(a)
    c = Dense(1, name='fc1')(b)
    model = Model(inputs=[i1], outputs=[c], **kwargs)
    x = np.random.uniform(size=(32, 10))
    return model, x 
Example #22
Source File: tuner_workflows_test.py    From keras-tuner with Apache License 2.0
def build_model(hp):
    inputs = keras.Input(shape=(INPUT_DIM,))
    x = inputs
    for i in range(hp.Int('num_layers', 1, 4)):
        x = keras.layers.Dense(
            units=hp.Int('units_' + str(i), 5, 9, 1, default=6),
            activation='relu')(x)
    outputs = keras.layers.Dense(NUM_CLASSES, activation='softmax')(x)
    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer=keras.optimizers.Adam(
            hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    return model 
Example #23
Source File: tuner_workflows_test.py    From keras-tuner with Apache License 2.0
def build(self, hp):
        inputs = keras.Input(shape=(INPUT_DIM,))
        x = inputs
        for i in range(hp.Int('num_layers', 1, 4)):
            x = keras.layers.Dense(
                units=hp.Int('units_' + str(i), 5, 9, 1, default=6),
                activation='relu')(x)
        outputs = keras.layers.Dense(NUM_CLASSES, activation='softmax')(x)
        model = keras.Model(inputs, outputs)
        model.compile(
            optimizer=keras.optimizers.Adam(
                hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])
        return model 
Example #24
Source File: tuner_workflows_test.py    From keras-tuner with Apache License 2.0
def test_static_space(tmp_dir):

    def build_model_static(hp):
        inputs = keras.Input(shape=(INPUT_DIM,))
        x = inputs
        for i in range(hp.get('num_layers')):
            x = keras.layers.Dense(
                units=hp.get('units_' + str(i)),
                activation='relu')(x)
        outputs = keras.layers.Dense(NUM_CLASSES, activation='softmax')(x)
        model = keras.Model(inputs, outputs)
        model.compile(
            optimizer=keras.optimizers.Adam(
                hp.get('learning_rate')),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])
        return model

    hp = kerastuner.HyperParameters()
    hp.Int('num_layers', 1, 3, 1, default=2)
    hp.Int('units_0', 4, 6, 1, default=5)
    hp.Int('units_1', 4, 6, 1, default=5)
    hp.Int('units_2', 4, 6, 1, default=5)
    hp.Choice('learning_rate', [0.01, 0.001])
    tuner = kerastuner.tuners.RandomSearch(
        build_model_static,
        objective='val_accuracy',
        max_trials=4,
        directory=tmp_dir,
        hyperparameters=hp,
        allow_new_entries=False)

    assert tuner.oracle.hyperparameters == hp
    tuner.search(
        x=TRAIN_INPUTS,
        y=TRAIN_TARGETS,
        epochs=2,
        validation_data=(VAL_INPUTS, VAL_TARGETS))
    assert len(tuner.oracle.trials) == 4 
Example #25
Source File: hyperparameters_test.py    From keras-tuner with Apache License 2.0
def test_build_with_conditional_scope():

    def build_model(hp):
        model = hp.Choice('model', ['v1', 'v2'])
        with hp.conditional_scope('model', 'v1'):
            v1_params = {'layers': hp.Int('layers', 1, 3),
                         'units': hp.Int('units', 16, 32)}
        with hp.conditional_scope('model', 'v2'):
            v2_params = {'layers': hp.Int('layers', 2, 4),
                         'units': hp.Int('units', 32, 64)}

        params = v1_params if model == 'v1' else v2_params
        inputs = keras.Input(10)
        x = inputs
        for _ in range(params['layers']):
            x = keras.layers.Dense(params['units'])(x)
        outputs = keras.layers.Dense(1)(x)
        model = keras.Model(inputs, outputs)
        model.compile('sgd', 'mse')
        return model

    hp = hp_module.HyperParameters()
    build_model(hp)
    assert hp.values == {
        'model': 'v1',
        'layers': 1,
        'units': 16,
    } 
Example #26
Source File: tcn.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def build_model(sequence_length: int,
                channels: int,
                filters: List[int],
                num_classes: int,
                kernel_size: int,
                return_sequence: bool = False):
    """
    Builds a simple TCN model for a classification task.

    :param sequence_length: length of the input sequence
    :param channels: number of channels of the input sequence
    :param filters: number of conv filters per residual block
    :param num_classes: number of output classes
    :param kernel_size: size of the conv kernels
    :param return_sequence: whether to return the full output sequence or only its last element

    :return: a tf.keras model
    """

    inputs = Input(shape=(sequence_length, channels), name="inputs")
    tcn_block = TCN(filters, kernel_size, return_sequence)
    x = tcn_block(inputs)

    outputs = layers.Dense(num_classes,
                           activation="softmax",
                           name="output")(x)

    model = Model(inputs, outputs, name="tcn")

    print(f"Input sequence lenght: {sequence_length}, model receptive field: {tcn_block.receptive_field_size}")
    return model 
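A hedged call sketch (argument values are illustrative; the TCN residual-block class is defined elsewhere in the same module and is not shown here):

# model = build_model(sequence_length=128, channels=3,
#                     filters=[64, 64, 64], num_classes=10, kernel_size=3)
# model.summary()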
Example #27
Source File: load_keras_model.py    From checkmate with Apache License 2.0
def simple_model(input_shape=(224, 224, 3), num_classes: int = 1000):
    inputs = tf.keras.Input(shape=input_shape)
    x = tf.keras.layers.Conv2D(64, (3, 3), activation="relu", name="in_conv")(inputs)
    a = tf.keras.layers.Conv2D(128, (1, 1), activation="relu", name="conv1")(x)
    b = tf.keras.layers.Conv2D(128, (1, 1), activation="relu", name="conv2")(x)
    c = tf.keras.layers.Add(name="addc1c2")([a, b])
    d = tf.keras.layers.GlobalAveragePooling2D(name="flatten")(c)
    predictions = tf.keras.layers.Dense(num_classes, activation="softmax", name="predictions")(d)
    return tf.keras.Model(inputs=inputs, outputs=predictions) 
Example #28
Source File: load_keras_model.py    From checkmate with Apache License 2.0
def linear_model(i, input_shape=(224, 224, 3), num_classes=1000):
    input = tf.keras.Input(shape=input_shape)
    x = input
    # Use a separate loop variable so the layer count `i` is not shadowed.
    for j in range(i):
        x = tf.keras.layers.Conv2D(64, (3, 3), activation=None, use_bias=False, name="conv" + str(j))(x)
    d = tf.keras.layers.GlobalAveragePooling2D(name="flatten")(x)
    predictions = tf.keras.layers.Dense(num_classes, activation="softmax", name="predictions")(d)
    return tf.keras.Model(inputs=input, outputs=predictions) 
Example #29
Source File: load_keras_model.py    From checkmate with Apache License 2.0
def testBertModel(num_layers, heads, input_size):
    hidden_size = input_size[1]
    intermediate_size = 4 * hidden_size
    seq_length = input_size[0]
    inputs = keras.Input(shape=input_size)
    x = inputs
    for i in range(num_layers):
        query = Dense(hidden_size, name="query_{}".format(i))(x)
        key = Dense(hidden_size, name="key_{}".format(i))(x)
        value = Dense(hidden_size, name="value_{}".format(i))(x)
        query = Reshape((heads, seq_length, hidden_size // heads))(query)
        key = Reshape((heads, hidden_size // heads, seq_length))(key)
        value = Reshape((heads, seq_length, hidden_size // heads))(value)
        acts = Lambda(lambda x: tf.matmul(x[0], x[1]), name="acts_{}".format(i))([query, key])
        fin = Lambda(lambda x: tf.matmul(x[0], x[1]), name="fin_{}".format(i))([acts, value])
        fin = Reshape((seq_length, hidden_size))(fin)
        # layer.append(TFBertSelfAttention(config, name="layer_{}".format(i)))
        att = Dense(hidden_size, name="att_{}".format(i))(fin)
        relu = Activation("relu", name="relu0_{}".format(i))(att)
        x = LayerNormalization(name="f_att_{}".format(i))(relu + x)
        inter = Dense(intermediate_size, name="inter_{}".format(i))(x)
        relu1 = Activation("relu", name="relu1_{}".format(i))(inter)
        shrink = Dense(hidden_size, name="shrink_{}".format(i))(relu1)
        relu2 = Activation("relu", name="relu2_{}".format(i))(shrink)
        x = LayerNormalization(name="layer_out_{}".format(i))(x + relu2)
    return keras.Model(inputs=inputs, outputs=x) 
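A hedged usage sketch; hidden_size (input_size[1]) must be divisible by heads, because the Reshape layers split the hidden dimension across attention heads:

# seq_length=128, hidden_size=256, and 256 is divisible by 4 heads
model = testBertModel(num_layers=2, heads=4, input_size=(128, 256))
model.summary()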
Example #30
Source File: tf2nn.py    From RLs with Apache License 2.0
def __init__(self, vector_dim, output_shape, hidden_units):
        super().__init__()
        self.share = mlp(hidden_units['share'], out_layer=False)
        self.mu = mlp(hidden_units['mu'], output_shape=output_shape, out_activation='tanh')
        self.v = mlp(hidden_units['v'], output_shape=1, out_activation=None)
        self(I(shape=vector_dim))