Python tensorflow.keras.layers.Input() Examples

The following are 30 code examples of tensorflow.keras.layers.Input(), drawn from open-source projects. The originating project and source file are noted above each example.
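Before the project examples, here is a minimal, self-contained sketch of the typical Input() pattern: Input() returns a symbolic tensor describing the shape of one sample (the batch dimension is implicit), and the functional API chains layers on it to build a Model. The layer sizes and names below are illustrative only, not taken from any project on this page.

import tensorflow as tf
from tensorflow.keras.layers import Input, Dense

inputs = Input(shape=(16,), name='features')   # one sample = 16 float features
hidden = Dense(32, activation='relu')(inputs)
outputs = Dense(1, activation='sigmoid')(hidden)

# The functional API ties the symbolic graph into a trainable model.
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.summary()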
Example #1
Source File: test_generator_evaluator.py    From deepchem with MIT License
def test_compute_model_performance_multitask_classifier(self):
    n_data_points = 20
    n_features = 1
    n_tasks = 2
    n_classes = 2

    X = np.ones(shape=(n_data_points // 2, n_features)) * -1
    X1 = np.ones(shape=(n_data_points // 2, n_features))
    X = np.concatenate((X, X1))
    class_1 = np.array([[0.0, 1.0] for x in range(int(n_data_points / 2))])
    class_0 = np.array([[1.0, 0.0] for x in range(int(n_data_points / 2))])
    y1 = np.concatenate((class_0, class_1))
    y2 = np.concatenate((class_1, class_0))
    y = np.stack([y1, y2], axis=1)
    dataset = NumpyDataset(X, y)

    features = layers.Input(shape=(n_features,))  # per-sample feature dimension
    dense = layers.Dense(n_tasks * n_classes)(features)
    logits = layers.Reshape((n_tasks, n_classes))(dense)
    output = layers.Softmax()(logits)
    keras_model = tf.keras.Model(inputs=features, outputs=[output, logits])
    model = dc.models.KerasModel(
        keras_model,
        dc.models.losses.SoftmaxCrossEntropy(),
        output_types=['prediction', 'loss'],
        learning_rate=0.01,
        batch_size=n_data_points)

    model.fit(dataset, nb_epoch=1000)
    metric = dc.metrics.Metric(
        dc.metrics.roc_auc_score, np.mean, mode="classification")

    scores = model.evaluate_generator(
        model.default_generator(dataset), [metric], per_task_metrics=True)
    scores = list(scores[1].values())
    # Loosening atol to see if tests stop failing sporadically
    assert np.all(np.isclose(scores, [1.0, 1.0], atol=0.50)) 
Example #2
Source File: test_continuous.py    From keras-rl2 with MIT License
def test_ddpg():
    # TODO: replace this with a simpler environment where we can actually test if it finds a solution
    env = gym.make('Pendulum-v0')
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.shape[0]

    actor = Sequential()
    actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(nb_actions))
    actor.add(Activation('linear'))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
    flattened_observation = Flatten()(observation_input)
    x = Concatenate()([action_input, flattened_observation])
    x = Dense(16)(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)
    
    memory = SequentialMemory(limit=1000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3)
    agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                      memory=memory, nb_steps_warmup_critic=50, nb_steps_warmup_actor=50,
                      random_process=random_process, gamma=.99, target_model_update=1e-3)
    agent.compile([Adam(lr=1e-3), Adam(lr=1e-3)])

    agent.fit(env, nb_steps=400, visualize=False, verbose=0, nb_max_episode_steps=100)
    h = agent.test(env, nb_episodes=2, visualize=False, nb_max_episode_steps=100)
    # TODO: evaluate history 
Example #3
Source File: dqn.py    From keras-rl2 with MIT License
def compute_output_shape(self, input_shape):
        if len(input_shape) != 3:
            raise RuntimeError("Expects 3 inputs: L, mu, a")
        for i, shape in enumerate(input_shape):
            if len(shape) != 2:
                raise RuntimeError("Input {} has {} dimensions but should have 2".format(i, len(shape)))
        assert self.mode in ('full','diag')
        if self.mode == 'full':
            expected_elements = (self.nb_actions * self.nb_actions + self.nb_actions) // 2
        elif self.mode == 'diag':
            expected_elements = self.nb_actions
        else:
            expected_elements = None
        assert expected_elements is not None
        if input_shape[0][1] != expected_elements:
            raise RuntimeError("Input 0 (L) should have {} elements but has {}".format(input_shape[0][1]))
        if input_shape[1][1] != self.nb_actions:
            raise RuntimeError(
                "Input 1 (mu) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1]))
        if input_shape[2][1] != self.nb_actions:
            raise RuntimeError(
                "Input 2 (action) should have {} elements but has {}".format(self.nb_actions, input_shape[1][1]))
        return input_shape[0][0], 1 
Example #4
Source File: ml_agent.py    From Grid2Op with Mozilla Public License 2.0
def construct_q_network(self):
        # The convolution layers are replaced by dense layers, and the sizes
        # of the input and output spaces are changed.

        # Uses the network architecture found in the DeepMind paper
        input_layer = Input(shape=(self.observation_size * self.training_param.NUM_FRAMES,))
        layer1 = Dense(self.observation_size * self.training_param.NUM_FRAMES)(input_layer)
        layer1 = Activation('relu')(layer1)
        layer2 = Dense(self.observation_size)(layer1)
        layer2 = Activation('relu')(layer2)
        layer3 = Dense(self.observation_size)(layer2)
        layer3 = Activation('relu')(layer3)
        layer4 = Dense(2 * self.action_size)(layer3)
        layer4 = Activation('relu')(layer4)
        output = Dense(self.action_size)(layer4)

        self.model = Model(inputs=[input_layer], outputs=[output])
        self.model.compile(loss='mse', optimizer=Adam(lr=self.lr_))

        # Clone the online network so the target network owns separate weights;
        # wrapping the same tensors in a second Model would share all layers
        # (requires `from tensorflow.keras.models import clone_model`).
        self.target_model = clone_model(self.model)
        self.target_model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
        self.target_model.set_weights(self.model.get_weights()) 
Example #5
Source File: dqn-cartpole-9.6.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def build_model(self, n_inputs, n_outputs):
        """Q Network is 256-256-256 MLP

        Arguments:
            n_inputs (int): input dim
            n_outputs (int): output dim

        Return:
            q_model (Model): DQN
        """
        inputs = Input(shape=(n_inputs, ), name='state')
        x = Dense(256, activation='relu')(inputs)
        x = Dense(256, activation='relu')(x)
        x = Dense(256, activation='relu')(x)
        x = Dense(n_outputs,
                  activation='linear', 
                  name='action')(x)
        q_model = Model(inputs, x)
        q_model.summary()
        return q_model 
Example #6
Source File: test_dqn.py    From keras-rl2 with MIT License
def test_single_continuous_dqn_input():
    nb_actions = 2

    V_model = Sequential()
    V_model.add(Flatten(input_shape=(2, 3)))
    V_model.add(Dense(1))

    mu_model = Sequential()
    mu_model.add(Flatten(input_shape=(2, 3)))
    mu_model.add(Dense(nb_actions))

    L_input = Input(shape=(2, 3))
    L_input_action = Input(shape=(nb_actions,))
    x = Concatenate()([Flatten()(L_input), L_input_action])
    x = Dense(((nb_actions * nb_actions + nb_actions) // 2))(x)
    L_model = Model(inputs=[L_input_action, L_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
                     memory=memory, nb_steps_warmup=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10) 
Example #7
Source File: ml_agent.py    From Grid2Op with Mozilla Public License 2.0
def _build_q_NN(self):
        input_states = Input(shape=(self.observation_size,))
        input_action = Input(shape=(self.action_size,))
        input_layer = Concatenate()([input_states, input_action])
        
        lay1 = Dense(self.observation_size)(input_layer)
        lay1 = Activation('relu')(lay1)
        
        lay2 = Dense(self.observation_size)(lay1)
        lay2 = Activation('relu')(lay2)
        
        lay3 = Dense(2*self.action_size)(lay2)
        lay3 = Activation('relu')(lay3)
        
        advantage = Dense(1, activation='linear')(lay3)
        
        model = Model(inputs=[input_states, input_action], outputs=[advantage])
        model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
        
        return model 
Example #8
Source File: seqtoseq.py    From deepchem with MIT License
def _create_encoder(self, n_layers, dropout):
    """Create the encoder as a tf.keras.Model."""
    input = self._create_features()
    gather_indices = Input(shape=(2,), dtype=tf.int32)
    prev_layer = input
    for i in range(len(self._filter_sizes)):
      filter_size = self._filter_sizes[i]
      kernel_size = self._kernel_sizes[i]
      if dropout > 0.0:
        prev_layer = Dropout(rate=dropout)(prev_layer)
      prev_layer = Conv1D(
          filters=filter_size, kernel_size=kernel_size,
          activation=tf.nn.relu)(prev_layer)
    prev_layer = Flatten()(prev_layer)
    prev_layer = Dense(
        self._decoder_dimension, activation=tf.nn.relu)(prev_layer)
    prev_layer = BatchNormalization()(prev_layer)
    return tf.keras.Model(inputs=[input, gather_indices], outputs=prev_layer) 
Example #9
Source File: llr.py    From alibi-detect with Apache License 2.0
def build_model(dist: Union[Distribution, PixelCNN], input_shape: tuple = None, filepath: str = None) \
        -> Tuple[tf.keras.Model, Union[Distribution, PixelCNN]]:
    """
    Create tf.keras.Model from TF distribution.

    Parameters
    ----------
    dist
        TensorFlow distribution.
    input_shape
        Input shape of the model.
    filepath
        Optional path to stored model weights; if given, the weights are loaded.

    Returns
    -------
    Tuple of the TensorFlow model and the distribution.
    """
    x_in = Input(shape=input_shape)
    log_prob = dist.log_prob(x_in)
    model = Model(inputs=x_in, outputs=log_prob)
    model.add_loss(-tf.reduce_mean(log_prob))
    if isinstance(filepath, str):
        model.load_weights(filepath)
    return model, dist 
Example #10
Source File: test_ddpg.py    From keras-rl2 with MIT License
def test_single_ddpg_input():
    nb_actions = 2

    actor = Sequential()
    actor.add(Flatten(input_shape=(2, 3)))
    actor.add(Dense(nb_actions))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(2, 3), name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    x = Dense(1)(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = DDPGAgent(actor=actor, critic=critic, critic_action_input=action_input, memory=memory,
                      nb_actions=2, nb_steps_warmup_critic=5, nb_steps_warmup_actor=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10) 
Example #11
Source File: qlayers_test.py    From qkeras with Apache License 2.0
def qdense_util(layer_cls,
                kwargs=None,
                input_data=None,
                weight_data=None,
                expected_output=None):
  """qlayer test utility."""
  input_shape = input_data.shape
  input_dtype = input_data.dtype
  layer = layer_cls(**kwargs)
  x = Input(shape=input_shape[1:], dtype=input_dtype)
  y = layer(x)
  layer.set_weights(weight_data)
  model = Model(x, y)
  actual_output = model.predict(input_data)
  if expected_output is not None:
    assert_allclose(actual_output, expected_output, rtol=1e-4) 
Example #12
Source File: ml_agent.py    From Grid2Op with Mozilla Public License 2.0
def construct_q_network(self):
        # construct double Q networks
        self.model_Q = self._build_q_NN()
        self.model_Q2 = self._build_q_NN()

        # state value function approximation
        self.model_value = self._build_model_value()
        self.model_value_target = self._build_model_value()
        self.model_value_target.set_weights(self.model_value.get_weights())

        # policy function approximation
        # proba of choosing action a depending on policy pi
        input_states = Input(shape=(self.observation_size,))
        lay1 = Dense(self.observation_size)(input_states)
        lay1 = Activation('relu')(lay1)
        lay2 = Dense(self.observation_size)(lay1)
        lay2 = Activation('relu')(lay2)
        lay3 = Dense(2*self.action_size)(lay2)
        lay3 = Activation('relu')(lay3)
        soft_proba = Dense(self.action_size, activation="softmax", kernel_initializer='uniform')(lay3)
        self.model_policy = Model(inputs=[input_states], outputs=[soft_proba])
        self.model_policy.compile(loss='categorical_crossentropy', optimizer=Adam(lr=self.lr_))
        
        print("Successfully constructed networks.") 
Example #13
Source File: vgg.py    From Advanced-Deep-Learning-with-Keras with MIT License
def build_model(self):
        """Model builder uses a helper function
            make_layers to read the config dict and
            create a VGG network model
        """
        inputs = Input(shape=self.input_shape, name='x')
        x = VGG.make_layers(self.cfg, inputs)
        self._model = Model(inputs, x, name='VGG') 
Example #14
Source File: autoqkeras_test.py    From qkeras with Apache License 2.0
def dense_model():
  """Creates test dense model."""

  x = x_in = Input((4,), name="input")
  x = Dense(20, name="dense_0")(x)
  x = BatchNormalization(name="bn0")(x)
  x = Dropout(0.1, name="dp0")(x)
  x = Activation("relu", name="relu_0")(x)
  x = Dense(3, name="dense")(x)
  x = Activation("softmax", name="softmax")(x)

  model = Model(inputs=x_in, outputs=x)
  return model 
Example #15
Source File: vgg.py    From Advanced-Deep-Learning-with-Keras with MIT License
def __init__(self, cfg, input_shape=(24, 24, 1)):
        """VGG network model creator to be used as backbone
            feature extractor

        Arguments:
            cfg (dict): Summarizes the network configuration
            input_shape (list): Input image dims
        """
        self.cfg = cfg
        self.input_shape = input_shape
        self._model = None
        self.build_model() 
Example #16
Source File: resnet.py    From Advanced-Deep-Learning-with-Keras with MIT License
def build_resnet(input_shape,
                 n_layers=4,
                 version=2,
                 n=6):
    """Build a resnet as backbone of SSD

    # Arguments:
        input_shape (list): Input image size and channels
        n_layers (int): Number of feature layers for SSD
        version (int): Supports ResNet v1 and v2; v2 by default
        n (int): Determines number of ResNet layers
                 (Default is ResNet50)

    # Returns
        model (Keras Model)

    """
    # computed depth from supplied model parameter n
    if version == 1:
        depth = n * 6 + 2
    elif version == 2:
        depth = n * 9 + 2

    # model name, depth and version
    # input_shape (h, w, 3)
    if version==1:
        model = resnet_v1(input_shape=input_shape,
                          depth=depth,
                          n_layers=n_layers)
    else:
        model = resnet_v2(input_shape=input_shape,
                          depth=depth,
                          n_layers=n_layers)
    return model 
Example #17
Source File: dcgan-mnist-4.2.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def build_discriminator(inputs):
    """Build a Discriminator Model

    Stack of LeakyReLU-Conv2D to discriminate real from fake.
    The network does not converge with batch normalization, so it is not
    used here, unlike in [1] or the original paper.

    Arguments:
        inputs (Layer): Input layer of the discriminator (the image)

    Returns:
        discriminator (Model): Discriminator Model
    """
    kernel_size = 5
    layer_filters = [32, 64, 128, 256]

    x = inputs
    for filters in layer_filters:
        # first 3 convolution layers use strides = 2
        # last one uses strides = 1
        if filters == layer_filters[-1]:
            strides = 1
        else:
            strides = 2
        x = LeakyReLU(alpha=0.2)(x)
        x = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same')(x)

    x = Flatten()(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    discriminator = Model(inputs, x, name='discriminator')
    return discriminator 
Example #18
Source File: test_preprocess.py    From alibi-detect with Apache License 2.0
def model2():
    x_in = Input(shape=shape)
    x = Dense(n_hidden)(x_in)
    x_out = Dense(n_classes, activation='softmax')(x)
    return tf.keras.models.Model(inputs=x_in, outputs=x_out) 
Example #19
Source File: fcnet_pretraining.py    From deepchem with MIT License
def _build_graph(self):
    inputs = Input(dtype=tf.float32, shape=(self.feature_dim,), name="Input")
    out1 = Dense(units=self.hidden_layer_size, activation='relu')(inputs)

    final = Dense(units=self.n_tasks, activation='sigmoid')(out1)
    outputs = [final]
    output_types = ['prediction']
    loss = dc.models.losses.BinaryCrossEntropy()

    model = tf.keras.Model(inputs=[inputs], outputs=outputs)
    return model, loss, output_types 
Example #20
Source File: print_qstats_test.py    From qkeras with Apache License 2.0
def create_network():
  xi = Input((28, 28, 1))
  x = Conv2D(32, (3, 3))(xi)
  x = Activation("relu")(x)
  x = Conv2D(32, (3, 3), activation="relu")(x)
  x = Activation("softmax")(x)
  return Model(inputs=xi, outputs=x) 
Example #21
Source File: dmnist.py    From qkeras with Apache License 2.0
def QDenseModel(weights_f, load_weights=False):
  """Construct QDenseModel."""

  x = x_in = Input((28*28,), name="input")
  x = QActivation("quantized_relu(2)", name="act_i")(x)

  x = Dense(300, name="d0")(x)
  x = BatchNormalization(name="bn0")(x)

  x = QActivation("quantized_relu(2)", name="act0_m")(x)

  x = Dense(100, name="d1")(x)
  x = BatchNormalization(name="bn0")(x)

  x = QActivation("quantized_relu(2)", name="act0_m")(x)

  x = Flatten(name="flatten")(x)

  x = QDense(
      NB_CLASSES,
      kernel_quantizer=quantized_bits(4, 0, 1),
      bias_quantizer=quantized_bits(4, 0, 1),
      name="dense2")(x)
  x = Activation("softmax", name="softmax")(x)

  model = Model(inputs=[x_in], outputs=[x])
  model.summary()
  model.compile(loss="categorical_crossentropy",
                optimizer=OPTIMIZER, metrics=["accuracy"])

  if load_weights and weights_f:
    model.load_weights(weights_f)

  return model 
Example #22
Source File: vgg.py    From Advanced-Deep-Learning-with-Keras with MIT License
def make_layers(cfg,
                    inputs, 
                    batch_norm=True, 
                    in_channels=1):
        """Helper function to ease the creation of VGG
            network model

        Arguments:
            cfg (dict): Summarizes the network layer 
                configuration
            inputs (tensor): Input from previous layer
            batch_norm (bool): Whether to use batch norm
                between Conv2D and ReLU
            in_channels (int): Number of input channels
        """
        x = inputs
        for layer in cfg:
            if layer == 'M':
                x = MaxPooling2D()(x)
            elif layer == 'A':
                x = AveragePooling2D(pool_size=3)(x)
            else:
                x = Conv2D(layer,
                           kernel_size=3,
                           padding='same',
                           kernel_initializer='he_normal'
                           )(x)
                if batch_norm:
                    x = BatchNormalization()(x)
                x = Activation('relu')(x)
    
        return x 
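For orientation, a hedged usage sketch of make_layers follows. The cfg list here is hypothetical (integers are Conv2D filter counts; 'M' and 'A' insert max and average pooling, as handled above); only the (24, 24, 1) input shape comes from the class default.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

cfg = [64, 64, 'M', 128, 128, 'A']      # hypothetical VGG-style configuration
inputs = Input(shape=(24, 24, 1))
outputs = VGG.make_layers(cfg, inputs)  # builds Conv2D/BN/ReLU and pooling stages
backbone = Model(inputs, outputs, name='vgg_backbone')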
Example #23
Source File: test_mmd.py    From alibi-detect with Apache License 2.0
def mymodel(shape):
    x_in = Input(shape=shape)
    x = Dense(n_hidden)(x_in)
    x_out = Dense(n_classes, activation='softmax')(x)
    return tf.keras.models.Model(inputs=x_in, outputs=x_out) 
Example #24
Source File: example_qdense.py    From qkeras with Apache License 2.0
def QDenseModel(weights_f, load_weights=False):
  """Construct QDenseModel."""

  x = x_in = Input((RESHAPED,), name="input")
  x = QActivation("quantized_relu(4)", name="act_i")(x)
  x = QDense(N_HIDDEN, kernel_quantizer=ternary(),
             bias_quantizer=quantized_bits(4, 0, 1), name="dense0")(x)
  x = QActivation("quantized_relu(2)", name="act0")(x)
  x = QDense(
      NB_CLASSES,
      kernel_quantizer=quantized_bits(4, 0, 1),
      bias_quantizer=quantized_bits(4, 0, 1),
      name="dense2")(x)
  x = Activation("softmax", name="softmax")(x)

  model = Model(inputs=[x_in], outputs=[x])
  model.summary()
  model.compile(loss="categorical_crossentropy",
                optimizer=OPTIMIZER, metrics=["accuracy"])

  if load_weights and weights_f:
    model.load_weights(weights_f)

  print_qstats(model)
  return model 
Example #25
Source File: example_mnist_prune.py    From qkeras with Apache License 2.0
def build_model(input_shape):
    x = x_in = Input(shape=input_shape, name="input")
    x = QConv2D(
        32, (2, 2), strides=(2,2),
        kernel_quantizer=quantized_bits(4,0,1),
        bias_quantizer=quantized_bits(4,0,1),
        name="conv2d_0_m")(x)
    x = QActivation("quantized_relu(4,0)", name="act0_m")(x)
    x = QConv2D(
        64, (3, 3), strides=(2,2),
        kernel_quantizer=quantized_bits(4,0,1),
        bias_quantizer=quantized_bits(4,0,1),
        name="conv2d_1_m")(x)
    x = QActivation("quantized_relu(4,0)", name="act1_m")(x)
    x = QConv2D(
        64, (2, 2), strides=(2,2),
        kernel_quantizer=quantized_bits(4,0,1),
        bias_quantizer=quantized_bits(4,0,1),
        name="conv2d_2_m")(x)
    x = QActivation("quantized_relu(4,0)", name="act2_m")(x)
    x = Flatten()(x)
    x = QDense(num_classes, kernel_quantizer=quantized_bits(4,0,1),
               bias_quantizer=quantized_bits(4,0,1),
               name="dense")(x)
    x = Activation("softmax", name="softmax")(x)

    model = Model(inputs=[x_in], outputs=[x])
    return model 
Example #26
Source File: ml_agent.py    From Grid2Op with Mozilla Public License 2.0
def _build_model_value(self):
        input_states = Input(shape=(self.observation_size,))
        lay1 = Dense(self.observation_size)(input_states)
        lay1 = Activation('relu')(lay1)

        lay3 = Dense(2 * self.action_size)(lay1)
        lay3 = Activation('relu')(lay3)
        advantage = Dense(self.action_size, activation='relu')(lay3)
        state_value = Dense(1, activation='linear')(advantage)
        model = Model(inputs=[input_states], outputs=[state_value])
        model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
        return model 
Example #27
Source File: ml_agent.py    From Grid2Op with Mozilla Public License 2.0
def construct_q_network(self):
        # Uses the network architecture found in the DeepMind paper.
        # The input and output sizes have changed, and the convolution
        # layers are replaced by dense layers.

        input_layer = Input(shape=(self.observation_size*self.training_param.NUM_FRAMES,))
        lay1 = Dense(self.observation_size*self.training_param.NUM_FRAMES)(input_layer)
        lay1 = Activation('relu')(lay1)
        
        lay2 = Dense(self.observation_size)(lay1)
        lay2 = Activation('relu')(lay2)
        
        lay3 = Dense(2*self.action_size)(lay2)
        lay3 = Activation('relu')(lay3)
        
        fc1 = Dense(self.action_size)(lay3)
        advantage = Dense(self.action_size)(fc1)
        fc2 = Dense(self.action_size)(lay3)
        value = Dense(1)(fc2)
        
        # Dueling aggregation: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a));
        # keepdims=True keeps the mean broadcastable against `advantage`.
        meaner = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))
        mn_ = meaner(advantage)
        tmp = subtract([advantage, mn_])
        policy = add([tmp, value])

        self.model = Model(inputs=[input_layer], outputs=[policy])
        self.model.compile(loss='mse', optimizer=Adam(lr=self.lr_))

        # Clone so the target network owns separate weights, then sync them
        # (requires `from tensorflow.keras.models import clone_model`).
        self.target_model = clone_model(self.model)
        self.target_model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
        self.target_model.set_weights(self.model.get_weights())
        print("Successfully constructed networks.")


# This class implements the "Sof Actor Critic" model.
# It is a custom implementation, courtesy to Clement Goubet
# The original paper is: https://arxiv.org/abs/1801.01290 
Example #28
Source File: prelim.py    From DeepXi with Mozilla Public License 2.0
def __init__(
		self,
		n_feat,
		network
		):
		self.n_feat = n_feat
		self.n_outp = self.n_feat
		if self.n_feat < 5: raise ValueError('More input features are required for this example.')
		self.inp = Input(name='inp', shape=[None, self.n_feat], dtype='float32')
		self.mask = tf.keras.layers.Masking(mask_value=0.0)(self.inp)
		if network == 'ResNet': self.network = ResNet(self.mask, self.n_outp, B=40, d_model=256, d_f=64, k=3, max_d_rate=16)
		elif network == 'ResLSTM': self.network = ResLSTM(self.mask, self.n_outp, n_blocks=3, d_model=256)
		else: raise ValueError('Invalid network type.')
		self.model = Model(inputs=self.inp, outputs=self.network.outp)
		self.model.summary() 
Example #29
Source File: models.py    From neuron with GNU General Public License v3.0
def DenseLayerNet(inshape, layer_sizes, nb_labels=2, activation='relu', final_activation='softmax', dropout=None, batch_norm=None):
    """
    A densenet that connects a set of dense layers to  a classification
    output. 
    if nb_labels is 0 assume it is a regression net and use linear activation
    (if None specified)
    """
    inputs = KL.Input(shape=inshape, name='input')
    prev_layer = KL.Flatten(name='flat_inputs')(inputs)
    # to prevent overfitting include some kernel and bias regularization
    kreg = keras.regularizers.l1_l2(l1=1e-5, l2=1e-4)
    breg = keras.regularizers.l2(1e-4)

    # connect the list of dense layers to each other
    for lno, layer_size in enumerate(layer_sizes):
        prev_layer = KL.Dense(layer_size, name='dense%d' % lno, activation=activation, kernel_regularizer=kreg, bias_regularizer=breg)(prev_layer)
        if dropout is not None:
            prev_layer = KL.Dropout(dropout, name='dropout%d'%lno)(prev_layer)
        if batch_norm is not None:
            prev_layer = KL.BatchNormalization(name='BatchNorm%d'%lno)(prev_layer)
            
    # tie the previous dense layer to a onehot encoded output layer
    last_layer = KL.Dense(nb_labels, name='last_dense', activation=final_activation)(prev_layer)

    model = keras.models.Model(inputs=inputs, outputs=last_layer)
    return model


###############################################################################
# Helper function
############################################################################### 
Example #30
Source File: feature_aggregation_similarity_model.py    From redshells with MIT License
def __init__(self,
                 feature_size: int,
                 embedding_size: int,
                 item_size: int,
                 max_feature_index: int,
                 embeddings_initializer=None,
                 bias_embeddings_initializer=None,
                 embeddings_regularizer=None):
        embeddings_initializer = embeddings_initializer or tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.005)
        embeddings_regularizer = embeddings_regularizer or tf.keras.regularizers.l2(0.0001)
        bias_embeddings_initializer = bias_embeddings_initializer or tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.005)
        self.input_x_index = layers.Input(shape=(1, ), name='input_x_index')
        self.input_y_index = layers.Input(shape=(1, ), name='input_y_index')
        self.input_x_feature = layers.Input(shape=(feature_size, ), name='input_x_feature')
        self.input_y_feature = layers.Input(shape=(feature_size, ), name='input_y_feature')

        self.embedding = layers.Embedding(
            max_feature_index + 1,
            embedding_size,
            mask_zero=True,
            embeddings_initializer=embeddings_initializer,
            embeddings_regularizer=embeddings_regularizer,
        )
        self.bias_embedding = tf.keras.layers.Embedding(
            item_size + 1,
            1,
            mask_zero=True,
            embeddings_initializer=bias_embeddings_initializer,
        )

        self.embedding_x = self.average(self.embedding(self.input_x_feature))
        self.embedding_y = self.average(self.embedding(self.input_y_feature))
        self.bias_x = self.average(self.bias_embedding(self.input_x_index))
        self.bias_y = self.average(self.bias_embedding(self.input_y_index))

        self.inner_prod = tf.keras.layers.dot([self.embedding_x, self.embedding_y], axes=1, normalize=True)
        self.similarity = tf.keras.layers.add([self.inner_prod, self.bias_x, self.bias_y])
        self.similarity = self.clip(self.similarity)