Python tensorflow.keras.models.Model() Examples

The following are 30 code examples of tensorflow.keras.models.Model(). They are drawn from open-source projects; the source file, project, and license are listed above each example.
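For reference, every example below follows the same functional-API pattern: create one or more Input tensors, chain layers by calling them on tensors, and tie inputs to outputs with Model(). A minimal, self-contained sketch:

from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

inputs = Input(shape=(4,))                      # a batch of 4-dimensional feature vectors
x = Dense(8, activation='relu')(inputs)         # hidden layer
outputs = Dense(1, activation='sigmoid')(x)     # binary-classification head
model = Model(inputs=inputs, outputs=outputs)   # ties the layer graph together
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()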
Example #1
Source File: ml_agent.py    From Grid2Op with Mozilla Public License 2.0
def construct_q_network(self):
        # construct double Q networks
        self.model_Q = self._build_q_NN()
        self.model_Q2 = self._build_q_NN()

        # state value function approximation
        self.model_value = self._build_model_value()
        self.model_value_target = self._build_model_value()
        self.model_value_target.set_weights(self.model_value.get_weights())

        # policy function approximation
        # probability of choosing action a under policy pi
        input_states = Input(shape=(self.observation_size,))
        lay1 = Dense(self.observation_size)(input_states)
        lay1 = Activation('relu')(lay1)
        lay2 = Dense(self.observation_size)(lay1)
        lay2 = Activation('relu')(lay2)
        lay3 = Dense(2*self.action_size)(lay2)
        lay3 = Activation('relu')(lay3)
        soft_proba = Dense(self.action_size, activation="softmax", kernel_initializer='uniform')(lay3)
        self.model_policy = Model(inputs=[input_states], outputs=[soft_proba])
        self.model_policy.compile(loss='categorical_crossentropy', optimizer=Adam(lr=self.lr_))
        
        print("Successfully constructed networks.") 
Example #2
Source File: dqn-cartpole-9.6.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def build_model(self, n_inputs, n_outputs):
        """Q Network is 256-256-256 MLP

        Arguments:
            n_inputs (int): input dim
            n_outputs (int): output dim

        Return:
            q_model (Model): DQN
        """
        inputs = Input(shape=(n_inputs, ), name='state')
        x = Dense(256, activation='relu')(inputs)
        x = Dense(256, activation='relu')(x)
        x = Dense(256, activation='relu')(x)
        x = Dense(n_outputs,
                  activation='linear', 
                  name='action')(x)
        q_model = Model(inputs, x)
        q_model.summary()
        return q_model 
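A hedged usage sketch (state, np, and the dimensions are assumed): greedy action selection takes the argmax over the Q-values the network predicts for a single state.

q_model = self.build_model(n_inputs=4, n_outputs=2)   # e.g. CartPole dimensions
q_values = q_model.predict(state[np.newaxis, :])      # add a batch dimension
action = np.argmax(q_values[0])                       # greedy policy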
Example #3
Source File: test_continuous.py    From keras-rl2 with MIT License
def test_ddpg():
    # TODO: replace this with a simpler environment where we can actually test if it finds a solution
    env = gym.make('Pendulum-v0')
    np.random.seed(123)
    env.seed(123)
    random.seed(123)
    nb_actions = env.action_space.shape[0]

    actor = Sequential()
    actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    actor.add(Dense(16))
    actor.add(Activation('relu'))
    actor.add(Dense(nb_actions))
    actor.add(Activation('linear'))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
    flattened_observation = Flatten()(observation_input)
    x = Concatenate()([action_input, flattened_observation])
    x = Dense(16)(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('linear')(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)
    
    memory = SequentialMemory(limit=1000, window_length=1)
    random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.3)
    agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                      memory=memory, nb_steps_warmup_critic=50, nb_steps_warmup_actor=50,
                      random_process=random_process, gamma=.99, target_model_update=1e-3)
    agent.compile([Adam(lr=1e-3), Adam(lr=1e-3)])

    agent.fit(env, nb_steps=400, visualize=False, verbose=0, nb_max_episode_steps=100)
    h = agent.test(env, nb_episodes=2, visualize=False, nb_max_episode_steps=100)
    # TODO: evaluate history 
Example #4
Source File: adversarialae.py    From alibi-detect with Apache License 2.0
def __init__(self, model: tf.keras.Model, hidden_layer: int, output_dim: int, hidden_dim: int = None) -> None:
        """
        Dense layer that extracts the feature map of a hidden layer in a model and computes
        output probabilities over that layer.

        Parameters
        ----------
        model
            tf.keras classification model.
        hidden_layer
            Hidden layer from model where feature map is extracted from.
        output_dim
            Output dimension for softmax layer.
        hidden_dim
            Dimension of optional additional dense layer.
        """
        super(DenseHidden, self).__init__()
        self.partial_model = Model(inputs=model.inputs, outputs=model.layers[hidden_layer].output)
        for layer in self.partial_model.layers:  # freeze model layers
            layer.trainable = False
        self.hidden_dim = hidden_dim
        if hidden_dim is not None:
            self.dense_layer = Dense(hidden_dim, activation=tf.nn.relu)
        self.output_layer = Dense(output_dim, activation=tf.nn.softmax) 
Example #5
Source File: test_ddpg.py    From keras-rl2 with MIT License
def test_single_ddpg_input():
    nb_actions = 2

    actor = Sequential()
    actor.add(Flatten(input_shape=(2, 3)))
    actor.add(Dense(nb_actions))

    action_input = Input(shape=(nb_actions,), name='action_input')
    observation_input = Input(shape=(2, 3), name='observation_input')
    x = Concatenate()([action_input, Flatten()(observation_input)])
    x = Dense(1)(x)
    critic = Model(inputs=[action_input, observation_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = DDPGAgent(actor=actor, critic=critic, critic_action_input=action_input, memory=memory,
                      nb_actions=2, nb_steps_warmup_critic=5, nb_steps_warmup_actor=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10) 
Example #6
Source File: llr.py    From alibi-detect with Apache License 2.0
def build_model(dist: Union[Distribution, PixelCNN], input_shape: tuple = None, filepath: str = None) \
        -> Tuple[tf.keras.Model, Union[Distribution, PixelCNN]]:
    """
    Create tf.keras.Model from TF distribution.

    Parameters
    ----------
    dist
        TensorFlow distribution.
    input_shape
        Input shape of the model.
    filepath
        Optional path from which to load the model weights.

    Returns
    -------
    TensorFlow model and the distribution.
    """
    x_in = Input(shape=input_shape)
    log_prob = dist.log_prob(x_in)
    model = Model(inputs=x_in, outputs=log_prob)
    model.add_loss(-tf.reduce_mean(log_prob))
    if isinstance(filepath, str):
        model.load_weights(filepath)
    return model, dist 
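Because the negative mean log-probability is attached via add_loss, the returned model can be trained with no explicit targets. A sketch under assumed names (dist and x_train are placeholders):

model, dist = build_model(dist, input_shape=(32, 32, 3))
model.compile(optimizer='adam')   # no loss argument: the add_loss term is used
model.fit(x_train, epochs=10)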
Example #7
Source File: test_dqn.py    From keras-rl2 with MIT License
def test_single_continuous_dqn_input():
    nb_actions = 2

    V_model = Sequential()
    V_model.add(Flatten(input_shape=(2, 3)))
    V_model.add(Dense(1))

    mu_model = Sequential()
    mu_model.add(Flatten(input_shape=(2, 3)))
    mu_model.add(Dense(nb_actions))

    L_input = Input(shape=(2, 3))
    L_input_action = Input(shape=(nb_actions,))
    x = Concatenate()([Flatten()(L_input), L_input_action])
    x = Dense(((nb_actions * nb_actions + nb_actions) // 2))(x)
    L_model = Model(inputs=[L_input_action, L_input], outputs=x)

    memory = SequentialMemory(limit=10, window_length=2)
    agent = NAFAgent(nb_actions=nb_actions, V_model=V_model, L_model=L_model, mu_model=mu_model,
                     memory=memory, nb_steps_warmup=5, batch_size=4)
    agent.compile('sgd')
    agent.fit(MultiInputTestEnv((3,)), nb_steps=10) 
Example #8
Source File: test_forgiving_factor.py    From qkeras with Apache License 2.0
def get_model():
  """Returns sample model."""
  xi = Input((28, 28, 1), name="input")   # pylint: disable=undefined-variable
  x = Conv2D(32, 3, strides=1, padding="same", name="c1")(xi)   # pylint: disable=undefined-variable
  x = BatchNormalization(name="b1")(x)   # pylint: disable=undefined-variable
  x = Activation("relu", name="a1")(x)   # pylint: disable=undefined-variable
  x = MaxPooling2D(2, 2, name="mp1")(x)   # pylint: disable=undefined-variable
  x = QConv2D(32, 3, kernel_quantizer="binary", bias_quantizer="binary",   # pylint: disable=undefined-variable
              strides=1, padding="same", name="c2")(x)
  x = QBatchNormalization(name="b2")(x)   # pylint: disable=undefined-variable
  x = QActivation("binary", name="a2")(x)   # pylint: disable=undefined-variable
  x = MaxPooling2D(2, 2, name="mp2")(x)   # pylint: disable=undefined-variable
  x = QConv2D(32, 3, kernel_quantizer="ternary", bias_quantizer="ternary",   # pylint: disable=undefined-variable
              strides=1, padding="same", activation="binary", name="c3")(x)
  x = Flatten(name="flatten")(x)   # pylint: disable=undefined-variable
  x = Dense(1, name="dense", activation="softmax")(x)   # pylint: disable=undefined-variable

  model = Model(inputs=xi, outputs=x)

  return model 
Example #9
Source File: deep_classifier.py    From nlp-journey with Apache License 2.0
def build_model(self):
        inputs = Input(shape=(self.max_len,))

        x = Embedding(len(self.embeddings),
                      300,
                      weights=[self.embeddings],
                      trainable=False)(inputs)

        x = Lambda(lambda t: tf.reduce_mean(t, axis=1))(x)
        x = Dense(128, activation='relu')(x)
        x = Dense(64, activation='relu')(x)
        x = Dense(16, activation='relu')(x)
        predictions = Dense(1, activation='sigmoid')(x)
        model = Model(inputs=inputs, outputs=predictions)
        model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        model.summary()
        return model 
Example #10
Source File: deep_classifier.py    From nlp-journey with Apache License 2.0
def build_model(self):
        # word part
        input_word = Input(shape=(int(self.max_len / 5),))
        x_word = Embedding(len(self.embeddings),
                           300,
                           weights=[self.embeddings],
                           trainable=False)(input_word)
        x_word = Bidirectional(LSTM(128, return_sequences=True))(x_word)
        x_word = VanillaRNNAttention(256)(x_word)
        model_word = Model(input_word, x_word)

        # Sentence part
        inputs = Input(shape=(self.max_len,))  # reshaped below to (5, self.max_len / 5): (max sentences per document, max words per sentence)
        reshape = Reshape((5, int(self.max_len / 5)))(inputs)
        x_sentence = TimeDistributed(model_word)(reshape)
        x_sentence = Bidirectional(LSTM(128, return_sequences=True))(x_sentence)
        x_sentence = VanillaRNNAttention(256)(x_sentence)

        output = Dense(1, activation='sigmoid')(x_sentence)
        model = Model(inputs=inputs, outputs=output)
        model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
        return model 
Example #11
Source File: deep_classifier.py    From nlp-journey with Apache License 2.0
def build_model(self):
        inputs = Input((self.max_len,))
        embedding = Embedding(len(self.embeddings),
                              300,
                              weights=[self.embeddings],
                              trainable=False)(inputs)
        x_context = Bidirectional(LSTM(128, return_sequences=True))(embedding)
        x = Concatenate()([embedding, x_context])
        cs = []
        for kernel_size in range(1, 5):
            c = Conv1D(128, kernel_size, activation='relu')(x)
            cs.append(c)
        pools = [GlobalAveragePooling1D()(c) for c in cs] + [GlobalMaxPooling1D()(c) for c in cs]
        x = Concatenate()(pools)
        output = Dense(1, activation='sigmoid')(x)
        model = Model(inputs=inputs, outputs=output)
        model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
        return model 
Example #12
Source File: model_triplet.py    From image_search_engine with MIT License
def text_model(vocab_size, lr=0.0001):
    input_2 = Input(shape=(None,))

    embed = Embedding(vocab_size, 50, name="embed")
    gru = Bidirectional(GRU(256, return_sequences=True), name="gru_1")
    dense_2 = Dense(vec_dim, activation="linear", name="dense_text_1")

    x2 = embed(input_2)
    x2 = gru(x2)
    x2 = GlobalMaxPool1D()(x2)
    x2 = dense_2(x2)

    _norm = Lambda(lambda x: K.l2_normalize(x, axis=-1))

    x2 = _norm(x2)

    model = Model([input_2], x2)

    model.compile(loss="mae", optimizer=Adam(lr))

    model.summary()

    return model 
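Since the output is L2-normalized, cosine similarity between two embeddings reduces to a dot product. A hypothetical comparison (np and the two padded token batches are assumed):

emb_a = model.predict(tokens_a)
emb_b = model.predict(tokens_b)
similarity = np.sum(emb_a * emb_b, axis=-1)   # cosine similarity of unit vectors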
Example #13
Source File: densenet121_resisc45.py    From armory with MIT License
def make_densenet121_resisc_model(**model_kwargs) -> tf.keras.Model:
    # Load ImageNet pre-trained DenseNet
    model_notop = DenseNet121(
        include_top=False, weights=None, input_shape=(224, 224, 3)
    )

    # Add new layers
    x = GlobalAveragePooling2D()(model_notop.output)
    predictions = Dense(num_classes, activation="softmax")(x)

    # Create graph of new model and freeze pre-trained layers
    new_model = Model(inputs=model_notop.input, outputs=predictions)

    for layer in new_model.layers[:-1]:
        layer.trainable = False
        if "bn" == layer.name[-2:]:  # allow batchnorm layers to be trainable
            layer.trainable = True

    # compile the model
    new_model.compile(
        optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
    )

    return new_model 
Example #14
Source File: ml_agent.py    From Grid2Op with Mozilla Public License 2.0
def _build_q_NN(self):
        input_states = Input(shape=(self.observation_size,))
        input_action = Input(shape=(self.action_size,))
        input_layer = Concatenate()([input_states, input_action])
        
        lay1 = Dense(self.observation_size)(input_layer)
        lay1 = Activation('relu')(lay1)
        
        lay2 = Dense(self.observation_size)(lay1)
        lay2 = Activation('relu')(lay2)
        
        lay3 = Dense(2*self.action_size)(lay2)
        lay3 = Activation('relu')(lay3)
        
        advantage = Dense(1, activation='linear')(lay3)
        
        model = Model(inputs=[input_states, input_action], outputs=[advantage])
        model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
        
        return model 
Example #15
Source File: ml_agent.py    From Grid2Op with Mozilla Public License 2.0
def construct_q_network(self):
        # Replaces the convolution layers with Dense layers and resizes the input and output spaces.
        # Uses the network architecture found in the DeepMind paper.
        input_layer = Input(shape=(self.observation_size * self.training_param.NUM_FRAMES,))
        layer1 = Dense(self.observation_size * self.training_param.NUM_FRAMES)(input_layer)
        layer1 = Activation('relu')(layer1)
        layer2 = Dense(self.observation_size)(layer1)
        layer2 = Activation('relu')(layer2)
        layer3 = Dense(self.observation_size)(layer2)
        layer3 = Activation('relu')(layer3)
        layer4 = Dense(2 * self.action_size)(layer3)
        layer4 = Activation('relu')(layer4)
        output = Dense(self.action_size)(layer4)

        self.model = Model(inputs=[input_layer], outputs=[output])
        self.model.compile(loss='mse', optimizer=Adam(lr=self.lr_))

        # Clone the graph so the target network gets its own weights; a second Model
        # built over the same tensors would share weights with self.model.
        # (clone_model comes from tensorflow.keras.models)
        self.target_model = clone_model(self.model)
        self.target_model.compile(loss='mse', optimizer=Adam(lr=self.lr_))
        self.target_model.set_weights(self.model.get_weights())
Example #16
Source File: networks.py    From rltrader with MIT License
def __init__(self, *args, num_steps=1, **kwargs):
        super().__init__(*args, **kwargs)
        with graph.as_default():
            if sess is not None:
                set_session(sess)
            self.num_steps = num_steps
            inp = None
            output = None
            if self.shared_network is None:
                inp = Input((self.num_steps, self.input_dim, 1))
                output = self.get_network_head(inp).output
            else:
                inp = self.shared_network.input
                output = self.shared_network.output
            output = Dense(
                self.output_dim, activation=self.activation,
                kernel_initializer='random_normal')(output)
            self.model = Model(inp, output)
            self.model.compile(
                optimizer=SGD(lr=self.lr), loss=self.loss) 
Example #17
Source File: networks.py    From rltrader with MIT License
def get_network_head(inp):
        output = LSTM(256, dropout=0.1, 
            return_sequences=True, stateful=False,
            kernel_initializer='random_normal')(inp)
        output = BatchNormalization()(output)
        output = LSTM(128, dropout=0.1,
            return_sequences=True, stateful=False,
            kernel_initializer='random_normal')(output)
        output = BatchNormalization()(output)
        output = LSTM(64, dropout=0.1,
            return_sequences=True, stateful=False,
            kernel_initializer='random_normal')(output)
        output = BatchNormalization()(output)
        output = LSTM(32, dropout=0.1,
            stateful=False,
            kernel_initializer='random_normal')(output)
        output = BatchNormalization()(output)
        return Model(inp, output) 
Example #18
Source File: networks.py    From rltrader with MIT License
def __init__(self, *args, num_steps=1, **kwargs):
        super().__init__(*args, **kwargs)
        with graph.as_default():
            if sess is not None:
                set_session(sess)
            self.num_steps = num_steps
            inp = None
            output = None
            if self.shared_network is None:
                inp = Input((self.num_steps, self.input_dim))
                output = self.get_network_head(inp).output
            else:
                inp = self.shared_network.input
                output = self.shared_network.output
            output = Dense(
                self.output_dim, activation=self.activation, 
                kernel_initializer='random_normal')(output)
            self.model = Model(inp, output)
            self.model.compile(
                optimizer=SGD(lr=self.lr), loss=self.loss) 
Example #19
Source File: networks.py    From rltrader with MIT License
def get_network_head(inp):
        output = Dense(256, activation='sigmoid', 
            kernel_initializer='random_normal')(inp)
        output = BatchNormalization()(output)
        output = Dropout(0.1)(output)
        output = Dense(128, activation='sigmoid', 
            kernel_initializer='random_normal')(output)
        output = BatchNormalization()(output)
        output = Dropout(0.1)(output)
        output = Dense(64, activation='sigmoid', 
            kernel_initializer='random_normal')(output)
        output = BatchNormalization()(output)
        output = Dropout(0.1)(output)
        output = Dense(32, activation='sigmoid', 
            kernel_initializer='random_normal')(output)
        output = BatchNormalization()(output)
        output = Dropout(0.1)(output)
        return Model(inp, output) 
Example #20
Source File: networks.py    From rltrader with MIT License
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        with graph.as_default():
            if sess is not None:
                set_session(sess)
            inp = None
            output = None
            if self.shared_network is None:
                inp = Input((self.input_dim,))
                output = self.get_network_head(inp).output
            else:
                inp = self.shared_network.input
                output = self.shared_network.output
            output = Dense(
                self.output_dim, activation=self.activation, 
                kernel_initializer='random_normal')(output)
            self.model = Model(inp, output)
            self.model.compile(
                optimizer=SGD(lr=self.lr), loss=self.loss) 
Example #21
Source File: model_triplet.py    From image_search_engine with MIT License
def image_model(lr=0.0001):
    input_1 = Input(shape=(None, None, 3))

    base_model = ResNet50(weights='imagenet', include_top=False)

    x1 = base_model(input_1)
    x1 = GlobalMaxPool2D()(x1)

    dense_1 = Dense(vec_dim, activation="linear", name="dense_image_1")

    x1 = dense_1(x1)

    _norm = Lambda(lambda x: K.l2_normalize(x, axis=-1))

    x1 = _norm(x1)

    model = Model([input_1], x1)

    model.compile(loss="mae", optimizer=Adam(lr))

    model.summary()

    return model 
Example #22
Source File: deep_classifier.py    From nlp-journey with Apache License 2.0
def build_model(self):
        inputs = Input(shape=(self.max_len,))
        output = Embedding(len(self.embeddings),
                           300,
                           weights=[self.embeddings],
                           trainable=False)(inputs)
        output = Bidirectional(LSTM(150,
                                    return_sequences=True,
                                    dropout=0.25,
                                    recurrent_dropout=0.25))(output)
        output = VanillaRNNAttention(300)(output)
        output = Dense(128, activation="relu")(output)
        output = Dropout(0.25)(output)
        output = Dense(1, activation="sigmoid")(output)
        model = Model(inputs=inputs, outputs=output)
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.summary()
        return model 
Example #23
Source File: dmnist.py    From qkeras with Apache License 2.0
def QDenseModel(weights_f, load_weights=False):
  """Construct QDenseModel."""

  x = x_in = Input((28*28,), name="input")
  x = QActivation("quantized_relu(2)", name="act_i")(x)

  x = Dense(300, name="d0")(x)
  x = BatchNormalization(name="bn0")(x)

  x = QActivation("quantized_relu(2)", name="act0_m")(x)

  x = Dense(100, name="d1")(x)
  x = BatchNormalization(name="bn0")(x)

  x = QActivation("quantized_relu(2)", name="act0_m")(x)

  x = Flatten(name="flatten")(x)

  x = QDense(
      NB_CLASSES,
      kernel_quantizer=quantized_bits(4, 0, 1),
      bias_quantizer=quantized_bits(4, 0, 1),
      name="dense2")(x)
  x = Activation("softmax", name="softmax")(x)

  model = Model(inputs=[x_in], outputs=[x])
  model.summary()
  model.compile(loss="categorical_crossentropy",
                optimizer=OPTIMIZER, metrics=["accuracy"])

  if load_weights and weights_f:
    model.load_weights(weights_f)

  return model 
Example #24
Source File: FcIDEC.py    From DEC-DA with MIT License
def __init__(self, dims, n_clusters=10, alpha=1.0):
        super(FcIDEC, self).__init__(dims, n_clusters, alpha)
        self.model = Model(inputs=self.autoencoder.input,
                           outputs=[self.model.output, self.autoencoder.output]) 
Example #25
Source File: ConvIDEC.py    From DEC-DA with MIT License
def __init__(self,
                 input_shape,
                 filters=[32, 64, 128, 10],
                 n_clusters=10):

        super(ConvIDEC, self).__init__(input_shape, filters, n_clusters)
        self.model = Model(inputs=self.autoencoder.input,
                           outputs=[self.model.output, self.autoencoder.output]) 
Example #26
Source File: ConvDEC.py    From DEC-DA with MIT License
def CAE(input_shape=(28, 28, 1), filters=[32, 64, 128, 10]):
    model = Sequential()
    if input_shape[0] % 8 == 0:
        pad3 = 'same'
    else:
        pad3 = 'valid'

    model.add(InputLayer(input_shape))
    model.add(Conv2D(filters[0], 5, strides=2, padding='same', activation='relu', name='conv1'))

    model.add(Conv2D(filters[1], 5, strides=2, padding='same', activation='relu', name='conv2'))

    model.add(Conv2D(filters[2], 3, strides=2, padding=pad3, activation='relu', name='conv3'))

    model.add(Flatten())
    model.add(Dense(units=filters[3], name='embedding'))
    model.add(Dense(units=filters[2]*int(input_shape[0]/8)*int(input_shape[0]/8), activation='relu'))

    model.add(Reshape((int(input_shape[0]/8), int(input_shape[0]/8), filters[2])))
    model.add(Conv2DTranspose(filters[1], 3, strides=2, padding=pad3, activation='relu', name='deconv3'))

    model.add(Conv2DTranspose(filters[0], 5, strides=2, padding='same', activation='relu', name='deconv2'))

    model.add(Conv2DTranspose(input_shape[2], 5, strides=2, padding='same', name='deconv1'))
    encoder = Model(inputs=model.input, outputs=model.get_layer('embedding').output)
    return model, encoder 
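A quick sanity check of the returned pair, assuming an MNIST-shaped input (the shapes below are the function's defaults, not project-specific):

autoencoder, encoder = CAE(input_shape=(28, 28, 1), filters=[32, 64, 128, 10])
autoencoder.summary()   # full encoder-decoder stack
encoder.summary()       # sub-model ending at the 'embedding' layer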
Example #27
Source File: ConvDEC.py    From DEC-DA with MIT License
def __init__(self,
                 input_shape,
                 filters=[32, 64, 128, 10],
                 n_clusters=10):

        self.n_clusters = n_clusters
        self.input_shape = input_shape
        self.datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, rotation_range=10)
        self.datagenx = ImageDataGenerator()
        self.autoencoder, self.encoder = CAE(input_shape, filters)

        # Define ConvIDEC model
        clustering_layer = ClusteringLayer(self.n_clusters, name='clustering')(self.encoder.output)
        self.model = Model(inputs=self.autoencoder.input,
                           outputs=clustering_layer) 
Example #28
Source File: preprocess.py    From alibi-detect with Apache License 2.0
def hidden_output(X: np.ndarray,
                  model: tf.keras.Model = None,
                  layer: int = -1,
                  input_shape: tuple = None,
                  batch_size: int = int(1e10)) -> np.ndarray:
    """
    Return hidden layer output from a model on a batch of instances.

    Parameters
    ----------
    X
        Batch of instances.
    model
        tf.keras.Model.
    layer
        Hidden layer of the model to use as output. The default of -1 refers to the last layer (typically the softmax).
    input_shape
        Optional input layer shape.
    batch_size
        Batch size used for the model predictions.

    Returns
    -------
    Model predictions using the specified hidden layer as output layer.
    """
    if input_shape and not model.inputs:
        inputs = Input(shape=input_shape)
        model.call(inputs)
    else:
        inputs = model.inputs
    hidden_model = Model(inputs=inputs, outputs=model.layers[layer].output)
    X_hidden = predict_batch(hidden_model, X, batch_size=batch_size)
    return X_hidden 
Example #29
Source File: stackedgan-mnist-6.2.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def build_discriminator(inputs, z_dim=50):
    """Build Discriminator 1 Model

    Classifies feature1 (features) as real/fake image and recovers
    the input noise or latent code (by minimizing entropy loss)

    # Arguments
        inputs (Layer): feature1
        z_dim (int): noise dimensionality

    # Returns
        dis1 (Model): feature1 as real/fake and recovered latent code
    """

    # input is 256-dim feature1
    x = Dense(256, activation='relu')(inputs)
    x = Dense(256, activation='relu')(x)

    # first output is probability that feature1 is real
    f1_source = Dense(1)(x)
    f1_source = Activation('sigmoid',
                           name='feature1_source')(f1_source)

    # z1 reconstruction (Q1 network)
    z1_recon = Dense(z_dim)(x) 
    z1_recon = Activation('tanh', name='z1')(z1_recon)
    
    discriminator_outputs = [f1_source, z1_recon]
    dis1 = Model(inputs, discriminator_outputs, name='dis1')
    return dis1 
Example #30
Source File: FcDEC.py    From DEC-DA with MIT License
def autoencoder(dims, act='relu'):
    """
    Fully connected auto-encoder model, symmetric.
    Arguments:
        dims: list of the number of units in each encoder layer. dims[0] is the input dim and
            dims[-1] is the number of units in the hidden (embedding) layer. The decoder mirrors
            the encoder, so the auto-encoder has 2*len(dims)-1 layers in total.
        act: activation; not applied to the input, embedding, or output layers
    Returns:
        (ae_model, encoder_model): the autoencoder model and the encoder model
    """
    n_stacks = len(dims) - 1
    init = VarianceScaling(scale=1. / 3., mode='fan_in', distribution='uniform')

    # input
    x = Input(shape=(dims[0],), name='input')
    h = x

    # internal layers in encoder
    for i in range(n_stacks-1):
        h = Dense(dims[i + 1], activation=act, kernel_initializer=init, name='encoder_%d' % i)(h)

    # hidden layer
    h = Dense(dims[-1], kernel_initializer=init, name='encoder_%d' % (n_stacks - 1))(h)  # hidden layer, features are extracted from here

    y = h
    # internal layers in decoder
    for i in range(n_stacks-1, 0, -1):
        y = Dense(dims[i], activation=act, kernel_initializer=init, name='decoder_%d' % i)(y)

    # output
    y = Dense(dims[0], kernel_initializer=init, name='decoder_0')(y)

    return Model(inputs=x, outputs=y, name='AE'), Model(inputs=x, outputs=h, name='encoder')
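Example usage with the layer sizes commonly used for MNIST in the DEC literature (the dims below are an assumption, not part of this file):

ae, enc = autoencoder([784, 500, 500, 2000, 10])
ae.compile(optimizer='adam', loss='mse')
ae.summary()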