Python keras.models.Model() Examples

The following are 30 code examples showing how to use keras.models.Model(). They are extracted from open source projects; each example notes the project, author, and source file it comes from.


You may also want to check out all available functions and classes of the keras.models module.
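
Before the project examples, here is a minimal sketch of the functional-API pattern they all follow: create an Input, chain layers on it, and wrap the input and output tensors in keras.models.Model(). The layer sizes, names, and shapes below are illustrative assumptions, not taken from any of the projects.

from keras.layers import Input, Dense
from keras.models import Model

def build_minimal_model(input_dim=784, num_classes=10):
    # Define the graph: one hidden layer followed by a softmax classifier.
    inputs = Input(shape=(input_dim,))
    hidden = Dense(64, activation='relu')(inputs)
    outputs = Dense(num_classes, activation='softmax')(hidden)

    # Wrap the input/output tensors into a trainable Model and compile it.
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model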

Example 1
Project: vergeml   Author: mme   File: imagenet.py    License: MIT License
def _save(model, base_model, layers, labels, random_seed, checkpoints_dir):
    import os
    from keras.layers import Flatten, Dense
    from keras import Model
    # _makenet is a project helper defined elsewhere in the vergeml codebase.
    nclasses = len(labels)
    x = Flatten()(base_model.output)
    x = _makenet(x, layers, dropout=None, random_seed=random_seed)
    predictions = Dense(nclasses, activation="softmax", name="predictions")(x)
    model_final = Model(inputs=base_model.input, outputs=predictions)

    for i in range(layers - 1):
        weights = model.get_layer(name='dense_layer_{}'.format(i)).get_weights()
        model_final.get_layer(name='dense_layer_{}'.format(i)).set_weights(weights)

    weights = model.get_layer(name='predictions').get_weights()
    model_final.get_layer(name='predictions').set_weights(weights)

    model_final.save(os.path.join(checkpoints_dir, "model.h5"))
    with open(os.path.join(checkpoints_dir, "labels.txt"), "w") as f:
        f.write("\n".join(labels))
    return model_final 
Example 2
Project: vergeml   Author: mme   File: features.py    License: MIT License
def get_custom_architecture(name, trainings_dir, output_layer):
    import os
    from keras.models import load_model, Model
    # VergeMLError and did_you_mean are vergeml project helpers defined elsewhere.
    name = name.lstrip("@")
    model = load_model(os.path.join(trainings_dir, name, 'checkpoints', 'model.h5'))
    try:
        if isinstance(output_layer, int):
            layer = model.layers[output_layer]
        else:
            layer = model.get_layer(output_layer)
    except Exception:
        if isinstance(output_layer, int):
            raise VergeMLError(f'output-layer {output_layer} not found - model has only {len(model.layers)} layers.')
        else:
            candidates = list(map(lambda l: l.name, model.layers))
            raise VergeMLError(f'output-layer named {output_layer} not found.',
                               suggestion=did_you_mean(candidates, output_layer))
    model = Model(inputs=model.input, outputs=layer.output)
    return model 
Example 3
Project: Image-Caption-Generator   Author: dabasajay   File: model.py    License: MIT License
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
		# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
		# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(4096,))
	image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
	image_model = Dense(embedding_size, activation='relu')(image_model_1)

	caption_input = Input(shape=(max_len,))
	# mask_zero: we zero-pad all captions to the same length; the mask makes downstream layers ignore those padded timesteps (an efficiency aid).
	caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
	caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

	# Merging the models and creating a softmax classifier
	final_model_1 = concatenate([image_model, caption_model])
	final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
	final_model = Dense(vocab_size, activation='softmax')(final_model_2)

	model = Model(inputs=[image_input, caption_input], outputs=final_model)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	return model 
Example 4
Project: keras-anomaly-detection   Author: chen0040   File: feedforward.py    License: MIT License
def create_model(self, input_dim):
        encoding_dim = 14
        input_layer = Input(shape=(input_dim,))

        encoder = Dense(encoding_dim, activation="tanh",
                        activity_regularizer=regularizers.l1(10e-5))(input_layer)
        encoder = Dense(encoding_dim // 2, activation="relu")(encoder)

        decoder = Dense(encoding_dim // 2, activation='tanh')(encoder)
        decoder = Dense(input_dim, activation='relu')(decoder)

        model = Model(inputs=input_layer, outputs=decoder)
        model.compile(optimizer='adam',
                      loss='mean_squared_error',
                      metrics=['accuracy'])

        return model 
Example 5
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    License: Apache License 2.0
def weather_l2(hidden_nums=100, l2=0.01):
    input_img = Input(shape=(37,))
    hn = Dense(hidden_nums, activation='relu')(input_img)
    hn = Dense(hidden_nums, activation='relu',
               kernel_regularizer=regularizers.l2(l2))(hn)
    out_u = Dense(37, activation='sigmoid',                 
                  name='ae_part')(hn)
    out_sig = Dense(37, activation='linear', 
                    name='pred_part')(hn)
    out_both = concatenate([out_u, out_sig], axis=1, name = 'concatenate')

    #weather_model = Model(input_img, outputs=[out_ae, out_pred])
    mve_model = Model(input_img, outputs=[out_both])
    mve_model.compile(optimizer='adam', loss=mve_loss, loss_weights=[1.])
    
    return mve_model 
Example 6
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    License: Apache License 2.0
def CausalCNN(n_filters, lr, decay, loss, 
               seq_len, input_features, 
               strides_len, kernel_size,
               dilation_rates):

    inputs = Input(shape=(seq_len, input_features), name='input_layer')
    x = inputs
    for dilation_rate in dilation_rates:
        x = Conv1D(filters=n_filters,
               kernel_size=kernel_size, 
               padding='causal',
               dilation_rate=dilation_rate,
               activation='linear')(x) 
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    #x = Dense(7, activation='relu', name='dense_layer')(x)
    outputs = Dense(3, activation='sigmoid', name='output_layer')(x)
    causalcnn = Model(inputs, outputs=[outputs])

    return causalcnn 
Example 7
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    License: Apache License 2.0
def weather_ae(layers, lr, decay, loss, 
               input_len, input_features):
    
    inputs = Input(shape=(input_len, input_features), name='input_layer')
    
    for i, hidden_nums in enumerate(layers):
        if i==0:
            hn = Dense(hidden_nums, activation='relu')(inputs)
        else:
            hn = Dense(hidden_nums, activation='relu')(hn)

    outputs = Dense(3, activation='sigmoid', name='output_layer')(hn)

    weather_model = Model(inputs, outputs=[outputs])

    return weather_model 
Example 8
Project: cnn-levelset   Author: wiseodd   File: localizer.py    License: MIT License
def __init__(self, model_path=None):
        if model_path is not None:
            self.model = self.load_model(model_path)
        else:
            # VGG16 last conv features
            inputs = Input(shape=(7, 7, 512))
            x = Convolution2D(128, 1, 1)(inputs)
            x = Flatten()(x)

            # Cls head
            h_cls = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
            h_cls = Dropout(p=0.5)(h_cls)
            cls_head = Dense(20, activation='softmax', name='cls')(h_cls)

            # Reg head
            h_reg = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
            h_reg = Dropout(p=0.5)(h_reg)
            reg_head = Dense(4, activation='linear', name='reg')(h_reg)

            # Joint model. Note: this snippet uses the Keras 1 API
            # (Convolution2D, W_regularizer, Dropout(p=...), Model(input=..., output=...)).
            self.model = Model(input=inputs, output=[cls_head, reg_head])
Example 9
Project: Keras-GAN   Author: eriklindernoren   File: sgan.py    License: MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(1, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img) 
Example 10
Project: Keras-GAN   Author: eriklindernoren   File: discogan.py    License: MIT License
def build_discriminator(self):

        def d_layer(layer_input, filters, f_size=4, normalization=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalization:
                d = InstanceNormalization()(d)
            return d

        img = Input(shape=self.img_shape)

        d1 = d_layer(img, self.df, normalization=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

        return Model(img, validity) 
Example 11
Project: Keras-GAN   Author: eriklindernoren   File: ccgan.py    License: MIT License
def build_discriminator(self):

        model = Sequential()
        model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape))
        model.add(LeakyReLU(alpha=0.8))
        model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())

        model.summary()

        img = Input(shape=self.img_shape)
        features = model(img)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)

        label = Flatten()(features)
        label = Dense(self.num_classes+1, activation="softmax")(label)

        return Model(img, [validity, label]) 
Example 12
Project: Keras-GAN   Author: eriklindernoren   File: bigan.py    License: MIT License
def build_encoder(self):
        model = Sequential()

        model.add(Flatten(input_shape=self.img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(self.latent_dim))

        model.summary()

        img = Input(shape=self.img_shape)
        z = model(img)

        return Model(img, z) 
Example 13
Project: Keras-GAN   Author: eriklindernoren   File: bigan.py    License: MIT License
def build_generator(self):
        model = Sequential()

        model.add(Dense(512, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        model.summary()

        z = Input(shape=(self.latent_dim,))
        gen_img = model(z)

        return Model(z, gen_img) 
Example 14
Project: Keras-GAN   Author: eriklindernoren   File: bigan.py    License: MIT License
def build_discriminator(self):

        z = Input(shape=(self.latent_dim, ))
        img = Input(shape=self.img_shape)
        d_in = concatenate([z, Flatten()(img)])

        model = Dense(1024)(d_in)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        model = Dense(1024)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        model = Dense(1024)(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = Dropout(0.5)(model)
        validity = Dense(1, activation="sigmoid")(model)

        return Model([z, img], validity) 
Example 15
Project: Keras-GAN   Author: eriklindernoren   File: srgan.py    License: MIT License
def build_vgg(self):
        """
        Builds a pre-trained VGG19 model that outputs image features extracted at the
        third block of the model
        """
        vgg = VGG19(weights="imagenet")
        # Set outputs to outputs of last conv. layer in block 3
        # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py
        vgg.outputs = [vgg.layers[9].output]

        img = Input(shape=self.hr_shape)

        # Extract image features
        img_features = vgg(img)

        return Model(img, img_features) 
Example 16
Project: Keras-GAN   Author: eriklindernoren   File: pixelda.py    License: MIT License
def build_classifier(self):

        def clf_layer(layer_input, filters, f_size=4, normalization=True):
            """Classifier layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalization:
                d = InstanceNormalization()(d)
            return d

        img = Input(shape=self.img_shape)

        c1 = clf_layer(img, self.cf, normalization=False)
        c2 = clf_layer(c1, self.cf*2)
        c3 = clf_layer(c2, self.cf*4)
        c4 = clf_layer(c3, self.cf*8)
        c5 = clf_layer(c4, self.cf*8)

        class_pred = Dense(self.num_classes, activation='softmax')(Flatten()(c5))

        return Model(img, class_pred) 
Example 17
Project: Keras-GAN   Author: eriklindernoren   File: infogan.py    License: MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        gen_input = Input(shape=(self.latent_dim,))
        img = model(gen_input)

        model.summary()

        return Model(gen_input, img) 
Example 18
Project: Keras-GAN   Author: eriklindernoren   File: wgan_gp.py    License: MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img) 
Example 19
Project: Keras-GAN   Author: eriklindernoren   File: cyclegan.py    License: MIT License
def build_discriminator(self):

        def d_layer(layer_input, filters, f_size=4, normalization=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalization:
                d = InstanceNormalization()(d)
            return d

        img = Input(shape=self.img_shape)

        d1 = d_layer(img, self.df, normalization=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

        return Model(img, validity) 
Example 20
Project: Keras-GAN   Author: eriklindernoren   File: pix2pix.py    License: MIT License
def build_discriminator(self):

        def d_layer(layer_input, filters, f_size=4, bn=True):
            """Discriminator layer"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            return d

        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # Concatenate image and conditioning image by channels to produce input
        combined_imgs = Concatenate(axis=-1)([img_A, img_B])

        d1 = d_layer(combined_imgs, self.df, bn=False)
        d2 = d_layer(d1, self.df*2)
        d3 = d_layer(d2, self.df*4)
        d4 = d_layer(d3, self.df*8)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

        return Model([img_A, img_B], validity) 
Example 21
Project: Keras-GAN   Author: eriklindernoren   File: lsgan.py    License: MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img) 
Example 22
Project: Keras-GAN   Author: eriklindernoren   File: lsgan.py    License: MIT License
def build_discriminator(self):

        model = Sequential()

        model.add(Flatten(input_shape=self.img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        # (!!!) No softmax
        model.add(Dense(1))
        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity) 
Example 23
Project: Keras-GAN   Author: eriklindernoren   File: cogan.py    License: MIT License
def build_discriminators(self):

        img1 = Input(shape=self.img_shape)
        img2 = Input(shape=self.img_shape)

        # Shared discriminator layers
        model = Sequential()
        model.add(Flatten(input_shape=self.img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))

        img1_embedding = model(img1)
        img2_embedding = model(img2)

        # Discriminator 1
        validity1 = Dense(1, activation='sigmoid')(img1_embedding)
        # Discriminator 2
        validity2 = Dense(1, activation='sigmoid')(img2_embedding)

        return Model(img1, validity1), Model(img2, validity2) 
Example 24
Project: Keras-GAN   Author: eriklindernoren   File: dualgan.py    License: MIT License
def build_generator(self):

        X = Input(shape=(self.img_dim,))

        model = Sequential()
        model.add(Dense(256, input_dim=self.img_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dropout(0.4))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dropout(0.4))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dropout(0.4))
        model.add(Dense(self.img_dim, activation='tanh'))

        X_translated = model(X)

        return Model(X, X_translated) 
Example 25
Project: Keras-GAN   Author: eriklindernoren   File: dcgan.py    License: MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img) 
Example 26
Project: Keras-GAN   Author: eriklindernoren   File: gan.py    License: MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img) 
Example 27
Project: Keras-GAN   Author: eriklindernoren   File: gan.py    License: MIT License
def build_discriminator(self):

        model = Sequential()

        model.add(Flatten(input_shape=self.img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity) 
Example 28
Project: Keras-GAN   Author: eriklindernoren   File: aae.py    License: MIT License
def build_encoder(self):
        # Encoder

        img = Input(shape=self.img_shape)

        h = Flatten()(img)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        mu = Dense(self.latent_dim)(h)
        log_var = Dense(self.latent_dim)(h)
        # Reparameterization trick, z = mu + exp(log_var / 2) * epsilon,
        # written with the legacy Keras 1 functional merge().
        latent_repr = merge([mu, log_var],
                mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
                output_shape=lambda p: p[0])

        return Model(img, latent_repr) 
Example 29
Project: Keras-GAN   Author: eriklindernoren   File: aae.py    License: MIT License
def build_decoder(self):

        model = Sequential()

        model.add(Dense(512, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        model.summary()

        z = Input(shape=(self.latent_dim,))
        img = model(z)

        return Model(z, img) 
Example 30
Project: Keras-GAN   Author: eriklindernoren   File: bgan.py    License: MIT License
def build_generator(self):

        model = Sequential()

        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
        img = model(noise)

        return Model(noise, img)