Python keras.applications.VGG19 Examples

The following are 10 code examples of keras.applications.VGG19(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.applications, or try the search function.
Example #1
Source Project: Keras-GAN   Author: eriklindernoren   File: srgan.py    License: MIT License
def build_vgg(self):
        """
        Builds a pre-trained VGG19 model that outputs image features extracted at the
        third block of the model
        """
        vgg = VGG19(weights="imagenet")
        # Set outputs to outputs of last conv. layer in block 3
        # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py
        vgg.outputs = [vgg.layers[9].output]

        img = Input(shape=self.hr_shape)

        # Extract image features
        img_features = vgg(img)

        return Model(img, img_features) 
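Note that in recent Keras versions, assigning to vgg.outputs after construction does not change what vgg(img) returns, so the excerpt above can silently extract from the wrong layer. A minimal sketch of the more robust idiom, assuming the stock VGG19 where vgg.layers[9] resolves to block3_conv3:

from keras.applications import VGG19
from keras.models import Model

# Build the truncated feature extractor directly from the layer's output
vgg = VGG19(weights="imagenet")
feature_extractor = Model(inputs=vgg.input,
                          outputs=vgg.get_layer("block3_conv3").output)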
Example #2
Source Project: keras-onnx   Author: onnx   File: test_srgan.py    License: MIT License
def build_vgg(self):
        """
        Builds a pre-trained VGG19 model that outputs image features extracted at the
        third block of the model
        """
        vgg = VGG19(weights="imagenet")
        # Set outputs to outputs of last conv. layer in block 3
        # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py
        vgg.outputs = [vgg.layers[9].output]

        img = Input(shape=self.hr_shape)

        # Extract image features
        img_features = vgg(img)

        return Model(img, img_features) 
Example #3
Source Project: Generative-Adversarial-Networks-Projects   Author: PacktPublishing   File: run.py    License: MIT License
def build_vgg():
    """
    Build VGG network to extract image features
    """
    input_shape = (256, 256, 3)

    # Load a pre-trained VGG19 model trained on 'Imagenet' dataset
    vgg = VGG19(weights="imagenet")
    vgg.outputs = [vgg.layers[9].output]

    input_layer = Input(shape=input_shape)

    # Extract features
    features = vgg(input_layer)

    # Create a Keras model
    model = Model(inputs=[input_layer], outputs=[features])
    return model 
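As in the previous examples, mutating vgg.outputs is fragile; note also that the full-top VGG19 loaded here nominally expects 224x224 inputs. A minimal usage sketch for a feature extractor like this, assuming a hypothetical img_array of shape (256, 256, 3) and the matching VGG19 preprocessing:

import numpy as np
from keras.applications.vgg19 import preprocess_input

model = build_vgg()
# preprocess_input applies the channel mean subtraction used in VGG training
batch = preprocess_input(np.expand_dims(img_array.astype("float32"), axis=0))
features = model.predict(batch)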
Example #4
Source Project: DeepFashion   Author: abhishekrana   File: cnn.py    License: Apache License 2.0
def model_vgg_create(input_shape, num_classes):

    logging.debug('input_shape {}'.format(input_shape))
    #model = applications.VGG19(weights="imagenet", include_top=False, input_shape=(256, 256, 3))
    model = applications.VGG19(weights="imagenet", include_top=False, input_shape=input_shape)
    # input_shape must have 3 channels, e.g. (128, 128, 3); ImageNet weights reject (128, 128, 1)

    # Freeze the layers which you don't want to train. Freezing the first 5 layers.
    for layer in model.layers[:5]:
        layer.trainable = False

    # Adding custom Layers
    x = model.output
    x = Flatten()(x)
    x = Dense(1024, activation="relu")(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation="relu")(x)
    predictions = Dense(num_classes, activation="softmax")(x)

    # Creating the final model
    model_final = Model(inputs = model.input, outputs = predictions)

    # Compile the model
    # opt = RMSprop(lr=0.0001, decay=1e-6)
    opt = SGD(lr=0.0001, momentum=0.9)
    model_final.compile(loss = "categorical_crossentropy", optimizer = opt, metrics=["accuracy"])

    return model_final 
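One caveat on the shapes noted above: with weights="imagenet", Keras requires exactly three input channels, so a (128, 128, 1) input would be rejected. A common workaround (a sketch, not from this project) is to tile grayscale batches to RGB before feeding the network:

import numpy as np

def gray_to_rgb(batch):
    # hypothetical helper: batch has shape (n, h, w, 1); repeat the single
    # channel three times to satisfy the ImageNet-weight channel check
    return np.repeat(batch, 3, axis=-1)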
Example #5
Source Project: show-attend-and-tell-keras   Author: zimmerrol   File: generate_features.py    License: MIT License
def setup_model(encoder, layer_name):
    image_input = Input(shape=(224, 224, 3))

    base_model = None
    if encoder == 'vgg16':
        base_model = VGG16(include_top=False, weights='imagenet', input_tensor=image_input, input_shape=(224, 224, 3))
    elif encoder == 'vgg19':
        base_model = VGG19(include_top=False, weights='imagenet', input_tensor=image_input, input_shape=(224, 224, 3))
    else:
        raise ValueError("unsupported encoder type: {}".format(encoder))

    model = Model(inputs=base_model.input, outputs=base_model.get_layer(layer_name).output)
    return model 
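A minimal usage sketch; 'block5_conv4' is the deepest convolutional layer in VGG19 and a common feature source for attention-based captioning (the layer choice here is an assumption, not taken from this project):

feature_model = setup_model('vgg19', 'block5_conv4')
# Valid layer_name values can be listed from the base network:
# print([layer.name for layer in VGG19(include_top=False, weights=None).layers])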
Example #6
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: applications_test.py    License: MIT License
def test_vgg():
    app = random.choice([applications.VGG16, applications.VGG19])
    last_dim = 512
    _test_application_basic(app)
    _test_application_notop(app, last_dim)
    _test_application_variable_input_channels(app, last_dim)
    _test_app_pooling(app, last_dim) 
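The _test_application_* helpers are internal to the Keras test suite. The no-top check can be approximated as follows (a sketch; the assertion is an assumption about what last_dim = 512 verifies, not the actual test code):

from keras import applications

# With include_top=False and global average pooling, VGG's final feature
# dimension is 512 regardless of spatial input size
model = applications.VGG19(weights=None, include_top=False, pooling='avg')
assert model.output_shape == (None, 512)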
Example #7
Source Project: plaidbench   Author: plaidml   File: vgg19.py    License: Apache License 2.0
def build_model():
    import keras.applications as kapp
    from keras.layers import Input
    from keras.backend import floatx
    inputLayer = Input(shape=(224, 224, 3), dtype=floatx())
    return kapp.VGG19(input_tensor=inputLayer) 
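A minimal benchmark-style usage sketch (the random input and warm-up call are assumptions, not plaidbench code):

import numpy as np

model = build_model()
# One synthetic batch at the fixed 224x224x3 input size, e.g. for timing runs
batch = np.random.uniform(size=(1, 224, 224, 3)).astype('float32')
predictions = model.predict(batch)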
Example #8
Source Project: vergeml   Author: mme   File: features.py    License: MIT License
def get_imagenet_architecture(architecture, variant, size, alpha, output_layer, include_top=False, weights='imagenet'):
    from keras import applications, Model

    if include_top:
        assert output_layer == 'last'

    if size == 'auto':
        size = get_image_size(architecture, variant, size)

    shape = (size, size, 3)

    if architecture == 'densenet':
        if variant == 'auto':
            variant = 'densenet-121'
        if variant == 'densenet-121':
            model = applications.DenseNet121(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-169':
            model = applications.DenseNet169(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-201':
            model = applications.DenseNet201(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-resnet-v2':
        model = applications.InceptionResNetV2(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'mobilenet':
        model = applications.MobileNet(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'mobilenet-v2':
        model = applications.MobileNetV2(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'nasnet':
        if variant == 'auto':
            variant = 'large'
        if variant == 'large':
            model = applications.NASNetLarge(weights=weights, include_top=include_top, input_shape=shape)
        else:
            model = applications.NASNetMobile(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'resnet-50':
        model = applications.ResNet50(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-16':
        model = applications.VGG16(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-19':
        model = applications.VGG19(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'xception':
        model = applications.Xception(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-v3':
        model = applications.InceptionV3(weights=weights, include_top=include_top, input_shape=shape)
    else:
        raise VergeMLError('unknown architecture: {}'.format(architecture))

    if output_layer != 'last':
        try:
            if isinstance(output_layer, int):
                layer = model.layers[output_layer]
            else:
                layer = model.get_layer(output_layer)
        except Exception:
            raise VergeMLError('layer not found: {}'.format(output_layer))
        model = Model(inputs=model.input, outputs=layer.output)

    return model 
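A minimal usage sketch (the argument values are assumptions; 'block5_pool' is the last layer of the no-top VGG19, so the extractor yields 7x7x512 feature maps at size 224):

model = get_imagenet_architecture('vgg-19', 'auto', 224, None, 'block5_pool')
print(model.output_shape)  # (None, 7, 7, 512)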
Example #9
Source Project: Keras-GAN   Author: eriklindernoren   File: srgan.py    License: MIT License
def train(self, epochs, batch_size=1, sample_interval=50):

        start_time = datetime.datetime.now()

        for epoch in range(epochs):

            # ----------------------
            #  Train Discriminator
            # ----------------------

            # Sample images and their conditioning counterparts
            imgs_hr, imgs_lr = self.data_loader.load_data(batch_size)

            # From low res. image generate high res. version
            fake_hr = self.generator.predict(imgs_lr)

            valid = np.ones((batch_size,) + self.disc_patch)
            fake = np.zeros((batch_size,) + self.disc_patch)

            # Train the discriminators (original images = real / generated = Fake)
            d_loss_real = self.discriminator.train_on_batch(imgs_hr, valid)
            d_loss_fake = self.discriminator.train_on_batch(fake_hr, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ------------------
            #  Train Generator
            # ------------------

            # Sample images and their conditioning counterparts
            imgs_hr, imgs_lr = self.data_loader.load_data(batch_size)

            # The generators want the discriminators to label the generated images as real
            valid = np.ones((batch_size,) + self.disc_patch)

            # Extract ground truth image features using pre-trained VGG19 model
            image_features = self.vgg.predict(imgs_hr)

            # Train the generators
            g_loss = self.combined.train_on_batch([imgs_lr, imgs_hr], [valid, image_features])

            elapsed_time = datetime.datetime.now() - start_time
            # Plot the progress
            print("%d time: %s" % (epoch, elapsed_time))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch) 
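A minimal driver sketch (the SRGAN class name and argument values are assumed from the srgan.py context, not shown in this excerpt):

gan = SRGAN()
gan.train(epochs=30000, batch_size=1, sample_interval=50)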
Example #10
Source Project: keras-onnx   Author: onnx   File: test_srgan.py    License: MIT License
def __init__(self):
        # Input shape
        self.channels = 3
        self.lr_height = 64                 # Low resolution height
        self.lr_width = 64                  # Low resolution width
        self.lr_shape = (self.lr_height, self.lr_width, self.channels)
        self.hr_height = self.lr_height*4   # High resolution height
        self.hr_width = self.lr_width*4     # High resolution width
        self.hr_shape = (self.hr_height, self.hr_width, self.channels)

        # Number of residual blocks in the generator
        self.n_residual_blocks = 16

        # We use a pre-trained VGG19 model to extract image features from the high resolution
        # and the generated high resolution images and minimize the mse between them
        self.vgg = self.build_vgg()
        self.vgg.trainable = False

        # Calculate output shape of D (PatchGAN)
        patch = int(self.hr_height / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()

        # Build the generator
        self.generator = self.build_generator()

        # High res. and low res. images
        img_hr = Input(shape=self.hr_shape)
        img_lr = Input(shape=self.lr_shape)

        # Generate high res. version from low res.
        fake_hr = self.generator(img_lr)

        # Extract image features of the generated img
        fake_features = self.vgg(fake_hr)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # Discriminator determines validity of generated high res. images
        validity = self.discriminator(fake_hr)

        self.combined = Model([img_lr, img_hr], [validity, fake_features])
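The excerpt ends before the combined model is compiled. A sketch of the missing step (Adam from keras.optimizers; the loss weighting is assumed from the SRGAN perceptual-loss setup, scaling the adversarial term down relative to the VGG feature MSE):

        optimizer = Adam(0.0002, 0.5)
        self.combined.compile(loss=['binary_crossentropy', 'mse'],
                              loss_weights=[1e-3, 1],
                              optimizer=optimizer)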