Python keras.applications.vgg19.VGG19 Examples

The following are 20 code examples of keras.applications.vgg19.VGG19(), collected from open-source projects. Each example notes the source file, project, and license it comes from. You may also want to check out the other functions and classes available in the keras.applications.vgg19 module.
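As a quick orientation before the examples, here is a minimal, self-contained sketch of typical VGG19 usage for ImageNet classification; the file name elephant.jpg is a placeholder:

import numpy as np
from keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions
from keras.preprocessing import image

# Load the full model, including the classifier head, with ImageNet weights
model = VGG19(weights='imagenet')

# Load and preprocess a single 224x224 RGB image (placeholder path)
img = image.load_img('elephant.jpg', target_size=(224, 224))
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))

# Predict and decode the top-3 ImageNet classes
print(decode_predictions(model.predict(x), top=3)[0])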
Example #1
Source File: feat.py    From Unstructured-change-detection-using-CNN with GNU General Public License v3.0
import numpy as np
import tensorflow as tf
from keras.applications.vgg19 import VGG19, preprocess_input
from keras.preprocessing import image

def extra_feat(img_path):
    # Use VGG19 as a feature extractor; get_activations is a helper
    # defined elsewhere in this project.
    base_model = VGG19(weights='imagenet', include_top=False)
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    block1_pool_features = get_activations(base_model, 3, x)
    block2_pool_features = get_activations(base_model, 6, x)
    block3_pool_features = get_activations(base_model, 10, x)
    block4_pool_features = get_activations(base_model, 14, x)
    block5_pool_features = get_activations(base_model, 18, x)

    # Resize every block's activations to a common 112x112 grid
    x1 = tf.image.resize_images(block1_pool_features[0], [112, 112])
    x2 = tf.image.resize_images(block2_pool_features[0], [112, 112])
    x3 = tf.image.resize_images(block3_pool_features[0], [112, 112])
    x4 = tf.image.resize_images(block4_pool_features[0], [112, 112])
    x5 = tf.image.resize_images(block5_pool_features[0], [112, 112])

    # Concatenate along the channel axis. Change to only x1, x1+x2,
    # x1+x2+x3, and so on, in order to visualize features from different blocks.
    F = tf.concat([x3, x2, x1, x4, x5], 3)
    return F
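Because tf.image.resize_images is a TensorFlow 1.x graph op, extra_feat returns a symbolic tensor that still has to be evaluated. A minimal usage sketch, assuming TensorFlow 1.x and a hypothetical image path:

F = extra_feat('scene_t1.png')  # hypothetical image path
with tf.Session() as sess:
    features = sess.run(F)
print(features.shape)  # (1, 112, 112, C), where C is the summed channel count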
Example #2
Source File: train_TensorFlow.py    From Neural-Style with MIT License
def load_img(path_to_img):

  max_dim  = 512
  img      = Image.open(path_to_img)
  img_size = max(img.size)
  scale    = max_dim/img_size
  img      = img.resize((round(img.size[0]*scale), round(img.size[1]*scale)), Image.ANTIALIAS)

  img      = kp_image.img_to_array(img)

  # We need to broadcast the image array such that it has a batch dimension 
  img = np.expand_dims(img, axis=0)

  # Preprocess the raw image so it is suitable as input to the VGG19 model
  out = tf.keras.applications.vgg19.preprocess_input(img)

  return tf.convert_to_tensor(out) 
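A quick usage note, with a placeholder path: the function returns a 4-D tensor whose longer spatial side has been scaled to 512.

img = load_img('content.jpg')  # placeholder path
print(img.shape)  # (1, H, W, 3), with max(H, W) == 512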
Example #3
Source File: neuralnets.py    From EmoPy with GNU Affero General Public License v3.0
def _get_base_model(self):
        """
        :return: base model from Keras based on user-supplied model name
        """
        if self.model_name == 'inception_v3':
            return InceptionV3(weights='imagenet', include_top=False)
        elif self.model_name == 'xception':
            return Xception(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg16':
            return VGG16(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg19':
            return VGG19(weights='imagenet', include_top=False)
        elif self.model_name == 'resnet50':
            return ResNet50(weights='imagenet', include_top=False)
        else:
            raise ValueError('Cannot find base model %s' % self.model_name) 
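Since _get_base_model returns a headless network (include_top=False), a classifier head is typically attached downstream. A sketch of that pattern, not taken from EmoPy itself (net and num_classes are hypothetical):

from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

base = net._get_base_model()  # net: an instance of the surrounding class
x = GlobalAveragePooling2D()(base.output)  # collapse the spatial dimensions
out = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=base.input, outputs=out)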
Example #4
Source File: cnn_model.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def VGG_19_ImageNet(self, nOutput, input):

        # Define inputs and outputs IDs
        self.ids_inputs = ['input_1']
        self.ids_outputs = ['predictions']

        # Load VGG19 model pre-trained on ImageNet
        self.model = VGG19(weights='imagenet', layers_lr=0.001)

        # Recover input layer
        image = self.model.get_layer(self.ids_inputs[0]).output

        # Recover last layer kept from original model
        out = self.model.get_layer('fc2').output
        out = Dense(nOutput, name=self.ids_outputs[0], activation='softmax')(out)

        self.model = Model(inputs=image, outputs=out)

Example #5
Source File: cnn_model.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def VGG_19(self, nOutput, input):

        # Define inputs and outputs IDs
        self.ids_inputs = ['input_1']
        self.ids_outputs = ['predictions']

        # Load VGG19 model pre-trained on ImageNet
        self.model = VGG19()

        # Recover input layer
        image = self.model.get_layer(self.ids_inputs[0]).output

        # Recover last layer kept from original model
        out = self.model.get_layer('fc2').output
        out = Dense(nOutput, name=self.ids_outputs[0], activation='softmax')(out)

        self.model = Model(inputs=image, outputs=out)
Example #6
Source File: cnn_model-predictor.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def VGG_19_ImageNet(self, nOutput, input):

        # Define inputs and outputs IDs
        self.ids_inputs = ['input_1']
        self.ids_outputs = ['predictions']

        # Load VGG19 model pre-trained on ImageNet
        self.model = VGG19(weights='imagenet', layers_lr=0.001)

        # Recover input layer
        image = self.model.get_layer(self.ids_inputs[0]).output

        # Recover last layer kept from original model
        out = self.model.get_layer('fc2').output
        out = Dense(nOutput, name=self.ids_outputs[0], activation='softmax')(out)

        self.model = Model(inputs=image, outputs=out)

Example #7
Source File: cnn_model-predictor.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def VGG_19(self, nOutput, input):

        # Define inputs and outputs IDs
        self.ids_inputs = ['input_1']
        self.ids_outputs = ['predictions']

        # Load VGG19 model pre-trained on ImageNet
        self.model = VGG19()

        # Recover input layer
        image = self.model.get_layer(self.ids_inputs[0]).output

        # Recover last layer kept from original model
        out = self.model.get_layer('fc2').output
        out = Dense(nOutput, name=self.ids_outputs[0], activation='softmax')(out)

        self.model = Model(inputs=image, outputs=out)
Example #8
Source File: extract_features.py    From Audio-Vision with MIT License
def get_model(weights_path=None):
    
    ## [17-june-2018]Use residual after this
    input_tensor = Input(shape=(448,448,3))
    base_model = VGG19(weights='imagenet', include_top=False, input_tensor=input_tensor)
    #base_model.summary()
    for layer in base_model.layers:
        layer.trainable = False
        
    model = Model(inputs=base_model.input, outputs=base_model.get_layer('block5_pool').output)
    #model.summary()
    #model = VGG19(weights_path)
    #model.summary()
    return model 
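With the fixed 448x448 input, the truncated model maps an image batch straight to block5_pool features:

model = get_model()
print(model.output_shape)  # (None, 14, 14, 512)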
Example #9
Source File: gram.py    From subjective-functions with MIT License
def construct_gatys_model(padding='valid'):
    default_model = vgg19.VGG19(weights='imagenet')

    # We don't care about the actual predictions, and want to be able to handle arbitrarily
    # sized images. So let's do it!
    new_layers = []
    for i, layer in enumerate(default_model.layers[1:]):
        if isinstance(layer, keras.layers.Conv2D):
            config = layer.get_config()
            if i == 0:
                config['input_shape'] = (None, None, 3)
            config['padding'] = padding
            # ugh gatys has different layer naming
            old_name = config['name']
            m = re.match(r"block([0-9])_conv([0-9])", old_name)
            new_name = "conv{}_{}".format(m.group(1), m.group(2))
            config['name'] = new_name
            new = keras.layers.Conv2D.from_config(config)
        elif isinstance(layer, keras.layers.MaxPooling2D):
            config = layer.get_config()
            config['padding'] = padding
            #new = keras.layers.MaxPooling2D.from_config(config)
            new = keras.layers.AveragePooling2D.from_config(config)
        else:
            print("UNEXPECTED LAYER: ", layer)
            continue
        new_layers.append(new)
    model = keras.models.Sequential(layers=new_layers)
    gatys_weights = np.load("../gatys/gatys.npy", encoding='latin1').item() # encoding because of python2
    # Previously, we loaded weights from Keras' VGG-16. Now, instead, we'll use Gatys' VGG-19!
    for i, new_layer in enumerate(model.layers):
        if 'conv' in new_layer.name:
            layer_weights = gatys_weights[new_layer.name]
            w = layer_weights['weights']
            b = layer_weights['biases']
            new_layer.set_weights([w, b])
    model._padding_mode = padding
    return model 
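Note that this constructor depends on the converted Gatys weight file at ../gatys/gatys.npy; given that file, usage is a single call:

model = construct_gatys_model(padding='same')  # or the default 'valid'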
Example #10
Source File: extract_features.py    From Audio-Vision with MIT License
def get_model(weights_path=None):
    
    ## [17-june-2018]Use residual after this
    # 'fc1' only exists when include_top=True, which in turn requires the
    # default 224x224 input size
    input_tensor = Input(shape=(224, 224, 3))
    base_model = VGG19(weights='imagenet', include_top=True, input_tensor=input_tensor)
    #base_model.summary()
    for layer in base_model.layers:
        layer.trainable = False

    model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
    model.summary()
    #model = VGG19(weights_path)
    #model.summary()
    return model 
Example #11
Source File: keras_applications.py    From spark-deep-learning with Apache License 2.0
def model(self, preprocessed, featurize):
        # Model provided by Keras. All cotributions by Keras are provided subject to the
        # MIT license located at https://github.com/fchollet/keras/blob/master/LICENSE
        # and subject to the below additional copyrights and licenses.
        #
        # Copyright 2014 Oxford University
        #
        # Licensed under the Creative Commons Attribution License CC BY 4.0 ("License").
        # You may obtain a copy of the License at
        #
        #     https://creativecommons.org/licenses/by/4.0/
        #
        return vgg19.VGG19(input_tensor=preprocessed, weights="imagenet",
                           include_top=(not featurize)) 
Example #12
Source File: extract_bottleneck_features.py    From kale with Apache License 2.0
def extract_VGG19(tensor):
	from keras.applications.vgg19 import VGG19, preprocess_input
	return VGG19(weights='imagenet', include_top=False).predict(preprocess_input(tensor)) 
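extract_VGG19 expects a 4-D image tensor already shaped for VGG19. A minimal sketch that builds one from disk (the path dog.jpg is a placeholder):

import numpy as np
from keras.preprocessing import image

img = image.load_img('dog.jpg', target_size=(224, 224))
tensor = np.expand_dims(image.img_to_array(img), axis=0)
features = extract_VGG19(tensor)
print(features.shape)  # (1, 7, 7, 512) for a 224x224 input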
Example #13
Source File: pretrain_imagenet_cnn.py    From hyperspectral_deeplearning_review with GNU General Public License v3.0
def get_model_pretrain(arch):
    modlrate = 1
    if   "VGG16" in arch:       base_model = vgg16.VGG16
    elif "VGG19" in arch:       base_model = vgg19.VGG19
    elif "RESNET50" in arch:    base_model = resnet50.ResNet50
    elif "DENSENET121" in arch: base_model = densenet.DenseNet121
    elif "MOBILENET" in arch:
        base_model = mobilenet.MobileNet
        modlrate = 10
    else: print("model not available"); exit()
    base_model = base_model(weights='imagenet', include_top=False)
    return base_model, modlrate 
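A short usage sketch: the returned modlrate is a multiplier for the fine-tuning learning rate (10x for MobileNet, 1x otherwise); the base rate of 1e-4 below is hypothetical.

base_model, modlrate = get_model_pretrain("VGG19")
learning_rate = 1e-4 * modlrate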
Example #14
Source File: network.py    From autowebcompat with Mozilla Public License 2.0
def create_vgg19_network(input_shape, weights):
    base_model = VGG19(input_shape=input_shape, weights=weights)
    return Model(inputs=base_model.input, outputs=base_model.get_layer('fc2').output) 
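A usage sketch: with the ImageNet top retained up to fc2, the resulting model embeds 224x224 images as 4096-dimensional vectors.

model = create_vgg19_network((224, 224, 3), 'imagenet')
print(model.output_shape)  # (None, 4096)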
Example #15
Source File: truncated_vgg.py    From posewarp-cvpr2018 with MIT License
def vgg_norm():
    img_input = Input(shape=(256, 256, 3))
    x1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x2 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x1)
    x3 = AveragePooling2D((2, 2), strides=(2, 2), name='block1_pool')(x2)

    x4 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x3)
    x5 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x4)
    x6 = AveragePooling2D((2, 2), strides=(2, 2), name='block2_pool')(x5)

    x7 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x6)
    x8 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x7)
    x9 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x8)
    x10 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x9)
    x11 = AveragePooling2D((2, 2), strides=(2, 2), name='block3_pool')(x10)

    x12 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x11)
    x13 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x12)
    x14 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x13)
    x15 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x14)
    x16 = AveragePooling2D((2, 2), strides=(2, 2), name='block4_pool')(x15)

    x17 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x16)
    x18 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x17)
    x19 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x18)
    x20 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x19)
    x21 = AveragePooling2D((2, 2), strides=(2, 2), name='block5_pool')(x20)

    model = Model(inputs=[img_input], outputs=[x1, x2, x4, x5, x7, x8, x9, x10, x12, x13, x14, x15])
    model_orig = VGG19(weights='imagenet', input_shape=(256, 256, 3), include_top=False)

    for i in range(len(model.layers)):
        weights = model_orig.layers[i].get_weights()
        model.layers[i].set_weights(weights)

    return model 
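Since the rebuilt model exposes twelve conv activations as outputs, predict returns one feature map per layer. A sketch, where batch is assumed to be a VGG-preprocessed array of shape (N, 256, 256, 3):

feats = vgg_norm().predict(batch)
print(len(feats))       # 12 feature maps
print(feats[0].shape)   # (N, 256, 256, 64) for block1_conv1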
Example #16
Source File: train_TensorFlow.py    From Neural-Style with MIT License
def get_model(content_layers,style_layers):

  # Load the VGG19 architecture (weights=None here; pass weights='imagenet' for the pre-trained ImageNet weights)
  vgg19           = VGG19(weights=None, include_top=False)

  # We don't need (or want) to train any layers of our pre-trained VGG model, so we set its trainable attribute to False.
  vgg19.trainable = False

  style_model_outputs   =  [vgg19.get_layer(name).output for name in style_layers]
  content_model_outputs =  [vgg19.get_layer(name).output for name in content_layers]
  
  model_outputs = content_model_outputs + style_model_outputs

  # Build model 
  return Model(inputs = vgg19.input, outputs = model_outputs),  vgg19 
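The layer names passed in are ordinary VGG19 layer names; a common Gatys-style choice is one deep content layer plus the first conv of each block for style:

content_layers = ['block5_conv2']
style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1',
                'block4_conv1', 'block5_conv1']
model, vgg19 = get_model(content_layers, style_layers)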
Example #17
Source File: train_TensorFlow.py    From Neural-Style with MIT License
def compute_loss(model, loss_weights, generated_output_activations, gram_style_features, content_features, num_content_layers, num_style_layers):

  generated_content_activations = generated_output_activations[:num_content_layers]
  generated_style_activations   = generated_output_activations[num_content_layers:]

  style_weight, content_weight = loss_weights
  
  style_score = 0
  content_score = 0

  # Accumulate style losses from all layers
  # Here, we equally weight each contribution of each loss layer
  weight_per_style_layer = 1.0 / float(num_style_layers)
  for target_style, comb_style in zip(gram_style_features, generated_style_activations):
    temp = get_style_loss(comb_style[0], target_style)
    style_score += weight_per_style_layer * temp
    
  # Accumulate content losses from all layers 
  weight_per_content_layer = 1.0 / float(num_content_layers)
  for target_content, comb_content in zip(content_features, generated_content_activations):
    temp = get_content_loss(comb_content[0], target_content)
    content_score += weight_per_content_layer * temp

  # Get total loss
  loss = style_weight*style_score + content_weight*content_score 


  return loss, style_score, content_score

############################################################################################################
############################################################################################################
#                                    CREATE STYLE TRANSFER
############################################################################################################
############################################################################################################


# Load the VGG19 model using Keras
Example #18
Source File: Network.py    From MBLLEN with Apache License 2.0
def build_vgg():
    vgg_model = VGG19(include_top=False, weights='imagenet')
    vgg_model.trainable = False
    return Model(inputs=vgg_model.input, outputs=vgg_model.get_layer('block3_conv4').output) 
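Such a frozen feature extractor is typically wrapped in a perceptual loss. A minimal sketch, assuming y_true and y_pred are VGG-preprocessed image tensors (this exact loss form is an assumption, not MBLLEN's code):

from keras import backend as K

vgg = build_vgg()

def perceptual_loss(y_true, y_pred):
    # Mean absolute difference between block3_conv4 feature maps
    return K.mean(K.abs(vgg(y_true) - vgg(y_pred)))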
Example #19
Source File: keras_applications.py    From spark-deep-learning with Apache License 2.0
def _testKerasModel(self, include_top):
        return vgg19.VGG19(weights="imagenet", include_top=include_top) 
Example #20
Source File: test_bench.py    From Keras-inference-time-optimizer with MIT License
def get_tst_neural_net(type):
    model = None
    custom_objects = dict()
    if type == 'mobilenet_small':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((128, 128, 3), depth_multiplier=1, alpha=0.25, include_top=True, weights='imagenet')
    elif type == 'mobilenet':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((224, 224, 3), depth_multiplier=1, alpha=1.0, include_top=True, weights='imagenet')
    elif type == 'mobilenet_v2':
        from keras.applications.mobilenetv2 import MobileNetV2
        model = MobileNetV2((224, 224, 3), depth_multiplier=1, alpha=1.4, include_top=True, weights='imagenet')
    elif type == 'resnet50':
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'inception_v3':
        from keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'inception_resnet_v2':
        from keras.applications.inception_resnet_v2 import InceptionResNetV2
        model = InceptionResNetV2(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'xception':
        from keras.applications.xception import Xception
        model = Xception(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'densenet121':
        from keras.applications.densenet import DenseNet121
        model = DenseNet121(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet169':
        from keras.applications.densenet import DenseNet169
        model = DenseNet169(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet201':
        from keras.applications.densenet import DenseNet201
        model = DenseNet201(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetmobile':
        from keras.applications.nasnet import NASNetMobile
        model = NASNetMobile(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetlarge':
        from keras.applications.nasnet import NASNetLarge
        model = NASNetLarge(input_shape=(331, 331, 3), include_top=True, weights='imagenet')
    elif type == 'vgg16':
        from keras.applications.vgg16 import VGG16
        model = VGG16(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'vgg19':
        from keras.applications.vgg19 import VGG19
        model = VGG19(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'multi_io':
        model = get_custom_multi_io_model()
    elif type == 'multi_model_layer_1':
        model = get_custom_model_with_other_model_as_layer()
    elif type == 'multi_model_layer_2':
        model = get_small_model_with_other_model_as_layer()
    elif type == 'Conv2DTranspose':
        model = get_Conv2DTranspose_model()
    elif type == 'RetinaNet':
        model, custom_objects = get_RetinaNet_model()
    elif type == 'conv3d_model':
        model = get_simple_3d_model()
    return model, custom_objects
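Usage is a single call keyed by the type string; custom_objects is only populated for models that need custom layers (here, RetinaNet):

model, custom_objects = get_tst_neural_net('vgg19')
print(model.count_params())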