Python keras.applications.vgg16.VGG16 Examples

The following are 24 code examples of keras.applications.vgg16.VGG16(). Each example comes from an open-source project; its source file and license are noted above the snippet. You may also want to check out all available functions/classes of the module keras.applications.vgg16.
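Before the examples, a minimal usage sketch: load the ImageNet-pretrained network, preprocess an image, and decode the top predictions. The image path here is hypothetical.

from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras.preprocessing import image
import numpy as np

model = VGG16(weights='imagenet')  # downloads the ImageNet weights on first use
img = image.load_img('elephant.jpg', target_size=(224, 224))  # hypothetical image path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
print(decode_predictions(model.predict(x), top=3)[0])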
Example #1
Source File: model.py    From Image-Caption-Generator with MIT License
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
		# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
		# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(4096,))
	image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
	image_model = Dense(embedding_size, activation='relu')(image_model_1)

	caption_input = Input(shape=(max_len,))
	# mask_zero: inputs are zero-padded to the same length; the mask tells downstream layers to ignore the padded timesteps (purely an efficiency measure)
	caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
	caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

	# Merging the models and creating a softmax classifier
	final_model_1 = concatenate([image_model, caption_model])
	final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
	final_model = Dense(vocab_size, activation='softmax')(final_model_2)

	model = Model(inputs=[image_input, caption_input], outputs=final_model)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	return model 
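A hypothetical call, with illustrative configuration values (the keys match the ones the function reads):

rnnConfig = {'embedding_size': 256, 'dropout': 0.5, 'LSTM_units': 256, 'dense_units': 256}  # illustrative values
model = RNNModel(vocab_size=5000, max_len=34, rnnConfig=rnnConfig, model_type='vgg16')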
Example #2
Source File: TransferLearning_reg.py    From Intelligent-Projects-Using-Python with MIT License
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
		model = ResNet50(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(1)(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# VGG16 Model for transfer Learning 
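The trailing comment marks where this snippet was cut: the VGG16 counterpart is not shown here (Examples #4 and #6 end at the same point). Below is a sketch of what such a variant typically looks like, mirroring the ResNet50 head above; it is an assumption, not the project's actual code.

from keras.applications.vgg16 import VGG16
from keras.layers import Dense, Dropout, GlobalAveragePooling2D
from keras.models import Model

def vgg16_pseudo_sketch(dim=224, freeze_layers=10, full_freeze='N'):
    # Same regression head as resnet_pseudo above, with a VGG16 backbone
    model = VGG16(weights='imagenet', include_top=False)
    x = GlobalAveragePooling2D()(model.output)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(1)(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final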
Example #3
Source File: VideoCaptioningPreProcessing.py    From Intelligent-Projects-Using-Python with MIT License
def video_to_frames(self,video):
        
        with open(os.devnull, "w") as ffmpeg_log:
            if os.path.exists(self.temp_dest):
                print(" cleanup: " + self.temp_dest + "/")
                shutil.rmtree(self.temp_dest)
            os.makedirs(self.temp_dest)
            video_to_frames_cmd = ["ffmpeg",
                                   '-y',                    # overwrite output files without asking
                                   '-i', video,             # input video path
                                   '-vf', "scale=400:300",  # resize every frame to 400x300
                                   '-qscale:v', "2",        # high JPEG quality
                                   '{0}/%06d.jpg'.format(self.temp_dest)]
            subprocess.call(video_to_frames_cmd,
                            stdout=ffmpeg_log, stderr=ffmpeg_log)
                        
# Load the pre-trained VGG16 Model and extract the dense features as output 
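The comment above names the step that follows in the source file but is not shown here. A minimal sketch of that step, under the usual assumptions (ImageNet weights, the 4096-dimensional 'fc2' layer as the feature output):

from keras.applications.vgg16 import VGG16
from keras.models import Model

base = VGG16(weights='imagenet', include_top=True)
# per-frame features are taken as the activations of the 'fc2' dense layer
feature_model = Model(inputs=base.input, outputs=base.get_layer('fc2').output)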
Example #4
Source File: TransferLearning.py    From Intelligent-Projects-Using-Python with MIT License
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
		model = ResNet50(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(5,activation='softmax')(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# VGG16 Model for transfer Learning 
Example #5
Source File: conv_filter_visualization.py    From DeepLearning_Wavelet-LSTM with MIT License
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + K.epsilon())
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x

# build the VGG16 network with ImageNet weights 
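As above, the trailing comment marks where the snippet was cut (Example #7 ends at the same point). In the stock Keras filter-visualization script this step is a single call; a sketch:

from keras.applications import vgg16

model = vgg16.VGG16(weights='imagenet', include_top=False)
print('Model loaded.')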
Example #6
Source File: TransferLearning_ffd.py    From Intelligent-Projects-Using-Python with MIT License
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
        model = ResNet50(weights='imagenet',include_top=False)
        x = model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        out = Dense(5,activation='softmax')(x)
        model_final = Model(inputs=model.input, outputs=out)
        if full_freeze != 'N':
            for layer in model.layers[0:freeze_layers]:
                layer.trainable = False
        return model_final

# VGG16 Model for transfer Learning 
Example #7
Source File: conv_filter_visualization.py    From pCVR with Apache License 2.0
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x

# build the VGG16 network with ImageNet weights 
Example #8
Source File: network.py    From AdvancedEAST with MIT License
def __init__(self):
        self.input_img = Input(name='input_img',
                               shape=(None, None, cfg.num_channels),
                               dtype='float32')
        vgg16 = VGG16(input_tensor=self.input_img,
                      weights='imagenet',
                      include_top=False)
        if cfg.locked_layers:
            # locked first two conv layers
            locked_layers = [vgg16.get_layer('block1_conv1'),
                             vgg16.get_layer('block1_conv2')]
            for layer in locked_layers:
                layer.trainable = False
        self.f = [vgg16.get_layer('block%d_pool' % i).output
                  for i in cfg.feature_layers_range]
        self.f.insert(0, None)
        self.diff = cfg.feature_layers_range[0] - cfg.feature_layers_num 
Example #9
Source File: models.py    From Federated-Learning-Mini-Framework with MIT License
def create_model(input_shape: tuple, nb_classes: int, init_with_imagenet: bool = False, learning_rate: float = 0.01):
    weights = None
    if init_with_imagenet:
        weights = "imagenet"

    model = VGG16(input_shape=input_shape,
                  classes=nb_classes,
                  weights=weights,
                  include_top=False)
    # "Shallow" VGG for Cifar10
    x = model.get_layer('block3_pool').output
    x = layers.Flatten(name='Flatten')(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.Dense(nb_classes)(x)
    x = layers.Softmax()(x)
    model = models.Model(model.input, x)

    loss = losses.categorical_crossentropy
    optimizer = optimizers.SGD(lr=learning_rate, decay=0.99)

    model.compile(optimizer, loss, metrics=["accuracy"])
    return model 
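A hypothetical call for CIFAR-10-shaped inputs (the values are illustrative):

model = create_model(input_shape=(32, 32, 3), nb_classes=10, init_with_imagenet=True)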
Example #10
Source File: training.py    From neural-style-keras with MIT License
def get_loss_net(pastiche_net_output, input_tensor=None):
    '''
    Instantiates a VGG net and applies its layers on top of the pastiche net's
    output.
    '''
    loss_net = vgg16.VGG16(weights='imagenet', include_top=False,
                           input_tensor=input_tensor)
    targets_dict = dict([(layer.name, layer.output) for layer in loss_net.layers])
    i = pastiche_net_output
    # We need to apply all layers to the output of the style net
    outputs_dict = {}
    for l in loss_net.layers[1:]: # Ignore the input layer
        i = l(i)
        outputs_dict[l.name] = i

    return loss_net, outputs_dict, targets_dict 
Example #11
Source File: model.py    From udacity-SDC-baseline with MIT License
def build_vgg16(image_size=None):
	image_size = image_size or (240, 240)
	if K.image_data_format() == 'channels_first':
	    input_shape = (3,) + image_size
	else:
	    input_shape = image_size + (3, )
	bottleneck_model = vgg16.VGG16(include_top=False, 
	                               input_tensor=Input(input_shape))
	#bottleneck_model.trainable = False
	for layer in bottleneck_model.layers:
	    layer.trainable = False

	x = bottleneck_model.input
	y = bottleneck_model.output
	y = Flatten()(y)
	y = BatchNormalization()(y)
	y = Dense(2048, activation='relu')(y)
	y = Dropout(.5)(y)
	y = Dense(1024, activation='relu')(y)
	y = Dropout(.5)(y)
	y = Dense(1)(y)

	model = Model(inputs=x, outputs=y)
	model.compile(optimizer=Adam(lr=1e-4), loss='mse')
	return model 
Example #12
Source File: recurrent_networks.py    From keras-video-classifier with MIT License
def load_model(self, config_file_path, weight_file_path):

        config = np.load(config_file_path, allow_pickle=True).item()  # the config was saved as a pickled dict
        self.num_input_tokens = config['num_input_tokens']
        self.nb_classes = config['nb_classes']
        self.labels = config['labels']
        self.expected_frames = config['expected_frames']
        self.vgg16_include_top = config['vgg16_include_top']
        self.labels_idx2word = dict([(idx, word) for word, idx in self.labels.items()])

        self.model = self.create_model()
        self.model.load_weights(weight_file_path)

        vgg16_model = VGG16(include_top=self.vgg16_include_top, weights='imagenet')
        vgg16_model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])
        self.vgg16_model = vgg16_model 
Example #13
Source File: multiclass_VGGNet.py    From CNNArt with Apache License 2.0
def fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSize=None, learningRate=None,iEpochs=None):
    # parse inputs
    batchSize = 64 if batchSize is None else batchSize
    learningRate = 0.01 if learningRate is None else learningRate
    iEpochs = 300 if iEpochs is None else iEpochs

    print('Training(pre) CNN (VGGNet)')
    print('with lr = ' + str(learningRate) + ', batchSize = ' + str(batchSize))

    # build model
    base = VGG16(include_top=False, weights=None, input_shape=(1, 180, 180))  # single-channel input; assumes channels_first image data format

    top_model = Sequential()
    top_model.add(Flatten(input_shape=base.output_shape[1:]))
    top_model.add(Dense(11, activation='softmax'))
    # top_model.load_weights('fc_model.h5')
    model = Model(inputs=base.input, outputs=top_model(base.output))

    # save names
    _, sPath = os.path.splitdrive(sOutPath)
Example #14
Source File: frcnn.py    From maskrcnn with MIT License
def _model_backbone_headless(self):
        if self.config.backbone_nn_type == 'vgg':
            model = VGG16(weights='imagenet', include_top=False)
            # drop the pooling layer that follows the conv layers
            # https://github.com/keras-team/keras/issues/2371
            # https://github.com/keras-team/keras/issues/6229
            # http://forums.fast.ai/t/how-to-finetune-with-new-keras-api/2328/9
            model.layers.pop()
        else:
            model = ResNet50(weights='imagenet', include_top=False)
        # the VGG weights are excluded from training
        for layer in model.layers:
            layer.trainable = False
        output = model.layers[-1].output
        _input = model.input
        return _input, output 
Example #15
Source File: datasets.py    From DEC-keras with MIT License
def extract_vgg16_features(x):
    from keras.preprocessing.image import img_to_array, array_to_img
    from keras.applications.vgg16 import preprocess_input, VGG16
    from keras.models import Model

    # im_h = x.shape[1]
    im_h = 224
    model = VGG16(include_top=True, weights='imagenet', input_shape=(im_h, im_h, 3))
    # if flatten:
    #     add_layer = Flatten()
    # else:
    #     add_layer = GlobalMaxPool2D()
    # feature_model = Model(model.input, add_layer(model.output))
    feature_model = Model(model.input, model.get_layer('fc1').output)
    print('extracting features...')
    x = np.asarray([img_to_array(array_to_img(im, scale=False).resize((im_h,im_h))) for im in x])
    x = preprocess_input(x)  # VGG16 preprocessing: RGB->BGR conversion and ImageNet mean subtraction
    features = feature_model.predict(x)
    print('Features shape = ', features.shape)

    return features 
Example #16
Source File: test_shap.py    From AIX360 with Apache License 2.0
def test_ShapGradientExplainer(self):

    #     model = VGG16(weights='imagenet', include_top=True)
    #     X, y = shap.datasets.imagenet50()
    #     to_explain = X[[39, 41]]
    #
    #     url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
    #     fname = shap.datasets.cache(url)
    #     with open(fname) as f:
    #         class_names = json.load(f)
    #
    #     def map2layer(x, layer):
    #         feed_dict = dict(zip([model.layers[0].input], [preprocess_input(x.copy())]))
    #         return K.get_session().run(model.layers[layer].input, feed_dict)
    #
    #     e = GradientExplainer((model.layers[7].input, model.layers[-1].output),
    #                           map2layer(preprocess_input(X.copy()), 7))
    #     shap_values, indexes = e.explain_instance(map2layer(to_explain, 7), ranked_outputs=2)
    #
          print("Skipped Shap GradientExplainer") 
Example #17
Source File: neuralnets.py    From EmoPy with GNU Affero General Public License v3.0
def _get_base_model(self):
        """
        :return: base model from Keras based on user-supplied model name
        """
        if self.model_name == 'inception_v3':
            return InceptionV3(weights='imagenet', include_top=False)
        elif self.model_name == 'xception':
            return Xception(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg16':
            return VGG16(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg19':
            return VGG19(weights='imagenet', include_top=False)
        elif self.model_name == 'resnet50':
            return ResNet50(weights='imagenet', include_top=False)
        else:
            raise ValueError('Cannot find base model %s' % self.model_name) 
Example #18
Source File: frcnn.py    From maskrcnn with MIT License
def _model_backbone_plane(self):
        if self.config.backbone_nn_type == 'vgg':
            model = VGG16(weights='imagenet')
        else:
            model = ResNet50(weights='imagenet')
        return model 
Example #19
Source File: deblur_gan.py    From deep_learning with MIT License
def perceptual_loss(y, y_pred):
        vgg = VGG16(include_top=False, weights="imagenet",input_shape=image_shape)
        loss_model = Model(inputs=vgg.input, outputs=vgg.get_layer("block3_conv3").output)
        loss_model.trainable = False
        return K.mean(K.square(loss_model(y) - loss_model(y_pred)))
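Note that perceptual_loss closes over a free variable image_shape, which must be defined in the enclosing scope. A hypothetical setup; the generator model is assumed and not shown in this snippet:

image_shape = (256, 256, 3)  # illustrative; must match the images being compared
generator.compile(optimizer='adam', loss=perceptual_loss)  # 'generator' is a hypothetical deblurring model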
Example #20
Source File: model.py    From Image-Caption-Generator with MIT License
def CNNModel(model_type):
	if model_type == 'inceptionv3':
		model = InceptionV3()
	elif model_type == 'vgg16':
		model = VGG16()
	model.layers.pop()  # drop the final softmax layer
	model = Model(inputs=model.inputs, outputs=model.layers[-1].output)  # penultimate features: 2048-d (InceptionV3) or 4096-d (VGG16)
	return model 
Example #21
Source File: utils.py    From fast-neural-style-keras with MIT License
def get_vgg_activation(layer_name, width, height):
    tensor = K.placeholder((1, height, width, 3))
    model = vgg16.VGG16(input_tensor=tensor, weights='imagenet', include_top=False)
    outputs_dict = {}
    for layer in model.layers:
        outputs_dict[layer.name] = layer.output
        layer.trainable = False
    return K.function([tensor], [outputs_dict[layer_name]]) 
Example #22
Source File: models.py    From ICIAR2018 with MIT License
def __init__(self, batch_size=32):
        model = VGG16(include_top=False, weights="imagenet", pooling="avg")
        x2 = GlobalAveragePooling2D()(model.get_layer("block2_conv2").output)  # 128
        x3 = GlobalAveragePooling2D()(model.get_layer("block3_conv3").output)  # 256
        x4 = GlobalAveragePooling2D()(model.get_layer("block4_conv3").output)  # 512
        x5 = GlobalAveragePooling2D()(model.get_layer("block5_conv3").output)  # 512
        x = Concatenate()([x2, x3, x4, x5])
        self.model = Model(inputs=model.input, outputs=x)
        self.batch_size = batch_size
        self.data_format = K.image_data_format() 
Example #23
Source File: vgg16_feature_extractor.py    From keras-video-classifier with MIT License
def scan_and_extract_vgg16_features(data_dir_path, output_dir_path, model=None, data_set_name=None):
    if data_set_name is None:
        data_set_name = 'UCF-101'

    input_data_dir_path = data_dir_path + '/' + data_set_name
    output_feature_data_dir_path = data_dir_path + '/' + output_dir_path

    if model is None:
        model = VGG16(include_top=True, weights='imagenet')
        model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])
    
    if not os.path.exists(output_feature_data_dir_path):
        os.makedirs(output_feature_data_dir_path)

    y_samples = []
    x_samples = []

    dir_count = 0
    for f in os.listdir(input_data_dir_path):
        file_path = input_data_dir_path + os.path.sep + f
        if not os.path.isfile(file_path):
            output_dir_name = f
            output_dir_path = output_feature_data_dir_path + os.path.sep + output_dir_name
            if not os.path.exists(output_dir_path):
                os.makedirs(output_dir_path)
            dir_count += 1
            for ff in os.listdir(file_path):
                video_file_path = file_path + os.path.sep + ff
                output_feature_file_path = output_dir_path + os.path.sep + ff.split('.')[0] + '.npy'
                x = extract_vgg16_features(model, video_file_path, output_feature_file_path)
                y = f
                y_samples.append(y)
                x_samples.append(x)

        if dir_count == MAX_NB_CLASSES:
            break

    return x_samples, y_samples 
Example #24
Source File: recurrent_networks.py    From keras-video-classifier with MIT License
def load_model(self, config_file_path, weight_file_path):
        if os.path.exists(config_file_path):
            print('loading configuration from ', config_file_path)
        else:
            raise ValueError('cannot locate config file {}'.format(config_file_path))

        config = np.load(config_file_path, allow_pickle=True).item()  # the config was saved as a pickled dict
        self.num_input_tokens = config['num_input_tokens']
        self.nb_classes = config['nb_classes']
        self.labels = config['labels']
        self.expected_frames = config['expected_frames']
        self.vgg16_include_top = config['vgg16_include_top']
        self.labels_idx2word = dict([(idx, word) for word, idx in self.labels.items()])
        self.config = config

        self.model = self.create_model()
        if os.path.exists(weight_file_path):
            print('loading network weights from ', weight_file_path)
        else:
            raise ValueError('cannot locate weight file {}'.format(weight_file_path))

        self.model.load_weights(weight_file_path)

        print('build vgg16 with pre-trained model')
        vgg16_model = VGG16(include_top=self.vgg16_include_top, weights='imagenet')
        vgg16_model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])
        self.vgg16_model = vgg16_model