Python keras.applications.ResNet50() Examples
The following are 10 code examples of keras.applications.ResNet50(), collected from open-source projects. Each example notes its original project, author, source file, and license. You may also want to check out the other functions and classes available in the keras.applications module.
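Before the project-specific examples, here is a minimal, self-contained sketch of the typical ResNet50 classification workflow. It is not taken from any of the projects below, and the image path 'elephant.jpg' is only a placeholder.

# Sketch: classify one image with an ImageNet-pretrained ResNet50 ('elephant.jpg' is a placeholder path).
import numpy as np
from keras.applications import ResNet50
from keras.applications.resnet50 import preprocess_input, decode_predictions
from keras.preprocessing.image import load_img, img_to_array

model = ResNet50(weights='imagenet')                       # downloads/loads the ImageNet weights

img = load_img('elephant.jpg', target_size=(224, 224))     # any RGB image resized to 224x224
x = np.expand_dims(img_to_array(img), axis=0)              # shape (1, 224, 224, 3)
x = preprocess_input(x)                                    # ResNet50-specific preprocessing

preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])                 # top-3 (class_id, class_name, probability)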

Example #1
Source Project: DeepTL-Lane-Change-Classification Author: Ekim-Yurtsever File: train.py License: MIT License
def set_dataset(image_path, label_path, feature_extract_option=0, feature_path='/mit_resnet_train.pickle'):

    df = pd.read_csv(label_path, header=0, usecols=[3, 4])
    target_data = np.zeros([len(df['no_event'].tolist()), 2])
    target_data[:, 0] = df['no_event'].tolist()
    target_data[:, 1] = df['critical'].tolist()

    data = DataSet()
    data.risk_one_hot = target_data

    if feature_extract_option == 0:
        # Use ResNet50 without its final softmax layer as a fixed feature extractor.
        backbone_model = ResNet50(weights='imagenet')
        backbone_model = Model(inputs=backbone_model.input,
                               outputs=backbone_model.get_layer(index=-2).output)
        data.model = backbone_model
        data.extract_features(image_path, option='fixed frame amount', number_of_frames=190)
    elif feature_extract_option == 1:
        data.video_features = DataSet.loader(image_path + feature_path)

    return data
Example #2
Source Project: spark-deep-learning Author: databricks File: test_pieces.py License: Apache License 2.0
def test_bare_keras_module(self):
    """ Keras GraphFunctions should give the same result as standard Keras models """
    img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

    for model_gen, preproc_fn, target_size in [(InceptionV3, iv3.preprocess_input, model_sizes['InceptionV3']),
                                               (Xception, xcpt.preprocess_input, model_sizes['Xception']),
                                               (ResNet50, rsnt.preprocess_input, model_sizes['ResNet50'])]:
        keras_model = model_gen(weights="imagenet")
        _preproc_img_list = []
        for fpath in img_fpaths:
            img = load_img(fpath, target_size=target_size)
            # WARNING: must apply expand dimensions first, or ResNet50 preprocessor fails
            img_arr = np.expand_dims(img_to_array(img), axis=0)
            _preproc_img_list.append(preproc_fn(img_arr))

        imgs_input = np.vstack(_preproc_img_list)
        preds_ref = keras_model.predict(imgs_input)

        gfn_bare_keras = GraphFunction.fromKeras(keras_model)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            feeds, fetches = issn.importGraphFunction(gfn_bare_keras)
            preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_input})

        np.testing.assert_array_almost_equal(preds_tgt, preds_ref,
                                             decimal=self.featurizerCompareDigitsExact)
Example #3
Source Project: nyoka Author: nyoka-pmml File: _validateSchema.py License: Apache License 2.0
def test_validate_keras_resnet(self):
    input_tensor = Input(shape=(224, 224, 3))
    model = ResNet50(weights="imagenet", input_tensor=input_tensor)
    file_name = "keras" + model.name + ".pmml"
    pmml_obj = KerasToPmml(model, dataSet="image", predictedClasses=[str(i) for i in range(1000)])
    pmml_obj.export(open(file_name, 'w'), 0)
    self.assertEqual(self.schema.is_valid(file_name), True)
Example #4
Source Project: DeepTL-Lane-Change-Classification Author: Ekim-Yurtsever File: models.py License: MIT License
def build_transfer_ResNet_to_LSTM(self, input_shape, optimizer=Adam(lr=1e-6, decay=1e-5)):

    input_sequences = Input(shape=input_shape)

    # ResNet50 truncated before its softmax classifier, applied to every frame via TimeDistributed.
    backbone_model = ResNet50(weights='imagenet')
    backbone_model = Model(inputs=backbone_model.input,
                           outputs=backbone_model.get_layer(index=-2).output)

    feature_sequences = TimeDistributed(backbone_model)(input_sequences)
    lstm_out = LSTM(20, return_sequences=False)(feature_sequences)
    prediction = Dense(2, activation='softmax', kernel_initializer='ones')(lstm_out)

    self.model = Model(inputs=input_sequences, outputs=prediction)
    self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
Example #5
Source Project: DeepLearning_Wavelet-LSTM Author: hello-sea File: applications_test.py License: MIT License
def test_resnet50():
    app = applications.ResNet50
    last_dim = 2048
    _test_application_basic(app)
    _test_application_notop(app, last_dim)
    _test_application_variable_input_channels(app, last_dim)
    _test_app_pooling(app, last_dim)
Example #6
Source Project: plaidbench Author: plaidml File: resnet50.py License: Apache License 2.0
def build_model():
    import keras.applications as kapp
    from keras.backend import floatx
    from keras.layers import Input
    inputLayer = Input(shape=(224, 224, 3), dtype=floatx())
    return kapp.ResNet50(input_tensor=inputLayer)
Example #7
Source Project: simple-keras-rest-api Author: jrosebr1 File: run_keras_server.py License: MIT License
def load_model():
    # load the pre-trained Keras model (here we are using a model
    # pre-trained on ImageNet and provided by Keras, but you can
    # substitute in your own networks just as easily)
    global model
    model = ResNet50(weights="imagenet")
Example #8
Source Project: cv Author: vsmolyakov File: prepare_dataset.py License: MIT License
def load_encoding_model():
    model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    return model
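The example above only constructs the encoder. A small usage sketch (hypothetical, not taken from the project) of turning one image into a feature tensor with that model could look like this:

# Hypothetical usage of load_encoding_model(): encode a single image ('frame.jpg' is a placeholder path).
import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.resnet50 import preprocess_input

encoder = load_encoding_model()
img = load_img('frame.jpg', target_size=(224, 224))
x = preprocess_input(np.expand_dims(img_to_array(img), axis=0))
features = encoder.predict(x)
# With include_top=False the result is a convolutional feature tensor; depending on the Keras
# version it is either (1, 1, 1, 2048) (after the built-in average pooling) or (1, 7, 7, 2048)
# (output of the last convolutional block).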
Example #9
Source Project: vergeml Author: mme File: features.py License: MIT License
def get_imagenet_architecture(architecture, variant, size, alpha, output_layer, include_top=False, weights='imagenet'):
    from keras import applications, Model

    if include_top:
        assert output_layer == 'last'

    if size == 'auto':
        size = get_image_size(architecture, variant, size)

    shape = (size, size, 3)

    if architecture == 'densenet':
        if variant == 'auto':
            variant = 'densenet-121'
        if variant == 'densenet-121':
            model = applications.DenseNet121(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-169':
            model = applications.DenseNet169(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-201':
            model = applications.DenseNet201(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-resnet-v2':
        model = applications.InceptionResNetV2(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'mobilenet':
        model = applications.MobileNet(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'mobilenet-v2':
        model = applications.MobileNetV2(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'nasnet':
        if variant == 'auto':
            variant = 'large'
        if variant == 'large':
            model = applications.NASNetLarge(weights=weights, include_top=include_top, input_shape=shape)
        else:
            model = applications.NASNetMobile(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'resnet-50':
        model = applications.ResNet50(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-16':
        model = applications.VGG16(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-19':
        model = applications.VGG19(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'xception':
        model = applications.Xception(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-v3':
        model = applications.InceptionV3(weights=weights, include_top=include_top, input_shape=shape)

    if output_layer != 'last':
        try:
            if isinstance(output_layer, int):
                layer = model.layers[output_layer]
            else:
                layer = model.get_layer(output_layer)
        except Exception:
            raise VergeMLError('layer not found: {}'.format(output_layer))
        model = Model(inputs=model.input, outputs=layer.output)

    return model
Example #10
Source Project: surround_vehicles_awareness Author: ndrplz File: model.py License: MIT License
def SDPN(summary=True):
    """
    Create and return Semantic-aware Dense Prediction Network.

    Parameters
    ----------
    summary : bool
        If True, network summary is printed to stdout.

    Returns
    -------
    model : keras Model
        Model of SDPN
    """
    input_coords = Input(shape=(4,))
    input_crop = Input(shape=(3, 224, 224))

    # extract feature from image crop
    resnet = ResNet50(include_top=False, weights='imagenet')
    for layer in resnet.layers:  # set resnet as non-trainable
        layer.trainable = False

    crop_encoded = resnet(input_crop)  # shape of `crop_encoded` is 2048x1x1
    crop_encoded = Reshape(target_shape=(2048,))(crop_encoded)

    # encode input coordinates
    h = Dense(256, activation='relu')(input_coords)
    h = Dropout(p=0.25)(h)
    h = Dense(256, activation='relu')(h)
    h = Dropout(p=0.25)(h)
    h = Dense(256, activation='relu')(h)

    # merge feature vectors from crop and coords
    merged = merge([crop_encoded, h], mode='concat')

    # decoding into output coordinates
    h = Dense(1024, activation='relu')(merged)
    h = Dropout(p=0.25)(h)
    h = Dense(1024, activation='relu')(h)
    h = Dropout(p=0.25)(h)
    h = Dense(512, activation='relu')(h)
    h = Dropout(p=0.25)(h)
    h = Dense(256, activation='relu')(h)
    h = Dropout(p=0.25)(h)
    h = Dense(128, activation='relu')(h)
    h = Dropout(p=0.25)(h)

    output_coords = Dense(4, activation='tanh')(h)

    model = Model(input=[input_coords, input_crop], output=output_coords)

    if summary:
        model.summary()

    return model