Python keras.applications.xception.Xception() Examples

The following are 11 code examples of keras.applications.xception.Xception(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.applications.xception, or try the search function.
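
A minimal usage sketch of the API these examples build on (not taken from any of the projects below; the image path is a placeholder):

import numpy as np
from keras.applications.xception import Xception, preprocess_input, decode_predictions
from keras.preprocessing import image

# Full ImageNet classifier: include_top=True expects 299x299 RGB input
model = Xception(weights='imagenet')
img = image.load_img('elephant.jpg', target_size=(299, 299))  # placeholder path
x = image.img_to_array(img)
x = preprocess_input(np.expand_dims(x, axis=0))  # add batch dim, scale to [-1, 1]
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])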
Example #1
Source File: keras_applications.py    From spark-deep-learning with Apache License 2.0
def _imagenet_preprocess_input(x, input_shape):
    """
    For ResNet50 and VGG models. For InceptionV3 and Xception it is fine to use the
    Keras version (e.g. InceptionV3.preprocess_input), as the code path they hit
    works with tf.Tensor inputs. The following was translated to tf ops from
    https://github.com/fchollet/keras/blob/fb4a0849cf4dc2965af86510f02ec46abab1a6a4/keras/applications/imagenet_utils.py#L52
    The Keras implementation could be changed to look like the following, modified
    to work with BGR images (standard in Spark), but we are not doing that for now.
    """
    # assuming 'BGR'
    # Zero-center by mean pixel
    mean = np.ones(input_shape + (3,), dtype=np.float32)
    mean[..., 0] = 103.939
    mean[..., 1] = 116.779
    mean[..., 2] = 123.68
    return x - mean 
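
For contrast with the BGR mean subtraction above (used for ResNet50/VGG), here is a plain-NumPy sketch of what the Keras preprocess_input for Xception/InceptionV3 does instead; this is an illustration, not the tf code path the docstring refers to:

import numpy as np

def xception_style_preprocess(x):
    # 'tf'-mode preprocessing: rescale pixel values from [0, 255] to [-1, 1]
    # rather than subtracting per-channel ImageNet means.
    x = x.astype(np.float32)
    x /= 127.5
    x -= 1.0
    return x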
Example #2
Source File: xception.py    From pygta5 with GNU General Public License v3.0
def get_model(session):

    # create the base pre-trained model
    base_model = Xception(weights=None, include_top=False, input_shape=(270, 480, 3))

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # output layer
    predictions = Dense(session.training_dataset_info['number_of_labels'], activation='softmax')(x)
    # model
    model = Model(inputs=base_model.input, outputs=predictions)

    learning_rate = 0.001
    opt = keras.optimizers.Adam(lr=learning_rate, decay=1e-5)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    return model 
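
The example above trains the full network from scratch (weights=None). When starting from ImageNet weights instead, a common variation is to freeze the Xception base and train only the new head first. A sketch of that variant (the input shape, class count and optimizer settings are illustrative, not taken from pygta5):

import keras
from keras.applications.xception import Xception
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

base_model = Xception(weights='imagenet', include_top=False, input_shape=(299, 299, 3))
for layer in base_model.layers:
    layer.trainable = False  # train only the new classifier head at first

x = GlobalAveragePooling2D()(base_model.output)
x = Dense(1024, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)  # 10 classes, illustrative

model = Model(inputs=base_model.input, outputs=predictions)
model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.Adam(lr=0.001),
              metrics=['accuracy'])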
Example #3
Source File: extract_bottleneck_features.py    From kale with Apache License 2.0
def extract_Xception(tensor):
	from keras.applications.xception import Xception, preprocess_input
	return Xception(weights='imagenet', include_top=False).predict(preprocess_input(tensor)) 
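
A hedged sketch of how the 4-D tensor passed to extract_Xception might be prepared from an image file; the helper and the path are hypothetical, not part of the kale project:

import numpy as np
from keras.preprocessing import image

def path_to_tensor(img_path):
    # Load an image and add a batch dimension -> shape (1, 224, 224, 3)
    img = image.load_img(img_path, target_size=(224, 224))
    return np.expand_dims(image.img_to_array(img), axis=0)

bottleneck_features = extract_Xception(path_to_tensor('dog.jpg'))  # placeholder path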
Example #4
Source File: neuralnets.py    From EmoPy with GNU Affero General Public License v3.0
def _get_base_model(self):
        """
        :return: base model from Keras based on user-supplied model name
        """
        if self.model_name == 'inception_v3':
            return InceptionV3(weights='imagenet', include_top=False)
        elif self.model_name == 'xception':
            return Xception(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg16':
            return VGG16(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg19':
            return VGG19(weights='imagenet', include_top=False)
        elif self.model_name == 'resnet50':
            return ResNet50(weights='imagenet', include_top=False)
        else:
            raise ValueError('Cannot find base model %s' % self.model_name) 
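
An equivalent design is a name-to-constructor mapping instead of the if/elif chain; a sketch, not the EmoPy implementation:

from keras.applications.inception_v3 import InceptionV3
from keras.applications.xception import Xception
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50

_BASE_MODELS = {
    'inception_v3': InceptionV3,
    'xception': Xception,
    'vgg16': VGG16,
    'vgg19': VGG19,
    'resnet50': ResNet50,
}

def get_base_model(model_name):
    try:
        constructor = _BASE_MODELS[model_name]
    except KeyError:
        raise ValueError('Cannot find base model %s' % model_name)
    return constructor(weights='imagenet', include_top=False)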
Example #5
Source File: keras_applications.py    From spark-deep-learning with Apache License 2.0
def model(self, preprocessed, featurize):
        # Model provided by Keras. All contributions by Keras are provided subject to the
        # MIT license located at https://github.com/fchollet/keras/blob/master/LICENSE.
        return xception.Xception(input_tensor=preprocessed, weights="imagenet",
                                 include_top=(not featurize)) 
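
The featurize flag simply toggles include_top: featurize=True yields the convolutional feature extractor, featurize=False the full 1000-class ImageNet classifier. A standalone illustration (not spark-deep-learning code):

from keras.applications import xception

feature_extractor = xception.Xception(weights='imagenet', include_top=False)  # featurize=True
classifier = xception.Xception(weights='imagenet', include_top=True)          # featurize=False
print(feature_extractor.output_shape)  # (None, None, None, 2048) feature maps
print(classifier.output_shape)         # (None, 1000) class probabilities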
Example #6
Source File: keras_applications.py    From spark-deep-learning with Apache License 2.0
def _testKerasModel(self, include_top):
        return xception.Xception(weights="imagenet",
                                 include_top=include_top) 
Example #7
Source File: xception.py    From transfer with MIT License
def get_xception_model(img_dim):
    array_input = Input(shape=(img_dim, img_dim, 3))
    xception = Xception(include_top=True,
                        weights='imagenet',
                        input_tensor=array_input,
                        pooling='avg')
    return xception 
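
Note that in keras.applications the pooling argument only takes effect when include_top=False; with include_top=True as above it is ignored and the model ends in the 1000-way ImageNet classifier (which also ties the input size to 299x299). If pooled 2048-d features were the intent, a sketch of that variant would be (an assumption, not the transfer project's code):

from keras.layers import Input
from keras.applications.xception import Xception

def get_xception_feature_extractor(img_dim):
    array_input = Input(shape=(img_dim, img_dim, 3))
    # include_top=False makes pooling='avg' effective: output is a 2048-d vector
    return Xception(include_top=False,
                    weights='imagenet',
                    input_tensor=array_input,
                    pooling='avg')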
Example #8
Source File: test_keras_applications.py    From keras-onnx with MIT License
def test_Xception(self):
        from keras.applications.xception import Xception
        model = Xception(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path, atol=5e-3, target_size=299)
        self.assertTrue(*res) 
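
Outside the test harness, the keras2onnx package used by the keras-onnx project exposes roughly the following conversion flow; a hedged sketch, and the output file name is a placeholder:

import keras2onnx
from keras.applications.xception import Xception

model = Xception(include_top=True, weights='imagenet')
onnx_model = keras2onnx.convert_keras(model, model.name)
keras2onnx.save_model(onnx_model, 'xception.onnx')  # placeholder output file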
Example #9
Source File: xception.py    From crnn-lid with GNU General Public License v3.0
def create_model(input_shape, config):

    input_tensor = Input(shape=input_shape)  # this assumes K.image_dim_ordering() == 'tf'
    xception_model = Xception(include_top=False, weights=None, input_tensor=input_tensor)
    xception_model.summary()

    x = xception_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(config["num_classes"], activation='softmax')(x)

    return Model(inputs=xception_model.input, outputs=predictions)
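
A hypothetical call, since the real crnn-lid config and input shape are not shown here:

model = create_model((299, 299, 3), {"num_classes": 4})  # illustrative shape and class count
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])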
Example #10
Source File: models.py    From Carla-RL with MIT License
def model_base_Xception(input_shape):
    model = Xception(weights=None, include_top=False, input_shape=input_shape)

    # Grab last model layer and attach global average pooling layer
    x = model.output
    x = GlobalAveragePooling2D()(x)

    return model.input, x

# First small CNN model 
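
A hedged sketch of how the returned (input, pooled-features) pair might be extended into a complete Q-network; the input size and three-action head are assumptions, not taken from Carla-RL:

from keras.layers import Dense
from keras.models import Model

model_input, features = model_base_Xception((240, 320, 3))  # placeholder camera resolution
q_values = Dense(3, activation='linear')(features)          # e.g. 3 discrete actions, hypothetical
q_network = Model(inputs=model_input, outputs=q_values)
q_network.compile(loss='mse', optimizer='adam')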
Example #11
Source File: test_bench.py    From Keras-inference-time-optimizer with MIT License
def get_tst_neural_net(type):
    model = None
    custom_objects = dict()
    if type == 'mobilenet_small':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((128, 128, 3), depth_multiplier=1, alpha=0.25, include_top=True, weights='imagenet')
    elif type == 'mobilenet':
        from keras.applications.mobilenet import MobileNet
        model = MobileNet((224, 224, 3), depth_multiplier=1, alpha=1.0, include_top=True, weights='imagenet')
    elif type == 'mobilenet_v2':
        from keras.applications.mobilenetv2 import MobileNetV2
        model = MobileNetV2((224, 224, 3), depth_multiplier=1, alpha=1.4, include_top=True, weights='imagenet')
    elif type == 'resnet50':
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'inception_v3':
        from keras.applications.inception_v3 import InceptionV3
        model = InceptionV3(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'inception_resnet_v2':
        from keras.applications.inception_resnet_v2 import InceptionResNetV2
        model = InceptionResNetV2(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'xception':
        from keras.applications.xception import Xception
        model = Xception(input_shape=(299, 299, 3), include_top=True, weights='imagenet')
    elif type == 'densenet121':
        from keras.applications.densenet import DenseNet121
        model = DenseNet121(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet169':
        from keras.applications.densenet import DenseNet169
        model = DenseNet169(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'densenet201':
        from keras.applications.densenet import DenseNet201
        model = DenseNet201(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetmobile':
        from keras.applications.nasnet import NASNetMobile
        model = NASNetMobile(input_shape=(224, 224, 3), include_top=True, weights='imagenet')
    elif type == 'nasnetlarge':
        from keras.applications.nasnet import NASNetLarge
        model = NASNetLarge(input_shape=(331, 331, 3), include_top=True, weights='imagenet')
    elif type == 'vgg16':
        from keras.applications.vgg16 import VGG16
        model = VGG16(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'vgg19':
        from keras.applications.vgg19 import VGG19
        model = VGG19(input_shape=(224, 224, 3), include_top=False, pooling='avg', weights='imagenet')
    elif type == 'multi_io':
        model = get_custom_multi_io_model()
    elif type == 'multi_model_layer_1':
        model = get_custom_model_with_other_model_as_layer()
    elif type == 'multi_model_layer_2':
        model = get_small_model_with_other_model_as_layer()
    elif type == 'Conv2DTranspose':
        model = get_Conv2DTranspose_model()
    elif type == 'RetinaNet':
        model, custom_objects = get_RetinaNet_model()
    elif type == 'conv3d_model':
        model = get_simple_3d_model()
    return model, custom_objects
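
A hedged usage sketch showing how a model returned by get_tst_neural_net might be timed on random input; the model choice, batch size and shape are illustrative, and this is not the project's benchmark code:

import time
import numpy as np

model, custom_objects = get_tst_neural_net('xception')
batch = np.random.uniform(size=(8, 299, 299, 3)).astype(np.float32)
start = time.time()
model.predict(batch)
print('Inference time for 8 images: %.3f s' % (time.time() - start))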