Python keras.applications.resnet50.ResNet50() Examples

The following are code examples showing how to use keras.applications.resnet50.ResNet50(). They are taken from open source Python projects.
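Before the project excerpts, here is a minimal, self-contained sketch of the typical ResNet50() workflow: load the ImageNet-pretrained network, preprocess a 224x224 image, and decode the top predictions. This is an illustrative quick start, not taken from any of the projects below, and 'elephant.jpg' is only a placeholder file name.

import numpy as np
from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from keras.preprocessing import image

model = ResNet50(weights='imagenet')        # downloads the ImageNet weights on first use

img = image.load_img('elephant.jpg', target_size=(224, 224))
x = image.img_to_array(img)                 # (224, 224, 3) float array
x = np.expand_dims(x, axis=0)               # (1, 224, 224, 3) batch of one
x = preprocess_input(x)                     # RGB -> BGR and ImageNet mean subtraction

preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])  # [(class_id, class_name, probability), ...]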

Example 1
Project: smart_image_classifier   Author: anuragmishracse   File: smic.py    MIT License 7 votes
def __init__(self, color = 1, dim = 256, gpu=0):
		self.color = color
		self.dim = dim
		self.gpu = gpu
		self.TRAIN_PATH = 'train/'
		self.TEST_PATH = 'test/'
		self.model = None
		self.label_map = {}
		self.rev_label_map = {}
		self.train_images = []
		self.train_labels = []
		self.num_classes = -1
		self.hyperparameters = {}
		self.transfer_models = {'vgg16' : VGG16, 'vgg19' : VGG19, 'resnet50' : ResNet50, 'inception_v3' : InceptionV3}
		self.optimizers = {'sgd' : 'SGD', 'rmsprop' : 'RMSprop', 'adam' : 'Adam'}
		self.layers = {'dense' : Dense, 'dropout' : Dropout} 
Example 2
Project: spark-deep-learning   Author: databricks   File: keras_applications.py    Apache License 2.0 6 votes
def _imagenet_preprocess_input(x, input_shape):
    """
    For ResNet50 and VGG models. For InceptionV3 and Xception it is okay to use the
    keras version (e.g. InceptionV3.preprocess_input), as the code path they hit
    works with tf.Tensor inputs. The following was translated to tf ops from
    https://github.com/fchollet/keras/blob/fb4a0849cf4dc2965af86510f02ec46abab1a6a4/keras/applications/imagenet_utils.py#L52
    We could change the keras implementation to look like the following, modified to
    work with BGR images (the standard in Spark), but we are not doing that for now.
    """
    # assuming 'BGR'
    # Zero-center by mean pixel
    mean = np.ones(input_shape + (3,), dtype=np.float32)
    mean[..., 0] = 103.939
    mean[..., 1] = 116.779
    mean[..., 2] = 123.68
    return x - mean 
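For comparison, keras.applications.resnet50.preprocess_input performs the same zero-centering, after first flipping RGB to BGR (whereas the Spark code above assumes the input is already BGR). A rough equivalence sketch on a NumPy batch, assuming the 'channels_last' data format:

import numpy as np
from keras.applications.resnet50 import preprocess_input

x = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype(np.float32)  # dummy RGB batch

# Keras' own preprocessing: RGB -> BGR, then subtract the ImageNet channel means.
x_keras = preprocess_input(x.copy())

# The same computation written out by hand, mirroring the translation above.
mean_bgr = np.array([103.939, 116.779, 123.68], dtype=np.float32)
x_manual = x[..., ::-1] - mean_bgr

print(np.allclose(x_keras, x_manual))  # True (up to floating point rounding)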
Example 3
Project: deep-learning-keras   Author: arnaudvl   File: cnn_transfer_learning.py    MIT License 6 votes
def _get_base_model(self):
        """
        Define base model used in transfer learning.
        """
        if self.base_model=='VGG16':
            base_model = VGG16(weights='imagenet', include_top=False, input_shape=self.X.shape[1:])
        elif self.base_model=='VGG19':
            base_model = VGG19(weights='imagenet', include_top=False, input_shape=self.X.shape[1:])
        elif self.base_model=='ResNet50':
            base_model = ResNet50(weights='imagenet', include_top=False, input_shape=self.X.shape[1:])
        elif self.base_model=='Xception':
            base_model = Xception(weights='imagenet', include_top=False, input_shape=self.X.shape[1:])
        elif self.base_model=='InceptionV3':
            base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=self.X.shape[1:])
        elif self.base_model=='InceptionResNetV2':
            base_model = InceptionResNetV2(weights='imagenet', include_top=False, input_shape=self.X.shape[1:])
        elif self.base_model=='MobileNet':
            base_model = MobileNet(weights='imagenet', include_top=False, input_shape=self.X.shape[1:])
        else:
            raise ValueError('Valid base model values are: "VGG16","VGG19","ResNet50","Xception", \
                             "InceptionV3","InceptionResNetV2","MobileNet".')
        return base_model 
Example 4
Project: fresh_eyes   Author: ksteinfe   File: server.py    MIT License 6 votes
def initialize(config):
    global MODL
    MODL = resnet50.ResNet50() # Load Keras' ResNet50 model that was pre-trained against the ImageNet database
    #print(output_features)
    console_msg = """#################### FRESH EYES ####################
I've just loaded the ResNet50 Keras Model pre-trained on the ImageNet database
To check that the server is working, go to http://localhost:{0}/
Press Ctrl+C to stop the server
#################### FRESH EYES ####################
"""
    print(console_msg.format(config['port_num']))


###############################################################
##                                                           ##
##                          flask                            ##
##                                                           ##
############################################################### 
Example 5
Project: lost   Author: l3p-cv   File: cluster_resnet.py    MIT License 6 votes
def main(self):
        self.logger.info('Will load keras model')
        model = ResNet50(weights='imagenet')
        self.logger.info('Keras model loaded')
        feature_list = []
        img_path_list = []
        for raw_file in self.inp.raw_files:
            media_path = raw_file.path
            file_list = os.listdir(media_path)
            total = float(len(file_list))
            for index, img_file in enumerate(file_list):
                img_path = os.path.join(media_path, img_file)
                img_path_list.append(img_path)
                img = image.load_img(img_path, target_size=(224, 224))
                x = keras_image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                # extract features
                scores = model.predict(x)
                sim_class = np.argmax(scores)
                print('Scores {}\nSimClass: {}'.format(scores, sim_class))
                self.outp.request_annos(img_path, img_sim_class=sim_class)
                self.logger.info('Requested annotation for: {} (cluster: {})'.format(img_path, sim_class))
                self.update_progress(index*100/total) 
Example 6
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning.py    MIT License 6 votes
def inception_pseudo(self,dim=224,freeze_layers=30,full_freeze='N'):
		model = InceptionV3(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(5,activation='softmax')(x)
		model_final = Model(input = model.input,outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# ResNet50 Model for transfer Learning 
Example 7
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning.py    MIT License 6 votes
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
		model = ResNet50(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(5,activation='softmax')(x)
		model_final = Model(input = model.input,outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# VGG16 Model for transfer Learning 
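The trailing comment refers to a VGG16 variant that is not included in this excerpt. By analogy with resnet_pseudo above, such a method would plausibly look like the following hypothetical sketch (not the project's actual code):

def vgg16_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    # Hypothetical VGG16 counterpart, modelled on resnet_pseudo above.
    model = VGG16(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final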
Example 8
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_reg.py    MIT License 6 votes
def inception_pseudo(self,dim=224,freeze_layers=30,full_freeze='N'):
		model = InceptionV3(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(1)(x)
		model_final = Model(input = model.input,outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# ResNet50 Model for transfer Learning 
Example 9
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_reg.py    MIT License 6 votes
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
		model = ResNet50(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(1)(x)
		model_final = Model(input = model.input,outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# VGG16 Model for transfer Learning 
Example 10
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_ffd.py    MIT License 6 votes
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
        model = ResNet50(weights='imagenet',include_top=False)
        x = model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        out = Dense(5,activation='softmax')(x)
        model_final = Model(input = model.input,outputs=out)
        if full_freeze != 'N':
            for layer in model.layers[0:freeze_layers]:
                layer.trainable = False
        return model_final

# VGG16 Model for transfer Learning 
Example 11
Project: keras-transfer-learning   Author: hequn   File: resnet50.py    MIT License 6 votes
def _create(self):
        base_model = KerasResNet50(include_top=False, input_tensor=self.get_input_tensor())
        self.make_net_layers_non_trainable(base_model)

        x = base_model.output
        x = Flatten()(x)
        x = Dropout(0.5)(x)
        # we could achieve almost the same accuracy without this layer, but this one helps later
        # for the novelty detection part and brings much more useful features.
        x = Dense(self.noveltyDetectionLayerSize, activation='elu', name=self.noveltyDetectionLayerName)(x)
        x = Dropout(0.5)(x)

        x = Dense(1024, activation='elu', name='fc_h')(x)
        x = Dropout(0.5)(x)

        predictions = Dense(len(config.classes), activation='softmax', name='predictions')(x)

        self.model = Model(input=base_model.input, output=predictions) 
Example 12
Project: Hybrid_Task_Cascade   Author: qixuxiang   File: classfy_model.py    Apache License 2.0 6 votes
def build_finetune_model(base_model, dropout, fc_layers, num_classes):
    base_model = ResNet50(weights='imagenet', 
                      include_top=False, input_shape=(HEIGHT, WIDTH, 3))
    for layer in base_model.layers:
        layer.trainable = False

    x = base_model.output
    x = Flatten()(x)
    for fc in fc_layers:
        x = Dense(fc, activation='relu')(x) # New FC layer, random init
        x = Dropout(dropout)(x)
    # New softmax layer
    predictions = Dense(num_classes, activation='softmax')(x) 
    
    finetune_model = Model(inputs=base_model.input, outputs=predictions)

    return finetune_model 
Example 13
Project: maskrcnn   Author: shtamura   File: frcnn.py    MIT License 6 votes
def _model_backbone_headless(self):
        if self.config.backbone_nn_type == 'vgg':
            model = VGG16(weights='imagenet', include_top=False)
            # Remove the pooling layer that follows the convolutional layers
            # https://github.com/keras-team/keras/issues/2371
            # https://github.com/keras-team/keras/issues/6229
            # http://forums.fast.ai/t/how-to-finetune-with-new-keras-api/2328/9
            model.layers.pop()
        else:
            model = ResNet50(weights='imagenet', include_top=False)
        # The VGG weights are not trained (kept frozen)
        for layer in model.layers:
            layer.trainable = False
        output = model.layers[-1].output
        _input = model.input
        return _input, output 
Example 14
Project: Invasive_Species_Monitoring   Author: sdhayalk   File: Transfer_ResNet_Keras.py    MIT License 6 votes
def ResNet50_model():
    input_tensor = Input(shape=(dimension, dimension, number_of_channels))
    model = ResNet50(input_tensor=input_tensor, weights='imagenet', include_top=True)
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.layers[-1].outbound_nodes = []
    x = Dense(number_of_classes, activation='softmax')(model.output)
    model = Model(model.input, x)

    # the first 24 layers are not trained
    for layer in model.layers[:24]:
        layer.trainable = False

    lrate = 0.001
    decay = 0.000001
    adam = Adam(lr=lrate, decay=decay)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])

    print(model.summary())
    return model 
Example 15
Project: face_age_gender   Author: CVxTz   File: baseline_age.py    MIT License 6 votes
def get_model(n_classes=1):

    base_model = ResNet50(weights='imagenet', include_top=False)

    #for layer in base_model.layers:
    #    layer.trainable = False

    x = base_model.output
    x = GlobalMaxPooling2D()(x)
    x = Dropout(0.5)(x)
    x = Dense(100, activation="relu")(x)
    x = Dropout(0.5)(x)
    if n_classes == 1:
        x = Dense(n_classes, activation="sigmoid")(x)
    else:
        x = Dense(n_classes, activation="softmax")(x)

    base_model = Model(base_model.input, x, name="base_model")
    if n_classes == 1:
        base_model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer="adam")
    else:
        base_model.compile(loss="sparse_categorical_crossentropy", metrics=['acc'], optimizer="adam")

    return base_model 
Example 16
Project: face_age_gender   Author: CVxTz   File: baseline_gender.py    MIT License 6 votes
def get_model(n_classes=1):

    base_model = ResNet50(weights='imagenet', include_top=False)

    #for layer in base_model.layers:
    #    layer.trainable = False

    x = base_model.output
    x = GlobalMaxPooling2D()(x)
    x = Dropout(0.5)(x)
    x = Dense(100, activation="relu")(x)
    x = Dropout(0.5)(x)
    if n_classes == 1:
        x = Dense(n_classes, activation="sigmoid")(x)
    else:
        x = Dense(n_classes, activation="softmax")(x)

    base_model = Model(base_model.input, x, name="base_model")
    if n_classes == 1:
        base_model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer="adam")
    else:
        base_model.compile(loss="sparse_categorical_crossentropy", metrics=['acc'], optimizer="adam")

    return base_model 
Example 17
Project: ccyclegan   Author: gtesei   File: classifier.py    MIT License 6 votes
def build_discriminator2(self):

        #img = Input(shape=self.img_shape)
        
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, 100)(label))
        
        #flat_img = Flatten()(img)   
        
        #model_input = multiply([flat_img, label_embedding])
        
        base_model  = ResNet50(weights= 'imagenet', include_top=False, input_shape= (48,48,3))
        
        # add a global spatial average pooling layer
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        
        #latent_vect = Flatten()(x)
        latent_concat = concatenate([x, label_embedding])
        # let's add a fully-connected layer
        f = Dense(1024, activation='relu')(latent_concat)
        # and a logistic layer -- let's say we have 200 classes
        predictions = Dense(1, activation='sigmoid')(f)
        
        return Model([label,base_model.input], predictions) 
Example 18
Project: ccyclegan   Author: gtesei   File: ccyclegan_t8.py    MIT License 6 votes
def build_discriminator2(self):

        #img = Input(shape=self.img_shape)
        
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, 100)(label))
        
        #flat_img = Flatten()(img)   
        
        #model_input = multiply([flat_img, label_embedding])
        
        base_model  = ResNet50(weights= 'imagenet', include_top=False, input_shape= (48,48,3))
        
        # add a global spatial average pooling layer
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        
        #latent_vect = Flatten()(x)
        latent_concat = concatenate([x, label_embedding])
        # let's add a fully-connected layer
        f = Dense(1024, activation='relu')(latent_concat)
        # and a logistic layer -- let's say we have 200 classes
        predictions = Dense(1, activation='sigmoid')(f)
        
        return Model([label,base_model.input], predictions) 
Example 19
Project: ccyclegan   Author: gtesei   File: ccyclegan_t7.py    MIT License 6 votes
def build_discriminator2(self):

        #img = Input(shape=self.img_shape)
        
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, 100)(label))
        
        #flat_img = Flatten()(img)   
        
        #model_input = multiply([flat_img, label_embedding])
        
        base_model  = ResNet50(weights= 'imagenet', include_top=False, input_shape= (48,48,3))
        
        # add a global spatial average pooling layer
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        
        #latent_vect = Flatten()(x)
        latent_concat = concatenate([x, label_embedding])
        # let's add a fully-connected layer
        f = Dense(1024, activation='relu')(latent_concat)
        # and a logistic layer -- let's say we have 200 classes
        predictions = Dense(1, activation='sigmoid')(f)
        
        return Model([label,base_model.input], predictions) 
Example 20
Project: ccyclegan   Author: gtesei   File: ccyclegan_t6.py    MIT License 6 votes
def build_discriminator2(self):

        #img = Input(shape=self.img_shape)
        
        label = Input(shape=(1,), dtype='int32')
        label_embedding = Flatten()(Embedding(self.num_classes, 100)(label))
        
        #flat_img = Flatten()(img)   
        
        #model_input = multiply([flat_img, label_embedding])
        
        base_model  = ResNet50(weights= 'imagenet', include_top=False, input_shape= (48,48,3))
        
        # add a global spatial average pooling layer
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        
        #latent_vect = Flatten()(x)
        latent_concat = concatenate([x, label_embedding])
        # let's add a fully-connected layer
        f = Dense(1024, activation='relu')(latent_concat)
        # and a logistic layer -- let's say we have 200 classes
        predictions = Dense(1, activation='sigmoid')(f)
        
        return Model([label,base_model.input], predictions) 
Example 21
Project: SmooFaceEngine   Author: wotchin   File: cnn_models.py    Apache License 2.0 6 votes
def ResNet50(input_shape, num_classes):
    # wrap ResNet50 from keras, because ResNet50 is so deep.
    from keras.applications.resnet50 import ResNet50
    input_tensor = Input(shape=input_shape, name="input")
    x = ResNet50(include_top=False,
                 weights=None,
                 input_tensor=input_tensor,
                 input_shape=None,
                 pooling="avg",
                 classes=num_classes)
    x = Dense(units=2048, name="feature")(x.output)
    return Model(inputs=input_tensor, outputs=x)


# Implement ResNet's blocks.
# Two kinds of blocks are implemented:
# one is the basic block, the other is the bottleneck block.
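For context on that comment: a "basic" residual block is two 3x3 convolutions plus a shortcut connection. A minimal functional-API sketch of the general pattern (an illustration of the idea, not the SmooFaceEngine implementation):

from keras import backend as K
from keras.layers import Conv2D, BatchNormalization, Activation, add

def basic_block(x, filters, strides=1):
    # Two 3x3 convolutions with batch norm, plus a (possibly projected) shortcut.
    shortcut = x
    y = Conv2D(filters, 3, strides=strides, padding='same')(x)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Conv2D(filters, 3, padding='same')(y)
    y = BatchNormalization()(y)
    if strides != 1 or K.int_shape(x)[-1] != filters:
        # 1x1 projection when the spatial size or channel count changes
        shortcut = Conv2D(filters, 1, strides=strides, padding='same')(x)
        shortcut = BatchNormalization()(shortcut)
    y = add([y, shortcut])
    return Activation('relu')(y)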
Example 22
Project: ml_idiot   Author: songjun54cm   File: ExtractImageFeat_keras.py    GNU General Public License v3.0 6 votes
def ext_img_feat(image_folder, batch_size):
    base_model = ResNet50(weights='imagenet')
    img_model = Model(input=base_model.input, output=base_model.get_layer('res5c').output)

    img_list = os.listdir(image_folder)
    all_img_feats = list()
    si = 0
    while si < len(img_list):
        batch_img = img_list[si:si+batch_size]
        si += batch_size
        imgs = []
        for imgf in batch_img:
            img_path = os.path.join(image_folder, imgf)
            img = image.load_img(img_path, target_size=(224, 224))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            imgs.append(x)
        imgs = np.concatenate(imgs, axis=0)
        img_feats = img_model.predict(imgs)
        all_img_feats.append(img_feats)
        print('%d images extracted\r'%si), 
Example 23
Project: eye-in-the-sky   Author: manideep2510   File: pspnet.py    Apache License 2.0 5 votes
def resnet(x, input_shape):
    
    # Decreases the dimensions of the input image by a factor of 32
    x = ResNet50(include_top=False, weights=None, input_tensor=x, input_shape=(512,512,3)).output
    
    # Upsampling by 2
    x = UpSampling2D(size = (2,2))(x)
    ##x = BatchNormalization()(x)
    
    # Again Upsampling by 2 so that we get an output feature map of size 1/8th of the initial image
    x = UpSampling2D(size = (2,2))(x)
    ##res = BatchNormalization()(x)
    x = UpSampling2D(size = (2,2))(x)
    return x 
Example 24
Project: eye-in-the-sky   Author: manideep2510   File: pspnet.py    Apache License 2.0 5 votes
def PSPNet(n_classes = 3, input_shape = (128, 128, 4)):
    
    # Input to the model
    inputs = Input(input_shape)
    
    '''in_shape = inputs.shape
    out_shape = (in_shape[1], in_shape[2], 3)'''
    
    # Converting 4 channel input to a 3 channel map using Encoder-Decoder network 
    # to give it as an input to ResNet50 with pretrained weights
    res_input = encoder_decoder(inputs)            
    
    res_input_shape = K.int_shape(res_input)
    res_input_shape = (res_input_shape[1],res_input_shape[2],res_input_shape[3])
    
    # Passing the 3 channel map into ResNet50 followed by 2 upsampling layers 
    # to get an output of shape exactly 1/8th of the input map shape
    res = resnet(res_input, input_shape = res_input_shape)                        
    
    # Pyramid Pooling Module
    ppmodule_out = pyramid_pooling_module(res)                
    
    # Final Conv layers and output
    x = Conv2D(512, 3, activation = 'relu', padding='same')(ppmodule_out)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    
    x = Conv2D(n_classes, 1)(x)
    #x = interpolation(x, shape = (input_shape[0], input_shape[1]))
    x = Lambda(interpolation, arguments={'shape': (input_shape[0], input_shape[1])})(x)
    out = Activation('softmax')(x)
    
    model = Model(inputs = inputs, outputs = out)
    
    adam = Adam(lr = 0.00001)
    
    model.compile(optimizer = adam, loss = 'categorical_crossentropy', metrics = ['accuracy'])
    
    model.summary()
    return model 
Example 25
Project: cyclegan-keras-art-attrs   Author: hollygrimm   File: cyclegan_attr_model.py    MIT License 5 votes
def build_perceptual_model(self, input_shape, trainable=False, pop=True):
        # import ResNet50 pretrained on imagenet
        model = ResNet50(include_top=False, weights='imagenet', input_shape=input_shape)
        if pop == True:
            model.layers.pop() # pop pooling layer
            model.layers.pop() # pop last activation layer

        for layer in model.layers:
            layer.trainable = trainable
        
        print('Resnet50 for Perceptual loss:')
        model.summary()
        return model 
Example 26
Project: spark-deep-learning   Author: databricks   File: keras_applications.py    Apache License 2.0 5 votes
def model(self, preprocessed, featurize):
        # Model provided by Keras. All contributions by Keras are provided subject to the
        # MIT license located at https://github.com/fchollet/keras/blob/master/LICENSE
        # and subject to the below additional copyrights and licenses.
        #
        # The MIT License (MIT)
        #
        # Copyright (c) 2016 Shaoqing Ren
        #
        # Permission is hereby granted, free of charge, to any person obtaining a copy
        # of this software and associated documentation files (the "Software"), to deal
        # in the Software without restriction, including without limitation the rights
        # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
        # copies of the Software, and to permit persons to whom the Software is
        # furnished to do so, subject to the following conditions:
        #
        # The above copyright notice and this permission notice shall be included in all
        # copies or substantial portions of the Software.
        #
        # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
        # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
        # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
        # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
        # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
        # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
        # SOFTWARE.
        return resnet50.ResNet50(input_tensor=preprocessed, weights="imagenet",
                                 include_top=(not featurize)) 
Example 27
Project: spark-deep-learning   Author: databricks   File: keras_applications.py    Apache License 2.0 5 votes
def _testKerasModel(self, include_top):
        # The new Keras release changed the structure of ResNet50, so we need to add 'avg' pooling
        # to compare the result. We need to change the DeepImageFeaturizer for the new Model
        # definition in Keras.
        return resnet50.ResNet50(weights="imagenet", include_top=include_top, pooling='avg') 
Example 28
Project: kutils   Author: subpic   File: applications.py    MIT License 5 votes
def get_model_imagenet(net_name, input_shape=None, plot=False, **kwargs):
    """Get ImageNet models"""
    print 'Loading model', net_name if isinstance(net_name, str) else net_name.func_name

    if net_name == ResNet50:
        base_model = ResNet50(weights='imagenet', include_top=False,
                              input_shape=input_shape, **kwargs)
        feats = base_model.layers[-2]
    elif net_name == NASNetMobile:
        base_model = NASNetMobile(weights='imagenet',
                                  include_top=True,
                                  input_shape=input_shape, **kwargs)
        feats = base_model.layers[-3]
    elif net_name in source_module.keys():
        base_model = net_name(weights='imagenet', include_top=False,
                              input_shape=input_shape, **kwargs)
        feats = base_model.layers[-1]
    else:
        raise Exception('Unknown model ' + net_name.func_name)

    gap = GlobalAveragePooling2D(name="final_gap")(feats.output)
    model = Model(inputs=base_model.input, outputs=gap)

    if plot: plot_model(base_model, show_shapes=True,
                        to_file='plots/{}_model.png'.format(net_name.func_name))
    return model, process_input[net_name] 
Example 29
Project: videofeatures   Author: jonasrothfuss   File: CNNFeatures.py    MIT License 5 votes
def __init__(self):
    self.base_model = resnet50.ResNet50()
    self.model = Model(inputs=self.base_model.input, outputs=self.base_model.get_layer('avg_pool').output) 
Example 30
Project: head-detection-using-yolo   Author: pranoyr   File: backend.py    MIT License 5 votes
def __init__(self, input_size):
        resnet50 = ResNet50(input_shape=(input_size, input_size, 3), include_top=False)
        resnet50.layers.pop() # remove the average pooling layer
        #resnet50.load_weights(RESNET50_BACKEND_PATH)

        self.feature_extractor = Model(resnet50.layers[0].input, resnet50.layers[-1].output) 
Example 31
Project: fresh_eyes   Author: ksteinfe   File: serve_keras.py    MIT License 5 votes
def initialize(cfg):
    global MODL
    MODL = resnet50.ResNet50() # Load Keras' ResNet50 model that was pre-trained against the ImageNet database
    #print(output_features)
    console_msg = """#################### FRESH EYES ####################
I've just loaded the ResNet50 Keras Model pre-trained on the ImageNet database
To check that the server is working, go to http://localhost:{0}/
Press Ctrl+C to stop the server
#################### FRESH EYES ####################
"""
    print(console_msg.format(cfg['port_num'])) 
Example 32
Project: lost   Author: l3p-cv   File: cluster_resnet.py    MIT License 5 votes
def main(self):
        self.logger.info('Will load keras model')
        model = ResNet50(weights='imagenet')
        self.logger.info('Keras model loaded')
        feature_list = []
        img_path_list = []
        # Request only MIA annotations for annotations of first stage
        # that have been annotated in current iteration cycle.
        img_annos = list(filter(lambda x: x.iteration == self.iteration, 
            self.inp.img_annos))
        total = len(img_annos)
        for index, img_anno in enumerate(img_annos):
            annos = img_anno.to_vec('anno.data')
            if annos:
                types = img_anno.to_vec('anno.dtype')
                img = skimage.io.imread(self.get_abs_path(img_anno.img_path))
                crops, anno_boxes = anno_helper.crop_boxes(annos, types, 
                    img, context=0.01)
                sim_classes = []
                for crop in crops:
                    # img = image.load_img(img_path, target_size=(224, 224))
                    crop_img = image.img_to_array(image.array_to_img(crop, scale=False).resize((224,224)))
                    x = keras_image.img_to_array(crop_img)
                    x = np.expand_dims(x, axis=0)
                    x = preprocess_input(x)
                    # extract features
                    scores = model.predict(x)
                    sim_classes.append(np.argmax(scores))
                self.outp.request_annos(img_anno.img_path, 
                    annos=annos, anno_types=types, anno_sim_classes=sim_classes)
                self.logger.info('Requested annotation for: {}\n{}\n{}'.format(img_anno.img_path, types, sim_classes))
                self.update_progress(index*100/total) 
Example 33
Project: lost   Author: l3p-cv   File: cluster_kmeans.py    MIT License 5 votes
def main(self):
        n_cluster = int(self.get_arg('n-clusters'))
        self.logger.info('Will load keras model')
        base_model = ResNet50(weights='imagenet')
        self.logger.info('Keras model loaded')
        layer_code = 'avg_pool'
        # base_model.summary()
        model = Model(inputs=base_model.input, outputs=base_model.get_layer(layer_code).output)
        feature_list = []
        img_path_list = []
        self.logger.info('Will compute CNN features')
        for raw_file in self.inp.raw_files:
            media_path = raw_file.path
            file_list = os.listdir(media_path)
            total = float(len(file_list))
            for index, img_file in enumerate(file_list):
                img_path = os.path.join(media_path, img_file)
                img_path_list.append(img_path)
                img = image.load_img(img_path, target_size=(224, 224))
                x = keras_image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                # extract features
                features = model.predict(x)
                feature_list.append(features[0].flatten())
                self.update_progress(index*70/total)
        self.logger.info('Computed CNN feature!')
        self.logger.info('Start KMeans clustering')
        kmeans = KMeans(n_clusters=n_cluster, random_state=0).fit(feature_list)
        self.logger.info('Clustering completed!')
        counter = 0
        for sim_class, img_path in zip(kmeans.labels_, img_path_list):
            self.outp.request_annos(img_path, img_sim_class=sim_class)
            self.logger.info('Requested annotation for: {} (cluster: {})'.format(img_path, sim_class))
            counter += 1
            self.update_progress(70 + (counter*30/len(img_path_list))) 
Example 34
Project: dog-breed-web-classifier   Author: JacobPolloreno   File: util.py    GNU General Public License v3.0 5 votes
def extract_bottleneck_features_resnet(tensor):
    from keras.applications.resnet50 import ResNet50
    return ResNet50(weights='imagenet', include_top=False).predict(tensor) 
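The tensor argument is expected to be a preprocessed (1, 224, 224, 3) batch. A minimal usage sketch (the image path is a placeholder, and the exact bottleneck shape depends on the Keras version):

import numpy as np
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input

img = image.load_img('dog.jpg', target_size=(224, 224))   # placeholder path
tensor = np.expand_dims(image.img_to_array(img), axis=0)   # (1, 224, 224, 3)
bottleneck = extract_bottleneck_features_resnet(preprocess_input(tensor))
print(bottleneck.shape)  # e.g. (1, 1, 1, 2048) or (1, 7, 7, 2048) depending on the Keras version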
Example 35
Project: Aesthetic_attributes_maps   Author: gautamMalu   File: models.py    MIT License 5 votes
def model1(weights_path=None):
    '''
    Basic ResNet-FT for baseline comparisons.
    Creates a model for all aesthetic attributes along
    with the overall aesthetic score by fine-tuning resnet50
    :param weights_path: path of the weight file
    :return: Keras model instance
    '''
    _input = Input(shape=(299, 299, 3))
    resnet = ResNet50(include_top=False, weights='imagenet', input_tensor=_input)

    last_layer_output = GlobalAveragePooling2D()(resnet.get_layer('activation_49').output)

    # output of model
    outputs = []
    attrs = ['BalacingElements', 'ColorHarmony', 'Content', 'DoF',
             'Light', 'MotionBlur', 'Object', 'RuleOfThirds', 'VividColor']
    for attribute in attrs:
        outputs.append(Dense(1, init='glorot_uniform', activation='tanh', name=attribute)(last_layer_output))

    non_negative_attrs = ['Repetition', 'Symmetry', 'score']
    for attribute in non_negative_attrs:
        outputs.append(Dense(1, init='glorot_uniform', activation='sigmoid', name=attribute)(last_layer_output))

    model = Model(input=_input, output=outputs)
    if weights_path:
        model.load_weights(weights_path)
    return model 
Example 36
Project: Luz-Vision   Author: general-labs   File: imageclassifier.py    GNU General Public License v3.0 5 votes
def classify_image(path):

    K.clear_session()
    classifier=ResNet50()
    #print(classifier.summary())
    new_image = image.load_img('temp/temp.png', target_size=(224, 224))
    transformed_image= image.img_to_array(new_image)
    #print(transformed_image.shape)
    transformed_image=np.expand_dims(transformed_image,axis=0)
    #print(transformed_image.shape)
    transformed_image=preprocess_input(transformed_image)
    #print(transformed_image)
    y_pred= classifier.predict(transformed_image)
    #print(y_pred)
    #print(y_pred.shape)

    decode_predictions(y_pred, top=5)
    label = decode_predictions(y_pred)
    # retrieve the most likely result, i.e. highest probability
    decoded_label = label[0][0]

    print("######===============########")
    # print the classification
    print('%s (%.2f%%)' % (decoded_label[1], decoded_label[2]*100 ))
    print("######===============########")

    # Destroy references
    del classifier, new_image, transformed_image, y_pred, label
    K.clear_session()
    return ({"Prediction": decoded_label[1], "confidence": decoded_label[2] * 100, "url": path}) 
Example 37
Project: catrank   Author: jmhessel   File: score_example.py    MIT License 5 votes
def get_image_feats(images):
    #Extract ResNet features from images
    print("Extracting image features from {} file(s)".format(len(images)))
    base_model = ResNet50(pooling='avg', include_top=False)
    gen = image_generator(images, 32)
    feats = base_model.predict_generator(gen, int(np.ceil(len(images) / 32)))
    feats = feats[:len(images),:]
    return feats 
Example 38
Project: Training-ValueNet   Author: lukasmyth96   File: classifier.py    MIT License 5 votes
def _build_feature_extractor(self):
        """
        Build keras model for the feature extractor - using a pre-trained ResNet50 backbone here
        """

        image_shape = self.config.IMG_DIMS + (3,)
        resnet = ResNet50(weights='imagenet', include_top=False, pooling='avg', input_shape=image_shape)

        return resnet 
Example 39
Project: autowebcompat   Author: marco-c   File: network.py    Mozilla Public License 2.0 5 votes
def create_resnet50_network(input_shape, weights):
    base_model = ResNet50(input_shape=input_shape, weights=weights)
    return Model(inputs=base_model.input, outputs=base_model.get_layer('flatten_1').output) 
Example 40
Project: Image-AI   Author: general-labs   File: utils.py    GNU General Public License v3.0 5 votes
def classify_image(path):
    with urllib.request.urlopen(path) as url:
        with open('temp.jpg', 'wb') as f:
            f.write(url.read())
    K.clear_session()
    classifier=ResNet50()
    #print(classifier.summary())
    new_image = image.load_img('temp.jpg', target_size=(224, 224))
    transformed_image= image.img_to_array(new_image)
    #print(transformed_image.shape)
    transformed_image=np.expand_dims(transformed_image,axis=0)
    #print(transformed_image.shape)
    transformed_image=preprocess_input(transformed_image)
    #print(transformed_image)
    y_pred= classifier.predict(transformed_image)
    #print(y_pred)
    #print(y_pred.shape)

    decode_predictions(y_pred, top=5)
    label = decode_predictions(y_pred)
    # retrieve the most likely result, i.e. highest probability
    decoded_label = label[0][0]

    print("######===============########")
    # print the classification
    print('%s (%.2f%%)' % (decoded_label[1], decoded_label[2]*100 ))
    print("######===============########")

    # Destroy references
    del classifier, new_image, transformed_image, y_pred, label
    K.clear_session()
    return ({"Prediction": decoded_label[1], "confidence": decoded_label[2] * 100, "url": path}) 
Example 41
Project: Image-AI   Author: general-labs   File: imageclassifier.py    GNU General Public License v3.0 5 votes
def classify_image(path):
    with urllib.request.urlopen(path) as url:
        with open('temp/temp.jpg', 'wb') as f:
            f.write(url.read())
    K.clear_session()
    classifier=ResNet50()
    #print(classifier.summary())
    new_image = image.load_img('temp/temp.jpg', target_size=(224, 224))
    transformed_image= image.img_to_array(new_image)
    #print(transformed_image.shape)
    transformed_image=np.expand_dims(transformed_image,axis=0)
    #print(transformed_image.shape)
    transformed_image=preprocess_input(transformed_image)
    #print(transformed_image)
    y_pred= classifier.predict(transformed_image)
    #print(y_pred)
    #print(y_pred.shape)

    decode_predictions(y_pred, top=5)
    label = decode_predictions(y_pred)
    # retrieve the most likely result, i.e. highest probability
    decoded_label = label[0][0]

    print("######===============########")
    # print the classification
    print('%s (%.2f%%)' % (decoded_label[1], decoded_label[2]*100 ))
    print("######===============########")

    # Destroy references
    del classifier, new_image, transformed_image, y_pred, label
    K.clear_session()
    return ({"Prediction": decoded_label[1], "confidence": decoded_label[2] * 100, "url": path}) 
Example 42
Project: resippy   Author: BeamIO-Inc   File: keras_model_training.py    BSD 3-Clause "New" or "Revised" License 5 votes
def load_resnet50_model(weights_path,      # type: str
                     include_top=True,  # type: bool
                     ):                 # type:(...) -> Model
    cnn_model = ResNet50(weights=weights_path, include_top=include_top)
    return cnn_model 
Example 43
Project: CarND-Transfer-Learning-Lab   Author: udacity   File: run_bottleneck.py    MIT License 5 votes
def create_model():
    input_tensor = Input(shape=(h, w, ch))
    if FLAGS.network == 'vgg':
        model = VGG16(input_tensor=input_tensor, include_top=False)
        x = model.output
        x = AveragePooling2D((7, 7))(x)
        model = Model(model.input, x)
    elif FLAGS.network == 'inception':
        model = InceptionV3(input_tensor=input_tensor, include_top=False)
        x = model.output
        x = AveragePooling2D((8, 8), strides=(8, 8))(x)
        model = Model(model.input, x)
    else:
        model = ResNet50(input_tensor=input_tensor, include_top=False)
    return model 
Example 44
Project: keras-transfer-learning-for-oxford102   Author: Arsey   File: resnet50.py    MIT License 5 votes
def _create(self):
        base_model = KerasResNet50(include_top=False, input_tensor=self.get_input_tensor())
        self.make_net_layers_non_trainable(base_model)

        x = base_model.output
        x = Flatten()(x)
        x = Dropout(0.5)(x)
        # we could achieve almost the same accuracy without this layer, but this one helps later
        # for the novelty detection part and brings much more useful features.
        x = Dense(self.noveltyDetectionLayerSize, activation='elu', name=self.noveltyDetectionLayerName)(x)
        x = Dropout(0.5)(x)
        predictions = Dense(len(config.classes), activation='softmax', name='predictions')(x)

        self.model = Model(input=base_model.input, output=predictions) 
Example 45
Project: maskrcnn   Author: shtamura   File: frcnn.py    MIT License 5 votes
def _model_backbone_plane(self):
        if self.config.backbone_nn_type == 'vgg':
            model = VGG16(weights='imagenet')
        else:
            model = ResNet50(weights='imagenet')
        return model 
Example 46
Project: Invasive_Species_Monitoring   Author: sdhayalk   File: Transfer_Testing_Ensemble.py    MIT License 5 votes
def ResNet50_model():
    input_tensor = Input(shape=(dimension, dimension, number_of_channels))
    model = ResNet50(input_tensor=input_tensor, weights='imagenet', include_top=True)
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.layers[-1].outbound_nodes = []
    x = Dense(number_of_classes, activation='softmax')(model.output)
    model = Model(model.input, x)

    # the first 24 layers are not trained
    for layer in model.layers[:24]:
        layer.trainable = False

    return model 
Example 47
Project: keras-onnx   Author: onnx   File: test_keras_applications.py    MIT License 5 votes
def test_ResNet50(self):
        from keras.applications.resnet50 import ResNet50
        model = ResNet50(include_top=True, weights='imagenet')
        res = run_image(model, self.model_files, img_path)
        self.assertTrue(*res) 
Example 48
Project: ICIAR2018   Author: alexander-rakhlin   File: models.py    MIT License 5 votes
def __init__(self, batch_size=32):
        self.model = ResNet50(include_top=False, weights='imagenet', pooling="avg")
        self.batch_size = batch_size
        self.data_format = K.image_data_format() 
Example 49
Project: ImageCluster   Author: TomHacker   File: model.py    Apache License 2.0 5 votes
def build_model(self):
        if self.model=='Xception' or self.model=='xception':
            return Xception(include_top=False,pooling='avg',weights='imagenet'),'Xception'
        elif self.model=='DenseNet' or self.model=='densenet':
            return DenseNet121(include_top=False,pooling='avg',weights='imagenet'),'DenseNet'
        elif self.model=='ResNet' or self.model=='resnet':
            return ResNet50(include_top=False,pooling='avg',weights='imagenet'),'ResNet'
        elif self.model=='VGG16' or self.model=='vgg16':
            return VGG16(include_top=False,pooling='avg',weights='imagenet'),'VGG16'
        return VGG19(include_top=False,pooling='avg',weights='imagenet'),'VGG19' 
Example 50
Project: Kaggle-Cdiscount-Image-Classification-Challenge   Author: petrosgk   File: models.py    MIT License 5 votes
def resnet50(self):
        base_model = ResNet50(include_top=False, weights='imagenet',
                              input_shape=self.input_shape)

        self.model.add(base_model)
        self.model.add(Flatten())
        self.model.add(Dense(self.classes, activation='softmax')) 
Example 51
Project: DrivenData-Identify-Fish-Challenge-2nd-Place-Solution   Author: ZFTurbo   File: a02_zoo.py    GNU General Public License v3.0 5 votes
def RESNET_50(classes_number):
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.applications.resnet50 import ResNet50
    from keras.models import Model

    base_model = ResNet50(include_top=True, weights='imagenet')
    x = base_model.layers[-2].output
    del base_model.layers[-1:]
    x = Dense(classes_number, activation='sigmoid', name='predictions')(x)
    model = Model(input=base_model.input, output=x)

    return model


# Batch 40 OK 
Example 52
Project: keras-object-recognition   Author: ltrottier   File: models.py    MIT License 5 votes
def load_resnet50_imagenet(n_classes, weight_decay):
    base_model = resnet50.ResNet50(weights='imagenet', include_top=False)
    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(n_classes, W_regularizer=l2(weight_decay))(x)
    x = Activation('softmax')(x)

    return Model(input=base_model.input, output=x) 
Example 53
Project: ccyclegan   Author: gtesei   File: classifier2.py    MIT License 5 votes
def build_discriminator2(self):
        
        base_model  = ResNet50(weights= 'imagenet', include_top=False, input_shape= (48,48,3))
        
        # add a global spatial average pooling layer
        x = base_model.output
        latent_repr = GlobalAveragePooling2D()(x)
        
        # let's add a fully-connected layer
        f = Dense(1024, activation='relu')(latent_repr)
        predictions = Dense(self.num_classes, activation='softmax')(f)
        
        return Model(base_model.input, predictions) 
Example 54
Project: jamespy_py3   Author: jskDr   File: dl_resnet50.py    MIT License 5 votes
def __init__(self, input_shape, nb_classes, weights='imagenet'):
        base_model = ResNet50(weights=weights, include_top=False,
                              input_shape=input_shape)

        x = base_model.input
        h = base_model.output
        z_cl = h  # Saving for cl output monitoring.

        h = GlobalAveragePooling2D()(h)
        h = Dense(128, activation='relu')(h)
        h = Dropout(0.5)(h)
        z_fl = h  # Saving for fl output monitoring.

        y = Dense(nb_classes, activation='softmax', name='preds')(h)
        # y = Dense(4, activation='softmax')(h)

        for layer in base_model.layers:
            layer.trainable = False

        model = Model(x, y)
        model.compile(loss='categorical_crossentropy', 
                      optimizer='adadelta', metrics=['accuracy'])

        self.model = model
        self.cl_part = Model(x, z_cl)
        self.fl_part = Model(x, z_fl) 
Example 55
Project: dftlab-yolo-vehiclecounting   Author: departmentfortransport   File: backend.py    MIT License 5 votes
def __init__(self, input_size):
        resnet50 = ResNet50(input_shape=(input_size, input_size, 3), include_top=False)
        resnet50.layers.pop() # remove the average pooling layer
        #resnet50.load_weights(RESNET50_BACKEND_PATH)

        self.feature_extractor = Model(resnet50.layers[0].input, resnet50.layers[-1].output) 
Example 56
Project: PADify   Author: rodrigobressan   File: extract_features_resnet.py    MIT License 5 votes
def extract_features(path_frames, output_path):
    no_imgs = []  # No. of images

    images = len(glob.glob(os.path.join(path_frames, '*.jpg')))  # assuming the images are stored as 'jpg'
    no_imgs.append(images)
    num_samples = np.sum(no_imgs)  # total number of all samples

    # Compute the features
    width, height, channels = (224, 224, 3)
    X = np.zeros((num_samples, width, height, channels))
    cnt = 0
    list_paths = []  # List of image paths
    samples_names = []
    print("Processing images ...")
    for img_file in glob.glob(path_frames + '/*.jpg'):
        # print("[%d] Processing image: %s" % (cnt, img_file))
        list_paths.append(os.path.join(os.getcwd(), img_file))
        img = image.load_img(img_file, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        X[cnt] = x
        cnt += 1
        samples_names.append(img_file)

    print("Images processed: %d" % (cnt))

    # Creating base_model (ResNet50 notop)
    image_shape = (224, 224, 3)

    from keras import backend
    with backend.get_session().graph.as_default() as g:
        base_model = ResNet50(weights='imagenet', input_shape=image_shape, include_top=False)

        filename = os.path.join(output_path, 'features_resnet.npy')
        resnet50features = base_model.predict(X)

        np.save(filename, resnet50features)
    # print('featurs shape: ', resnet50features.shape) 
Example 57
Project: PADify   Author: rodrigobressan   File: classifier.py    MIT License 5 votes
def get_resnet():
    image_shape = (224, 224, 3)
    resnet = ResNet50(weights='imagenet', input_shape=image_shape, include_top=False)
    return [resnet, 'resnet'] 
Example 58
Project: PADify   Author: rodrigobressan   File: classifier_save.py    MIT License 5 votes
def get_resnet():
    image_shape = (224, 224, 3)
    return [ResNet50(weights='imagenet', input_shape=image_shape, include_top=False), 'resnet'] 
Example 59
Project: ResNetCAM-keras   Author: alexisbcook   File: ResNet_CAM.py    MIT License 5 votes
def get_ResNet():
    # define ResNet50 model
    model = ResNet50(weights='imagenet')
    # get AMP layer weights
    all_amp_layer_weights = model.layers[-1].get_weights()[0]
    # extract wanted output
    ResNet_model = Model(inputs=model.input, 
        outputs=(model.layers[-4].output, model.layers[-1].output)) 
    return ResNet_model, all_amp_layer_weights 
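To illustrate what the two outputs are for: a class activation map (CAM) is obtained by weighting the last convolutional feature maps with the final Dense layer's weights for the predicted class. A hedged sketch of that step, assuming x is a preprocessed (1, 224, 224, 3) batch:

import numpy as np
import scipy.ndimage

ResNet_model, all_amp_layer_weights = get_ResNet()
last_conv_output, pred_vec = ResNet_model.predict(x)   # x: preprocessed (1, 224, 224, 3) batch
last_conv_output = np.squeeze(last_conv_output)        # (7, 7, 2048) feature maps
pred_class = np.argmax(pred_vec)                       # predicted ImageNet class index
amp_weights = all_amp_layer_weights[:, pred_class]     # (2048,) weights for that class

# Weighted sum of the feature maps -> coarse 7x7 heat map, then upsample to image size.
cam = np.dot(last_conv_output, amp_weights)            # (7, 7)
cam = scipy.ndimage.zoom(cam, (224 / 7, 224 / 7), order=1)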
Example 60
Project: Classify-ResNetCAM-keras   Author: jiye-ML   File: ResNet_CAM.py    MIT License 5 votes
def get_ResNet():
    # define ResNet50 model
    model = ResNet50(weights='imagenet')
    # get AMP layer weights
    all_amp_layer_weights = model.layers[-1].get_weights()[0]
    # extract wanted output
    ResNet_model = Model(inputs=model.input, 
        outputs=(model.layers[-4].output, model.layers[-1].output)) 
    return ResNet_model, all_amp_layer_weights 
Example 61
Project: GaneratedHandsForReal_TIME   Author: Ninebell   File: regnet.py    MIT License 5 votes
def __init__(self, input_shape, heatmap_shape):
        self.min_loss = [10000.0, 10000., 100000., 100000., 100000., 100000., 100000.]
        self.heatmap_shape=input_shape
        input_layer = Input(input_shape)
        resnet = resnet50.ResNet50(input_tensor=input_layer, weights='imagenet', include_top=False)
        conv = RegNet.make_conv(resnet.output)
        flat = Flatten()(conv)
        fc_joints3d_1_before_proj = Dense(200, name='fc_joints3d_1_before_proj')(flat)
        joints3d_prediction_before_proj = Dense(63, name='joints3d_prediction_before_proj')(fc_joints3d_1_before_proj)
        reshape_joints3D_before_proj = Reshape((21,1,3), name='reshape_joints3D_before_proj')(joints3d_prediction_before_proj)
        temp = Reshape((21,3))(reshape_joints3D_before_proj)
        projLayer = ProjLayer(heatmap_shape)(temp)
        heatmaps_pred3D = RenderingLayer(heatmap_shape, coeff=1, name='heatmaps_pred3D')(projLayer)
        heatmaps_pred3D_reshape = ReshapeChannelToLast(heatmap_shape)(heatmaps_pred3D)

        conv_rendered_2 = Conv2D(filters=64, kernel_size=3, strides=2, padding='same', activation='relu')(heatmaps_pred3D_reshape)
        conv_rendered_3 = Conv2D(filters=128, kernel_size=3, strides=2, padding='same', activation='relu')(conv_rendered_2)
        concat_pred_rendered = concatenate([conv, conv_rendered_3])
        conv_rendered_4 = Conv2D(filters=256, kernel_size=3, strides=1, padding='same', activation='relu')(concat_pred_rendered)

        heatmap_prefinal_small = Conv2D(filters=64, kernel_size=3,strides=1,padding='same')(conv_rendered_4)
        heatmap_prefinal = Deconv2D(filters=21, kernel_size=4, strides=2, padding='same', name='heatmap_prefinal')(heatmap_prefinal_small)
        heatmap_final = Deconv2D(filters=21, kernel_size=4, strides=2, padding='same', name='heatmap_final')(heatmap_prefinal)

        flat = Flatten()(conv_rendered_4)
        fc_joints3D_1_final = Dense(200, name='fc_joints3D_1_final')(flat)
        joints3D_final = Dense(63, name='joints3D_prediction_final')(fc_joints3D_1_final)
        joints3D_final_vec = Reshape((21,1,3), name='joint3d_final')(joints3D_final)

        self.model = Model(inputs=input_layer, output=[reshape_joints3D_before_proj, joints3D_final_vec, heatmap_final])
        # self.model = Model(inputs=input_layer, output=projLayer)
        self.model.summary() 
Example 62
Project: RFMLS-NEU   Author: neu-spiral   File: ResNet50.py    MIT License 4 votes
def ResNet50(input_shape, output_shape, output_name='fc1000', weights=None, name='resnet50'):
    x_input = Input(shape=input_shape)
    
    # Extra padding layer
    min_size = 198
    if input_shape[0] < min_size:
        min_padding = (min_size - input_shape[0]) // 2
        x = ZeroPadding2D((min_padding, min_padding))(x_input)
    else:
        x = x_input
    
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1',
               kernel_initializer=glorot_uniform())(x)
    x = BatchNormalization(axis=3, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = convolutional_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = convolutional_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = convolutional_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = convolutional_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    x = Flatten()(x)
    x = Dense(output_shape, activation='softmax', name=output_name,
               kernel_initializer=glorot_uniform())(x)

    # Create model.
    model = Model(x_input, x, name='resnet50')

    # load weights 
    if weights:
        print ("Adding pre-trained weights from %s" % weights)
        model.load_weights(weights, by_name=True)

    return model 
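The convolutional_block and identity_block helpers are defined elsewhere in that file. For orientation, a standard identity (bottleneck) block in this style looks roughly like the following sketch (the general pattern, not the RFMLS-NEU implementation):

from keras.layers import Conv2D, BatchNormalization, Activation, Add
from keras.initializers import glorot_uniform

def identity_block(x, kernel_size, filters, stage, block):
    # 1x1 -> 3x3 -> 1x1 bottleneck convolutions with an unchanged shortcut.
    f1, f2, f3 = filters
    name = 'res' + str(stage) + block
    shortcut = x

    x = Conv2D(f1, (1, 1), name=name + '_2a', kernel_initializer=glorot_uniform())(x)
    x = BatchNormalization(axis=3, name='bn_' + name + '_2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(f2, (kernel_size, kernel_size), padding='same', name=name + '_2b',
               kernel_initializer=glorot_uniform())(x)
    x = BatchNormalization(axis=3, name='bn_' + name + '_2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(f3, (1, 1), name=name + '_2c', kernel_initializer=glorot_uniform())(x)
    x = BatchNormalization(axis=3, name='bn_' + name + '_2c')(x)

    x = Add()([x, shortcut])  # identity shortcut: the block input is added back unchanged
    return Activation('relu')(x)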
Example 63
Project: deep-learning-keras   Author: arnaudvl   File: cnn_transfer_learning.py    MIT License 4 votes
def __init__(self,X,y,architecture,base_model,X_aux=None,trainable_layers=None,remove_from_layer=None,
                 output_layer='softmax',optimizer_type='adam',loss_function='categorical_crossentropy',
                 metrics=['accuracy'],learning_rate=0.001,learning_rate_decay=0.,learning_rate_factor=0.1,
                 num_epochs=10000,early_stopping_epochs=100,custom_eval_stopping={'name':'roc-auc','mode':'max','data':'val'},
                 nfolds=5,batch_size=64,pooling='max',runs=3,val_size=0.2,trainable_layers_tune=None,
                 remove_from_layer_tune=None,load_wgt=False,random_state=1,X_pred=None,X_aux_pred=None,
                 train_id=None,pred_id=None,scale_data=None,augment_data={},scale_pre_concat=False,print_out=True,
                 print_training=True,write_output=False,save_dir=None,model_name=None,target_col=['target'],id_col='id'):
        
        """
        Arguments:
            base_model -- str, model used for transfer learning
                               options: 'VGG16','VGG19','ResNet50','Xception','InceptionV3',
                                        'InceptionResNetV2','MobileNet'
            trainable_layers -- list, names of layers in the base model whose weights are retrained, default=None
            remove_from_layer -- str, name of the layer from which the base model's layers are removed, that layer 
                                      included; e.g. 'block5_conv1' removes the layers from 'block5_conv1' up to 
                                      'block5_pool' and adds the pooling/flattening and fully connected layers 
                                      defined in the architecture on top, default=None
            trainable_layers_tune -- list, names of layers in the base model whose weights are retrained during 
                                           model tuning, default=None
            remove_from_layer_tune -- list, names of layers from which the base model's layers are removed during 
                                            tuning, default=None
            
            See class CNN for documentation of other arguments. 
        """
        
        CNN.__init__(self,X,y,architecture,X_aux=X_aux,output_layer=output_layer,optimizer_type=optimizer_type,
                     loss_function=loss_function,metrics=metrics,learning_rate=learning_rate,
                     learning_rate_decay=learning_rate_decay,learning_rate_factor=learning_rate_factor,
                     num_epochs=num_epochs,early_stopping_epochs=early_stopping_epochs,
                     custom_eval_stopping=custom_eval_stopping,nfolds=nfolds,batch_size=batch_size,pooling=pooling,
                     runs=runs,val_size=val_size,random_state=random_state,X_pred=X_pred,X_aux_pred=X_aux_pred,
                     train_id=train_id,pred_id=pred_id,scale_data=scale_data,augment_data={},
                     scale_pre_concat=scale_pre_concat,load_wgt=load_wgt,print_out=print_out,print_training=print_training,
                     write_output=write_output,save_dir=save_dir,model_name=model_name,target_col=target_col,id_col=id_col)
                     
        self.base_model = base_model
        self.trainable_layers = trainable_layers
        self.remove_from_layer = remove_from_layer
        self.trainable_layers_tune = trainable_layers_tune
        self.remove_from_layer_tune = remove_from_layer_tune
        
        if self._mode=='min':
            self._score_mult = -1
        else:
            self._score_mult = 1
        
        self._tune_score = None
        self._best_overall_score = False 
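For intuition, a minimal standalone sketch of what the trainable_layers argument controls, written with plain Keras calls; the layer names below are hypothetical and not taken from this project:

from keras.applications.resnet50 import ResNet50

base = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
# Hypothetical layer names; only the listed layers stay trainable, the rest are frozen.
trainable_layers = ['res5c_branch2c', 'bn5c_branch2c']
for layer in base.layers:
    layer.trainable = layer.name in trainable_layers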
Example 64
Project: HA-Growing-Tree-CNN   Author: taveraantonio   File: fun_utils.py    MIT License 4 votes vote down vote up
def create_neural_network(model_name, img_l, img_h, max_output):
	
	# Load the ResNet from disk if it already exists;
	# download and save it if not.
	try:
		res_net = load_model('ResNet50.h5')
	except:
		res_net = ResNet50(weights='imagenet', include_top=False, input_shape=(img_l, img_h, 3))
		res_net.compile(loss='categorical_crossentropy',
				optimizer=optimizers.Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),
				metrics=['acc'])
		res_net.save('ResNet50.h5')

	#res_net.summary()
	# Freeze all the ResNet layers except the last 5.
	for layer in res_net.layers[:-5]:
	    layer.trainable = False
	
	#build a classifier model composed of ResNet50 + a dense network
	#the sequential model is a linear stack of layers
	model = Sequential()
	#add the ResNet50 convolutional base model
	model.add(res_net)
	#add new layers
	# Add a dropout layer, which randomly sets a fraction of the input units to 0 at each update during training and helps prevent overfitting.
	model.add(Dropout(0.8))
	#add Flatten layer, to flatten the input 
	model.add(Flatten())
	# Add another dropout layer.
	model.add(Dropout(0.8))
	# Add a dense (fully connected) layer with 1024 neurons and ReLU activation.
	model.add(Dense(1024, kernel_initializer='random_uniform', activation='relu'))
	model.add(Dropout(0.8))
	# Add another dense layer with as many units as there are classes.
	model.add(Dense(max_output, kernel_initializer='random_uniform', name='before_softmax'))
	# Add the softmax as a separate layer so that the pre-softmax values can be read out for the likelihood computation.
	model.add(Activation('softmax'))
	
	#model.summary()
	model.save(model_name)
	
	return model
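A hedged usage sketch for the builder above; the file name and class count are placeholders, and the compile settings are an assumption:

# Build (or load) the ResNet50-based classifier for 224x224 RGB images and 10 classes.
model = create_neural_network('tree_cnn.h5', 224, 224, 10)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])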



######################### CREATE SMALL NEURAL NETWORK #################################
# create a small neural network for debugging purposes, no use of ResNet here
#
# receives as input the name of the model, the length and height of the image and the
# maximum number of outputs for that model
# returns the model
#######################################################################################
Example 65
Project: satellite-imagery-change-detection   Author: soroushhashemifar   File: feat.py    GNU General Public License v3.0 4 votes vote down vote up
def extra_feat(img_path):
    # Using ResNet50 as a feature extractor
    base_model = ResNet50(weights='imagenet',include_top=False)
    img = image.load_img(img_path, target_size=RESIZE_SIZE)
    x_img = image.img_to_array(img)
    x = np.expand_dims(x_img, axis=0)
    x = preprocess_input(x)
    block1_pool_features=get_activations(base_model, 10, x)
    block2_pool_features=get_activations(base_model, 15, x)
    block3_pool_features=get_activations(base_model, 17, x)
    #block4_pool_features=get_activations(base_model, 20, x)
    block5_pool_features=get_activations(base_model, 22, x)
    #block6_pool_features=get_activations(base_model, 25, x)
    block7_pool_features=get_activations(base_model, 30, x)
    #block8_pool_features=get_activations(base_model, 35, x)
    block9_pool_features=get_activations(base_model, 37, x)
    #block10_pool_features=get_activations(base_model, 39, x)
    block11_pool_features=get_activations(base_model, 42, x)
    #block12_pool_features=get_activations(base_model, 45, x)
    block13_pool_features=get_activations(base_model, 46, x)
    block14_pool_features=get_activations(base_model, 47, x)
    block15_pool_features=get_activations(base_model, 50, x)
    #block16_pool_features=get_activations(base_model, 23, x)
    block17_pool_features=get_activations(base_model, 27, x)
    block18_pool_features=get_activations(base_model, 33, x)
    #block19_pool_features=get_activations(base_model, 38, x)
    block20_pool_features=get_activations(base_model, 43, x)
    block21_pool_features=get_activations(base_model, 49, x)

    x1 = tf.image.resize_images(block1_pool_features[0],RESIZE_SIZE)
    x2 = tf.image.resize_images(block2_pool_features[0],RESIZE_SIZE)
    x3 = tf.image.resize_images(block3_pool_features[0],RESIZE_SIZE)
    #x4 = tf.image.resize_images(block4_pool_features[0],RESIZE_SIZE)
    x5 = tf.image.resize_images(block5_pool_features[0],RESIZE_SIZE)
    #x6 = tf.image.resize_images(block6_pool_features[0],RESIZE_SIZE)
    x7 = tf.image.resize_images(block7_pool_features[0],RESIZE_SIZE)
    #x8 = tf.image.resize_images(block8_pool_features[0],RESIZE_SIZE)
    x9 = tf.image.resize_images(block9_pool_features[0],RESIZE_SIZE)
    #x10 = tf.image.resize_images(block10_pool_features[0],RESIZE_SIZE)
    x11 = tf.image.resize_images(block11_pool_features[0],RESIZE_SIZE)
    #x12 = tf.image.resize_images(block12_pool_features[0],RESIZE_SIZE)
    x13 = tf.image.resize_images(block13_pool_features[0],RESIZE_SIZE)
    x14 = tf.image.resize_images(block14_pool_features[0],RESIZE_SIZE)
    x15 = tf.image.resize_images(block15_pool_features[0],RESIZE_SIZE)
    #x16 = tf.image.resize_images(block16_pool_features[0],RESIZE_SIZE)
    x17 = tf.image.resize_images(block17_pool_features[0],RESIZE_SIZE)
    x18 = tf.image.resize_images(block18_pool_features[0],RESIZE_SIZE)
    #x19 = tf.image.resize_images(block19_pool_features[0],RESIZE_SIZE)
    x20 = tf.image.resize_images(block20_pool_features[0],RESIZE_SIZE)
    x21 = tf.image.resize_images(block21_pool_features[0],RESIZE_SIZE)

    F = tf.concat([x1,x2,x3,x5,x7,x9,x11,x13,x14,x15,x17,x18,x20,x21], 3)
    return F, x_img 
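A hedged usage sketch; RESIZE_SIZE and get_activations are assumed to be defined elsewhere in feat.py, the image path is a placeholder, and the returned F is a TensorFlow tensor that still has to be evaluated:

from keras import backend as K

F, raw_img = extra_feat('example.jpg')       # F is a tf.Tensor, not a NumPy array
stacked_features = K.get_session().run(F)    # evaluate the concatenated feature map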
Example 66
Project: GILA   Author: RParedesPalacios   File: pretrainedmodel.py    MIT License 4 votes vote down vote up
def pretrained_model(args,num_classes):


    h=args.height
    w=args.width
    if (args.chan=="rgb"):
        shape=(h,w,3)
    else:
        shape=(h,w,1)

    print("Setting input shape for pretrained model to",shape)

    input_tensor=Input(shape=shape)

    if (args.model=="vgg16"):
        load_model = VGG16(input_tensor=input_tensor,weights='imagenet', include_top=False)
    elif (args.model=="vgg19"):
        load_model = VGG19(input_tensor=input_tensor,weights='imagenet', include_top=False)
    elif (args.model=="resnet50"):
        load_model = ResNet50(input_tensor=input_tensor,weights='imagenet', include_top=False)
    elif (args.model=="densenet121"):
        load_model = DenseNet121(input_tensor=input_tensor,weights='imagenet', include_top=False)
    elif (args.model=="densenet169"):
        load_model = DenseNet169(input_tensor=input_tensor,weights='imagenet', include_top=False)
    elif (args.model=="densenet201"):
        load_model = DenseNet201(input_tensor=input_tensor,weights='imagenet', include_top=False)
    elif (args.model=="inceptionv3"):
        load_model = InceptionV3(input_tensor=input_tensor,weights='imagenet', include_top=False)
    elif (args.model=="inceptionresnetv2"):
        load_model = InceptionResNetV2(input_tensor=input_tensor,weights='imagenet', include_top=False)
    elif (args.model=="mobilenet"):
        load_model = MobileNet(input_tensor=input_tensor,weights='imagenet', include_top=False)
    elif (args.model=="mobilenetv2"):
        load_model = MobileNetV2(input_tensor=input_tensor,weights='imagenet', include_top=False)


    x=load_model.output
    x=GlobalAveragePooling2D()(x)
    for i in range(args.predlayers):
        x=Dense(args.predsize)(x)
        x=BN()(x)
        x=Activation('relu')(x)

    predictions = Dense(num_classes, activation='softmax')(x)

    model=Model(inputs=load_model.input, outputs=predictions)


    return load_model,model 
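A hedged usage sketch for pretrained_model; the Namespace below mimics the command-line arguments the function expects, and all field values plus the compile settings are placeholders:

from argparse import Namespace

args = Namespace(height=224, width=224, chan='rgb', model='resnet50',
                 predlayers=1, predsize=512)
base, full_model = pretrained_model(args, num_classes=10)
full_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])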
Example 67
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_reg.py    MIT License 4 votes vote down vote up
def train_model(self,file_list,labels,n_fold=5,batch_size=16,epochs=40,dim=224,lr=1e-5,model='ResNet50'):
		model_save_dest = {}
		k = 0
		kf = KFold(n_splits=n_fold, random_state=0, shuffle=True)

		for train_index,test_index in kf.split(file_list):


			k += 1
			file_list = np.array(file_list)
			labels   = np.array(labels)
			train_files,train_labels  = file_list[train_index],labels[train_index]
			val_files,val_labels  = file_list[test_index],labels[test_index]
			
			if model == 'ResNet50':
				model_final = self.resnet_pseudo(dim=224,freeze_layers=10,full_freeze='N')
			
			if model == 'VGG16':
				model_final = self.VGG16_pseudo(dim=224,freeze_layers=10,full_freeze='N') 
			
			if model == 'InceptionV3':
				model_final = self.inception_pseudo(dim=224,freeze_layers=10,full_freeze='N')
				
			adam = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
			model_final.compile(optimizer=adam, loss=["mse"],metrics=['mse'])
			reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.50,patience=3, min_lr=0.000001)
			early = EarlyStopping(monitor='val_loss', patience=10, mode='min', verbose=1)
			logger = CSVLogger('keras-5fold-run-01-v1-epochs_ib.log', separator=',', append=False)
			checkpoint = ModelCheckpoint(
								'kera1-5fold-run-01-v1-fold-' + str('%02d' % (k + 1)) + '-run-' + str('%02d' % (1 + 1)) + '.check',
								monitor='val_loss', mode='min',
								save_best_only=True,
								verbose=1) 
			callbacks = [reduce_lr,early,checkpoint,logger]
			train_gen = DataGenerator(train_files,train_labels,batch_size=32,n_classes=len(self.class_folders),dim=(self.dim,self.dim,3),shuffle=True)
			val_gen = DataGenerator(val_files,val_labels,batch_size=32,n_classes=len(self.class_folders),dim=(self.dim,self.dim,3),shuffle=True)
			model_final.fit_generator(train_gen,epochs=epochs,verbose=1,validation_data=(val_gen),callbacks=callbacks)
			model_name = 'kera1-5fold-run-01-v1-fold-' + str('%02d' % (k + 1)) + '-run-' + str('%02d' % (1 + 1)) + '.check'
			del model_final
			f = h5py.File(model_name, 'r+')
			del f['optimizer_weights']
			f.close()
			model_final = keras.models.load_model(model_name)
			model_name1 = self.outdir + str(model) + '___' + str(k) 
			model_final.save(model_name1)
			model_save_dest[k] = model_name1
				
		return model_save_dest

	# Hold out dataset validation function 
Example 68
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_ffd.py    MIT License 4 votes vote down vote up
def train_model(self,train_dir,val_dir,n_fold=5,batch_size=16,epochs=40,dim=224,lr=1e-5,model='ResNet50'):
        if model == 'ResNet50':
            model_final = self.resnet_pseudo(dim=224,freeze_layers=10,full_freeze='N')
        if model == 'VGG16':
            model_final = self.VGG16_pseudo(dim=224,freeze_layers=10,full_freeze='N') 
        if model == 'InceptionV3':
            model_final = self.inception_pseudo(dim=224,freeze_layers=10,full_freeze='N')
            
        train_file_names = glob.glob(f'{train_dir}/*/*')
        val_file_names = glob.glob(f'{val_dir}/*/*')
        train_steps_per_epoch = len(train_file_names)/float(batch_size)
        val_steps_per_epoch = len(val_file_names)/float(batch_size)
        train_datagen = ImageDataGenerator(horizontal_flip = True,vertical_flip = True,width_shift_range = 0.1,height_shift_range = 0.1,
                channel_shift_range=0,zoom_range = 0.2,rotation_range = 20,preprocessing_function=pre_process)
        val_datagen = ImageDataGenerator(preprocessing_function=pre_process)
        train_generator = train_datagen.flow_from_directory(train_dir,
        target_size=(dim,dim),
        batch_size=batch_size,
        class_mode='categorical')
        val_generator = val_datagen.flow_from_directory(val_dir,
        target_size=(dim,dim),
        batch_size=batch_size,
        class_mode='categorical')
        print(train_generator.class_indices)
        joblib.dump(train_generator.class_indices,f'{self.outdir}/class_indices.pkl')
        adam = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model_final.compile(optimizer=adam, loss=["categorical_crossentropy"],metrics=['accuracy'])
        reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.50,patience=3, min_lr=0.000001)
        early = EarlyStopping(monitor='val_loss', patience=10, mode='min', verbose=1)
        logger = CSVLogger(f'{self.outdir}/keras-epochs_ib.log', separator=',', append=False)
        model_name = f'{self.outdir}/keras_transfer_learning-run.check'
        checkpoint = ModelCheckpoint(
                model_name,
                monitor='val_loss', mode='min',
                save_best_only=True,
                verbose=1) 
        callbacks = [reduce_lr,early,checkpoint,logger]
        model_final.fit_generator(train_generator,steps_per_epoch=train_steps_per_epoch,epochs=epochs,verbose=1,validation_data=(val_generator),validation_steps=val_steps_per_epoch,callbacks=callbacks,
                                                                                                                  class_weight={0:0.012,1:0.12,2:0.058,3:0.36,4:0.43})
        #model_final.fit_generator(train_generator,steps_per_epoch=1,epochs=epochs,verbose=1,validation_data=(val_generator),validation_steps=1,callbacks=callbacks)
        
        del model_final
        f = h5py.File(model_name, 'r+')
        del f['optimizer_weights']
        f.close()
        model_final = keras.models.load_model(model_name)
        model_to_store_path = f'{self.outdir}/{model}' 
        model_final.save(model_to_store_path)
        return model_to_store_path,train_generator.class_indices

# Hold out dataset validation function 
Example 69
Project: Aesthetic_attributes_maps   Author: gautamMalu   File: models.py    MIT License 4 votes vote down vote up
def model2(weights_path=None):
    '''
    Creates a model by concatenating the features from lower layers
    with high level convolution features for all aesthetic attributes along
    with overall aesthetic score
    :param weights_path: path of the weight file
    :return: Keras model instance
    This is the model used in the paper
    '''
    _input = Input(shape=(299, 299, 3))
    resnet = ResNet50(include_top=False, weights='imagenet', input_tensor=_input)
    activation_layers = []
    layers = resnet.layers
    for layer in layers:
        #  print layer.name, layer.input_shape, layer.output_shape
        if 'activation' in layer.name:
            activation_layers.append(layer)

    activations = 0
    activation_plus_squared_outputs = []
    # Remove last activation layer so
    # it can be used with spatial pooling layer if required
    nlayers = len(activation_layers) - 1
    for i in range(1, nlayers):
        layer = activation_layers[i]
        if layer.output_shape[-1] > activation_layers[i - 1].output_shape[-1]:
            #         print layer.name, layer.input_shape, layer.output_shape
            activations += layer.output_shape[-1]
            _out = Lambda(squared_root_normalization,
                          output_shape=squared_root_normalization_output_shape, name=layer.name + '_normalized')(layer.output)
            activation_plus_squared_outputs.append(_out)

            #  print "sum of all activations should be {}".format(activations)

    last_layer_output = GlobalAveragePooling2D()(activation_layers[-1].output)

   # last_layer_output = Lambda(K.sqrt, output_shape=squared_root_normalization_output_shape)(last_layer_output)
    last_layer_output = Lambda(l2_normalize, output_shape=l2_normalize_output_shape,
                               name=activation_layers[-1].name+'_normalized')(last_layer_output)

    activation_plus_squared_outputs.append(last_layer_output)

    merged = merge(activation_plus_squared_outputs, mode='concat', concat_axis=1)
    merged = Lambda(l2_normalize, output_shape=l2_normalize_output_shape, name='merge')(merged)

    # output of model
    outputs = []
    attrs = ['BalacingElements', 'ColorHarmony', 'Content', 'DoF',
             'Light', 'MotionBlur', 'Object', 'RuleOfThirds', 'VividColor']
    for attribute in attrs:

        outputs.append(Dense(1, init='glorot_uniform', activation='tanh', name=attribute)(merged))

    non_negative_attrs = ['Repetition', 'Symmetry', 'score']
    for attribute in non_negative_attrs:
        outputs.append(Dense(1, init='glorot_uniform', activation='sigmoid', name=attribute)(merged))

    model = Model(input=_input, output=outputs)
    if weights_path:
        model.load_weights(weights_path)
    return model 
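A hedged usage sketch; model2 relies on the Keras 1 merge/init API, so this assumes a matching Keras version, and the optimizer and loss choices are assumptions:

model = model2()                       # ImageNet-initialized trunk, no extra weight file
model.compile(optimizer='adam', loss='mse')
model.summary()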
Example 70
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: __init__.py    Apache License 2.0 4 votes vote down vote up
def build_encoder(input_shape,
                  encoder_name,
                  encoder_weights=None,
                  weight_decay=1e-4,
                  kernel_initializer="he_normal",
                  bn_epsilon=1e-3,
                  bn_momentum=0.99):
    """ the main api to build a encoder.
    :param input_shape: tuple, i.e., (height, width. channel).
    :param encoder_name: string, name of the encoder, refer to 'scope_table' above.
    :param encoder_weights: string, path of the weight, default None.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: a Keras Model instance.
    """
    encoder_name = encoder_name.lower()

    if encoder_name == "resnet_v1_50":
        encoder = ResNet50(input_shape=input_shape, weights=encoder_weights, include_top=False)

    elif encoder_name=="resnet_v2_50":
        encoder = resnet_v2_50(input_shape, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    elif encoder_name=="resnet_v2_101":
        encoder = resnet_v2_101(input_shape, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    elif encoder_name=="resnet_v2_152":
        encoder = resnet_v2_152(input_shape, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    elif encoder_name=="resnet_v2_200":
        encoder = resnet_v2_200(input_shape, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)

    elif encoder_name=="resnet_v2_50_separable":
        encoder = resnet_v2_50_separable(input_shape, kernel_size=5, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    elif encoder_name=="resnet_v2_101_separable":
        encoder = resnet_v2_101_separable(input_shape, kernel_size=5, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    elif encoder_name=="resnet_v2_152_separable":
        encoder = resnet_v2_152_separable(input_shape, kernel_size=5, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    elif encoder_name=="resnet_v2_200_separable":
        encoder = resnet_v2_200_separable(input_shape, kernel_size=5, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)

    elif encoder_name == "xception_41":
        encoder = xception_41(input_shape, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)

    elif encoder_name=="vgg_16":
        encoder = vgg_16(input_shape, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    elif encoder_name=="vgg_19":
        encoder = vgg_19(input_shape, weight_decay=weight_decay, kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
    else:
        raise ValueError("Invalid encoder name: {}."
                         "Supported encoder names: 'resnet_v1_50', 'resnet_v2_50', 'resnet_v2_101', 'resnet_v2_152', 'resnet_v2_200', "
                         "'resnet_v2_50_separable', 'resnet_v2_101_separable', 'resnet_v2_152_separable', 'resnet_v2_200_separable', "
                         "'xception_41', 'vgg_16', 'vgg_19'".format(encoder_name))

    if encoder_weights is not None and os.path.exists(encoder_weights):
        encoder.load_weights(encoder_weights)

    return encoder 
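A hedged usage sketch of the encoder factory above; the input shape and encoder name are placeholders:

encoder = build_encoder(input_shape=(256, 256, 3), encoder_name="resnet_v1_50")
print(encoder.output_shape)            # inspect the encoder's final feature-map shape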
Example 71
Project: Lane-Detection-GANs   Author: mvirgo   File: arch-4a.py    MIT License 4 votes vote down vote up
def get_model(input_shape, final_activation):

	# Set pooling size
	pool_size = (2, 2)

	# Using ResNet with ImageNet pre-trained weights
	resnet = ResNet50(weights='imagenet')

	# Get rid of final three layers - average pooling, flatten, FC
	for i in range(3):
		resnet.layers.pop()

	# Grab input and output in order to make a new model
	inp = resnet.input
	out = resnet.layers[-1].output

	# Output above should be 7x7
	# Use zero padding to get up to a 10x10 like previous architectures
	x = ZeroPadding2D(padding=((1, 2),(1, 2)))(out)

	# Upsample 1
	x = UpSampling2D(size=(2,2))(x)

	# Deconv 1
	x = Conv2DTranspose(128, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv1')(x)
	x = Dropout(0.2)(x)

	# Deconv 2
	x = Conv2DTranspose(128, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv2')(x)
	x = Dropout(0.2)(x)

	# Upsample 2
	x = UpSampling2D(size=pool_size)(x)

	# Deconv 3
	x = Conv2DTranspose(64, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv3')(x)
	x = Dropout(0.2)(x)

	# Deconv 4
	x = Conv2DTranspose(64, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv4')(x)
	x = Dropout(0.2)(x)

	# Deconv 5
	x = Conv2DTranspose(32, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv5')(x)
	x = Dropout(0.2)(x)

	# Upsample 3
	x = UpSampling2D(size=pool_size)(x)

	# Deconv 6
	x = Conv2DTranspose(32, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv6')(x)

	# Final layer - only including one channel so 1 filter
	predictions = Conv2DTranspose(1, (3, 3), padding='valid', strides=(1,1), activation = final_activation, name = 'Final')(x)

	# Create model
	model = Model(inputs=inp, outputs=predictions)

	return model 
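A hedged usage sketch; the ResNet trunk is the stock 224x224 ImageNet model, and the final_activation and compile settings below are assumptions:

model = get_model(input_shape=(224, 224, 3), final_activation='sigmoid')
model.compile(optimizer='adam', loss='binary_crossentropy')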
Example 72
Project: deep-learning-explorer   Author: waspinator   File: weights.py    Apache License 2.0 4 votes vote down vote up
def transfer_FCN_ResNet50(transfered_weights_path):

    if os.path.isfile(transfered_weights_path):
        print('Already transformed')
        return

    input_shape = (224, 224, 3)
    img_input = KL.Input(shape=input_shape)
    bn_axis = 3

    x = KL.Conv2D(64, (7, 7), strides=(2, 2),
                  padding='same', name='conv1')(img_input)
    x = KL.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = KL.Activation('relu')(x)
    x = KL.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(x)
    x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
    x = identity_block(3, [64, 64, 256], stage=2, block='c')(x)

    x = conv_block(3, [128, 128, 512], stage=3, block='a')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='b')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='c')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='d')(x)

    x = conv_block(3, [256, 256, 1024], stage=4, block='a')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='b')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='c')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='d')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='e')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='f')(x)

    x = conv_block(3, [512, 512, 2048], stage=5, block='a')(x)
    x = identity_block(3, [512, 512, 2048], stage=5, block='b')(x)
    x = identity_block(3, [512, 512, 2048], stage=5, block='c')(x)

    x = KL.Conv2D(1000, (1, 1), activation='linear', name='fc1000')(x)

    # Create model
    model = KM.Model(img_input, x)

    flattened_layers = model.layers
    index = {}
    for layer in flattened_layers:
        if layer.name:
            index[layer.name] = layer
    resnet50 = ResNet50()
    for layer in resnet50.layers:
        weights = layer.get_weights()
        if layer.name == 'fc1000':
            weights[0] = np.reshape(weights[0], (1, 1, 2048, 1000))
        if layer.name in index:
            index[layer.name].set_weights(weights)
    model.save_weights(transfered_weights_path)
    print('Successfully transformed') 
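A hedged usage sketch; the output path is a placeholder:

# Convert the ImageNet classifier weights into fully convolutional form and save them.
transfer_FCN_ResNet50('resnet50_fcn_weights.h5')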
Example 73
Project: globus_project   Author: LutzFabio   File: cnn_globus.py    GNU General Public License v3.0 4 votes vote down vote up
def __init__(self, load=False):
        '''
        Initiate the class instance.
        '''

        # Load meta file.
        self.meta_data = pd.read_csv(self.meta_file)

        # Initiate the optimizer.
        if self.optimizer_str == 'Adam':
            self.optimizer = Adam(lr=self.learning_rate)

        elif self.optimizer_str == 'Adagrad':
            self.optimizer = Adagrad(lr=self.learning_rate)

        elif self.optimizer_str == 'Adadelta':
            self.optimizer = Adadelta(lr=self.learning_rate)

        else:
            raise ValueError('Optimizer not implemented yet!')

        # Initiate the CSV logger.
        self.csv_logger = CSVLogger(self.output_dir + 'training_{}.log'.format(
            time.strftime("%Y%m%d-%H%M%S")), separator=',', append=False)

        # Initialize the checkpoint callback that saves the model every
        # mod_saved_after_epochs epochs.
        self.cp_logger = ModelCheckpoint(
            self.output_dir + 'model_' + self.model_type + '_{epoch:08d}.h5',
            save_weights_only=False, period=self.mod_saved_after_epochs)

        # Define whether to train or to load a model and evaluate it.
        if not load:
            # Define the model to train.
            if self.model_str == 'ResNet50':
                self.base_model = ResNet50(weights='imagenet',
                                           include_top=False,
                                           input_shape=self.model_input)

            else:
                raise ValueError('{} not implemented yet!'.format(
                    self.model_str))

        else:
            raise ValueError('No loading methodology implemented yet!') 
Example 74
Project: globus_project   Author: LutzFabio   File: cnn_globus.py    GNU General Public License v3.0 4 votes vote down vote up
def create_r50_model(self):
        '''
        Method that creates a new model based on the pre-trained ResNet50
        model (transfer learning)
        '''

        # Make all the layers in the base model trainable by default.
        self.base_model.trainable = True

        # Check if something should NOT be re-trained in the original ResNet50
        # model.
        if self.num_layers_not_trained is not None:
            if self.num_layers_not_trained != 'all':
                for layer in self.base_model.layers[
                             :self.num_layers_not_trained]:
                    layer.trainable = False
            else:
                for layer in self.base_model.layers:
                    layer.trainable = False

        # Add an input as well as a global average pooling layer to the
        # original model.
        inp = Input(self.model_input)
        mod = self.base_model(inp)
        mod = GlobalAveragePooling2D()(mod)

        # Create the categorical branch.
        mod_cat = Dense(len(self.gen_pseudo_train_cat.class_indices),
                        activation=self.activation_mid_both,
                        name='category_dense')(mod)
        mod_cat = Dropout(0.5)(mod_cat)
        mod_cat = Dense(len(self.gen_pseudo_train_cat.class_indices),
                        activation=self.activation_last_cat,
                        name='category')(mod_cat)

        # Create the feature branch.
        mod_feat = Dense(len(self.gen_pseudo_train_feat.class_indices),
                         activation=self.activation_mid_both,
                         name='feature_dense')(mod)
        mod_feat = Dropout(0.5)(mod_feat)
        mod_feat = Dense(len(self.gen_pseudo_train_feat.class_indices),
                         activation=self.activation_last_feat,
                         name='feature')(mod_feat)

        # Combine both branches to one model.
        self.model = Model(inputs=inp,
                           outputs=[mod_cat, mod_feat])

        return 
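A hedged sketch of how the two-headed model built above might be compiled; the class name and loss choices are assumptions, while the 'category' and 'feature' output names come from the code above:

net = GlobusCNN()                      # hypothetical class name
net.create_r50_model()
net.model.compile(optimizer=net.optimizer,
                  loss={'category': 'categorical_crossentropy',
                        'feature': 'binary_crossentropy'},
                  metrics=['accuracy'])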
Example 75
Project: EAST   Author: kurapan   File: model.py    GNU General Public License v3.0 4 votes vote down vote up
def __init__(self, input_size=512):
        input_image = Input(shape=(None, None, 3), name='input_image')
        overly_small_text_region_training_mask = Input(shape=(None, None, 1), name='overly_small_text_region_training_mask')
        text_region_boundary_training_mask = Input(shape=(None, None, 1), name='text_region_boundary_training_mask')
        target_score_map = Input(shape=(None, None, 1), name='target_score_map')
        resnet = ResNet50(input_tensor=input_image, weights='imagenet', include_top=False, pooling=None)
        x = resnet.get_layer('activation_49').output

        x = Lambda(resize_bilinear, name='resize_1')(x)
        x = concatenate([x, resnet.get_layer('activation_40').output], axis=3)
        x = Conv2D(128, (1, 1), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
        x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
        x = Activation('relu')(x)
        x = Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
        x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
        x = Activation('relu')(x)

        x = Lambda(resize_bilinear, name='resize_2')(x)
        x = concatenate([x, resnet.get_layer('activation_22').output], axis=3)
        x = Conv2D(64, (1, 1), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
        x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
        x = Activation('relu')(x)
        x = Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
        x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
        x = Activation('relu')(x)

        x = Lambda(resize_bilinear, name='resize_3')(x)
        x = concatenate([x, ZeroPadding2D(((1, 0),(1, 0)))(resnet.get_layer('activation_10').output)], axis=3)
        x = Conv2D(32, (1, 1), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
        x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
        x = Activation('relu')(x)
        x = Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
        x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
        x = Activation('relu')(x)

        x = Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(1e-5))(x)
        x = BatchNormalization(momentum=0.997, epsilon=1e-5, scale=True)(x)
        x = Activation('relu')(x)

        pred_score_map = Conv2D(1, (1, 1), activation=tf.nn.sigmoid, name='pred_score_map')(x)
        rbox_geo_map = Conv2D(4, (1, 1), activation=tf.nn.sigmoid, name='rbox_geo_map')(x) 
        rbox_geo_map = Lambda(lambda x: x * input_size)(rbox_geo_map)
        angle_map = Conv2D(1, (1, 1), activation=tf.nn.sigmoid, name='rbox_angle_map')(x)
        angle_map = Lambda(lambda x: (x - 0.5) * np.pi / 2)(angle_map)
        pred_geo_map = concatenate([rbox_geo_map, angle_map], axis=3, name='pred_geo_map')

        model = Model(inputs=[input_image, overly_small_text_region_training_mask, text_region_boundary_training_mask, target_score_map], outputs=[pred_score_map, pred_geo_map])

        self.model = model
        self.input_image = input_image
        self.overly_small_text_region_training_mask = overly_small_text_region_training_mask
        self.text_region_boundary_training_mask = text_region_boundary_training_mask
        self.target_score_map = target_score_map
        self.pred_score_map = pred_score_map
        self.pred_geo_map = pred_geo_map
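A hedged usage sketch; the enclosing class name is an assumption, since the snippet above only shows its __init__:

east = EAST_model(input_size=512)      # hypothetical class name
east.model.summary()                   # inspect the score-map / geo-map heads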