Python keras.layers Examples

The following are 30 code examples showing how to use the keras.layers module. They are extracted from open source projects; the project, author, source file, and license are listed above each example.

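Most of the examples below use the Keras functional API, where layers are called on tensors and chained; a minimal sketch of that pattern (the layer sizes here are illustrative):

from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(16,))                      # symbolic input tensor
hidden = Dense(8, activation='relu')(inputs)     # a fully connected layer applied to it
outputs = Dense(1, activation='sigmoid')(hidden)
model = Model(inputs, outputs)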

Example 1
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py   License: Apache License 2.0
from keras.layers import Input, Dense
from keras.models import Model

def weather_ae(layers, lr, decay, loss,
               input_len, input_features):
    
    inputs = Input(shape=(input_len, input_features), name='input_layer')
    
    for i, hidden_nums in enumerate(layers):
        if i==0:
            hn = Dense(hidden_nums, activation='relu')(inputs)
        else:
            hn = Dense(hidden_nums, activation='relu')(hn)

    outputs = Dense(3, activation='sigmoid', name='output_layer')(hn)

    weather_model = Model(inputs, outputs=[outputs])

    return weather_model 
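
A hypothetical call, with illustrative shapes (note that the lr, decay and loss arguments are accepted but not used inside the function):

model = weather_ae(layers=[64, 32], lr=0.001, decay=0.0, loss='mse',
                   input_len=37, input_features=9)
model.compile(optimizer='adam', loss='mse')
model.summary()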
Example 2
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py   License: Apache License 2.0
def build_graph(self):
        #keras.backend.clear_session() # clear session/graph    
        self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)

        self.model = Seq2Seq_MVE_subnets_swish(id_embd=True, time_embd=True,
            lr=self.lr, decay=self.decay,
            num_input_features=self.num_input_features, num_output_features=self.num_output_features,
            num_decoder_features=self.num_decoder_features, layers=self.layers,
            loss=self.loss, regulariser=self.regulariser)

        def _mve_loss(y_true, y_pred):
            # crop(dim, start, end) is a project helper that slices y_pred along `dim`
            pred_u = crop(2, 0, 3)(y_pred)    # predicted means
            pred_sig = crop(2, 3, 6)(y_pred)  # predicted variances
            # a commented-out variant in the project applies tf.exp to pred_sig first,
            # to keep the variance away from zero
            precision = 1. / pred_sig
            # Gaussian negative log-likelihood up to a constant (tf.log is TF1; tf.math.log in TF2)
            log_loss = 0.5 * tf.log(pred_sig) + 0.5 * precision * ((pred_u - y_true) ** 2)
            log_loss = tf.reduce_mean(log_loss)
            return log_loss

        print(self.model.summary())
        self.model.compile(optimizer=self.optimizer, loss=_mve_loss) 
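
The _mve_loss above is the negative log-likelihood of a Gaussian whose mean and variance the network predicts, up to the constant 0.5*log(2*pi). A minimal NumPy restatement, assuming pred_sig holds strictly positive variances:

import numpy as np

def mve_loss_np(y_true, pred_u, pred_sig):
    # 0.5*log(sigma^2) + 0.5*(mu - y)^2 / sigma^2, averaged over all elements
    precision = 1.0 / pred_sig
    return np.mean(0.5 * np.log(pred_sig) + 0.5 * precision * (pred_u - y_true) ** 2)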
Example 3
Project: AIX360   Author: IBM   File: test_shap.py   License: Apache License 2.0
def test_ShapGradientExplainer(self):

    #     model = VGG16(weights='imagenet', include_top=True)
    #     X, y = shap.datasets.imagenet50()
    #     to_explain = X[[39, 41]]
    #
    #     url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
    #     fname = shap.datasets.cache(url)
    #     with open(fname) as f:
    #         class_names = json.load(f)
    #
    #     def map2layer(x, layer):
    #         feed_dict = dict(zip([model.layers[0].input], [preprocess_input(x.copy())]))
    #         return K.get_session().run(model.layers[layer].input, feed_dict)
    #
    #     e = GradientExplainer((model.layers[7].input, model.layers[-1].output),
    #                           map2layer(preprocess_input(X.copy()), 7))
    #     shap_values, indexes = e.explain_instance(map2layer(to_explain, 7), ranked_outputs=2)
    #
          print("Skipped Shap GradientExplainer") 
Example 4
Project: rasa_wechat   Author: Rowl1ng   File: keras_policy.py   License: Apache License 2.0
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.

        :param max_history_len: The maximum number of historical
                                turns used to decide on next action
        """
        from keras.layers import LSTM, Activation, Masking, Dense
        from keras.models import Sequential

        n_hidden = 32  # Neural Net and training params
        batch_shape = (None, max_history_len, num_features)
        # Build Model
        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_shape))
        model.add(LSTM(n_hidden, batch_input_shape=batch_shape))
        model.add(Dense(input_dim=n_hidden, units=num_actions))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
Example 5
Project: SeqGAN   Author: tyo-yo   File: models.py   License: MIT License
from keras import backend as K
from keras.layers import Dense, Lambda

def Highway(x, num_layers=1, activation='relu', name_prefix=''):
    '''
    Layer wrapper function for Highway network
    # Arguments:
        x: tensor, shape = (B, input_size)
    # Optional Arguments:
        num_layers: int, default is 1, the number of Highway network layers
        activation: keras activation, default is 'relu'
        name_prefix: str, default is '', layer name prefix
    # Returns:
        out: tensor, shape = (B, input_size)
    '''
    input_size = K.int_shape(x)[1]
    for i in range(num_layers):
        gate_ratio_name = '{}Highway/Gate_ratio_{}'.format(name_prefix, i)
        fc_name = '{}Highway/FC_{}'.format(name_prefix, i)
        gate_name = '{}Highway/Gate_{}'.format(name_prefix, i)

        gate_ratio = Dense(input_size, activation='sigmoid', name=gate_ratio_name)(x)
        fc = Dense(input_size, activation=activation, name=fc_name)(x)
        x = Lambda(lambda args: args[0] * args[2] + args[1] * (1 - args[2]), name=gate_name)([fc, x, gate_ratio])
    return x 
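
A minimal sketch of using Highway inside a model (the input size is illustrative):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(128,))
out = Highway(inp, num_layers=2, name_prefix='G_')
model = Model(inp, out)  # output shape matches the input: (B, 128)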
Example 6
Project: neural_collaborative_filtering   Author: hexiangnan   File: MLP.py   License: Apache License 2.0
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description="Run MLP.")
    parser.add_argument('--path', nargs='?', default='Data/',
                        help='Input data path.')
    parser.add_argument('--dataset', nargs='?', default='ml-1m',
                        help='Choose a dataset.')
    parser.add_argument('--epochs', type=int, default=100,
                        help='Number of epochs.')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='Batch size.')
    parser.add_argument('--layers', nargs='?', default='[64,32,16,8]',
                        help="Size of each layer. Note that the first layer is the concatenation of user and item embeddings. So layers[0]/2 is the embedding size.")
    parser.add_argument('--reg_layers', nargs='?', default='[0,0,0,0]',
                        help="Regularization for each layer")
    parser.add_argument('--num_neg', type=int, default=4,
                        help='Number of negative instances to pair with a positive instance.')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Learning rate.')
    parser.add_argument('--learner', nargs='?', default='adam',
                        help='Specify an optimizer: adagrad, adam, rmsprop, sgd')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Show performance per X iterations')
    parser.add_argument('--out', type=int, default=1,
                        help='Whether to save the trained model.')
    return parser.parse_args() 
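
Since --layers and --reg_layers arrive as strings, one safe way to turn them into Python lists (the project itself may parse them differently):

import ast

args = parse_args()
layer_sizes = ast.literal_eval(args.layers)      # e.g. [64, 32, 16, 8]
reg_layers = ast.literal_eval(args.reg_layers)   # e.g. [0, 0, 0, 0]
embedding_size = layer_sizes[0] // 2             # per the --layers help text above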
Example 7
Project: neural_collaborative_filtering   Author: hexiangnan   File: NeuMF.py   License: Apache License 2.0
import numpy as np

def load_pretrain_model(model, gmf_model, mlp_model, num_layers):
    # MF embeddings
    gmf_user_embeddings = gmf_model.get_layer('user_embedding').get_weights()
    gmf_item_embeddings = gmf_model.get_layer('item_embedding').get_weights()
    model.get_layer('mf_embedding_user').set_weights(gmf_user_embeddings)
    model.get_layer('mf_embedding_item').set_weights(gmf_item_embeddings)
    
    # MLP embeddings
    mlp_user_embeddings = mlp_model.get_layer('user_embedding').get_weights()
    mlp_item_embeddings = mlp_model.get_layer('item_embedding').get_weights()
    model.get_layer('mlp_embedding_user').set_weights(mlp_user_embeddings)
    model.get_layer('mlp_embedding_item').set_weights(mlp_item_embeddings)
    
    # MLP layers
    for i in range(1, num_layers):  # xrange in the original Python 2 code
        mlp_layer_weights = mlp_model.get_layer('layer%d' % i).get_weights()
        model.get_layer('layer%d' % i).set_weights(mlp_layer_weights)
        
    # Prediction weights: concatenate the two halves and scale by 0.5,
    # giving equal weight to the GMF and MLP branches
    gmf_prediction = gmf_model.get_layer('prediction').get_weights()
    mlp_prediction = mlp_model.get_layer('prediction').get_weights()
    new_weights = np.concatenate((gmf_prediction[0], mlp_prediction[0]), axis=0)
    new_b = gmf_prediction[1] + mlp_prediction[1]
    model.get_layer('prediction').set_weights([0.5*new_weights, 0.5*new_b])    
    return model 
Example 8
Project: deep_complex_networks   Author: ChihebTrabelsi   File: __init__.py   License: MIT License
import keras

def get_shallow_convnet(window_size=4096, channels=1, output_size=84):
    model = keras.models.Sequential()
    model.add(keras.layers.Conv1D(
        64, 512, strides=16, input_shape=(window_size, channels),
        activation='relu',
        kernel_initializer='glorot_normal'))
    model.add(keras.layers.MaxPooling1D(pool_size=4, strides=2))

    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(2048, activation='relu',
                                 kernel_initializer='glorot_normal'))
    model.add(keras.layers.Dense(output_size, activation='sigmoid',
                                 bias_initializer=keras.initializers.Constant(value=-5)))
    model.compile(optimizer=keras.optimizers.Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model 
Example 9
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py   License: BSD 3-Clause "New" or "Revised" License
def _get_softmax_name(self):
        """
        Looks for the name of the softmax layer.
        :return: Softmax layer name
        """
        for i, layer in enumerate(self.model.layers):
            cfg = layer.get_config()
            if 'activation' in cfg and cfg['activation'] == 'softmax':
                return layer.name

        raise Exception("No softmax layers found") 
Example 10
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py   License: BSD 3-Clause "New" or "Revised" License
def get_layer_names(self):
        """
        :return: Names of all the layers kept by Keras
        """
        layer_names = [x.name for x in self.model.layers]
        return layer_names 
Example 11
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py   License: BSD 3-Clause "New" or "Revised" License
def fprop(self, x):
        """
        Exposes all the layers of the model returned by get_layer_names.
        :param x: A symbolic representation of the network input
        :return: A dictionary mapping layer names to the symbolic
                 representation of their output.
        """
        from keras.models import Model as KerasModel

        if self.keras_model is None:
            # Get the input layer
            new_input = self.model.get_input_at(0)

            # Make a new model that returns each of the layers as output
            out_layers = [x_layer.output for x_layer in self.model.layers]
            self.keras_model = KerasModel(new_input, out_layers)

        # and get the outputs for that model on the input x
        outputs = self.keras_model(x)

        # Keras returns a bare tensor rather than a list when the model has a
        # single output layer, so wrap it in a list for consistency
        if len(self.model.layers) == 1:
            outputs = [outputs]

        # compute the dict to return
        fprop_dict = dict(zip(self.get_layer_names(), outputs))

        return fprop_dict 
Example 12
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py   License: Apache License 2.0
from keras.layers import Input, Conv1D, BatchNormalization, Activation, Dense
from keras.models import Model

def weather_conv1D(layers, lr, decay, loss,
               input_len, input_features, 
               strides_len, kernel_size):
    
    inputs = Input(shape=(input_len, input_features), name='input_layer')
    for i, hidden_nums in enumerate(layers):
        if i==0:
            #inputs = BatchNormalization(name='BN_input')(inputs)
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len, 
                        data_format='channels_last', 
                        padding='same', activation='linear')(inputs)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
        elif i<len(layers)-1:
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len,
                        data_format='channels_last',
                        padding='same', activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn) 
            hn = Activation('relu')(hn)
        else:
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len,
                        data_format='channels_last',
                        padding='same', activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn) 

    outputs = Dense(80, activation='relu', name='dense_layer')(hn)
    outputs = Dense(3, activation='tanh', name='output_layer')(outputs)

    weather_model = Model(inputs, outputs=[outputs])

    return weather_model 
Example 13
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py   License: Apache License 2.0
from keras.layers import Input, Conv1D, BatchNormalization, Activation, Dense, Reshape
from keras.models import Model

def weather_fnn(layers, lr,
            decay, loss, seq_len, 
            input_features, output_features):
    
    ori_inputs = Input(shape=(seq_len, input_features), name='input_layer')
    #print(seq_len*input_features)
    conv_ = Conv1D(11, kernel_size=13, strides=1, 
                        data_format='channels_last', 
                        padding='valid', activation='linear')(ori_inputs)
    conv_ = BatchNormalization(name='BN_conv')(conv_)
    conv_ = Activation('relu')(conv_)
    conv_ = Conv1D(5, kernel_size=7, strides=1, 
                        data_format='channels_last', 
                        padding='valid', activation='linear')(conv_)
    conv_ = BatchNormalization(name='BN_conv2')(conv_)
    conv_ = Activation('relu')(conv_)

    inputs = Reshape((-1,))(conv_)

    for i, hidden_nums in enumerate(layers):
        if i==0:
            hn = Dense(hidden_nums, activation='linear')(inputs)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
        else:
            hn = Dense(hidden_nums, activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
            #hn = Dropout(0.1)(hn)
    #print(seq_len, output_features)
    #print(hn)
    outputs = Dense(seq_len*output_features, activation='sigmoid', name='output_layer')(hn) # 37*3
    outputs = Reshape((seq_len, output_features))(outputs)

    weather_fnn = Model(ori_inputs, outputs=[outputs])

    return weather_fnn 
Example 14
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: seq2seq_class.py   License: Apache License 2.0
def build_graph(self):
        #keras.backend.clear_session() # clear session/graph    
        self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)

        self.model = Seq2Seq_MVE(id_embd=self.id_embd, time_embd=self.time_embd,
            lr=self.lr, decay=self.decay, 
            num_input_features=self.num_input_features, num_output_features=self.num_output_features,
            num_decoder_features=self.num_decoder_features, layers=self.layers,
            loss=self.loss, regulariser=self.regulariser, dropout_rate = self.dropout_rate)

        def loss_fn(y_true, y_pred):
            pred_u = crop(2, 0, 3)(y_pred)    # mean of the Gaussian distribution
            pred_sig = crop(2, 3, 6)(y_pred)  # variance of the Gaussian distribution
            if self.loss == 'mve':
                precision = 1. / pred_sig
                log_loss = 0.5 * tf.log(pred_sig) + 0.5 * precision * ((pred_u - y_true) ** 2)
                log_loss = tf.reduce_mean(log_loss)
                return log_loss
            elif self.loss == 'mse':
                mse_loss = tf.reduce_mean((pred_u - y_true) ** 2)
                return mse_loss
            elif self.loss == 'mae':
                mae_loss = tf.reduce_mean(tf.abs(y_true - pred_u))
                return mae_loss
            else:
                sys.exit("Loss type wrong! It can only be mae, mse or mve")
                
        print(self.model.summary())
        self.model.compile(optimizer=self.optimizer, loss=loss_fn) 
Example 15
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py   License: Apache License 2.0
def build_graph(self):
        keras.backend.clear_session() # clear session/graph

        self.model = weather_conv1D(self.layers, self.lr,
            self.decay, self.loss, self.input_len, 
            self.input_features, self.kernel_strides, self.kernel_size)

        print(self.model.summary()) 
Example 16
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py   License: Apache License 2.0
def __init__(self, regulariser,lr, decay, loss, 
        layers, batch_size, seq_len, input_features, output_features):

        self.regulariser = regulariser
        self.layers = layers
        self.lr = lr
        self.decay = decay
        self.loss = loss
        self.seq_len = seq_len
        self.input_features = input_features
        self.output_features = output_features 
Example 17
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py   License: Apache License 2.0
def build_graph(self):
        keras.backend.clear_session() # clear session/graph

        self.model = weather_fnn(self.layers, self.lr,
            self.decay, self.loss, self.seq_len, 
            self.input_features, self.output_features)

        print(self.model.summary()) 
Example 18
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py   License: Apache License 2.0
def __init__(self, num_input_features, num_output_features, num_decoder_features,
               input_sequence_length, target_sequence_length,
              num_steps_to_predict, regulariser = None,
              lr=0.001, decay=0, loss = "mse",
              layers=[35, 35]):
        
        self.num_input_features = num_input_features
        self.num_output_features = num_output_features
        self.num_decoder_features = num_decoder_features
        self.input_sequence_length = input_sequence_length
        self.target_sequence_length = target_sequence_length
        self.num_steps_to_predict = num_steps_to_predict
        self.regulariser = regulariser
        self.layers = layers
        self.lr = lr
        self.decay = decay
        self.loss = loss
        self.pred_result = None
        self.train_loss = []

        self.target_list = ['t2m', 'rh2m', 'w10m']

        self.obs_range_dic = {'t2m': [-30, 42],  # Official value: [-20, 42]
                              'rh2m': [0.0, 100.0],
                              'w10m': [0.0, 30.0]}

        print('Initialized!') 
Example 19
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py   License: Apache License 2.0
def build_graph(self):
        keras.backend.clear_session() # clear session/graph
        self.model = RNN_builder(self.num_output_features, self.num_decoder_features,
                self.target_sequence_length,
              self.num_steps_to_predict, self.regulariser,
              self.lr, self.decay, self.loss, self.layers)

        print(self.model.summary()) 
Example 20
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py   License: Apache License 2.0
def __init__(self, id_embd, time_embd, 
        num_input_features, num_output_features, num_decoder_features,
               input_sequence_length, target_sequence_length,
              num_steps_to_predict, regulariser = None,
              lr=0.001, decay=0, loss = "mse",
              layers=[35, 35], model_save_path='../models', 
              model_structure_name='seq2seq_model.json', model_weights_name='seq2seq_model_weights.h5'):
        
        super().__init__(num_input_features, num_output_features, num_decoder_features,
                         input_sequence_length, target_sequence_length,
                         num_steps_to_predict, regulariser=regulariser,
                         lr=lr, decay=decay, loss=loss,
                         layers=layers)

        self.id_embd = id_embd
        self.time_embd = time_embd
        self.val_loss_list = []
        self.train_loss_list = []
        self.current_mean_val_loss = None
        self.early_stop_limit = 10  # in units of the iteration display interval
        self.EARLY_STOP = False
        self.pred_var_result = []

        self.pi_dic = {0.95: 1.96, 0.9: 1.645, 0.8: 1.28, 0.68: 1.}
        self.target_list = ['t2m', 'rh2m', 'w10m']
        self.obs_range_dic = {'t2m': [-30, 42],  # Official value: [-20, 42]
                              'rh2m': [0.0, 100.0],
                              'w10m': [0.0, 30.0]}
        self.obs_and_output_feature_index_map = {'t2m': 0, 'rh2m': 1, 'w10m': 2}
        self.ruitu_feature_index_map = {'t2m': 1, 'rh2m': 3, 'w10m': 4}
        self.model_save_path = model_save_path
        self.model_structure_name = model_structure_name
        self.model_weights_name = model_weights_name 
Example 21
Project: CapsNet   Author: l11x0m7   File: capsule.py   License: MIT License
def call(self, inputs):
        output = self.conv1(inputs)
        output = layers.Reshape(target_shape=[-1, self.dim_capsule], name='primarycap_reshape')(output)
        return squash(output) 
Example 22
Project: CapsNet   Author: l11x0m7   File: capsule.py   License: MIT License
def CapsuleNet(input_shape, n_class, num_routing):
    """
    The whole capsule network for MNIST recognition.
    """
    # (None, H, W, C)
    x = Input(input_shape)

    conv1 = Conv2D(filters=256, kernel_size=9, padding='valid', activation='relu', name='init_conv')(x)

    # (None, num_capsules, capsule_dim)
    prim_caps = PrimaryCapsules(filters=32, kernel_size=9, dim_capsule=8, padding='valid', strides=(2, 2))(conv1)
    # (None, n_class, dim_vector)
    digit_caps = DigiCaps(num_capsule=n_class, dim_capsule=16, 
            num_routing=num_routing, name='digitcaps')(prim_caps)

    # (None, n_class)
    pred = Length(name='out_caps')(digit_caps)

    # (None, n_class)
    y = Input(shape=(n_class, ))

    # (None, n_class * dim_vector)
    masked = Mask()([digit_caps, y])  

    x_recon = layers.Dense(512, activation='relu')(masked)
    x_recon = layers.Dense(1024, activation='relu')(x_recon)
    x_recon = layers.Dense(784, activation='sigmoid')(x_recon)
    x_recon = layers.Reshape(target_shape=[28, 28, 1], name='out_recon')(x_recon)

    # two-input-two-output keras Model
    return Model([x, y], [pred, x_recon]) 
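
A hypothetical MNIST-sized instantiation (the shapes and routing count are illustrative; PrimaryCapsules, DigiCaps, Length and Mask come from the project):

model = CapsuleNet(input_shape=(28, 28, 1), n_class=10, num_routing=3)
model.summary()  # two inputs (image, true label) and two outputs (class lengths, reconstruction)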
Example 23
Project: dataiku-contrib   Author: dataiku   File: model.py   License: Apache License 2.0
def call(self, inputs, training=None):
        """
        Note about training values:
            None: Train BN layers. This is the normal mode
            False: Freeze BN layers. Good when batch size is small
            True: (don't use). Set layer in training mode even when making inferences
        """
        return super(self.__class__, self).call(inputs, training=training) 
Example 24
Project: dataiku-contrib   Author: dataiku   File: model.py   License: Apache License 2.0
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x 
Example 25
Project: dataiku-contrib   Author: dataiku   File: model.py   License: Apache License 2.0
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path has strides=(2, 2),
    and the shortcut has strides=(2, 2) as well.
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
                  '2c', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)

    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x 
Example 26
Project: dataiku-contrib   Author: dataiku   File: model.py   License: Apache License 2.0
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
        architecture: Can be resnet50 or resnet101
        stage5: Boolean. If False, stage5 of the network is not created
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]


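A minimal usage sketch, assuming KL is keras.layers as in this project and an illustrative input size:

import keras.layers as KL

input_image = KL.Input(shape=(512, 512, 3))
C1, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True, train_bn=False)
# C1..C5 are feature maps at increasing depth; C5 is None when stage5=False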
Example 27
Project: dataiku-contrib   Author: dataiku   File: model.py   License: Apache License 2.0
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
        """Sets model layers as trainable if their names match
        the given regular expression.
        """
        # Print message on the first call (but not on recursive calls)
        if verbose > 0 and keras_model is None:
            log("Selecting layers to train")

        keras_model = keras_model or self.keras_model

        # In multi-GPU training, we wrap the model. Get layers
        # of the inner model because they have the weights.
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
            else keras_model.layers

        for layer in layers:
            # Is the layer a model?
            if layer.__class__.__name__ == 'Model':
                print("In model: ", layer.name)
                self.set_trainable(
                    layer_regex, keras_model=layer, indent=indent + 4)
                continue

            if not layer.weights:
                continue
            # Is it trainable?
            trainable = bool(re.fullmatch(layer_regex, layer.name))
            # Update layer. If layer is a container, update inner layer.
            if layer.__class__.__name__ == 'TimeDistributed':
                layer.layer.trainable = trainable
            else:
                layer.trainable = trainable
            # Print trainable layer names
            if trainable and verbose > 0:
                log("{}{:20}   ({})".format(" " * indent, layer.name,
                                            layer.__class__.__name__)) 
Example 28
Project: dataiku-contrib   Author: dataiku   File: model.py   License: Apache License 2.0
def get_trainable_layers(self):
        """Returns a list of layers that have weights."""
        layers = []
        # Loop through all layers
        for l in self.keras_model.layers:
            # If layer is a wrapper, find inner trainable layer
            l = self.find_trainable_layer(l)
            # Include layer if it has weights
            if l.get_weights():
                layers.append(l)
        return layers 
Example 29
Project: Semantic-Segmentation   Author: bubbliiiing   File: resnet50.py   License: MIT License
def identity_block(input_tensor, kernel_size, filters, stage, block):

    filters1, filters2, filters3 = filters
    
    if IMAGE_ORDERING == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # 1x1 conv: compress channels
    x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    # 3x3 conv: extract features
    x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING,
               padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    # 1x1 conv: expand channels
    x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
    # residual connection
    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x

# Unlike identity_block, conv_block can shrink the feature map's width and height via strides (downsampling)
Example 30
Project: Semantic-Segmentation   Author: bubbliiiing   File: resnet50.py   License: MIT License
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):

    filters1, filters2, filters3 = filters
    
    if IMAGE_ORDERING == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # 1x1 conv: compress channels
    x = Conv2D(filters1, (1, 1), data_format=IMAGE_ORDERING, strides=strides,
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    # 3x3 conv: extract features
    x = Conv2D(filters2, kernel_size, data_format=IMAGE_ORDERING, padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    # 1x1 conv: expand channels
    x = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
    # 1x1 conv on the shortcut branch, so its shape matches the main path
    shortcut = Conv2D(filters3, (1, 1), data_format=IMAGE_ORDERING, strides=strides,
                      name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
    # add main path and shortcut
    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x
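
A sketch of how the two blocks combine into one ResNet stage, mirroring the layout of a standard resnet50 (img_input and IMAGE_ORDERING are assumed to be defined as in the project):

x = conv_block(img_input, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')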