Python keras.regularizers.l2() Examples
The following are 30 code examples of keras.regularizers.l2(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.regularizers, or try the search function.
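All of the examples below follow the same basic pattern: keras.regularizers.l2() builds a regularizer object that is handed to a layer via its kernel_regularizer, bias_regularizer, or activity_regularizer argument, and the resulting L2 penalty is added to the model's training loss. As a quick orientation, here is a minimal, self-contained sketch of that pattern; the layer sizes and the 0.01 factor are illustrative values, not taken from any of the projects below.

from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers

model = Sequential()
# L2-penalize the kernel weights (and, optionally, the biases) of a layer;
# the 0.01 factor is an arbitrary illustrative choice.
model.add(Dense(64, activation='relu', input_shape=(20,),
                kernel_regularizer=regularizers.l2(0.01),
                bias_regularizer=regularizers.l2(0.01)))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')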
Example #1
Source File: weather_model.py From Deep_Learning_Weather_Forecasting with Apache License 2.0 | 7 votes |
def weather_l2(hidden_nums=100, l2=0.01):
    input_img = Input(shape=(37,))
    hn = Dense(hidden_nums, activation='relu')(input_img)
    hn = Dense(hidden_nums, activation='relu',
               kernel_regularizer=regularizers.l2(l2))(hn)
    out_u = Dense(37, activation='sigmoid', name='ae_part')(hn)
    out_sig = Dense(37, activation='linear', name='pred_part')(hn)
    out_both = concatenate([out_u, out_sig], axis=1, name='concatenate')

    #weather_model = Model(input_img, outputs=[out_ae, out_pred])
    mve_model = Model(input_img, outputs=[out_both])
    mve_model.compile(optimizer='adam', loss=mve_loss, loss_weights=[1.])

    return mve_model
Example #2
Source File: plot_model_struct.py From speaker_recognition with Apache License 2.0 | 6 votes |
def construct_model(classe_nums):
    model = Sequential()

    model.add(Conv1D(filters=256, kernel_size=3, strides=1, activation='relu',
                     input_shape=(99, 40), name='block1_conv1'))
    model.add(MaxPool1D(pool_size=2, name='block1_pool1'))
    model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, axis=1))

    model.add(Conv1D(filters=256, kernel_size=3, strides=1, activation='relu',
                     name='block1_conv2'))
    model.add(MaxPool1D(pool_size=2, name='block1_pool2'))
    model.add(Flatten(name='block1_flat1'))
    model.add(Dropout(0.5, name='block1_drop1'))

    model.add(Dense(512, activation='relu', name='block2_dense2'))
    model.add(MaxoutDense(512, nb_feature=4, name='block2_maxout2'))
    model.add(Dropout(0.5, name='block2_drop2'))

    model.add(Dense(512, activation='relu', name='block2_dense3',
                    kernel_regularizer=l2(1e-4)))
    model.add(MaxoutDense(512, nb_feature=4, name='block2_maxout3'))

    model.add(Dense(classe_nums, activation='softmax', name='predict'))

    # plot_model(model, to_file='model_struct.png', show_shapes=True, show_layer_names=False)

    model.summary()
Example #3
Source File: architectures.py From steppy-toolkit with MIT License | 6 votes |
def cudnn_lstm_block(unit_nr, return_sequences, bidirectional, kernel_reg_l2,
                     recurrent_reg_l2, bias_reg_l2, use_batch_norm,
                     batch_norm_first, dropout, dropout_mode, use_prelu):
    def f(x):
        # note: the extracted snippet read "uunits=unit_nr"; the keyword is "units"
        lstm_layer = CuDNNLSTM(units=unit_nr,
                               return_sequences=return_sequences,
                               kernel_regularizer=regularizers.l2(kernel_reg_l2),
                               recurrent_regularizer=regularizers.l2(recurrent_reg_l2),
                               bias_regularizer=regularizers.l2(bias_reg_l2))
        if bidirectional:
            x = Bidirectional(lstm_layer)(x)
        else:
            x = lstm_layer(x)
        x = bn_relu_dropout_block(use_batch_norm=use_batch_norm,
                                  batch_norm_first=batch_norm_first,
                                  dropout=dropout,
                                  dropout_mode=dropout_mode,
                                  use_prelu=use_prelu)(x)
        return x
    return f
Example #4
Source File: localizer.py From cnn-levelset with MIT License | 6 votes |
def __init__(self, model_path=None):
    if model_path is not None:
        self.model = self.load_model(model_path)
    else:
        # VGG16 last conv features
        inputs = Input(shape=(7, 7, 512))
        x = Convolution2D(128, 1, 1)(inputs)
        x = Flatten()(x)

        # Cls head
        h_cls = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
        h_cls = Dropout(p=0.5)(h_cls)
        cls_head = Dense(20, activation='softmax', name='cls')(h_cls)

        # Reg head
        h_reg = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
        h_reg = Dropout(p=0.5)(h_reg)
        reg_head = Dense(4, activation='linear', name='reg')(h_reg)

        # Joint model
        self.model = Model(input=inputs, output=[cls_head, reg_head])
Example #5
Source File: architectures.py From steppy-toolkit with MIT License | 6 votes |
def cudnn_gru_block(unit_nr, return_sequences, bidirectional, kernel_reg_l2,
                    recurrent_reg_l2, bias_reg_l2, use_batch_norm,
                    batch_norm_first, dropout, dropout_mode, use_prelu):
    def f(x):
        gru_layer = CuDNNGRU(units=unit_nr,
                             return_sequences=return_sequences,
                             kernel_regularizer=regularizers.l2(kernel_reg_l2),
                             recurrent_regularizer=regularizers.l2(recurrent_reg_l2),
                             bias_regularizer=regularizers.l2(bias_reg_l2))
        if bidirectional:
            x = Bidirectional(gru_layer)(x)
        else:
            x = gru_layer(x)
        x = bn_relu_dropout_block(use_batch_norm=use_batch_norm,
                                  batch_norm_first=batch_norm_first,
                                  dropout=dropout,
                                  dropout_mode=dropout_mode,
                                  use_prelu=use_prelu)(x)
        return x
    return f
Example #6
Source File: Build_Model.py From DOVE with GNU General Public License v3.0 | 6 votes |
def makecnn(learningrate, regular, decay, channel_number):
    # model structure
    model = Sequential()
    model.add(Conv3D(100, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                     input_shape=(20, 20, 20, channel_number), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1),
                     use_bias=True, kernel_initializer='glorot_normal',
                     bias_initializer='zeros', kernel_regularizer=None,
                     bias_regularizer=None, activity_regularizer=l2(regular),
                     kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))

    model.add(Conv3D(200, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1),
                     use_bias=True, kernel_initializer='glorot_normal',
                     bias_initializer='zeros', kernel_regularizer=None,
                     bias_regularizer=None, activity_regularizer=l2(regular),
                     kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))

    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid',
                           data_format='channels_last'))
    model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True,
                                 scale=True, beta_initializer='zeros',
                                 gamma_initializer='ones',
                                 moving_mean_initializer='zeros',
                                 moving_variance_initializer='ones',
                                 beta_regularizer=None, gamma_regularizer=None,
                                 beta_constraint=None, gamma_constraint=None))

    model.add(Conv3D(400, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid',
                     data_format='channels_last', dilation_rate=(1, 1, 1),
                     use_bias=True, kernel_initializer='glorot_normal',
                     bias_initializer='zeros', kernel_regularizer=None,
                     bias_regularizer=None, activity_regularizer=l2(regular),
                     kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    #model.add(Dropout(0.3))

    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid',
                           data_format='channels_last'))
    model.add(Flatten())
    model.add(Dropout(0.3))
    model.add(Dense(1000, use_bias=True, input_shape=(32000,),
                    kernel_initializer='glorot_normal', bias_initializer='zeros',
                    kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular), kernel_constraint=None,
                    bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(100, use_bias=True, kernel_initializer='glorot_normal',
                    bias_initializer='zeros', kernel_regularizer=None,
                    bias_regularizer=None, activity_regularizer=l2(regular),
                    kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(1, activation='sigmoid', use_bias=True,
                    kernel_initializer='glorot_normal', bias_initializer='zeros',
                    kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular), kernel_constraint=None,
                    bias_constraint=None))

    nadam = Nadam(lr=learningrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                  schedule_decay=decay)
    model.compile(loss='binary_crossentropy', optimizer=nadam,
                  metrics=['accuracy', f1score, precision, recall])
    return model
Example #7
Source File: dual_path_network.py From Keras-DualPathNetworks with Apache License 2.0 | 6 votes |
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the DPN
    Args:
        input: input tensor
        initial_conv_filters: number of filters for initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
               strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
Example #8
Source File: dual_path_network.py From Keras-DualPathNetworks with Apache License 2.0 | 6 votes |
def _bn_relu_conv_block(input, filters, kernel=(3, 3), stride=(1, 1), weight_decay=5e-4):
    ''' Adds a Batchnorm-Relu-Conv block for DPN
    Args:
        input: input tensor
        filters: number of output filters
        kernel: convolution kernel size
        stride: stride of convolution
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(filters, kernel, padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay),
               strides=stride)(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    return x
Example #9
Source File: pie_predict.py From PIEPredict with Apache License 2.0 | 6 votes |
def __init__(self, num_hidden_units=256, regularizer_val=0.0001,
             activation='softsign', embed_size=64, embed_dropout=0):
    # Network parameters
    self._num_hidden_units = num_hidden_units
    self._regularizer_value = regularizer_val
    self._regularizer = regularizers.l2(regularizer_val)
    self._activation = activation
    self._embed_size = embed_size
    self._embed_dropout = embed_dropout

    # model parameters
    self._observe_length = 15
    self._predict_length = 15
    self._encoder_feature_size = 4
    self._decoder_feature_size = 4
    self._prediction_size = 4
Example #10
Source File: se_resnext.py From keras-squeeze-excite-network with MIT License | 6 votes |
def __initial_conv_block_inception(input_tensor, weight_decay=5e-4):
    """ Adds an initial conv block, with batch norm and relu for the inception resnext
    Args:
        input_tensor: input Keras tensor
        weight_decay: weight decay factor
    Returns: a Keras tensor
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (7, 7), padding='same', use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input_tensor)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
Example #11
Source File: gc_densenet.py From keras-global-context-networks with MIT License | 6 votes |
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and
    AveragePooling2D, followed by a global context block
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of
            feature maps in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor after batch_norm, relu-conv, avgpool and global context
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal',
               padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # global context block
    x = global_context_block(x)

    return x
Example #12
Source File: GMF.py From neural_collaborative_filtering with Apache License 2.0 | 6 votes |
def get_model(num_users, num_items, latent_dim, regs=[0, 0]):
    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    MF_Embedding_User = Embedding(input_dim=num_users, output_dim=latent_dim,
                                  name='user_embedding', init=init_normal,
                                  W_regularizer=l2(regs[0]), input_length=1)
    MF_Embedding_Item = Embedding(input_dim=num_items, output_dim=latent_dim,
                                  name='item_embedding', init=init_normal,
                                  W_regularizer=l2(regs[1]), input_length=1)

    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MF_Embedding_User(user_input))
    item_latent = Flatten()(MF_Embedding_Item(item_input))

    # Element-wise product of user and item embeddings
    predict_vector = merge([user_latent, item_latent], mode='mul')

    # Final prediction layer
    #prediction = Lambda(lambda x: K.sigmoid(K.sum(x)), output_shape=(1,))(predict_vector)
    prediction = Dense(1, activation='sigmoid', init='lecun_uniform',
                       name='prediction')(predict_vector)

    model = Model(input=[user_input, item_input], output=prediction)
    return model
Example #13
Source File: model.py From Deep-Speckle-Correlation with BSD 3-Clause "New" or "Revised" License | 6 votes |
def conv_factory(x, concat_axis, nb_filter, dropout_rate=None, weight_decay=1E-4):
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (5, 5), dilation_rate=(2, 2),
               kernel_initializer="he_uniform", padding="same",
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x

# define dense block
Example #14
Source File: training.py From deep_complex_networks with MIT License | 6 votes |
def learnConcatRealImagBlock(I, filter_size, featmaps, stage, block, convArgs, bnArgs, d):
    """Learn initial imaginary component for input."""
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    O = BatchNormalization(name=bn_name_base + '2a', **bnArgs)(I)
    O = Activation(d.act)(O)
    O = Convolution2D(featmaps[0], filter_size,
                      name=conv_name_base + '2a',
                      padding='same',
                      kernel_initializer='he_normal',
                      use_bias=False,
                      kernel_regularizer=l2(0.0001))(O)

    O = BatchNormalization(name=bn_name_base + '2b', **bnArgs)(O)
    O = Activation(d.act)(O)
    O = Convolution2D(featmaps[1], filter_size,
                      name=conv_name_base + '2b',
                      padding='same',
                      kernel_initializer='he_normal',
                      use_bias=False,
                      kernel_regularizer=l2(0.0001))(O)

    return O
Example #15
Source File: core.py From text-detection-ocr with Apache License 2.0 | 6 votes |
def _transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    x = BatchNormalization(epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    if pooltype == 2:
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif pooltype == 1:
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif pooltype == 3:
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)

    return x, nb_filter
Example #16
Source File: blocks.py From keras-fcn with MIT License | 6 votes |
def vgg_fc(filters, weight_decay=0., block_name='block5'):
    """A fully convolutional block for encoding.

    :param filters: Integer, number of filters per fc layer

    >>> from keras_fcn.blocks import vgg_fc
    >>> x = vgg_fc(filters=4096)(x)

    """
    def f(x):
        fc6 = Conv2D(filters=4096, kernel_size=(7, 7),
                     activation='relu', padding='same',
                     dilation_rate=(2, 2),
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay),
                     name='{}_fc6'.format(block_name))(x)
        drop6 = Dropout(0.5)(fc6)
        fc7 = Conv2D(filters=4096, kernel_size=(1, 1),
                     activation='relu', padding='same',
                     kernel_initializer='he_normal',
                     kernel_regularizer=l2(weight_decay),
                     name='{}_fc7'.format(block_name))(drop6)
        drop7 = Dropout(0.5)(fc7)
        return drop7
    return f
Example #17
Source File: model.py From Vehicle-Detection-and-Tracking-Usig-YOLO-and-Deep-Sort-with-Keras-and-Tensorflow with MIT License | 5 votes |
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs)
Example #18
Source File: model.py From keras-yolo3-master with MIT License | 5 votes |
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs)
Example #19
Source File: model.py From deep_sort_yolov3 with GNU General Public License v3.0 | 5 votes |
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs)
Example #20
Source File: squeezeDet.py From squeezedet-keras with MIT License | 5 votes |
def _fire_layer(self, name, input, s1x1, e1x1, e3x3, stdd=0.01):
    """
    wrapper for fire layer constructions
    :param name: name for layer
    :param input: previous layer
    :param s1x1: number of filters for squeezing
    :param e1x1: number of filters for expand 1x1
    :param e3x3: number of filters for expand 3x3
    :param stdd: standard deviation used for initialization
    :return: a keras fire layer
    """
    sq1x1 = Conv2D(
        name=name + '/squeeze1x1', filters=s1x1, kernel_size=(1, 1), strides=(1, 1),
        use_bias=True, padding='SAME',
        kernel_initializer=TruncatedNormal(stddev=stdd), activation="relu",
        kernel_regularizer=l2(self.config.WEIGHT_DECAY))(input)

    ex1x1 = Conv2D(
        name=name + '/expand1x1', filters=e1x1, kernel_size=(1, 1), strides=(1, 1),
        use_bias=True, padding='SAME',
        kernel_initializer=TruncatedNormal(stddev=stdd), activation="relu",
        kernel_regularizer=l2(self.config.WEIGHT_DECAY))(sq1x1)

    ex3x3 = Conv2D(
        name=name + '/expand3x3', filters=e3x3, kernel_size=(3, 3), strides=(1, 1),
        use_bias=True, padding='SAME',
        kernel_initializer=TruncatedNormal(stddev=stdd), activation="relu",
        kernel_regularizer=l2(self.config.WEIGHT_DECAY))(sq1x1)

    return concatenate([ex1x1, ex3x3], axis=3)

# wrapper for padding, written in tensorflow. If you want to change to theano you need to rewrite this!
Example #21
Source File: utils.py From sesemi with MIT License | 5 votes |
def compile_sesemi(network, input_shape, nb_classes, lrate,
                   in_network_dropout, super_dropout):
    weight_decay = 0.0005
    initer = initializers.glorot_uniform()

    fc_params = dict(
        use_bias=True,
        activation='softmax',
        kernel_initializer=initer,
        kernel_regularizer=l2(weight_decay),
    )

    cnn_trunk = network.create_network(input_shape, in_network_dropout)

    super_in = Input(shape=input_shape, name='super_data')
    self_in = Input(shape=input_shape, name='self_data')

    super_out = cnn_trunk(super_in)
    self_out = cnn_trunk(self_in)

    super_out = GlobalAveragePooling2D(name='super_gap')(super_out)
    self_out = GlobalAveragePooling2D(name='self_gap')(self_out)

    if super_dropout > 0.0:
        super_out = Dropout(super_dropout, name='super_dropout')(super_out)

    super_out = Dense(nb_classes, name='super_clf', **fc_params)(super_out)
    self_out = Dense(proxy_labels, name='self_clf', **fc_params)(self_out)

    sesemi_model = Model(inputs=[self_in, super_in], outputs=[self_out, super_out])
    inference_model = Model(inputs=[super_in], outputs=[super_out])

    sgd = optimizers.SGD(lr=lrate, momentum=0.9, nesterov=True)
    sesemi_model.compile(optimizer=sgd,
                         loss={'super_clf': 'categorical_crossentropy',
                               'self_clf': 'categorical_crossentropy'},
                         loss_weights={'super_clf': 1.0, 'self_clf': 1.0},
                         metrics=None)
    return sesemi_model, inference_model
Example #22
Source File: model.py From YOLO-3D-Box with MIT License | 5 votes |
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs)
Example #23
Source File: auto_encoder.py From pyod with BSD 2-Clause "Simplified" License | 5 votes |
def _build_model(self):
    model = Sequential()
    # Input layer
    model.add(Dense(
        self.hidden_neurons_[0], activation=self.hidden_activation,
        input_shape=(self.n_features_,),
        activity_regularizer=l2(self.l2_regularizer)))
    model.add(Dropout(self.dropout_rate))

    # Additional layers
    for i, hidden_neurons in enumerate(self.hidden_neurons_, 1):
        model.add(Dense(
            hidden_neurons,
            activation=self.hidden_activation,
            activity_regularizer=l2(self.l2_regularizer)))
        model.add(Dropout(self.dropout_rate))

    # Output layers
    model.add(Dense(self.n_features_, activation=self.output_activation,
                    activity_regularizer=l2(self.l2_regularizer)))

    # Compile model
    model.compile(loss=self.loss, optimizer=self.optimizer)
    if self.verbose >= 1:
        print(model.summary())
    return model

# noinspection PyUnresolvedReferences
Example #24
Source File: lenet.py From keras-deepcv with MIT License | 5 votes |
def lenet_model(img_shape=(28, 28, 1), n_classes=10, l2_reg=0., weights=None):
    # Initialize model
    lenet = Sequential()

    # 2 sets of CRP (Convolution, RELU, Pooling)
    lenet.add(Conv2D(20, (5, 5), padding="same",
                     input_shape=img_shape, kernel_regularizer=l2(l2_reg)))
    lenet.add(Activation("relu"))
    lenet.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    lenet.add(Conv2D(50, (5, 5), padding="same",
                     kernel_regularizer=l2(l2_reg)))
    lenet.add(Activation("relu"))
    lenet.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # Fully connected layers (w/ RELU)
    lenet.add(Flatten())
    lenet.add(Dense(500, kernel_regularizer=l2(l2_reg)))
    lenet.add(Activation("relu"))

    # Softmax (for classification)
    lenet.add(Dense(n_classes, kernel_regularizer=l2(l2_reg)))
    lenet.add(Activation("softmax"))

    if weights is not None:
        lenet.load_weights(weights)

    # Return the constructed network
    return lenet
Example #25
Source File: architectures.py From steppy-toolkit with MIT License | 5 votes |
def dense_block(dense_size, use_batch_norm, use_prelu, dropout,
                kernel_reg_l2, bias_reg_l2, batch_norm_first):
    def f(x):
        x = Dense(dense_size, activation='linear',
                  kernel_regularizer=regularizers.l2(kernel_reg_l2),
                  bias_regularizer=regularizers.l2(bias_reg_l2))(x)
        x = bn_relu_dropout_block(use_batch_norm=use_batch_norm,
                                  use_prelu=use_prelu,
                                  dropout=dropout,
                                  dropout_mode='simple',
                                  batch_norm_first=batch_norm_first)(x)
        return x
    return f
Example #26
Source File: MLP.py From neural_collaborative_filtering with Apache License 2.0 | 5 votes |
def get_model(num_users, num_items, layers=[20, 10], reg_layers=[0, 0]):
    assert len(layers) == len(reg_layers)
    num_layer = len(layers)  # Number of layers in the MLP

    # Input variables
    user_input = Input(shape=(1,), dtype='int32', name='user_input')
    item_input = Input(shape=(1,), dtype='int32', name='item_input')

    MLP_Embedding_User = Embedding(input_dim=num_users, output_dim=layers[0]/2,
                                   name='user_embedding', init=init_normal,
                                   W_regularizer=l2(reg_layers[0]), input_length=1)
    MLP_Embedding_Item = Embedding(input_dim=num_items, output_dim=layers[0]/2,
                                   name='item_embedding', init=init_normal,
                                   W_regularizer=l2(reg_layers[0]), input_length=1)

    # Crucial to flatten an embedding vector!
    user_latent = Flatten()(MLP_Embedding_User(user_input))
    item_latent = Flatten()(MLP_Embedding_Item(item_input))

    # The 0-th layer is the concatenation of embedding layers
    vector = merge([user_latent, item_latent], mode='concat')

    # MLP layers
    for idx in xrange(1, num_layer):
        layer = Dense(layers[idx], W_regularizer=l2(reg_layers[idx]),
                      activation='relu', name='layer%d' % idx)
        vector = layer(vector)

    # Final prediction layer
    prediction = Dense(1, activation='sigmoid', init='lecun_uniform',
                       name='prediction')(vector)

    model = Model(input=[user_input, item_input], output=prediction)
    return model
Example #27
Source File: gc_densenet.py From keras-global-context-networks with MIT License | 5 votes |
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    ''' Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout
    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        bottleneck: add bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua

        x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal',
                   padding='same', use_bias=False,
                   kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same',
               use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
Example #28
Source File: se_resnext.py From keras-squeeze-excite-network with MIT License | 5 votes |
def __grouped_convolution_block(input_tensor, grouped_channels, cardinality, strides, weight_decay=5e-4):
    """ Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input_tensor: input Keras tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a Keras tensor
    """
    init = input_tensor

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False,
                   strides=(strides, strides),
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = LeakyReLU()(x)
        return x

    for c in range(cardinality):
        # slice out the channels belonging to group c; the extracted snippet
        # wrapped the channels_first branch in a second lambda, which would make
        # the Lambda layer return a function instead of a tensor slice
        x = Lambda(lambda z: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last' else
                   z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input_tensor)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False,
                   strides=(strides, strides),
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = LeakyReLU()(x)

    return x
Example #29
Source File: se_resnext.py From keras-squeeze-excite-network with MIT License | 5 votes |
def __initial_conv_block(input_tensor, weight_decay=5e-4):
    """ Adds an initial convolution block, with batch normalization and relu activation
    Args:
        input_tensor: input Keras tensor
        weight_decay: weight decay factor
    Returns: a Keras tensor
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (3, 3), padding='same', use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(input_tensor)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    return x
Example #30
Source File: autoencoder.py From AIAlpha with MIT License | 5 votes |
def build_model(self, encoded1_shape, encoded2_shape, decoded1_shape, decoded2_shape):
    input_data = Input(shape=(1, self.input_shape))

    # encoded1 = Dense(encoded1_shape, activation="relu", activity_regularizer=regularizers.l2(0))(input_data)
    # encoded2 = Dense(encoded2_shape, activation="relu", activity_regularizer=regularizers.l2(0))(encoded1)
    # encoded3 = Dense(self.encoding_dim, activation="relu", activity_regularizer=regularizers.l2(0))(encoded2)
    # decoded1 = Dense(decoded1_shape, activation="relu", activity_regularizer=regularizers.l2(0))(encoded3)
    # decoded2 = Dense(decoded2_shape, activation="relu", activity_regularizer=regularizers.l2(0))(decoded1)
    # decoded = Dense(self.input_shape, activation="sigmoid", activity_regularizer=regularizers.l2(0))(decoded2)

    encoded3 = Dense(self.encoding_dim, activation="relu",
                     activity_regularizer=regularizers.l2(0))(input_data)
    decoded = Dense(self.input_shape, activation="sigmoid",
                    activity_regularizer=regularizers.l2(0))(encoded3)

    self.autoencoder = Model(inputs=input_data, outputs=decoded)
    self.encoder = Model(input_data, encoded3)