Python keras.regularizers.l1l2() Examples

The following code examples show how to use keras.regularizers.l1l2(). They are taken from open source Python projects.
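Note that l1l2 is the Keras 1 name for the combined L1 + L2 (elastic net) weight regularizer; Keras 2 renamed it to regularizers.l1_l2 (backed by the L1L2 class) and replaced the W_regularizer layer argument with kernel_regularizer. A minimal sketch of both forms, assuming the matching Keras version is installed:

# Keras 1: attach an elastic-net penalty to a layer's weight matrix
from keras.layers import Dense
from keras.regularizers import l1l2

layer = Dense(64, W_regularizer=l1l2(l1=1e-5, l2=1e-4))

# Keras 2 equivalent (l1l2 was removed):
#   from keras.regularizers import l1_l2
#   layer = Dense(64, kernel_regularizer=l1_l2(l1=1e-5, l2=1e-4))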

Example 1
Project: MODS_ConvNet   Author: santiagolopezg   File: convnet_keras_3.py   MIT License
# Imports implied by the original file (Keras 1 API).
from keras.layers import (Activation, BatchNormalization, Convolution2D,
                          Dense, Dropout, Flatten, MaxPooling2D)
from keras.models import Sequential
from keras.regularizers import l1l2

def network(regl1, regl2, weight_init, dropout, optimize):
    
    #create network architecture
    model = Sequential()
    
    model.add(Convolution2D(16, 7, 7, input_shape=(1, 256, 192), W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(32, 6, 6, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(64, 3, 3, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(64, 2, 2, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Flatten())
    model.add(Dense(50, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    
    model.add(Dense(output_dim=1))
    model.add(Activation('sigmoid'))    

    model.compile(optimizer=optimize, loss='binary_crossentropy', metrics=['accuracy'])
    
    return model 
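A hypothetical call to this function; the argument values below are illustrative, not taken from the project:

model = network(regl1=1e-5, regl2=1e-4, weight_init='he_normal',
                dropout=0.25, optimize='adam')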
Example 2
Project: dense_tensor   Author: bstriner   File: utils.py   MIT License
def l1l2(l1_weight=0, l2_weight=0):
    # Version-agnostic wrapper: `keras_2` is a module-level flag set
    # elsewhere in this utils file, True when running under Keras 2.
    if keras_2:
        from keras.regularizers import L1L2
        return L1L2(l1_weight, l2_weight)
    else:
        from keras.regularizers import l1l2
        return l1l2(l1_weight, l2_weight)
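A hypothetical call to this wrapper; the weights are illustrative:

# returns a keras.regularizers.L1L2 instance under Keras 2,
# the legacy l1l2 regularizer under Keras 1
reg = l1l2(l1_weight=1e-5, l2_weight=1e-4)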
Example 3
Project: CAPTCHA-breaking   Author: lllcho   File: test_regularizers.py   MIT License
# Excerpted test method; create_model, X_train, batch_size, etc. are
# defined elsewhere in the test module.
def test_W_reg(self):
        for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
            model = create_model(weight_reg=reg)
            model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
            model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
            model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0) 
Example 4
Project: workspace_2017   Author: nwiizo   File: test_regularizers.py   MIT License
def test_W_reg():
    (X_train, Y_train), (X_test, Y_test), test_ids = get_data()
    for reg in [regularizers.l1(),
                regularizers.l2(),
                regularizers.l1l2()]:
        model = create_model(weight_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        assert len(model.losses) == 1
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
        model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0) 
Example 5
Project: dream2016_dm   Author: lishen   File: dm_resnet.py   GNU General Public License v3.0
# Imports implied by the original file (Keras 1 API).
from keras.regularizers import l1, l2, l1l2

def l1l2_penalty_reg(alpha=1.0, l1_ratio=0.5):
    '''Calculate combined L1 and L2 penalties for a Keras layer.
    This follows the same formulation as the R package glmnet and
    scikit-learn's ElasticNet.
    Args:
        alpha (float): amount of regularization.
        l1_ratio (float): portion of the L1 penalty. Setting it to 1.0
                is equivalent to the Lasso, 0.0 to ridge regression.
    '''
    if l1_ratio == .0:
        return l2(alpha)
    elif l1_ratio == 1.:
        return l1(alpha)
    else:
        return l1l2(l1_ratio * alpha, 1. / 2 * (1 - l1_ratio) * alpha)
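As a worked example of the mapping, alpha=1.0 with l1_ratio=0.5 yields an L1 weight of 0.5 and an L2 weight of 0.25, i.e. the glmnet penalty alpha * (l1_ratio * |w|_1 + (1 - l1_ratio) / 2 * ||w||_2^2):

reg = l1l2_penalty_reg(alpha=1.0, l1_ratio=0.5)  # same as l1l2(0.5, 0.25)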
Example 6
Project: fplbot   Author: dizzy54   File: regression_neural_fpl_2.py   MIT License
# Imports implied by the original file (Keras 1 API).
from keras import backend as K
from keras.constraints import maxnorm
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.regularizers import l1l2

def baseline_model(num_of_features=0):
    # create model
    K.set_learning_phase(1)
    model = Sequential()
    model.add(Dense(num_of_features, input_dim=num_of_features, W_regularizer=l1l2(0.005), init='normal', activation='relu'))
    # model.add(BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Dense(int(num_of_features * 1.5), init='normal', activation='relu', W_constraint=maxnorm(3)))
    # model.add(BatchNormalization())
    model.add(Dense(num_of_features // 4, init='normal', activation='relu', W_constraint=maxnorm(3)))  # // so the layer size stays an int on Python 3
    # model.add(BatchNormalization())
    model.add(Dense(1, init='normal'))
    # compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model 
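Note that l1l2(0.005) passes 0.005 positionally as the L1 weight only, so the L2 weight keeps its default (0.01 in Keras 1). If a symmetric penalty was intended (an assumption, not the project's stated intent), keyword arguments make that explicit:

reg = l1l2(l1=0.005, l2=0.005)  # both weights set explicitly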
Example 7
Project: allen-ai-science-qa   Author: arranger1044   File: utils.py   GNU General Public License v3.0
# Imports implied by the original file (Keras 1 API).
from keras.regularizers import l1, l2, l1l2

def get_regularizer(lambda_l1=None, lambda_l2=None):
    regularizer = None
    if lambda_l1 is None and lambda_l2 is not None:
        regularizer = l2(l=lambda_l2)
    elif lambda_l1 is not None and lambda_l2 is None:
        regularizer = l1(l=lambda_l1)
    elif lambda_l1 is not None and lambda_l2 is not None:
        regularizer = l1l2(l1=lambda_l1, l2=lambda_l2)
    return regularizer 
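A hypothetical usage sketch; the lambda values are illustrative:

reg = get_regularizer(lambda_l1=1e-5, lambda_l2=1e-4)  # combined L1 + L2
reg = get_regularizer(lambda_l2=1e-4)                  # L2 penalty only
reg = get_regularizer()                                # None: no regularization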
Example 8
Project: drider   Author: w4nderlust   File: utils.py   MIT License
# Imports implied by the original file (Keras 1 API).
from keras.regularizers import l1, l2, l1l2

def get_regularizer(lambda_l1=None, lambda_l2=None):
    regularizer = None
    if lambda_l1 is None and lambda_l2 is not None:
        regularizer = l2(l=lambda_l2)
    elif lambda_l1 is not None and lambda_l2 is None:
        regularizer = l1(l=lambda_l1)
    elif lambda_l1 is not None and lambda_l2 is not None:
        regularizer = l1l2(l1=lambda_l1, l2=lambda_l2)
    return regularizer 
Example 9
Project: MODS_ConvNet   Author: santiagolopezg   File: convnet_keras_1.py   MIT License
def network(regl1, regl2, weight_init, dropout, optimize):   
    
    #create network architecture
    model = Sequential()
    
    model.add(Convolution2D(16, 5, 5, input_shape=(1, 256, 192), W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(32, 3, 3, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(64, 3, 3, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(64, 3, 3, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(64, 3, 3, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Flatten())
    model.add(Dense(50, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    
    model.add(Dense(output_dim=1))
    model.add(Activation('sigmoid'))    

    model.compile(optimizer=optimize, loss='binary_crossentropy', metrics=['accuracy'])
    
    return model 
Example 10
Project: MODS_ConvNet   Author: santiagolopezg   File: convnet_keras_2.py   MIT License
def network(regl1, regl2, weight_init, dropout, optimize):   
    
    #create network architecture
    model = Sequential()
    
    model.add(Convolution2D(16, 5, 5, input_shape=(1, 256, 192), W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(32, 3, 3, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(64, 3, 3, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(64, 3, 3, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Convolution2D(64, 3, 3, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))  
    model.add(Dropout(dropout))
    
    model.add(Flatten())
    model.add(Dense(50, W_regularizer=l1l2(l1=regl1, l2=regl2), init=weight_init))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    
    model.add(Dense(output_dim=1))
    model.add(Activation('sigmoid'))    

    model.compile(optimizer=optimize, loss='binary_crossentropy', metrics=['accuracy'])
    
    return model