Python keras.constraints.maxnorm() Examples

The following are 13 code examples of keras.constraints.maxnorm(), collected from open-source projects. Each example notes its original project, source file, and license. You may also want to check out all available functions and classes of the module keras.constraints.
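For orientation: maxnorm(m) returns a constraint object that, after each gradient update, rescales any weight vector whose L2 norm exceeds m back down to norm m. Here is a minimal usage sketch (in Keras 2 the canonical spelling is keras.constraints.max_norm; the lowercase maxnorm persists as a legacy alias in many releases):

from keras.models import Sequential
from keras.layers import Dense
from keras.constraints import maxnorm

model = Sequential()
# Cap the L2 norm of each incoming weight vector of this layer at 3.
model.add(Dense(64, input_dim=20, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')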
Example #1
Source Project: CAPTCHA-breaking   Author: lllcho   File: test_constraints.py    License: MIT License
def test_maxnorm(self):
        from keras.constraints import maxnorm

        # self.some_values and self.example_array are fixtures set up by the test class.
        for m in self.some_values:
            norm_instance = maxnorm(m)
            normed = norm_instance(self.example_array)
            assert np.all(normed.eval() < m)

        # a more explicit example
        norm_instance = maxnorm(2.0)
        x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
        x_normed_target = np.array([[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0], [2./np.sqrt(3), 2./np.sqrt(3), 2./np.sqrt(3)]]).T
        x_normed_actual = norm_instance(x).eval()
        assert_allclose(x_normed_actual, x_normed_target) 
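The projection this test checks can be written directly in NumPy. This sketch mirrors Keras's implementation (eps guards against division by zero); axis 0 is the axis Keras constrains for Dense kernels:

import numpy as np

def maxnorm_np(w, max_value, eps=1e-7):
    # Column-wise L2 norms of the weight matrix.
    norms = np.sqrt((w ** 2).sum(axis=0, keepdims=True))
    desired = np.clip(norms, 0, max_value)
    return w * desired / (eps + norms)

x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
print(maxnorm_np(x, 2.0))  # reproduces x_normed_target from the test above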
Example #2
Source Project: deep   Author: 54chen   File: test7.py    License: Apache License 2.0
def create_model(neurons=1):
	# create model
	model = Sequential()
	model.add(Dense(neurons, input_dim=8, init='uniform', activation='softplus', W_constraint=maxnorm(4)))
	model.add(Dropout(0.1))
	model.add(Dense(1, init='uniform', activation='sigmoid'))
	# Compile model
	model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
	return model
Example #3
Source Project: deep   Author: 54chen   File: test6.py    License: Apache License 2.0
def create_model(dropout_rate=0.0, weight_constraint=0):
	# create model
	model = Sequential()
	model.add(Dense(12, input_dim=8, init='uniform', activation='softplus', W_constraint=maxnorm(weight_constraint)))
	model.add(Dropout(dropout_rate))
	model.add(Dense(1, init='uniform', activation='sigmoid'))
	# Compile model
	model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
	return model
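Both create_model variants above follow the scikit-learn wrapper pattern: the build function is handed to KerasClassifier so GridSearchCV can sweep its keyword arguments. A hedged sketch of how the second variant might be driven, assuming a Keras version matching the snippet's API (the data and grid values are illustrative stand-ins, not the original project's):

import numpy as np
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

np.random.seed(7)  # fix random seed for reproducibility
X = np.random.rand(100, 8)            # stand-in features
Y = np.random.randint(2, size=100)    # stand-in binary labels

model = KerasClassifier(build_fn=create_model, epochs=10, batch_size=10, verbose=0)
param_grid = {'dropout_rate': [0.0, 0.1, 0.2],
              'weight_constraint': [1, 2, 3, 4, 5]}
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)
grid_result = grid.fit(X, Y)
print('Best: %f using %s' % (grid_result.best_score_, grid_result.best_params_))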
Example #4
Source Project: RMDL   Author: kk7nc   File: BuildModel.py    License: GNU General Public License v3.0
def Build_Model_CNN_Image(shape, nclasses, sparse_categorical,
                          min_hidden_layer_cnn, max_hidden_layer_cnn, min_nodes_cnn,
                          max_nodes_cnn, random_optimizor, dropout):
    """""
    def Image_model_CNN(num_classes,shape):
    num_classes is number of classes,
    shape is (w,h,p)
    """""

    model = Sequential()
    values = list(range(min_nodes_cnn, max_nodes_cnn))
    Layers = list(range(min_hidden_layer_cnn, max_hidden_layer_cnn))
    Layer = random.choice(Layers)
    Filter = random.choice(values)
    model.add(Conv2D(Filter, (3, 3), padding='same', input_shape=shape))
    model.add(Activation('relu'))
    model.add(Conv2D(Filter, (3, 3)))
    model.add(Activation('relu'))

    for i in range(0, Layer):
        Filter = random.choice(values)
        model.add(Conv2D(Filter, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(dropout))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(nclasses, activation='softmax', kernel_constraint=maxnorm(3)))
    model_tmp = model
    if sparse_categorical:
        model.compile(loss='sparse_categorical_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy'])
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizors(random_optimizor),
                      metrics=['accuracy'])

    return model, model_tmp
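An illustrative call (the argument values are hypothetical; optimizors() and the layer imports are defined elsewhere in RMDL's BuildModel.py):

model, model_tmp = Build_Model_CNN_Image(shape=(32, 32, 3), nclasses=10,
                                         sparse_categorical=True,
                                         min_hidden_layer_cnn=1, max_hidden_layer_cnn=3,
                                         min_nodes_cnn=64, max_nodes_cnn=128,
                                         random_optimizor=True, dropout=0.25)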
Example #5
Source Project: DeepIV   Author: jhartford   File: architectures.py    License: MIT License
def feed_forward_net(input, output, hidden_layers=[64, 64], activations='relu',
                     dropout_rate=0., l2=0., constrain_norm=False):
    '''
    Helper function for building a Keras feed forward network.

    input:  Keras Input object appropriate for the data. e.g. input=Input(shape=(20,))
    output: Function representing final layer for the network that maps from the last
            hidden layer to output.
            e.g. output = Dense(10, activation='softmax') for 10-class
            classification, or output = Dense(1, activation='linear') for
            regression.
    '''
    state = input
    if isinstance(activations, str):
        activations = [activations] * len(hidden_layers)
    
    for h, a in zip(hidden_layers, activations):
        if l2 > 0.:
            w_reg = keras.regularizers.l2(l2)
        else:
            w_reg = None
        const = maxnorm(2) if constrain_norm else None
        state = Dense(h, activation=a, kernel_regularizer=w_reg, kernel_constraint=const)(state)
        if dropout_rate > 0.:
            state = Dropout(dropout_rate)(state)
    return output(state) 
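A minimal sketch of the calling convention the docstring describes (shapes and layer sizes are illustrative):

from keras.layers import Input, Dense
from keras.models import Model

x = Input(shape=(20,))
y = feed_forward_net(x, Dense(1, activation='linear'),
                     hidden_layers=[64, 64], dropout_rate=0.1, constrain_norm=True)
model = Model(inputs=x, outputs=y)
model.compile(loss='mse', optimizer='adam')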
Example #6
Source Project: DeepIV   Author: jhartford   File: architectures.py    License: MIT License
def convnet(input, output, dropout_rate=0., input_shape=(1, 28, 28), batch_size=100,
            l2_rate=0.001, nb_epoch=12, img_rows=28, img_cols=28, nb_filters=64,
            pool_size=(2, 2), kernel_size=(3, 3), activations='relu', constrain_norm=False):
    '''
    Helper function for building a Keras convolutional network.

    input:  Keras Input object appropriate for the data. e.g. input=Input(shape=(20,))
    output: Function representing final layer for the network that maps from the last
            hidden layer to output.
            e.g. output = Dense(10, activation='softmax') for 10-class
            classification, or output = Dense(1, activation='linear') for
            regression.
    '''
    const = maxnorm(2) if constrain_norm else None

    state = Convolution2D(nb_filters, kernel_size, padding='valid',
                          input_shape=input_shape, activation=activations,
                          kernel_regularizer=l2(l2_rate), kernel_constraint=const)(input)

    state = Convolution2D(nb_filters, kernel_size,
                          activation=activations, kernel_regularizer=l2(l2_rate),
                          kernel_constraint=const)(state)

    state = MaxPooling2D(pool_size=pool_size)(state)

    state = Flatten()(state)

    if dropout_rate > 0.:
        state = Dropout(dropout_rate)(state)
    state = Dense(128, activation=activations, kernel_regularizer=l2(l2_rate), kernel_constraint=const)(state)

    if dropout_rate > 0.:
        state = Dropout(dropout_rate)(state)
    return output(state) 
Example #7
Source Project: crnn   Author: ultimate010   File: sst1_cnn_rnn.py    License: MIT License
def build_model():
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding  = Embedding(max_features, embedding_dims,
                  weights=[np.matrix(W)], input_length=maxlen,
                  name='embedding')(main_input)

    embedding = Dropout(0.50)(embedding)

    conv4 = Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_length=2,
                             name='maxConv4')(conv4)

    conv5 = Convolution1D(nb_filter=nb_filter,
                          filter_length=5,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_length=2,
                            name='maxConv5')(conv5)

    x = merge([maxConv4, maxConv5], mode='concat')

    x = Dropout(0.15)(x)

    x = RNN(rnn_output_size)(x)

    x = Dense(hidden_dims, activation='relu', init='he_normal',
              W_constraint=maxnorm(3), b_constraint=maxnorm(3),
              name='mlp')(x)

    x = Dropout(0.10, name='drop')(x)

    output = Dense(nb_classes, init='he_normal',
                   activation='softmax', name='output')(x)

    model = Model(input=main_input, output=output)
    model.compile(loss={'output':'categorical_crossentropy'},
                optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                metrics=["accuracy"])
    return model 
Example #8
Source Project: crnn   Author: ultimate010   File: sst2_cnn_rnn_kera1.py    License: MIT License
def build_model():
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding  = Embedding(max_features, embedding_dims,
                  weights=[np.matrix(W)], input_length=maxlen,
                  name='embedding')(main_input)

    embedding = Dropout(0.50)(embedding)

    conv4 = Convolution1D(nb_filter=nb_filter,
                          filter_length=4,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_length=2,
                             name='maxConv4')(conv4)

    conv5 = Convolution1D(nb_filter=nb_filter,
                          filter_length=5,
                          border_mode='valid',
                          activation='relu',
                          subsample_length=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_length=2,
                            name='maxConv5')(conv5)

    x = merge([maxConv4, maxConv5], mode='concat')

    x = Dropout(0.15)(x)

    x = RNN(rnn_output_size)(x)

    x = Dense(hidden_dims, activation='relu', init='he_normal',
              W_constraint=maxnorm(3), b_constraint=maxnorm(3),
              name='mlp')(x)

    x = Dropout(0.10, name='drop')(x)

    output = Dense(1, init='he_normal',
                   activation='sigmoid', name='output')(x)

    model = Model(input=main_input, output=output)
    model.compile(loss={'output':'binary_crossentropy'},
                optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                metrics=["accuracy"])
    return model 
Example #9
Source Project: crnn   Author: ultimate010   File: mr_cnn_rnn.py    License: MIT License
def build_model():
        # NOTE: `i` and `folds` are globals set by the enclosing
        # cross-validation loop in the original script.
        print('Build model...%d of %d' % (i + 1, folds))
        main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
        embedding  = Embedding(max_features, embedding_dims,
                      weights=[np.matrix(W)], input_length=maxlen,
                      name='embedding')(main_input)

        embedding = Dropout(0.50)(embedding)


        conv4 = Convolution1D(nb_filter=nb_filter,
                              filter_length=4,
                              border_mode='valid',
                              activation='relu',
                              subsample_length=1,
                              name='conv4')(embedding)
        maxConv4 = MaxPooling1D(pool_length=2,
                                 name='maxConv4')(conv4)

        conv5 = Convolution1D(nb_filter=nb_filter,
                              filter_length=5,
                              border_mode='valid',
                              activation='relu',
                              subsample_length=1,
                              name='conv5')(embedding)
        maxConv5 = MaxPooling1D(pool_length=2,
                                name='maxConv5')(conv5)

        x = merge([maxConv4, maxConv5], mode='concat')

        x = Dropout(0.15)(x)

        x = RNN(rnn_output_size)(x)

        x = Dense(hidden_dims, activation='relu', init='he_normal',
                  W_constraint=maxnorm(3), b_constraint=maxnorm(3),
                  name='mlp')(x)

        x = Dropout(0.10, name='drop')(x)

        output = Dense(1, init='he_normal',
                       activation='sigmoid', name='output')(x)

        model = Model(input=main_input, output=output)
        model.compile(loss={'output':'binary_crossentropy'},
                    optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                    metrics=["accuracy"])
        return model 
Example #10
Source Project: crnn   Author: ultimate010   File: sst2_cnn_rnn.py    License: MIT License
def build_model():
    main_input = Input(shape=(maxlen, ), dtype='int32', name='main_input')
    embedding  = Embedding(max_features, embedding_dims,
                  weights=[np.matrix(W)], input_length=maxlen,
                  name='embedding')(main_input)

    embedding = Dropout(0.50)(embedding)

    conv4 = Conv1D(filters=nb_filter,
                          kernel_size=4,
                          padding='valid',
                          activation='relu',
                          strides=1,
                          name='conv4')(embedding)
    maxConv4 = MaxPooling1D(pool_size=2,
                             name='maxConv4')(conv4)

    conv5 = Conv1D(filters=nb_filter,
                          kernel_size=5,
                          padding='valid',
                          activation='relu',
                          strides=1,
                          name='conv5')(embedding)
    maxConv5 = MaxPooling1D(pool_size=2,
                            name='maxConv5')(conv5)

    # x = merge([maxConv4, maxConv5], mode='concat')
    x = keras.layers.concatenate([maxConv4, maxConv5])

    x = Dropout(0.15)(x)

    x = RNN(rnn_output_size)(x)

    x = Dense(hidden_dims, activation='relu', kernel_initializer='he_normal',
              kernel_constraint=maxnorm(3), bias_constraint=maxnorm(3),
              name='mlp')(x)

    x = Dropout(0.10, name='drop')(x)

    output = Dense(1, kernel_initializer='he_normal',
                   activation='sigmoid', name='output')(x)

    model = Model(inputs=main_input, outputs=output)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adadelta(lr=0.95, epsilon=1e-06),
                  # optimizer=Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0),
                  # optimizer=Adagrad(lr=0.01, epsilon=1e-08, decay=1e-4),
                  metrics=["accuracy"])
    return model 
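Examples #7 through #10 share the same module-level hyperparameters. A hedged sketch of stand-in values that make the Keras 2 variant above self-contained (the original scripts load pretrained word vectors into W; everything below is illustrative only):

import numpy as np
import keras
from keras.layers import Input, Embedding, Dropout, Conv1D, MaxPooling1D, Dense, LSTM
from keras.models import Model
from keras.optimizers import Adadelta
from keras.constraints import maxnorm

maxlen, max_features, embedding_dims = 56, 20000, 300
nb_filter, hidden_dims, rnn_output_size = 100, 100, 100
RNN = LSTM  # the recurrent unit the snippet refers to as RNN
W = np.random.uniform(-0.25, 0.25, (max_features, embedding_dims))  # stand-in vectors

model = build_model()
model.summary()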
Example #11
Source Project: costar_plan   Author: jhu-lcsr   File: planner.py    License: Apache License 2.0
def AddDense(x, size, activation, dropout_rate, output=False, momentum=MOMENTUM,
    constraint=3,
    bn=True,
    kr=0.,
    ar=0.,
    perm_drop=False):
    '''
    Add a single dense block with batchnorm and activation.

    Parameters:
    -----------
    x: input tensor
    size: number of dense neurons
    activation: activation fn to use
    dropout_rate: dropout to use after activation

    Returns:
    --------
    x: output tensor
    '''

    # Promote float regularizer weights to Keras regularizer objects; a
    # non-float kr/ar is assumed to already be a regularizer (or None).
    if isinstance(kr, float):
        kr = keras.regularizers.l2(kr) if kr > 0 else None

    if isinstance(ar, float):
        ar = keras.regularizers.l1(ar) if ar > 0 else None

    if constraint is not None:
        x = Dense(size, kernel_constraint=maxnorm(constraint),
                  kernel_regularizer=kr,
                  activity_regularizer=ar,)(x)
    else:
        x = Dense(size,
                  kernel_regularizer=kr,
                  activity_regularizer=ar,)(x)

    if not output and bn:
        #x = BatchNormalization(momentum=momentum)(x)
        x = InstanceNormalization()(x)

    if activation == "lrelu":
        x = LeakyReLU(alpha=0.2)(x)
    else:
        x = Activation(activation)(x)
    if dropout_rate > 0:
        if perm_drop:
            x = PermanentDropout(dropout_rate)(x)
        else:
            x = Dropout(dropout_rate)(x)
    return x 
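A usage sketch (shapes are illustrative; bn=False sidesteps the project-specific InstanceNormalization layer):

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(64,))
h = AddDense(inputs, 128, 'relu', dropout_rate=0.2, bn=False)
h = AddDense(h, 64, 'lrelu', dropout_rate=0., bn=False)
model = Model(inputs, h)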
Example #12
Source Project: textfool   Author: bogdan-kulynych   File: model.py    License: MIT License
def build_model(max_length=1000,
                nb_filters=64,
                kernel_size=3,
                pool_size=2,
                regularization=0.01,
                weight_constraint=2.,
                dropout_prob=0.4,
                clear_session=True):
    if clear_session:
        K.clear_session()

    model = Sequential()
    model.add(Embedding(
        embeddings.shape[0],
        embeddings.shape[1],
        input_length=max_length,
        trainable=False,
        weights=[embeddings]))

    model.add(Conv1D(nb_filters, kernel_size, activation='relu'))
    model.add(Conv1D(nb_filters, kernel_size, activation='relu'))
    model.add(MaxPooling1D(pool_size))

    model.add(Dropout(dropout_prob))

    model.add(Conv1D(nb_filters * 2, kernel_size, activation='relu'))
    model.add(Conv1D(nb_filters * 2, kernel_size, activation='relu'))
    model.add(MaxPooling1D(pool_size))

    model.add(Dropout(dropout_prob))

    model.add(GlobalAveragePooling1D())
    model.add(Dense(1,
        kernel_regularizer=l2(regularization),
        kernel_constraint=maxnorm(weight_constraint),
        activation='sigmoid'))

    model.compile(
        loss='binary_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'])

    return model 
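build_model reads a module-level embeddings matrix; a hedged sketch that stubs it with random vectors (this has to run in the same module as build_model for the global to be visible):

import numpy as np

# Stand-in for the pretrained embedding matrix the module normally loads.
embeddings = np.random.rand(5000, 100).astype('float32')

model = build_model(max_length=500, weight_constraint=2.)
model.summary()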
Example #13
Source Project: datastories-semeval2017-task4   Author: cbaziotis   File: nn_models.py    License: MIT License
def build_attention_RNN(embeddings, classes, max_length, unit=LSTM, cells=64,
                        layers=1, **kwargs):
    # parameters
    bi = kwargs.get("bidirectional", False)
    noise = kwargs.get("noise", 0.)
    dropout_words = kwargs.get("dropout_words", 0)
    dropout_rnn = kwargs.get("dropout_rnn", 0)
    dropout_rnn_U = kwargs.get("dropout_rnn_U", 0)
    dropout_attention = kwargs.get("dropout_attention", 0)
    dropout_final = kwargs.get("dropout_final", 0)
    attention = kwargs.get("attention", None)
    final_layer = kwargs.get("final_layer", False)
    clipnorm = kwargs.get("clipnorm", 1)
    loss_l2 = kwargs.get("loss_l2", 0.)
    lr = kwargs.get("lr", 0.001)

    model = Sequential()
    model.add(embeddings_layer(max_length=max_length, embeddings=embeddings,
                               trainable=False, masking=True, scale=False,
                               normalize=False))

    if noise > 0:
        model.add(GaussianNoise(noise))
    if dropout_words > 0:
        model.add(Dropout(dropout_words))

    for i in range(layers):
        # return full sequences for all but the last RNN layer,
        # or whenever an attention layer follows and needs them
        rs = (layers > 1 and i < layers - 1) or attention
        model.add(get_RNN(unit, cells, bi, return_sequences=rs,
                          dropout_U=dropout_rnn_U))
        if dropout_rnn > 0:
            model.add(Dropout(dropout_rnn))

    if attention == "memory":
        model.add(AttentionWithContext())
        if dropout_attention > 0:
            model.add(Dropout(dropout_attention))
    elif attention == "simple":
        model.add(Attention())
        if dropout_attention > 0:
            model.add(Dropout(dropout_attention))

    if final_layer:
        model.add(MaxoutDense(100, W_constraint=maxnorm(2)))
        # model.add(Highway())
        if dropout_final > 0:
            model.add(Dropout(dropout_final))

    model.add(Dense(classes, activity_regularizer=l2(loss_l2)))
    model.add(Activation('softmax'))

    model.compile(optimizer=Adam(clipnorm=clipnorm, lr=lr),
                  loss='categorical_crossentropy')
    return model
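An illustrative call, following the keyword arguments the function reads (word_embeddings stands in for the project's pretrained matrix, and the hyperparameter values are hypothetical):

from keras.layers import LSTM

nn_model = build_attention_RNN(word_embeddings, classes=3, max_length=50,
                               unit=LSTM, layers=2, cells=150,
                               bidirectional=True, attention='simple',
                               noise=0.3, dropout_words=0.3, dropout_rnn=0.3,
                               dropout_rnn_U=0.3, dropout_attention=0.5,
                               clipnorm=1, lr=0.001, loss_l2=0.0001)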