Python keras.regularizers.L1L2() Examples

The following code examples show how to use keras.regularizers.L1L2(). They are drawn from open source Python projects.
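Before diving into the project code, here is a minimal, self-contained sketch of the typical pattern; the layer sizes and penalty strengths are illustrative only and do not come from any of the projects below:

from keras.models import Sequential
from keras.layers import Dense
from keras.regularizers import L1L2

# L1L2 adds l1 * sum(|w|) + l2 * sum(w**2) to the training loss.
reg = L1L2(l1=1e-5, l2=1e-4)

model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(20,),
                kernel_regularizer=reg))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')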

Example 1
Project: nlp_toolkit   Author: stevewyl   File: embedding.py    MIT License
def Token_Embedding(x, input_dim, output_dim, embed_weights=None,
                    mask_zero=False, input_length=None, dropout_rate=0,
                    embed_l2=1E-6, name='', time_distributed=False, **kwargs):
    """
    Basic token embedding layer, also included some dropout layer.
    """
    embed_reg = L1L2(l2=embed_l2) if embed_l2 != 0 else None
    embed_layer = Embedding(input_dim=input_dim,
                            output_dim=output_dim,
                            weights=embed_weights,
                            mask_zero=mask_zero,
                            input_length=input_length,
                            embeddings_regularizer=embed_reg,
                            name=name)
    if time_distributed:
        embed = TimeDistributed(embed_layer)(x)
    else:
        embed = embed_layer(x)
    # SpatialDropout1D drops entire embedding channels, unlike standard
    # embedding dropout, which drops all channels for entire words; many
    # datasets contain so few words that losing even one can alter the
    # emotion completely.
    if dropout_rate != 0:
        embed = SpatialDropout1D(dropout_rate)(embed)
    return embed 
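A hypothetical call, assuming the module's own imports (Embedding, TimeDistributed, SpatialDropout1D and Input from keras.layers, L1L2 from keras.regularizers); the vocabulary size and sequence length below are made up for illustration:

from keras.layers import Input

tokens = Input(shape=(100,), dtype='int32')  # padded token ids
embed = Token_Embedding(tokens, input_dim=20000, output_dim=128,
                        mask_zero=True, input_length=100,
                        dropout_rate=0.25, name='token_embed')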
Example 2
Project: deepcpg   Author: cangermueller   File: dna.py    MIT License
def __call__(self, inputs):
        x = inputs[0]

        kernel_regularizer = kr.L1L2(self.l1_decay, self.l2_decay)
        x = kl.Conv1D(128, 11,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.MaxPooling1D(4)(x)

        x = kl.Flatten()(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Dense(self.nb_hidden,
                     kernel_initializer=self.init,
                     kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x) 
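The snippet constructs a fresh kr.L1L2 instance for each layer. A regularizer is simply a callable that maps a weight tensor to a scalar penalty, which a quick sketch makes concrete (the values are illustrative):

from keras import backend as K
from keras.regularizers import L1L2

reg = L1L2(l1=0.0, l2=0.01)
w = K.variable([[1.0, -2.0]])
print(K.eval(reg(w)))  # 0.01 * (1**2 + (-2)**2) = 0.05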
Example 3
Project: deepcpg   Author: cangermueller   File: dna.py    MIT License
def __call__(self, inputs):
        x = inputs[0]

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(128, 11,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.MaxPooling1D(4)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(256, 7,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.MaxPooling1D(4)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        gru = kl.recurrent.GRU(256, kernel_regularizer=kernel_regularizer)
        x = kl.Bidirectional(gru)(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x) 
Example 4
Project: deepcpg   Author: cangermueller   File: cpg.py    MIT License
def __call__(self, inputs):
        x = self._merge_inputs(inputs)

        shape = getattr(x, '_keras_shape')
        replicate_model = self._replicate_model(kl.Input(shape=shape[2:]))
        x = kl.TimeDistributed(replicate_model)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Bidirectional(kl.GRU(128, kernel_regularizer=kernel_regularizer,
                                    return_sequences=True),
                             merge_mode='concat')(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        gru = kl.GRU(256, kernel_regularizer=kernel_regularizer)
        x = kl.Bidirectional(gru)(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x) 
Example 5
Project: dense_tensor   Author: bstriner   File: utils.py    MIT License
def l1l2(l1_weight=0, l2_weight=0):
    if keras_2:
        from keras.regularizers import L1L2
        return L1L2(l1_weight, l2_weight)
    else:
        from keras.regularizers import l1l2
        return l1l2(l1_weight, l2_weight) 
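This helper papers over the API rename between Keras 1 (the lowercase factory l1l2) and Keras 2 (the L1L2 class). Either branch returns an object that can be passed anywhere a regularizer is expected; the weights below are illustrative:

from keras.layers import Dense

reg = l1l2(l1_weight=0.01, l2_weight=0.001)
dense = Dense(32, kernel_regularizer=reg)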
Example 6
Project: sisy   Author: qorrect   File: build.py    Apache License 2.0
def _get_regularizer(regularizer_parameter):
    if regularizer_parameter is None\
            or len(regularizer_parameter) == 0\
            or all(value is None for _, value in regularizer_parameter.items()):
        return None
    l1 = regularizer_parameter.get('l1', 0.)
    l2 = regularizer_parameter.get('l2', 0.)
    return L1L2(l1, l2) 
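A few hypothetical inputs illustrate the contract: missing, empty, or all-None parameter dicts yield no regularizer, and absent keys default to 0:

_get_regularizer(None)                      # -> None
_get_regularizer({})                        # -> None
_get_regularizer({'l1': None, 'l2': None})  # -> None
_get_regularizer({'l1': 0.01})              # -> L1L2(l1=0.01, l2=0.0)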
Example 7
Project: applications   Author: geomstats   File: core_test.py    MIT License
def test_dense():
    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 4, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(None, None, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 4, 5, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3,
                       'kernel_regularizer': regularizers.l2(0.01),
                       'bias_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.L1L2(l1=0.01, l2=0.01),
                       'kernel_constraint': constraints.MaxNorm(1),
                       'bias_constraint': constraints.max_norm(1)},
               input_shape=(3, 2))

    layer = layers.Dense(3,
                         kernel_regularizer=regularizers.l1(0.01),
                         bias_regularizer='l1')
    layer.build((None, 4))
    assert len(layer.losses) == 2 
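The final assertion generalizes: each weight regularizer contributes one entry to layer.losses, and an L1L2 instance counts as a single combined penalty. A hedged sketch in the same style:

layer = layers.Dense(3, kernel_regularizer=regularizers.L1L2(l1=0.01, l2=0.01))
layer.build((None, 4))
assert len(layer.losses) == 1  # one combined L1+L2 penalty on the kernel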
Example 8
Project: Keras-TextClassification   Author: yongzhuo   File: attention_self.py    MIT License
def build(self, input_shape):
        # W, K and V
        self.kernel = self.add_weight(name='WKV',
                                      shape=(3, input_shape[2], self.output_dim),
                                      initializer='uniform',
                                      regularizer=L1L2(0.0000032),
                                      trainable=True)
        super().build(input_shape) 
Example 9
Project: TranskribusDU   Author: Transkribus   File: taggerTrainKeras3.py    BSD 3-Clause "New" or "Revised" License
def training(self, traindata):
        """
        training
        """
        train_X, _ = traindata  # self.load_data(self.lTrain)

        self.initTransformeur()

        fX = [item for sublist in train_X for item in sublist]
        self.node_transformer.fit(fX)

        lX, lY = self.prepareTensor(traindata)
        # print(lX.shape)
        # print(lY.shape)

        model = Sequential()
        reg = L1L2(l1=0.001, l2=0.0)

        model.add(Masking(mask_value=0., input_shape=(self.max_sentence_len, self.max_features)))
        model.add(Bidirectional(LSTM(self.hiddenSize, return_sequences=True, bias_regularizer=reg)))
        model.add(Dropout(0.5))
        model.add(TimeDistributed(Dense(self.nbClasses, activation='softmax')))
        # keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['categorical_accuracy'])
        print(model.summary())
        _ = model.fit(lX, lY, epochs=self.nbEpochs, batch_size=self.batch_size, verbose=1,
                      validation_split=0.33, shuffle=True)

        del lX, lY

        auxdata = self.max_features, self.max_sentence_len, self.nbClasses, self.tag_vector, self.node_transformer

        return model, auxdata
Example 10
Project: TranskribusDU   Author: Transkribus   File: taggerTrainKeras2.py    BSD 3-Clause "New" or "Revised" License
def training(self, traindata):
        """
        training
        """
        train_X, _ = traindata  # self.load_data(self.lTrain)

        self.initTransformeur()

        fX = [item for sublist in train_X for item in sublist]
        self.node_transformer.fit(fX)

        lX, lY = self.prepareTensor(traindata)
        # print(lX.shape)
        # print(lY.shape)

        model = Sequential()
        reg = L1L2(l1=0.001, l2=0.0)

        model.add(Masking(mask_value=0., input_shape=(self.max_sentence_len, self.max_features)))
        model.add(Bidirectional(LSTM(self.hiddenSize, return_sequences=True, bias_regularizer=reg)))
        model.add(Dropout(0.5))
        model.add(TimeDistributed(Dense(self.nbClasses, activation='softmax')))
        # keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['categorical_accuracy'])
        print(model.summary())
        _ = model.fit(lX, lY, epochs=self.nbEpochs, batch_size=self.batch_size, verbose=1,
                      validation_split=0.1, shuffle=True)

        del lX, lY

        auxdata = self.max_features, self.max_sentence_len, self.nbClasses, self.tag_vector, self.node_transformer

        return model, auxdata
Example 11
Project: deepcpg   Author: cangermueller   File: dna.py    MIT License
def __call__(self, inputs):
        x = inputs[0]

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(128, 11,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.MaxPooling1D(4)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(256, 3,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.MaxPooling1D(2)(x)

        x = kl.Flatten()(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Dense(self.nb_hidden,
                     kernel_initializer=self.init,
                     kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x) 
Example 12
Project: deepcpg   Author: cangermueller   File: dna.py    MIT License
def __call__(self, inputs):
        x = inputs[0]

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(128, 11,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.MaxPooling1D(4)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(256, 3,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.MaxPooling1D(2)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(512, 3,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.MaxPooling1D(2)(x)

        x = kl.Flatten()(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Dense(self.nb_hidden,
                     kernel_initializer=self.init,
                     kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu')(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x) 
Example 13
Project: deepcpg   Author: cangermueller   File: dna.py    MIT License
def __call__(self, inputs):
        x = inputs[0]

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(128, 11,
                      name='conv1',
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.BatchNormalization(name='bn1')(x)
        x = kl.Activation('relu', name='act1')(x)
        x = kl.MaxPooling1D(2, name='pool1')(x)

        # 124
        x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
        x = self._res_unit(x, [32, 32, 128], stage=1, block=2)
        x = self._res_unit(x, [32, 32, 128], stage=1, block=3)

        # 64
        x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
        x = self._res_unit(x, [64, 64, 256], stage=2, block=2)
        x = self._res_unit(x, [64, 64, 256], stage=2, block=3)

        # 32
        x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
        x = self._res_unit(x, [128, 128, 512], stage=3, block=2)
        x = self._res_unit(x, [128, 128, 512], stage=3, block=3)

        # 16
        x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

        x = kl.GlobalAveragePooling1D()(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x) 
Example 14
Project: deepcpg   Author: cangermueller   File: dna.py    MIT License
def _res_unit(self, inputs, nb_filter, size=3, stride=1, stage=1, block=1):

        name = '%02d-%02d/' % (stage, block)
        id_name = '%sid_' % (name)
        res_name = '%sres_' % (name)

        # Residual branch
        x = kl.BatchNormalization(name=res_name + 'bn1')(inputs)
        x = kl.Activation('relu', name=res_name + 'act1')(x)
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(nb_filter, size,
                      name=res_name + 'conv1',
                      border_mode='same',
                      subsample_length=stride,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)

        x = kl.BatchNormalization(name=res_name + 'bn2')(x)
        x = kl.Activation('relu', name=res_name + 'act2')(x)
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(nb_filter, size,
                      name=res_name + 'conv2',
                      border_mode='same',
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)

        # Identity branch
        if nb_filter != inputs._keras_shape[-1] or stride > 1:
            kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
            identity = kl.Conv1D(nb_filter, size,
                                 name=id_name + 'conv1',
                                 border_mode='same',
                                 subsample_length=stride,
                                 kernel_initializer=self.init,
                                 kernel_regularizer=kernel_regularizer)(inputs)
        else:
            identity = inputs

        x = kl.merge([identity, x], name=name + 'merge', mode='sum')

        return x 
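This block mixes Keras 1 kwargs (border_mode, subsample_length, the legacy kl.merge) with Keras 2 ones (kernel_initializer, kernel_regularizer), so it targets a transitional Keras release. If you port it to current Keras 2 (an assumption about your target version, not part of the project), border_mode becomes padding, subsample_length becomes strides, and the sum-merge becomes the functional add:

# Keras 2 replacement for the legacy sum-merge at the end of _res_unit:
x = kl.add([identity, x], name=name + 'merge')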
Example 15
Project: deepcpg   Author: cangermueller   File: dna.py    MIT License
def __call__(self, inputs):
        x = inputs[0]

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(128, 11,
                      name='conv1',
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.BatchNormalization(name='bn1')(x)
        x = kl.Activation('relu', name='act1')(x)
        x = kl.MaxPooling1D(2, name='pool1')(x)

        # 124
        x = self._res_unit(x, 128, stage=1, block=1, stride=2)
        x = self._res_unit(x, 128, stage=1, block=2)

        # 64
        x = self._res_unit(x, 256, stage=2, block=1, stride=2)

        # 32
        x = self._res_unit(x, 256, stage=3, block=1, stride=2)

        # 32
        x = self._res_unit(x, 512, stage=4, block=1, stride=2)

        x = kl.GlobalAveragePooling1D()(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x) 
Example 16
Project: deepcpg   Author: cangermueller   File: dna.py    MIT License
def __call__(self, inputs):
        x = inputs[0]

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(128, 11,
                      name='conv1',
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu', name='act1')(x)
        x = kl.MaxPooling1D(2, name='pool1')(x)

        # 124
        x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
        x = self._res_unit(x, [32, 32, 128], atrous=2, stage=1, block=2)
        x = self._res_unit(x, [32, 32, 128], atrous=4, stage=1, block=3)

        # 64
        x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
        x = self._res_unit(x, [64, 64, 256], atrous=2, stage=2, block=2)
        x = self._res_unit(x, [64, 64, 256], atrous=4, stage=2, block=3)

        # 32
        x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
        x = self._res_unit(x, [128, 128, 512], atrous=2, stage=3, block=2)
        x = self._res_unit(x, [128, 128, 512], atrous=4, stage=3, block=3)

        # 16
        x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

        x = kl.GlobalAveragePooling1D()(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x) 
Example 17
Project: deepcpg   Author: cangermueller   File: cpg.py    MIT License
def _replicate_model(self, input):
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Dense(256, kernel_initializer=self.init,
                     kernel_regularizer=kernel_regularizer)(input)
        x = kl.Activation(self.act_replicate)(x)

        return km.Model(input, x) 
Example 18
Project: deepcpg   Author: cangermueller   File: cpg.py    MIT License
def __call__(self, inputs):
        x = self._merge_inputs(inputs)

        shape = getattr(x, '_keras_shape')
        replicate_model = self._replicate_model(kl.Input(shape=shape[2:]))
        x = kl.TimeDistributed(replicate_model)(x)

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        gru = kl.GRU(256, kernel_regularizer=kernel_regularizer)
        x = kl.Bidirectional(gru)(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x) 
Example 19
Project: deepcpg   Author: cangermueller   File: joint.py    MIT License
def __call__(self, models):
        layers = []
        for layer in range(self.nb_layer):
            kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
            layers.append(kl.Dense(self.nb_hidden,
                                   kernel_initializer=self.init,
                                   kernel_regularizer=kernel_regularizer))
            layers.append(kl.Activation('relu'))
            layers.append(kl.Dropout(self.dropout))

        return self._build(models, layers) 
Example 20
Project: DeepScope   Author: miguelesteras   File: gan_test_bugs.py    MIT License
def filt(inobj, nfilt, k, BN=False, act=True, prelu=False, dilation=1):
    # A fresh L1+L2 weight penalty for each convolution in this block.
    reg = lambda: L1L2(l1=1e-7, l2=1e-7)

    # Keras 2 kwargs (the original mixed in the Keras 1 names
    # border_mode= and init=, which only work via legacy shims).
    conv = Convolution2D(nfilt, k, strides=(1, 1), activation=None, padding='same',
                         kernel_initializer=he_normal(), dilation_rate=dilation,
                         kernel_regularizer=reg())(inobj)

    if BN:
        conv = BatchNormalization(axis=-1)(conv)
    if act and prelu:
        conv = PReLU(shared_axes=[1, 2])(conv)
    if act and not prelu:
        conv = Activation('relu')(conv)

    return conv
Example 21
Project: market-analysis-system   Author: terentjew-alexey   File: autoencoders.py    MIT License
def deep_ae(input_shape, encoding_dim=64,
                    output_activation='linear', kernel_activation='elu',
                    lambda_l1=0.0):
    """
    Example from https://habr.com/post/331382/

    Arguments
        input_shape (tuple of int):
        encoding_dim (int):
        output_activation (str):
        kernel_activation (str):
        lambda_l1 (float): Regularisation value for sparse encoding.

    Returns
        encoder:
        decoder:
        autoencoder:
    """

    decoder_dim = 1
    for i in input_shape:
        decoder_dim *= i

    # Encoder
    input_tensor = Input(shape=input_shape)
    x = Flatten()(input_tensor)
    # x = Dense(encoding_dim*4, activation=kernel_activation)(x)
    x = Dense(encoding_dim*3, activation=kernel_activation)(x)
    x = Dense(encoding_dim*2, activation=kernel_activation)(x)
    encoded = Dense(encoding_dim, activation='linear',
                    activity_regularizer=L1L2(lambda_l1, 0))(x)
    
    # Decoder
    input_encoded = Input(shape=(encoding_dim,))
    y = Dense(encoding_dim*2, activation=kernel_activation)(input_encoded)
    y = Dense(encoding_dim*3, activation=kernel_activation)(y)
    # y = Dense(encoding_dim*4, activation=kernel_activation)(y)
    y = Dense(decoder_dim, activation=output_activation)(y)
    decoded = Reshape(input_shape)(y)

    # Create models
    encoder = Model(input_tensor, encoded, name="encoder")
    decoder = Model(input_encoded, decoded, name="decoder")
    autoencoder = Model(input_tensor, decoder(encoder(input_tensor)), name="autoencoder")

    return encoder, decoder, autoencoder 
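A hypothetical invocation, assuming the module's own Keras imports (Input, Flatten, Dense, Reshape, Model, L1L2); the input shape and penalty are illustrative. Setting lambda_l1 > 0 makes the bottleneck's activity_regularizer act as a sparsity penalty on the code layer:

encoder, decoder, autoencoder = deep_ae((28, 28), encoding_dim=64,
                                        lambda_l1=1e-5)
autoencoder.compile(optimizer='adam', loss='mse')
autoencoder.summary()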
Example 22
Project: TranskribusDU   Author: Transkribus   File: taggerTrainKeras.py    BSD 3-Clause "New" or "Revised" License
def training(self, traindata):
        """
        training
        """
        train_X, _ = traindata  # self.load_data(self.lTrain)

        self.initTransformeur()

        fX = [item for sublist in train_X for item in sublist]
        self.node_transformer.fit(fX)

        lX, lY = self.prepareTensor(traindata)
        # lX.reshape(1000, self.max_sentence_len)
        print(lX.shape)

        # Observed tensor shapes:
        # (1000, 26, 128)
        # (?, 26, 128)
        # (?, 26, 128)
        # (?, ?, 64)
        model = Sequential()
        reg = L1L2(l1=0.001, l2=0.0)

        print('feature: %s sent: %s hid: %s' % (self.max_features, self.max_sentence_len, self.hiddenSize))
        model.add(Masking(mask_value=0., input_shape=(self.max_sentence_len, self.max_features)))
        model.add(TimeDistributed(Dense(self.hiddenSize)))
        model.add(Bidirectional(LSTM(self.hiddenSize, return_sequences=True, bias_regularizer=reg)))
        # model.add(Dropout(0.5))
        # model.add(Bidirectional(LSTM(self.hiddenSize, return_sequences=True, bias_regularizer=reg)))
        # model.add(Dropout(0.5))
        # model.add(Bidirectional(LSTM(self.hiddenSize, return_sequences=True, bias_regularizer=reg)))
        # model.add(Dropout(0.5))
        print(model.layers[-1].output.get_shape())
        if self.bAttentionLayer:
            model.add(AttentionDecoder(self.max_sentence_len, self.nbClasses))
        else:
            model.add(TimeDistributed(Dense(self.nbClasses, activation='softmax')))
        # keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
        model.add(Dropout(0.5))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['categorical_accuracy'])
        print(model.summary())
        _ = model.fit(lX, lY, epochs=self.nbEpochs, batch_size=self.batch_size, verbose=1,
                      validation_split=0.33, shuffle=True)

        del lX, lY

        auxdata = self.max_features, self.max_sentence_len, self.nbClasses, self.tag_vector, self.node_transformer

        return model, auxdata
Example 23
Project: deepcpg   Author: cangermueller   File: dna.py    MIT License
def _res_unit(self, inputs, nb_filter, size=3, stride=1, stage=1, block=1):

        name = '%02d-%02d/' % (stage, block)
        id_name = '%sid_' % (name)
        res_name = '%sres_' % (name)

        # Residual branch

        # 1x1 down-sample conv
        x = kl.BatchNormalization(name=res_name + 'bn1')(inputs)
        x = kl.Activation('relu', name=res_name + 'act1')(x)
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(nb_filter[0], 1,
                      name=res_name + 'conv1',
                      subsample_length=stride,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)

        # LxL conv
        x = kl.BatchNormalization(name=res_name + 'bn2')(x)
        x = kl.Activation('relu', name=res_name + 'act2')(x)
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(nb_filter[1], size,
                      name=res_name + 'conv2',
                      border_mode='same',
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)

        # 1x1 up-sample conv
        x = kl.BatchNormalization(name=res_name + 'bn3')(x)
        x = kl.Activation('relu', name=res_name + 'act3')(x)
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(nb_filter[2], 1,
                      name=res_name + 'conv3',
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)

        # Identity branch
        if nb_filter[-1] != inputs._keras_shape[-1] or stride > 1:
            kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
            identity = kl.Conv1D(nb_filter[2], 1,
                                 name=id_name + 'conv1',
                                 subsample_length=stride,
                                 kernel_initializer=self.init,
                                 kernel_regularizer=kernel_regularizer)(inputs)
        else:
            identity = inputs

        x = kl.merge([identity, x], name=name + 'merge', mode='sum')

        return x 
Example 24
Project: deepcpg   Author: cangermueller   File: dna.py    MIT License
def _res_unit(self, inputs, nb_filter, size=3, stride=1, atrous=1,
                  stage=1, block=1):

        name = '%02d-%02d/' % (stage, block)
        id_name = '%sid_' % (name)
        res_name = '%sres_' % (name)

        # Residual branch

        # 1x1 down-sample conv
        x = kl.BatchNormalization(name=res_name + 'bn1')(inputs)
        x = kl.Activation('relu', name=res_name + 'act1')(x)
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(nb_filter[0], 1,
                      name=res_name + 'conv1',
                      subsample_length=stride,
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)

        # LxL conv
        x = kl.BatchNormalization(name=res_name + 'bn2')(x)
        x = kl.Activation('relu', name=res_name + 'act2')(x)
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.AtrousConv1D(nb_filter[1], size,
                            atrous_rate=atrous,
                            name=res_name + 'conv2',
                            border_mode='same',
                            kernel_initializer=self.init,
                            kernel_regularizer=kernel_regularizer)(x)

        # 1x1 up-sample conv
        x = kl.BatchNormalization(name=res_name + 'bn3')(x)
        x = kl.Activation('relu', name=res_name + 'act3')(x)
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(nb_filter[2], 1,
                      name=res_name + 'conv3',
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)

        # Identity branch
        if nb_filter[-1] != inputs._keras_shape[-1] or stride > 1:
            kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
            identity = kl.Conv1D(nb_filter[2], 1,
                                 name=id_name + 'conv1',
                                 subsample_length=stride,
                                 kernel_initializer=self.init,
                                 kernel_regularizer=kernel_regularizer)(inputs)
        else:
            identity = inputs

        x = kl.merge([identity, x], name=name + 'merge', mode='sum')

        return x 
Example 25
Project: DeepScope   Author: miguelesteras   File: gan_test_bugs.py    MIT License
def model_generator(nch=256, edge_len=10):
    reg = lambda: L1L2(l1=1e-7, l2=1e-7)
    
    inputs = Input(shape=(80*80,))
    
    l = Dense(nch * edge_len**2)(inputs)
    l = BatchNormalization()(l)
    
    l = Reshape((edge_len, edge_len, nch))(l)
    
    l = filt(l, 256, 3, BN=True, prelu=True)
    l = filt(l, 32, 3, BN=True, prelu=True)

    l = incep_module(l, 32)
    l = incep_module(l, 32)
#    l = incep_module(l, 32)
#    l = incep_module(l, 32)
#    l = incep_module(l, 32)
#    l = incep_module(l, 32)
#    l = Conv2DTranspose(256, 3, strides=2, padding='same', activation='relu', kernel_regularizer=reg())(l)
#    l = filt(l, 32, 3, BN=True)
    l = UpSampling2D()(l)
    
    l = incep_module(l, 32)
    l = incep_module(l, 32)
#    l = incep_module(l, 32)
#    l = incep_module(l, 32)
#    l = Conv2DTranspose(128, 3, strides=2, padding='same', activation='relu', kernel_regularizer=reg())(l)
#    l = filt(l, 32, 3, BN=True)
    l = UpSampling2D()(l)
    
    l = incep_module(l, 32)
    l = incep_module(l, 32)
#    l = incep_module(l, 32)
#    l = incep_module(l, 32)
#    l = Conv2DTranspose(64, 3, strides=2, padding='same', activation='relu', kernel_regularizer=reg())(l)
#    l = filt(l, 32, 3, BN=True)
    l = UpSampling2D()(l)
    
    l = incep_module(l, 32)
#    l = incep_module(l, 32)
#    l = incep_module(l, 32)
    
    l = filt(l, 1, 5, BN=False, act=False)
    l = Activation('sigmoid')(l)
    
    return Model(inputs=inputs, outputs=l)
Example 26
Project: Norfolk_Groundwater_Model   Author: UVAdMIST   File: keras_mmps043_18hr_hyperas_rnn.py    MIT License
def create_model(train_X, train_y, test_X, test_y):
    def rmse(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

    n_ahead = 19
    n_test = 7548
    n_epochs = 10000
    n_neurons = 10
    n_batch = 49563

    os.environ['PYTHONHASHSEED'] = '0'
    np.random.seed(42)
    rn.seed(12345)
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    tf.set_random_seed(1234)
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)

    model = Sequential()
    model.add(SimpleRNN(units={{choice([10, 15, 20, 40, 50, 75])}}, activation={{choice(['relu', 'tanh', 'sigmoid'])}},
                        input_shape=(None, train_X.shape[2]), use_bias=True,
                        bias_regularizer=L1L2(l1=0.01, l2=0.01), return_sequences=False))
    model.add(Dropout({{uniform(0.1, 0.5)}}))
    model.add(Dense(activation='linear', units=n_ahead-1, use_bias=True))

    adam = keras.optimizers.Adam(lr={{choice([10 ** -3, 10 ** -2, 10 ** -1])}})
    rmsprop = keras.optimizers.RMSprop(lr={{choice([10 ** -3, 10 ** -2, 10 ** -1])}})
    sgd = keras.optimizers.SGD(lr={{choice([10 ** -3, 10 ** -2, 10 ** -1])}})

    choiceval = {{choice(['adam', 'sgd', 'rmsprop'])}}
    if choiceval == 'adam':
        optim = adam
    elif choiceval == 'rmsprop':
        optim = rmsprop
    else:
        optim = sgd

    model.compile(loss=rmse, optimizer=optim)

    earlystop = keras.callbacks.EarlyStopping(monitor='loss', min_delta=0.00000001, patience=5, verbose=1, mode='auto')
    model.fit(train_X, train_y, batch_size=n_batch, epochs=n_epochs, verbose=2, shuffle=False, callbacks=[earlystop])
    loss = model.evaluate(test_X, test_y, batch_size=n_test, verbose=0)
    return {'loss': loss, 'status': STATUS_OK, 'model': model} 
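The double-brace markers ({{choice(...)}}, {{uniform(...)}}) are hyperas template placeholders, not valid Python on their own; hyperas rewrites the function before running it. A hedged sketch of the driver code, assuming a companion data() function that returns train_X, train_y, test_X, test_y as hyperas expects:

from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())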
Example 27
Project: Norfolk_Groundwater_Model   Author: UVAdMIST   File: keras_mmps043_18hr_hyperas_lstm.py    MIT License
def create_model(train_X, train_y, test_X, test_y):
    def rmse(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

    n_ahead = 19
    n_test = 7548
    n_epochs = 10000
    n_neurons = 10
    n_batch = 49563

    os.environ['PYTHONHASHSEED'] = '0'
    np.random.seed(42)
    rn.seed(12345)
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    tf.set_random_seed(1234)
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)

    model = Sequential()
    model.add(LSTM(units={{choice([10, 15, 20, 40, 50, 75])}}, activation={{choice(['relu', 'tanh', 'sigmoid'])}},
                   input_shape=(None, train_X.shape[2]), use_bias=True,
                   bias_regularizer=L1L2(l1=0.01, l2=0.01)))
    model.add(Dropout({{uniform(0.1, 0.5)}}))
    model.add(Dense(activation='linear', units=n_ahead-1, use_bias=True))

    adam = keras.optimizers.Adam(lr={{choice([10 ** -3, 10 ** -2, 10 ** -1])}})
    rmsprop = keras.optimizers.RMSprop(lr={{choice([10 ** -3, 10 ** -2, 10 ** -1])}})
    sgd = keras.optimizers.SGD(lr={{choice([10 ** -3, 10 ** -2, 10 ** -1])}})

    choiceval = {{choice(['adam', 'sgd', 'rmsprop'])}}
    if choiceval == 'adam':
        optim = adam
    elif choiceval == 'rmsprop':
        optim = rmsprop
    else:
        optim = sgd

    model.compile(loss=rmse, optimizer=optim)

    earlystop = keras.callbacks.EarlyStopping(monitor='loss', min_delta=0.00000001, patience=5, verbose=1, mode='auto')
    model.fit(train_X, train_y, batch_size=n_batch, epochs=n_epochs, verbose=2, shuffle=False, callbacks=[earlystop])
    loss = model.evaluate(test_X, test_y, batch_size=n_test, verbose=0)
    return {'loss': loss, 'status': STATUS_OK, 'model': model}