Python keras.regularizers.l1_l2() Examples

The following are code examples for showing how to use keras.regularizers.l1_l2(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: X-CNN   Author: ernstlab   File: train_X-SCNN.py    MIT License 7 votes vote down vote up
def make_subnetwork(args, input_length, num_tracks, encoder=None):
	"""Build a conv/max-pool subnetwork over a (length, num_tracks) ChIP input.

	If args.pad is set, the input is widened by filter_len positions on each
	side. An optional `encoder` model is applied to the input first.
	Returns a Model mapping the raw input to pooled conv features.
	"""
	# Padded inputs carry 2*filter_len extra positions in total.
	seq_len = input_length + 2 * args.filter_len if args.pad else input_length
	chip_input = Input(shape=(seq_len, num_tracks))
	features = encoder(chip_input) if encoder else chip_input
	features = Conv1D(
		filters=args.conv_kernel,
		kernel_size=args.filter_len,
		activation='relu',
		kernel_regularizer=regularizers.l1_l2(args.regularizer),
		use_bias=args.bias)(features)
	features = GlobalMaxPooling1D()(features)
	return Model(chip_input, features)
Example 2
Project: GEM-Benchmark   Author: palash1992   File: sdne_utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_variational_encoder(node_num, d,
                            n_units, nu1, nu2,
                            activation_fn):
    """Build the variational encoder of an SDNE-style autoencoder.

    Maps a node_num-dim input to three d-dim outputs: a regularized
    activated branch, a plain Dense branch (presumably the log-variance
    consumed by `sampling` — verify against `sampling`'s definition), and
    the embedding sampled from the two.
    """
    # Input: one row per node
    x = Input(shape=(node_num,))
    # Hidden encoder stack; every Dense carries an elasticnet (l1+l2) penalty
    h = x
    for units in n_units:
        h = Dense(units, activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Two parallel d-dim heads off the last hidden layer
    z_mean = Dense(d, activation=activation_fn,
                   W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    z_log_var = Dense(d)(h)
    # Reparameterization via the module-level `sampling` function
    z = Lambda(sampling, output_shape=(d,))([z_mean, z_log_var])
    encoder = Model(input=x, outputs=[z_mean, z_log_var, z])
    return encoder
Example 3
Project: GEM-Benchmark   Author: palash1992   File: sdne_utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_decoder(node_num, d,
                n_units, nu1, nu2,
                activation_fn):
    """SDNE decoder: d-dim embedding -> node_num-dim reconstruction.

    Hidden widths walk n_units in reverse; every Dense layer carries an
    elasticnet (l1+l2) weight penalty.
    """
    # Embedding input
    y = Input(shape=(d,))
    # Hidden decoder stack, narrowest first
    h = y
    for units in reversed(n_units):
        h = Dense(units,
                  activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Final layer reconstructs the original node dimension
    x_hat = Dense(node_num, activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    return Model(input=y, output=x_hat)
Example 4
Project: dynamicgem   Author: Sujit-O   File: dnn_utils.py    MIT License 6 votes vote down vote up
def get_decoder(node_num, d,
                n_units, nu1, nu2,
                activation_fn):
    """SDNE decoder: d-dim embedding -> node_num-dim reconstruction.

    Hidden widths walk n_units in reverse; every Dense layer carries an
    elasticnet (l1+l2) weight penalty.
    """
    # Embedding input
    y = Input(shape=(d,))
    # Hidden decoder stack, narrowest first
    h = y
    for units in reversed(n_units):
        h = Dense(units,
                  activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Final layer reconstructs the original node dimension
    x_hat = Dense(node_num, activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    return Model(input=y, output=x_hat)
Example 5
Project: dynamicgem   Author: Sujit-O   File: dnn_utils.py    MIT License 6 votes vote down vote up
def get_decoder_dynaernn(node_num, d,
                         n_units, nu1, nu2,
                         activation_fn):
    """DynAERNN decoder: d-dim embedding -> node_num-dim reconstruction.

    Hidden widths walk n_units in reverse; every layer gets a fresh
    LeakyReLU activation and an elasticnet (l1+l2) weight penalty.
    NOTE(review): `activation_fn` is accepted but unused, as in the original.
    """
    # Embedding input
    y = Input(shape=(d,))
    h = y
    for units in reversed(n_units):
        h = Dense(units,
                  activation=LeakyReLU(),
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Final layer reconstructs the original node dimension
    x_hat = Dense(node_num, activation=LeakyReLU(),
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    return Model(input=y, output=x_hat)
Example 6
Project: dynamicgem   Author: Sujit-O   File: sdne_utils.py    MIT License 6 votes vote down vote up
def get_decoder(node_num, d,
                n_units, nu1, nu2,
                activation_fn):
    """SDNE decoder: d-dim embedding -> node_num-dim reconstruction.

    Hidden widths walk n_units in reverse; every Dense layer carries an
    elasticnet (l1+l2) weight penalty.
    """
    # Embedding input
    y = Input(shape=(d,))
    # Hidden decoder stack, narrowest first
    h = y
    for units in reversed(n_units):
        h = Dense(units,
                  activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Final layer reconstructs the original node dimension
    x_hat = Dense(node_num, activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    return Model(input=y, output=x_hat)
Example 7
Project: RPGOne   Author: RTHMaK   File: convolutional_encoder.py    Apache License 2.0 6 votes vote down vote up
def __init__(self,
                 units: int,
                 num_filters: int,
                 ngram_filter_sizes: Tuple[int]=(2, 3, 4, 5),
                 conv_layer_activation: str='relu',
                 l1_regularization: float=None,
                 l2_regularization: float=None,
                 **kwargs):
        """Configure an n-gram CNN sentence encoder; layers are built in build().

        :param units: output dimension of the final projection layer
        :param num_filters: filters per n-gram convolution
        :param ngram_filter_sizes: one conv layer per n-gram width listed here
        :param conv_layer_activation: activation used by the conv layers
        :param l1_regularization: optional l1 penalty weight
        :param l2_regularization: optional l2 penalty weight
        """
        self.output_dim = units
        self.num_filters = num_filters
        self.ngram_filter_sizes = ngram_filter_sizes
        self.conv_layer_activation = conv_layer_activation
        self.l1_regularization = l1_regularization
        self.l2_regularization = l2_regularization
        # Factory: each call hands out a fresh regularizer instance, reading
        # the penalty attributes lazily at build() time.
        self.regularizer = lambda: l1_l2(l1=self.l1_regularization, l2=self.l2_regularization)

        # Populated later, inside self.build().
        self.convolution_layers = None
        self.max_pooling_layers = None
        self.projection_layer = None

        self.input_spec = [InputSpec(ndim=3)]
        super(CNNEncoder, self).__init__(**kwargs)
Example 8
Project: RPGOne   Author: RTHMaK   File: threshold_tuple_matcher.py    Apache License 2.0 6 votes vote down vote up
def build(self, input_shape):
        """Create this layer's trainable weights.

        Adds a scalar similarity-threshold parameter (l2-regularized), one
        weight matrix per hidden layer, and a final scoring vector mapping
        the last hidden width to a single score.
        NOTE(review): weight-creation order matters for checkpoint loading —
        do not reorder these add_weight calls.
        """
        super(ThresholdTupleMatcher, self).build(input_shape)
        # Add the parameter for the similarity threshold
        self.similarity_threshold = self.add_weight(shape=(1,),
                                                    name=self.name + '_similarity_thresh',
                                                    initializer=self.hidden_layer_init,
                                                    regularizer=l1_l2(l2=0.001),
                                                    trainable=True)

        # Add the weights for the hidden layers.
        # First matrix consumes the second dim of the first input; later ones
        # are hidden_layer_width -> hidden_layer_width.
        hidden_layer_input_dim = input_shape[0][1]
        for i in range(self.num_hidden_layers):
            hidden_layer = self.add_weight(shape=(hidden_layer_input_dim, self.hidden_layer_width),
                                           initializer=initializers.get(self.hidden_layer_init),
                                           name='%s_hiddenlayer_%d' % (self.name, i))
            self.hidden_layer_weights.append(hidden_layer)
            hidden_layer_input_dim = self.hidden_layer_width
        # Add the weights for the final layer.
        self.score_layer = self.add_weight(shape=(self.hidden_layer_width, 1),
                                           initializer=initializers.get(self.hidden_layer_init),
                                           name='%s_score' % self.name)
Example 9
Project: autonomio   Author: autonomio   File: regression.py    MIT License 6 votes vote down vote up
def regression(X, Y, epochs, reg_mode):
    """Fit a single-unit Keras model on (X, Y).

    reg_mode selects the flavour: 'linear' (mse), 'logistic'
    (binary crossentropy) or 'regularized' (logistic + l1/l2 penalty).
    Returns the fitted model and the training History.
    """
    x = np.array(X)
    y = np.array(Y)

    model = Sequential()

    if reg_mode == 'linear':
        model.add(Dense(1, input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='mse')
    elif reg_mode == 'logistic':
        model.add(Dense(1, activation='sigmoid', input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'],
                      loss='binary_crossentropy')
    elif reg_mode == 'regularized':
        # Elasticnet penalty on the single Dense layer's weights
        penalty = l1_l2(l1=0.01, l2=0.01)
        model.add(Dense(1, activation='sigmoid', W_regularizer=penalty,
                        input_dim=x.shape[1]))
        model.compile(optimizer='rmsprop', metrics=['accuracy'],
                      loss='binary_crossentropy')

    # A third of the samples are held out for validation each epoch.
    out = model.fit(x, y, nb_epoch=epochs, verbose=0, validation_split=.33)

    return model, out
Example 10
Project: CNNArt   Author: thomaskuestner   File: MNetArt.py    Apache License 2.0 6 votes vote down vote up
def fCreateMNet_Block(input_t, channels, kernel_size=(3, 3), type=1, forwarding=True, l1_reg=0.0, l2_reg=1e-6):
    """Stack `type` Conv2D+ReLU layers; optionally concat the input (forwarding).

    All convs share the same config: he_normal init, 'same' padding, unit
    strides and an elasticnet (l1+l2) kernel penalty.
    """
    def conv(x):
        # Shared conv configuration for every layer in the block.
        return Conv2D(channels,
                      kernel_size=kernel_size,
                      kernel_initializer='he_normal',
                      weights=None,
                      padding='same',
                      strides=(1, 1),
                      kernel_regularizer=l1_l2(l1_reg, l2_reg),
                      )(x)

    out = Activation('relu')(conv(input_t))
    for _ in range(type - 1):
        out = Activation('relu')(conv(out))
    if forwarding:
        # Dense forwarding: concatenate along the channel axis.
        out = concatenate([out, input_t], axis=1)
    return out
Example 11
Project: CNNArt   Author: thomaskuestner   File: MNetArt.py    Apache License 2.0 6 votes vote down vote up
def fCreateMNet_Block(input_t, channels, kernel_size=(3, 3), type=1, forwarding=True, l1_reg=0.0, l2_reg=1e-6):
    """Stack `type` Conv2D+ReLU layers; optionally concat the input (forwarding).

    All convs share the same config: he_normal init, 'same' padding, unit
    strides and an elasticnet (l1+l2) kernel penalty.
    """
    def conv(x):
        # Shared conv configuration for every layer in the block.
        return Conv2D(channels,
                      kernel_size=kernel_size,
                      kernel_initializer='he_normal',
                      weights=None,
                      padding='same',
                      strides=(1, 1),
                      kernel_regularizer=l1_l2(l1_reg, l2_reg),
                      )(x)

    out = Activation('relu')(conv(input_t))
    for _ in range(type - 1):
        out = Activation('relu')(conv(out))
    if forwarding:
        # Dense forwarding: concatenate along the channel axis.
        out = concatenate([out, input_t], axis=1)
    return out
Example 12
Project: CNNArt   Author: thomaskuestner   File: VNetArt.py    Apache License 2.0 6 votes vote down vote up
def fCreateVNet_Block(input_t, channels, type=1, kernel_size=(3, 3, 3), l1_reg=0.0, l2_reg=1e-6, iPReLU=0, dr_rate=0):
    """Apply `type` x (Dropout -> Conv3D -> activation), then concat the input.

    All convs use he_normal init, 'same' padding, unit strides and an
    elasticnet (l1+l2) kernel penalty; the activation is chosen by
    fGetActivation via iPReLU.
    """
    def unit(x):
        # One Dropout -> Conv3D -> activation step.
        x = Dropout(dr_rate)(x)
        x = Conv3D(channels,
                   kernel_size=kernel_size,
                   kernel_initializer='he_normal',
                   weights=None,
                   padding='same',
                   strides=(1, 1, 1),
                   kernel_regularizer=l1_l2(l1_reg, l2_reg),
                   )(x)
        return fGetActivation(x, iPReLU=iPReLU)

    out = unit(input_t)
    for _ in range(type - 1):
        out = unit(out)
    # Residual-style concatenation along the channel axis.
    return concatenate([out, input_t], axis=1)
Example 13
Project: CNNArt   Author: thomaskuestner   File: VNetArt.py    Apache License 2.0 6 votes vote down vote up
def fCreateVNet_Block(input_t, channels, type=1, kernel_size=(3, 3, 3), l1_reg=0.0, l2_reg=1e-6, iPReLU=0, dr_rate=0):
    """Apply `type` x (Dropout -> Conv3D -> activation), then concat the input.

    All convs use he_normal init, 'same' padding, unit strides and an
    elasticnet (l1+l2) kernel penalty; the activation is chosen by
    fGetActivation via iPReLU.
    """
    def unit(x):
        # One Dropout -> Conv3D -> activation step.
        x = Dropout(dr_rate)(x)
        x = Conv3D(channels,
                   kernel_size=kernel_size,
                   kernel_initializer='he_normal',
                   weights=None,
                   padding='same',
                   strides=(1, 1, 1),
                   kernel_regularizer=l1_l2(l1_reg, l2_reg),
                   )(x)
        return fGetActivation(x, iPReLU=iPReLU)

    out = unit(input_t)
    for _ in range(type - 1):
        out = unit(out)
    # Residual-style concatenation along the channel axis.
    return concatenate([out, input_t], axis=1)
Example 14
Project: CNNArt   Author: thomaskuestner   File: motion_MNetArt.py    Apache License 2.0 6 votes vote down vote up
def fCreateMNet_Block(input_t, channels, kernel_size=(3,3), type=1, forwarding=True,l1_reg=0.0, l2_reg=1e-6 ):
    """Stack `type` Conv2D+ReLU layers; optionally concat the input (forwarding).

    All convs share the same config: he_normal init, 'same' padding, unit
    strides and an elasticnet (l1+l2) kernel penalty.
    """
    def conv(x):
        # Shared conv configuration for every layer in the block.
        return Conv2D(channels,
                      kernel_size=kernel_size,
                      kernel_initializer='he_normal',
                      weights=None,
                      padding='same',
                      strides=(1, 1),
                      kernel_regularizer=l1_l2(l1_reg, l2_reg),
                      )(x)

    out = Activation('relu')(conv(input_t))
    for _ in range(type - 1):
        out = Activation('relu')(conv(out))
    if forwarding:
        # Dense forwarding: concatenate along the channel axis.
        out = concatenate([out, input_t], axis=1)
    return out
Example 15
Project: CNNArt   Author: thomaskuestner   File: MSnetworks.py    Apache License 2.0 6 votes vote down vote up
def fConvIncep(input_t, KB=64, layernum=2, l1_reg=0.0, l2_reg=1e-6, iPReLU=0):
    """One Conv3D stem followed by (layernum-1) inception blocks.

    The result is concatenated with the block input along the channel axis.
    """
    # Stem convolution with a [2, 2, 1] kernel and elasticnet penalty.
    stem = Conv3D(filters=KB,
                  kernel_size=[2,2,1],
                  kernel_initializer='he_normal',
                  weights=None,
                  padding='same',
                  strides=(1, 1, 1),
                  kernel_regularizer=l1_l2(l1_reg, l2_reg),
                  )(input_t)
    incep = fGetActivation(stem, iPReLU=iPReLU)

    for _ in range(layernum - 1):
        incep = InceptionBlock(incep, l1_reg=l1_reg, l2_reg=l2_reg)

    return concatenate([incep, input_t], axis=1)
Example 16
Project: CNNArt   Author: thomaskuestner   File: MSnetworks.py    Apache License 2.0 6 votes vote down vote up
def InceptionBlock(inp, l1_reg=0.0, l2_reg=1e-6):
    """GoogLeNet-style 3D inception block.

    Four parallel branches — 1x1x1, 1x1x1->3x3x3, 1x1x1->5x5x5 and
    max-pool->1x1x1 — concatenated along the channel axis. Branch widths
    come from fgetKernelNumber().
    """
    KN = fgetKernelNumber()

    def conv(x, filters, size):
        # Shared branch-conv configuration with elasticnet penalty.
        return Conv3D(filters=filters, kernel_size=size, kernel_initializer='he_normal',
                      weights=None, padding='same', strides=(1, 1, 1),
                      kernel_regularizer=l1_l2(l1_reg, l2_reg), activation='relu')(x)

    branch1 = conv(inp, KN[0], (1, 1, 1))
    branch3 = conv(conv(inp, KN[0], (1, 1, 1)), KN[2], (3, 3, 3))
    branch5 = conv(conv(inp, KN[0], (1, 1, 1)), KN[1], (5, 5, 5))
    pooled = MaxPooling3D(pool_size=(3,3,3), strides=(1,1,1), padding='same',
                          data_format='channels_first')(inp)
    branchpool = conv(pooled, KN[0], (1, 1, 1))
    return concatenate([branch1, branch3, branch5, branchpool], axis=1)
Example 17
Project: pydl   Author: rafaeltg   File: mlp.py    MIT License 6 votes vote down vote up
def _create_layers(self, input_shape, n_output):

        """ Create the network layers
        :param input_shape: training-data shape; only the last dim (n features) is used
        :param n_output: number of output units
        :return: self
        """

        # Hidden stack: the first layer declares the input size, later ones infer it.
        for idx, n_units in enumerate(self.layers):
            self._model.add(Dense(units=n_units,
                                  input_shape=[input_shape[-1] if idx == 0 else None],
                                  activation=self.activation[idx],
                                  kernel_regularizer=l1_l2(self.l1_reg[idx], self.l2_reg[idx]),
                                  bias_regularizer=l1_l2(self.l1_reg[idx], self.l2_reg[idx])))
            # Per-layer dropout, skipped when the configured rate is zero.
            if self.dropout[idx] > 0:
                self._model.add(Dropout(rate=self.dropout[idx]))

        # Output layer (unregularized).
        self._model.add(Dense(units=n_output, activation=self.out_activation))
Example 18
Project: pydl   Author: rafaeltg   File: deep_autoencoder.py    MIT License 6 votes vote down vote up
def _create_layers(self, input_layer):

        """ Create the encoding and the decoding layers of the deep autoencoder.
        :param input_layer: input tensor of the network
        :return: self
        """

        # Encoder: one regularized Dense per entry in n_hidden.
        encoded = input_layer
        for idx, n_units in enumerate(self.n_hidden):
            encoded = Dense(units=n_units,
                            name='encoder_%d' % idx,
                            activation=self.enc_activation[idx],
                            kernel_regularizer=l1_l2(self.l1_reg[idx], self.l2_reg[idx]),
                            bias_regularizer=l1_l2(self.l1_reg[idx], self.l2_reg[idx]))(encoded)

        # Decoder mirrors the hidden widths (minus the bottleneck) and ends at
        # the original input width; decoder layers carry no regularization.
        decode_units = self.n_hidden[:-1][::-1] + [K.int_shape(input_layer)[1]]
        self._decode_layer = encoded
        for idx, n_units in enumerate(decode_units):
            self._decode_layer = Dense(units=n_units,
                                       name='decoder_%d' % idx,
                                       activation=self.dec_activation[idx])(self._decode_layer)
Example 19
Project: pydl   Author: rafaeltg   File: stacked_autoencoder.py    MIT License 6 votes vote down vote up
def _create_layers(self, input_shape, n_output):

        """ Create the finetuning model
        :param input_shape: shape of the training data (second dim = n features)
        :param n_output: number of output units
        :return: self
        """

        # One Dense per pretrained autoencoder, initialised with that
        # autoencoder's encoder weights; only the first declares an input size.
        for idx, ae in enumerate(self.layers):
            self._model.add(Dense(input_shape=[input_shape[1] if idx == 0 else None],
                                  units=ae.n_hidden,
                                  weights=ae.get_model_parameters()['enc'],
                                  activation=ae.enc_activation,
                                  kernel_regularizer=l1_l2(ae.l1_reg, ae.l2_reg),
                                  bias_regularizer=l1_l2(ae.l1_reg, ae.l2_reg)))
            # Per-layer dropout, skipped when the configured rate is zero.
            if self.dropout[idx] > 0:
                self._model.add(Dropout(rate=self.dropout[idx]))

        # Output layer (unregularized).
        self._model.add(Dense(units=n_output, activation=self.out_activation))
Example 20
Project: pydl   Author: rafaeltg   File: autoencoder.py    MIT License 6 votes vote down vote up
def _create_layers(self, input_layer):

        """ Create the encoding and the decoding layers of the autoencoder.
        :param input_layer: input tensor of the network
        :return: self
        """

        # Single regularized bottleneck layer.
        hidden = Dense(name='encoder',
                       units=self.n_hidden,
                       activation=self.enc_activation,
                       kernel_regularizer=l1_l2(self.l1_reg, self.l2_reg),
                       bias_regularizer=l1_l2(self.l1_reg, self.l2_reg))(input_layer)

        # Decoder maps back to the input width; no regularization.
        self._decode_layer = Dense(name='decoder',
                                   units=K.int_shape(input_layer)[-1],
                                   activation=self.dec_activation)(hidden)
Example 21
Project: open-solution-home-credit   Author: minerva-ml   File: models.py    MIT License 6 votes vote down vote up
def _build_model(self, input_shape, **kwargs):
        """Assemble a fresh Sequential MLP from self.model_params.

        Each layer index selects its own slice of every per-layer parameter
        list (neurons, l1, l2, batch_norm, activation, dropout).
        """
        K.clear_session()
        model = Sequential()
        for idx in range(self.model_params['layers']):
            # Per-layer view of the configuration lists.
            cfg = {k: v[idx] for k, v in self.model_params.items() if k != 'layers'}
            dense_kwargs = dict(kernel_regularizer=l1_l2(l1=cfg['l1'], l2=cfg['l2']))
            if idx == 0:
                # Only the first layer declares the input shape.
                dense_kwargs['input_shape'] = input_shape
            model.add(Dense(cfg['neurons'], **dense_kwargs))
            if cfg['batch_norm']:
                model.add(BatchNormalization())
            model.add(Activation(cfg['activation']))
            model.add(Dropout(cfg['dropout']))

        return model
Example 22
Project: gemben   Author: Sujit-O   File: sdne_utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_variational_encoder(node_num, d,
                            n_units, nu1, nu2,
                            activation_fn):
    """Build the variational encoder of an SDNE-style autoencoder.

    Maps a node_num-dim input to three d-dim outputs: a regularized
    activated branch, a plain Dense branch (presumably the log-variance
    consumed by `sampling` — verify against `sampling`'s definition), and
    the embedding sampled from the two.
    """
    # Input: one row per node
    x = Input(shape=(node_num,))
    # Hidden encoder stack; every Dense carries an elasticnet (l1+l2) penalty
    h = x
    for units in n_units:
        h = Dense(units, activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Two parallel d-dim heads off the last hidden layer
    z_mean = Dense(d, activation=activation_fn,
                   W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    z_log_var = Dense(d)(h)
    # Reparameterization via the module-level `sampling` function
    z = Lambda(sampling, output_shape=(d,))([z_mean, z_log_var])
    encoder = Model(input=x, outputs=[z_mean, z_log_var, z])
    return encoder
Example 23
Project: gemben   Author: Sujit-O   File: sdne_utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_decoder(node_num, d,
                n_units, nu1, nu2,
                activation_fn):
    """SDNE decoder: d-dim embedding -> node_num-dim reconstruction.

    Hidden widths walk n_units in reverse; every Dense layer carries an
    elasticnet (l1+l2) weight penalty.
    """
    # Embedding input
    y = Input(shape=(d,))
    # Hidden decoder stack, narrowest first
    h = y
    for units in reversed(n_units):
        h = Dense(units,
                  activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Final layer reconstructs the original node dimension
    x_hat = Dense(node_num, activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    return Model(input=y, output=x_hat)
Example 24
Project: deep_qa   Author: allenai   File: convolutional_encoder.py    Apache License 2.0 6 votes vote down vote up
def __init__(self,
                 units: int,
                 num_filters: int,
                 ngram_filter_sizes: Tuple[int]=(2, 3, 4, 5),
                 conv_layer_activation: str='relu',
                 l1_regularization: float=None,
                 l2_regularization: float=None,
                 **kwargs):
        """Configure an n-gram CNN sentence encoder; layers are built in build().

        :param units: output dimension of the final projection layer
        :param num_filters: filters per n-gram convolution
        :param ngram_filter_sizes: one conv layer per n-gram width listed here
        :param conv_layer_activation: activation used by the conv layers
        :param l1_regularization: optional l1 penalty weight
        :param l2_regularization: optional l2 penalty weight
        """
        self.output_dim = units
        self.num_filters = num_filters
        self.ngram_filter_sizes = ngram_filter_sizes
        self.conv_layer_activation = conv_layer_activation
        self.l1_regularization = l1_regularization
        self.l2_regularization = l2_regularization
        # Factory: each call hands out a fresh regularizer instance, reading
        # the penalty attributes lazily at build() time.
        self.regularizer = lambda: l1_l2(l1=self.l1_regularization, l2=self.l2_regularization)

        # Populated later, inside self.build().
        self.convolution_layers = None
        self.max_pooling_layers = None
        self.projection_layer = None

        self.input_spec = [InputSpec(ndim=3)]
        super(CNNEncoder, self).__init__(**kwargs)
Example 25
Project: GEM   Author: palash1992   File: sdne_utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_decoder(node_num, d, K,
                n_units, nu1, nu2,
                activation_fn):
    """SDNE decoder with an explicit layer count K.

    Applies K-1 hidden layers whose widths are the first K-1 entries of
    n_units in reverse, then reconstructs the node_num-dim output. Every
    Dense layer carries an elasticnet (l1+l2) weight penalty.
    """
    # Embedding input
    y = Input(shape=(d,))
    h = y
    # Only the first K-1 configured widths are used, narrowest first.
    for units in reversed(n_units[:K - 1]):
        h = Dense(units,
                  activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Final layer reconstructs the original node dimension
    x_hat = Dense(node_num, activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    return Model(input=y, output=x_hat)
Example 26
Project: GEM-Benchmark   Author: palash1992   File: sdne_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_encoder(node_num, d, n_units, nu1, nu2, activation_fn):
    """SDNE encoder: node_num-dim input -> d-dim embedding.

    One hidden Dense per entry in n_units, then a projection to d; every
    layer carries an elasticnet (l1+l2) weight penalty.
    """
    # Input: one row per node
    x = Input(shape=(node_num,))
    h = x
    for units in n_units:
        h = Dense(units, activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Projection to the embedding dimension
    embedding = Dense(d, activation=activation_fn,
                      W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    return Model(input=x, output=embedding)
Example 27
Project: malware-prediction-rnn   Author: mprhode   File: RNN.py    Apache License 2.0 5 votes vote down vote up
def __generate_regulariser(self, l1_value, l2_value):
		"""Return the keras regulariser matching the truthy l1/l2 values.

		Combined l1_l2 when both are set, l1 or l2 alone when only one is,
		None when neither is.
		"""
		if l1_value and l2_value:
			return l1_l2(l1=l1_value, l2=l2_value)
		if l1_value:
			return l1(l1_value)
		if l2_value:
			return l2(l2_value)
		return None
Example 28
Project: dynamicgem   Author: Sujit-O   File: dnn_utils.py    MIT License 5 votes vote down vote up
def get_encoder(node_num, d, n_units, nu1, nu2, activation_fn):
    """SDNE encoder: node_num-dim input -> d-dim embedding.

    One hidden Dense per entry in n_units, then a projection to d; every
    layer carries an elasticnet (l1+l2) weight penalty.
    """
    # Input: one row per node
    x = Input(shape=(node_num,))
    h = x
    for units in n_units:
        h = Dense(units, activation=activation_fn,
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Projection to the embedding dimension
    embedding = Dense(d, activation=activation_fn,
                      W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    return Model(input=x, output=embedding)
Example 29
Project: dynamicgem   Author: Sujit-O   File: dnn_utils.py    MIT License 5 votes vote down vote up
def get_encoder_dynaernn(node_num, d, n_units, nu1, nu2, activation_fn):
    """DynAERNN encoder: node_num-dim input -> d-dim embedding.

    Each Dense gets a fresh LeakyReLU activation and an elasticnet (l1+l2)
    weight penalty. NOTE(review): `activation_fn` is accepted but unused,
    as in the original.
    """
    # Input: one row per node
    x = Input(shape=(node_num,))
    h = x
    for units in n_units:
        h = Dense(units, activation=LeakyReLU(),
                  W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    # Projection to the embedding dimension
    embedding = Dense(d, activation=LeakyReLU(),
                      W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(h)
    return Model(input=x, output=embedding)
Example 30
Project: dynamicgem   Author: Sujit-O   File: dnn_utils.py    MIT License 5 votes vote down vote up
def get_lstm_encoder(n_nodes, look_back, d,
                     n_units, activation_fn,
                     bias_reg, input_reg, recurr_reg,
                     ret_seq=True
                     ):
    """Stacked-LSTM encoder over (look_back, n_nodes) sequences.

    The last LSTM collapses the sequence (return_sequences=False); with a
    single configured layer, the first layer plays that role directly.
    NOTE(review): `d`, `activation_fn` and `ret_seq` are unused, as in the
    original.
    """
    model = Sequential()
    n_rnn_layers = len(n_units)
    # Regularizers shared by every layer, exactly as in the original.
    shared = dict(bias_regularizer=bias_reg,
                  kernel_regularizer=input_reg,
                  recurrent_regularizer=recurr_reg)
    # First layer fixes the input shape.
    model.add(LSTM(n_units[0],
                   input_shape=(look_back, n_nodes),
                   return_sequences=n_rnn_layers > 1,
                   **shared))
    # Intermediate layers keep emitting sequences.
    for units in n_units[1:-1]:
        model.add(LSTM(units, return_sequences=True, **shared))
    # Final layer emits a single vector.
    if n_rnn_layers > 1:
        model.add(LSTM(n_units[-1], return_sequences=False, **shared))
    return model
Example 31
Project: dynamicgem   Author: Sujit-O   File: dnn_utils.py    MIT License 5 votes vote down vote up
def get_lstm_encoder_v2(n_nodes, look_back, d,
                        n_units, activation_fn, nu1, nu2,
                        bias_reg, input_reg, recurr_reg,
                        ret_seq=True
                        ):
    """Stacked-LSTM encoder ending in a d-unit layer; all layers emit sequences.

    NOTE(review): `activation_fn`, `nu1`, `nu2` and `ret_seq` are unused,
    as in the original.
    """
    model = Sequential()
    # Regularizers shared by every layer, exactly as in the original.
    shared = dict(bias_regularizer=bias_reg,
                  kernel_regularizer=input_reg,
                  recurrent_regularizer=recurr_reg)
    # First layer fixes the (look_back, n_nodes) input shape.
    model.add(LSTM(n_units[0],
                   input_shape=(look_back, n_nodes),
                   return_sequences=True,
                   **shared))
    # Remaining configured widths, then the final d-dim embedding layer.
    for units in n_units[1:]:
        model.add(LSTM(units, return_sequences=True, **shared))
    model.add(LSTM(d, return_sequences=True, **shared))
    return model
Example 32
Project: dynamicgem   Author: Sujit-O   File: dnn_utils.py    MIT License 5 votes vote down vote up
def get_lstm_decoder_v2(n_nodes, look_back, d,
                        n_units, activation_fn,
                        nu1, nu2,
                        bias_reg, input_reg, recurr_reg
                        ):
    """Mirror-image LSTM decoder: maps a (look_back, d) embedding sequence
    back to an n_nodes-wide reconstruction.

    The hidden widths in `n_units` are consumed in reverse order; the last
    layer widens to `n_nodes` and collapses the sequence. `look_back`,
    `activation_fn`, `nu1` and `nu2` are kept for interface parity.
    """
    model = Sequential()
    num_layers = len(n_units)
    # First decoder layer consumes the embedding sequence.
    model.add(LSTM(d,
                   input_shape=(look_back, d),
                   return_sequences=True,
                   bias_regularizer=bias_reg,
                   kernel_regularizer=input_reg,
                   recurrent_regularizer=recurr_reg))
    for idx, width in enumerate(reversed(n_units)):
        is_last = idx == num_layers - 1
        # Last layer reconstructs n_nodes features and drops the time axis.
        model.add(LSTM(n_nodes if is_last else width,
                       return_sequences=not is_last,
                       bias_regularizer=bias_reg,
                       kernel_regularizer=input_reg,
                       recurrent_regularizer=recurr_reg))
    return model
Example 33
Project: dynamicgem   Author: Sujit-O   File: dnn_utils.py    MIT License 5 votes vote down vote up
def get_lstm_decoder_v3(n_nodes, look_back, d,
                        n_units, activation_fn,
                        nu1, nu2,
                        bias_reg, input_reg, recurr_reg
                        ):
    """LSTM decoder for a single-step (1, d) embedding.

    Args:
        n_nodes: width of the reconstructed output.
        look_back: unused here; kept for interface parity with the v2 decoder.
        d: embedding dimensionality (width of the first LSTM layer).
        n_units: hidden widths, consumed in reverse order.
        activation_fn: activation of the final Dense reconstruction layer.
        nu1, nu2: L1/L2 factors for the final Dense layer's weights.
        bias_reg, input_reg, recurr_reg: per-LSTM weight regularizers.
    """
    model = Sequential()
    n_rnn_layers = len(n_units)
    model.add(LSTM(d,
                   input_shape=(1, d),
                   return_sequences=True,
                   bias_regularizer=bias_reg,
                   kernel_regularizer=input_reg,
                   recurrent_regularizer=recurr_reg
                   )
              )
    for l_idx, n_unit in enumerate(n_units[::-1]):
        if l_idx < n_rnn_layers - 1:
            model.add(LSTM(n_unit,
                           return_sequences=True,
                           bias_regularizer=bias_reg,
                           kernel_regularizer=input_reg,
                           recurrent_regularizer=recurr_reg
                           )
                      )
        else:
            model.add(LSTM(n_nodes,
                           return_sequences=False,
                           bias_regularizer=bias_reg,
                           kernel_regularizer=input_reg,
                           recurrent_regularizer=recurr_reg
                           )
                      )

    # Fix: `W_regularizer` is the Keras 1 kwarg; the Keras 2 API used by the
    # LSTM layers above names it `kernel_regularizer`.
    model.add(Dense(n_nodes, activation=activation_fn,
                    kernel_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))
              )
    return model
Example 34
Project: RPGOne   Author: RTHMaK   File: __init__.py    Apache License 2.0 5 votes vote down vote up
def set_regularization_params(encoder_type: str, params: Dict[str, Any]):
    """
    This method takes regularization parameters that are specified in `params` and converts them
    into Keras regularization objects, modifying `params` to contain the correct keys for the given
    encoder_type.

    Currently, we only allow specifying a consistent regularization across all the weights of a
    layer.

    Returns:
        The same (mutated) `params` dictionary.
    """
    l1_regularization = params.pop("l1_regularization", None)
    l2_regularization = params.pop("l2_regularization", None)

    def regularizer():
        # Robustness fix: when no regularization was requested, return None so
        # Keras simply skips the penalty instead of l1_l2() failing on None
        # factors; a single missing factor is coerced to 0.0.
        if l1_regularization is None and l2_regularization is None:
            return None
        return l1_l2(l1=l1_regularization or 0.0, l2=l2_regularization or 0.0)

    if encoder_type == 'cnn':
        # Regularization with the CNN encoder is complicated, so we'll just pass in the L1 and L2
        # values directly, and let the encoder deal with them.
        params["l1_regularization"] = l1_regularization
        params["l2_regularization"] = l2_regularization
    elif encoder_type == 'lstm':
        params["W_regularizer"] = regularizer()
        params["U_regularizer"] = regularizer()
        params["b_regularizer"] = regularizer()
    elif encoder_type == 'tree_lstm':
        params["W_regularizer"] = regularizer()
        params["U_regularizer"] = regularizer()
        params["V_regularizer"] = regularizer()
        params["b_regularizer"] = regularizer()
    return params


# The first item added here will be used as the default in some cases. 
Example 35
Project: RPGOne   Author: RTHMaK   File: noisy_or.py    Apache License 2.0 5 votes vote down vote up
def build(self, input_shape):
    """Create the trainable scalar noise parameter, then run the base build."""
    # L2-penalized scalar weight, constrained like the original.
    self.noise_parameter = self.add_weight(
        shape=(),
        name=self.name + '_noise_param',
        initializer=self.param_init,
        regularizer=l1_l2(l2=0.001),
        constraint=self.noise_param_constraint,
        trainable=True,
    )
    super(NoisyOr, self).build(input_shape)
Example 36
Project: keras-image-captioning   Author: danieljl   File: models_test.py    MIT License 5 votes vote down vote up
def test_arg_l1_reg_and_l2_reg(self, model):
    """A model given an elastic-net regularizer should still build cleanly."""
    model._regularizer = l1_l2(l1=0.01, l2=0.01)
    self._build_and_assert(model)
Example 37
Project: applications   Author: geomstats   File: regularizers_test.py    MIT License 5 votes vote down vote up
def test_kernel_regularization():
    """Each kernel regularizer must register exactly one model loss and train."""
    x_train, y_train = get_data()
    candidates = (regularizers.l1(), regularizers.l2(), regularizers.l1_l2())
    for regularizer in candidates:
        model = create_model(kernel_regularizer=regularizer)
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        assert len(model.losses) == 1
        model.train_on_batch(x_train, y_train)
Example 38
Project: toxic_comments   Author: Donskov7   File: models.py    MIT License 5 votes vote down vote up
def _get_regularizer(regularizer_name, weight):
    if regularizer_name is None:
        return None
    if regularizer_name == 'l1':
        return l1(weight)
    if regularizer_name == 'l2':
        return l2(weight)
    if regularizer_name == 'l1_l2':
        return l1_l2(weight)
    return None 
Example 39
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 5 votes vote down vote up
def fCreateLeakyReluConv2D(filters, kernel_size=(3, 3), strides=(1, 1), padding='same'):
    """Closure factory: regularized Conv2D followed by LeakyReLU."""
    reg_l1, reg_l2 = 0, 1e-6

    def apply(tensor_in):
        features = Conv2D(filters,
                          kernel_size=kernel_size,
                          strides=strides,
                          padding=padding,
                          kernel_regularizer=l1_l2(reg_l1, reg_l2))(tensor_in)
        return LeakyReLU()(features)
    return apply
Example 40
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 5 votes vote down vote up
def fCreateLeakyReluBNConv2D(filters, kernel_size=(3, 3), strides=(1, 1), padding='same'):
    """Closure factory: Conv2D -> BatchNorm -> LeakyReLU."""
    reg_l1, reg_l2 = 0, 1e-6

    def apply(tensor_in):
        features = Conv2D(filters,
                          kernel_size=kernel_size,
                          strides=strides,
                          padding=padding,
                          kernel_regularizer=l1_l2(reg_l1, reg_l2))(tensor_in)
        normalized = BatchNormalization(axis=1)(features)
        return LeakyReLU()(normalized)
    return apply
Example 41
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 5 votes vote down vote up
def fCreateLeakyReluConv3D(filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same'):
    """Closure factory: regularized Conv3D followed by LeakyReLU."""
    reg_l1, reg_l2 = 0, 1e-6

    def apply(tensor_in):
        features = Conv3D(filters,
                          kernel_size=kernel_size,
                          strides=strides,
                          padding=padding,
                          kernel_regularizer=l1_l2(reg_l1, reg_l2))(tensor_in)
        return LeakyReLU()(features)
    return apply
Example 42
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 5 votes vote down vote up
def fCreateLeakyReluBNConv3D(filters, kernel_size, strides, padding='same'):
    """Closure factory: Conv3D -> LeakyReLU -> BatchNorm.

    NOTE(review): unlike the 2D variant, normalization here follows the
    activation — preserved as in the original.
    """
    reg_l1, reg_l2 = 0, 1e-6

    def apply(tensor_in):
        features = Conv3D(filters,
                          kernel_size=kernel_size,
                          strides=strides,
                          padding=padding,
                          kernel_regularizer=l1_l2(reg_l1, reg_l2))(tensor_in)
        activated = LeakyReLU()(features)
        return BatchNormalization(axis=1)(activated)
    return apply
Example 43
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 5 votes vote down vote up
def fCreateConv2D_ResBlock(filters, kernel_size=(3, 3), strides=(2, 2), padding='same'):
    """Closure factory: strided Conv2D plus a two-conv residual branch.

    The strided convolution's activation forms the skip connection; two
    stride-1 convolutions form the residual branch, and the outputs are added.
    """
    reg_l1, reg_l2 = 0, 1e-6

    def conv(tensor_in, conv_strides):
        # Shared regularized convolution used by every stage of the block.
        return Conv2D(filters,
                      kernel_size=kernel_size,
                      strides=conv_strides,
                      padding=padding,
                      kernel_regularizer=l1_l2(reg_l1, reg_l2))(tensor_in)

    def apply(tensor_in):
        skip = LeakyReLU()(conv(tensor_in, strides))
        branch = LeakyReLU()(conv(skip, (1, 1)))
        branch = LeakyReLU()(conv(branch, (1, 1)))
        return add([skip, branch])
    return apply
Example 44
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 5 votes vote down vote up
def fCreateConv2DTranspose_ResBlock(filters, kernel_size=(3, 3), strides=(2, 2), padding='same'):
    """Closure factory: strided Conv2DTranspose upsampling plus a residual
    branch of two stride-1 Conv2D layers, joined by addition."""
    reg_l1, reg_l2 = 0, 1e-6

    def apply(tensor_in):
        upsampled = Conv2DTranspose(filters=filters,
                                    kernel_size=kernel_size,
                                    strides=strides,
                                    padding=padding,
                                    kernel_regularizer=l1_l2(reg_l1, reg_l2))(tensor_in)
        skip = LeakyReLU()(upsampled)
        branch = skip
        for _ in range(2):
            branch = Conv2D(filters,
                            kernel_size=kernel_size,
                            strides=(1, 1),
                            padding=padding,
                            kernel_regularizer=l1_l2(reg_l1, reg_l2))(branch)
            branch = LeakyReLU()(branch)
        return add([skip, branch])
    return apply
Example 45
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 5 votes vote down vote up
def fCreateConv3DTranspose_ResBlock(filters, kernel_size=(3, 3, 1), strides=(2, 2, 1), padding='same'):
    """Closure factory: strided 3D transpose conv plus a residual branch of
    two stride-1 transpose convs, joined by addition."""
    reg_l1, reg_l2 = 0, 1e-6

    def deconv(tensor_in, conv_strides):
        # Shared regularized transpose convolution for every stage.
        return Conv3DTranspose(filters,
                               kernel_size=kernel_size,
                               strides=conv_strides,
                               padding=padding,
                               kernel_regularizer=l1_l2(reg_l1, reg_l2))(tensor_in)

    def apply(tensor_in):
        skip = LeakyReLU()(deconv(tensor_in, strides))
        branch = LeakyReLU()(deconv(skip, (1, 1, 1)))
        branch = LeakyReLU()(deconv(branch, (1, 1, 1)))
        return add([skip, branch])
    return apply
Example 46
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 5 votes vote down vote up
def fCreateConv2DTranspose(filters, strides, kernel_size=(3, 3), padding='same'):
    """Closure factory: regularized Conv2DTranspose followed by LeakyReLU."""
    reg_l1, reg_l2 = 0, 1e-6

    def apply(tensor_in):
        upsampled = Conv2DTranspose(filters=filters,
                                    kernel_size=kernel_size,
                                    strides=strides,
                                    padding=padding,
                                    kernel_regularizer=l1_l2(reg_l1, reg_l2))(tensor_in)
        return LeakyReLU()(upsampled)
    return apply
Example 47
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 5 votes vote down vote up
def fCreateConv2DBNTranspose(filters, strides, kernel_size=(3, 3), padding='same'):
    """Closure factory: Conv2DTranspose -> BatchNorm -> LeakyReLU."""
    reg_l1, reg_l2 = 0, 1e-6

    def apply(tensor_in):
        upsampled = Conv2DTranspose(filters=filters,
                                    kernel_size=kernel_size,
                                    strides=strides,
                                    padding=padding,
                                    kernel_regularizer=l1_l2(reg_l1, reg_l2))(tensor_in)
        normalized = BatchNormalization(axis=1)(upsampled)
        return LeakyReLU()(normalized)
    return apply
Example 48
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 5 votes vote down vote up
def fCreateConv3DTranspose(filters, strides, kernel_size=(4, 4, 2), padding='same'):
    """Closure factory: regularized Conv3DTranspose followed by LeakyReLU."""
    reg_l1, reg_l2 = 0, 1e-6

    def apply(tensor_in):
        # Local renamed from the original's misleading `conv2d`.
        upsampled = Conv3DTranspose(filters=filters,
                                    kernel_size=kernel_size,
                                    strides=strides,
                                    padding=padding,
                                    kernel_regularizer=l1_l2(reg_l1, reg_l2))(tensor_in)
        return LeakyReLU()(upsampled)
    return apply
Example 49
Project: CNNArt   Author: thomaskuestner   File: VNetArt.py    Apache License 2.0 5 votes vote down vote up
def fCreateVNet_DownConv_Block(input_t, channels, stride, l1_reg=0.0, l2_reg=1e-6, iPReLU=0, dr_rate=0):
    """Dropout + strided valid-padding Conv3D downsampling stage of the V-Net."""
    dropped = Dropout(dr_rate)(input_t)
    # kernel_size equals strides, so each voxel is covered exactly once.
    convolved = Conv3D(channels,
                       kernel_size=stride,
                       strides=stride,
                       weights=None,
                       padding='valid',
                       kernel_regularizer=l1_l2(l1_reg, l2_reg),
                       kernel_initializer='he_normal')(dropped)
    return fGetActivation(convolved, iPReLU=iPReLU)
Example 50
Project: CNNArt   Author: thomaskuestner   File: VNetArt.py    Apache License 2.0 5 votes vote down vote up
def fCreateVNet_DownConv_Block(input_t, channels, stride, l1_reg=0.0, l2_reg=1e-6, iPReLU=0, dr_rate=0):
    """V-Net down-convolution stage: dropout, then a non-overlapping strided
    Conv3D, then the configurable activation (ReLU or PReLU via iPReLU)."""
    regularized = Dropout(dr_rate)(input_t)
    regularized = Conv3D(channels,
                         kernel_size=stride,
                         strides=stride,
                         weights=None,
                         padding='valid',
                         kernel_regularizer=l1_l2(l1_reg, l2_reg),
                         kernel_initializer='he_normal')(regularized)
    return fGetActivation(regularized, iPReLU=iPReLU)
Example 51
Project: CNNArt   Author: thomaskuestner   File: motion_all_CNN2D_multiscale.py    Apache License 2.0 5 votes vote down vote up
def fConveBlock(conv_input, l1_reg=0.0, l2_reg=1e-6, dr_rate=0):
    """Three Dropout -> Conv2D -> ReLU stages with shared hyperparameters.

    Kernel sizes, strides and filter counts come from the module's fget*
    helpers; all parameters match the original 2D CNN configuration.
    """
    kernels = fgetKernels()
    strides = fgetStrides()
    n_filters = fgetKernelNumber()

    features = conv_input
    # Three identical stages, differing only in kernel/stride/filter config.
    for filt, k_size, strd in zip(n_filters[:3], kernels[:3], strides[:3]):
        features = Dropout(dr_rate)(features)
        features = Conv2D(filt,
                          kernel_size=k_size,
                          kernel_initializer='he_normal',
                          weights=None,
                          padding='valid',
                          strides=strd,
                          kernel_regularizer=l1_l2(l1_reg, l2_reg)
                          )(features)
        features = Activation('relu')(features)
    return features
Example 52
Project: CNNArt   Author: thomaskuestner   File: motion_VNetArt.py    Apache License 2.0 5 votes vote down vote up
def fCreateVNet_DownConv_Block(input_t, channels, stride, l1_reg=0.0, l2_reg=1e-6, iPReLU=0, dr_rate=0):
    """Downsampling block: dropout, strided valid Conv3D, then activation."""
    stage = Dropout(dr_rate)(input_t)
    # Stride doubles as the kernel size, producing non-overlapping patches.
    stage = Conv3D(channels,
                   kernel_size=stride,
                   strides=stride,
                   weights=None,
                   padding='valid',
                   kernel_regularizer=l1_l2(l1_reg, l2_reg),
                   kernel_initializer='he_normal')(stage)
    return fGetActivation(stage, iPReLU=iPReLU)
Example 53
Project: CNNArt   Author: thomaskuestner   File: MSnetworks.py    Apache License 2.0 5 votes vote down vote up
def fCreateModel_FCN_simple(patchSize, dr_rate=0.0, iPReLU=0, l1_reg=0.0, l2_reg=1e-6):
    """Fully-convolutional V-Net classifier (~1,223,831 params).

    The dense head is replaced by a 1x1x1 Conv3D with two filters (one per
    class) followed by global average pooling and a softmax.
    """
    down_strides = fgetStrides()
    kernel_counts = fgetKernelNumber()
    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))

    features = inp
    # Three conv + down-conv stages form the trunk.
    for stage in range(3):
        features = fCreateVNet_Block(features, kernel_counts[stage],
                                     type=fgetLayerNumConv(), l2_reg=l2_reg)
        features = fCreateVNet_DownConv_Block(features, features._keras_shape[1],
                                              down_strides[stage],
                                              iPReLU=iPReLU, dr_rate=dr_rate,
                                              l2_reg=l2_reg)

    features = Dropout(dr_rate)(features)
    logits = Conv3D(2,
                    kernel_size=(1, 1, 1),
                    kernel_initializer='he_normal',
                    weights=None,
                    padding='valid',
                    strides=(1, 1, 1),
                    kernel_regularizer=l1_l2(l1_reg, l2_reg),
                    )(features)
    logits = GlobalAveragePooling3D()(logits)
    outp = Activation('softmax')(logits)
    return Model(inputs=inp, outputs=outp)
Example 54
Project: kaggle-otto-classification   Author: zhouhaozeng   File: ensemble_train.py    MIT License 5 votes vote down vote up
def create_2_layer_keras_model(input_dim, output_dim):
    """Two-hidden-layer softmax MLP with elastic-net weight penalties.

    Args:
        input_dim: number of input features.
        output_dim: number of output classes.

    Returns:
        A compiled Keras Sequential model.
    """
    model = Sequential()
    model.add(Dropout(0.05, input_shape=(input_dim,)))
    # Fix: `init=` is the Keras 1 kwarg; the Keras 2 API (already used via
    # `kernel_regularizer`) names it `kernel_initializer`.
    model.add(Dense(512, kernel_initializer='glorot_normal', activation='relu',
                    kernel_regularizer=l1_l2(l1=1e-5, l2=1e-5)))
    model.add(Dropout(0.5))
    model.add(Dense(256, kernel_initializer='glorot_normal', activation='relu',
                    kernel_regularizer=l1_l2(l1=1e-5, l2=1e-5)))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim, kernel_initializer='glorot_normal', activation='softmax',
                    kernel_regularizer=l1_l2(l1=1e-5, l2=1e-5)))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', 'categorical_crossentropy'])
    return model
Example 55
Project: kaggle-otto-classification   Author: zhouhaozeng   File: ensemble_train.py    MIT License 5 votes vote down vote up
def create_3_layer_keras_model(input_dim, output_dim):
    """Three-hidden-layer softmax MLP with elastic-net weight penalties.

    Args:
        input_dim: number of input features.
        output_dim: number of output classes.

    Returns:
        A compiled Keras Sequential model.
    """
    model = Sequential()
    model.add(Dropout(0.05, input_shape=(input_dim,)))
    # Fix: `init=` is the Keras 1 kwarg; the Keras 2 API (already used via
    # `kernel_regularizer`) names it `kernel_initializer`.
    model.add(Dense(1024, kernel_initializer='glorot_normal', activation='relu',
                    kernel_regularizer=l1_l2(l1=1e-5, l2=1e-5)))
    model.add(Dropout(0.5))
    model.add(Dense(512, kernel_initializer='glorot_normal', activation='relu',
                    kernel_regularizer=l1_l2(l1=1e-5, l2=1e-5)))
    model.add(Dropout(0.5))
    model.add(Dense(256, kernel_initializer='glorot_normal', activation='relu',
                    kernel_regularizer=l1_l2(l1=1e-5, l2=1e-5)))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim, kernel_initializer='glorot_normal', activation='softmax',
                    kernel_regularizer=l1_l2(l1=1e-5, l2=1e-5)))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', 'categorical_crossentropy'])
    return model
Example 56
Project: gemben   Author: Sujit-O   File: sdne_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_encoder(node_num, d, n_units, nu1, nu2, activation_fn):
    """SDNE dense encoder: stacked Dense layers ending in a d-dim embedding.

    Each layer carries an elastic-net weight penalty (nu1 = L1, nu2 = L2).
    """
    x = Input(shape=(node_num,))
    hidden = x
    for width in n_units:
        hidden = Dense(width, activation=activation_fn,
                       W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(hidden)
    embedding = Dense(d, activation=activation_fn,
                      W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(hidden)
    return Model(input=x, output=embedding)
Example 57
Project: ml-projects   Author: apozas   File: cGAN.py    GNU General Public License v3.0 5 votes vote down vote up
def disc(image_dim, label_dim, layer_dim=1024, reg=lambda: l1_l2(1e-5, 1e-5)):
    """Conditional-GAN discriminator: (image, label) -> sigmoid real/fake score."""
    x = Input(shape=(image_dim,), name='discriminator_input')
    label = Input(shape=(label_dim,), name='discriminator_label')
    hidden = Concatenate(name='input_concatenation')([x, label])
    # Narrowing MLP trunk with LeakyReLU activations.
    trunk = ((layer_dim, 'discriminator_h1'),
             (int(layer_dim / 2), 'discriminator_h2'),
             (int(layer_dim / 4), 'discriminator_h3'))
    for width, layer_name in trunk:
        hidden = Dense(width, name=layer_name, kernel_regularizer=reg())(hidden)
        hidden = LeakyReLU(0.2)(hidden)
    score = Dense(1, name="discriminator_y", kernel_regularizer=reg())(hidden)
    score = Activation('sigmoid')(score)
    return Model(inputs=[x, label], outputs=score, name="discriminator")
Example 58
Project: ml-projects   Author: apozas   File: cGAN.py    GNU General Public License v3.0 5 votes vote down vote up
def gen(noise_dim, image_dim, label_dim, layer_dim=1024, reg=lambda: l1_l2(1e-5, 1e-5)):
    """Conditional-GAN generator: (noise, label) -> (tanh image, label)."""
    z = Input(shape=(noise_dim,), name='generator_input')
    label = Input(shape=(label_dim,), name='generator_label')
    hidden = Concatenate(name='input_concatenation')([z, label])
    # Widening MLP trunk; LeakyReLU per GAN training trick #5.
    trunk = ((int(layer_dim / 4), 'generator_h1'),
             (int(layer_dim / 2), 'generator_h2'),
             (layer_dim, 'generator_h3'))
    for width, layer_name in trunk:
        hidden = Dense(width, name=layer_name, kernel_regularizer=reg())(hidden)
        hidden = LeakyReLU(0.2)(hidden)
    flat = Dense(np.prod(image_dim), name="generator_x_flat",
                 kernel_regularizer=reg())(hidden)
    flat = Activation('tanh')(flat)
    return Model(inputs=[z, label], outputs=[flat, label], name="generator")
Example 59
Project: deep_qa   Author: allenai   File: __init__.py    Apache License 2.0 5 votes vote down vote up
def set_regularization_params(encoder_type: str, params: Params):
    """
    This method takes regularization parameters that are specified in `params` and converts them
    into Keras regularization objects, modifying `params` to contain the correct keys for the given
    encoder_type.

    Currently, we only allow specifying a consistent regularization across all the weights of a
    layer.

    Returns:
        The same (mutated) `params` object.
    """
    l2_regularization = params.pop("l2_regularization", None)
    l1_regularization = params.pop("l1_regularization", None)

    def regularizer():
        # Robustness fix: when no regularization was requested, return None so
        # Keras simply skips the penalty instead of l1_l2() failing on None
        # factors; a single missing factor is coerced to 0.0.
        if l1_regularization is None and l2_regularization is None:
            return None
        return l1_l2(l1=l1_regularization or 0.0, l2=l2_regularization or 0.0)

    if encoder_type == 'cnn':
        # Regularization with the CNN encoder is complicated, so we'll just pass in the L1 and L2
        # values directly, and let the encoder deal with them.
        params["l1_regularization"] = l1_regularization
        params["l2_regularization"] = l2_regularization
    elif encoder_type == 'lstm':
        params["W_regularizer"] = regularizer()
        params["U_regularizer"] = regularizer()
        params["b_regularizer"] = regularizer()
    elif encoder_type == 'tree_lstm':
        params["W_regularizer"] = regularizer()
        params["U_regularizer"] = regularizer()
        params["V_regularizer"] = regularizer()
        params["b_regularizer"] = regularizer()
    return params


# The first item added here will be used as the default in some cases. 
Example 60
Project: deep_qa   Author: allenai   File: noisy_or.py    Apache License 2.0 5 votes vote down vote up
def build(self, input_shape):
    """Register the scalar noise parameter before the standard build step."""
    weight_kwargs = dict(
        shape=(),
        name=self.name + '_noise_param',
        initializer=self.param_init,
        regularizer=l1_l2(l2=0.001),
        constraint=self.noise_param_constraint,
        trainable=True,
    )
    self.noise_parameter = self.add_weight(**weight_kwargs)
    super(NoisyOr, self).build(input_shape)
Example 61
Project: scholar_project   Author: fengjiran   File: test.py    Apache License 2.0 5 votes vote down vote up
def discriminator_model():
    discirminator_layer = [Dense(units=1024,
                                 input_shape=(784,),
                                 kernel_regularizer=l1_l2(1e-5, 1e-5)),
                           #    BatchNormalization(),
                           LeakyReLU(0.2),
                           Dropout(0.5),
                           Dense(units=512,
                                 kernel_regularizer=l1_l2(1e-5, 1e-5)),
                           #    BatchNormalization(),
                           LeakyReLU(0.2),
                           Dropout(0.5),
                           Dense(units=256,
                                 kernel_regularizer=l1_l2(1e-5, 1e-5)),
                           #    BatchNormalization(),
                           LeakyReLU(0.2),
                           Dropout(0.5),
                           Dense(units=1,
                                 kernel_regularizer=l1_l2(1e-5, 1e-5),
                                 activation='sigmoid')]

    model = Sequential(discirminator_layer)
    print 'The discriminator_model summary:'
    print model.summary()

    return model 
Example 62
Project: GEM   Author: palash1992   File: sdne_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_encoder(node_num, d, K, n_units, nu1, nu2, activation_fn):
    """SDNE encoder with K Dense layers: K-1 hidden layers of the configured
    widths, then a d-dimensional embedding layer; each carries an elastic-net
    weight penalty (nu1 = L1, nu2 = L2)."""
    x = Input(shape=(node_num,))
    hidden = x
    for layer_idx in range(K - 1):
        hidden = Dense(n_units[layer_idx], activation=activation_fn,
                       W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(hidden)
    embedding = Dense(d, activation=activation_fn,
                      W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(hidden)
    return Model(input=x, output=embedding)
Example 63
Project: dynamicgem   Author: Sujit-O   File: dnn_utils.py    MIT License 4 votes vote down vote up
def get_lstm_encoder_v3(n_nodes, look_back, d,
                        n_units, activation_fn, nu1, nu2,
                        bias_reg, input_reg, recurr_reg,
                        ret_seq=True
                        ):
    """Stacked-LSTM encoder that emits a (1, d) embedding sequence.

    The recurrent stack collapses the input sequence to a vector, a Dense
    layer projects it to `d` dimensions, and a final Reshape restores a
    single-step sequence. `ret_seq` is accepted for interface parity but
    unused here.
    """
    model = Sequential()
    rnn_kwargs = dict(bias_regularizer=bias_reg,
                      kernel_regularizer=input_reg,
                      recurrent_regularizer=recurr_reg)
    model.add(LSTM(n_units[0],
                   input_shape=(look_back, n_nodes),
                   return_sequences=True,
                   **rnn_kwargs))
    for width in n_units[1:-1]:
        model.add(LSTM(width, return_sequences=True, **rnn_kwargs))
    if len(n_units) > 1:
        # Final recurrent layer collapses the time axis.
        model.add(LSTM(n_units[-1], return_sequences=False, **rnn_kwargs))
    model.add(Dense(d, activation=activation_fn,
                    W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2)))
    model.add(Reshape((1, d)))
    return model
Example 64
Project: dynamicgem   Author: Sujit-O   File: dnn_utils.py    MIT License 4 votes vote down vote up
def get_lstm_autoencoder_v2(encoder, decoder, d):
    """Chain an LSTM encoder and decoder into a single autoencoder model.

    Args:
        encoder: model mapping input sequences to an embedding.
        decoder: model mapping the embedding back to a reconstruction.
        d: unused here; kept for interface parity with sibling builders.

    Returns:
        A Model mapping the encoder's input to [reconstruction, embedding].
    """
    # Input shaped like the encoder's expected (time, features) sequence.
    x = Input(shape=(encoder.layers[0].input_shape[1], encoder.layers[0].input_shape[2]))
    # Fix: the original wrapped these calls in bare `except: pdb.set_trace()`,
    # which swallows every error and hangs non-interactive runs; let failures
    # propagate with their real traceback instead.
    y = encoder(x)
    x_hat = decoder(y)
    autoencoder = Model(input=x, output=[x_hat, y])
    return autoencoder


# def get_lstm_decoder_v3(n_nodes, look_back, d,
#                 n_units, activation_fn,
#                 nu1, nu2,
#                 bias_reg, input_reg, recurr_reg
#                 ):
#     K = len(n_units) + 1
#     # Input
#     y = Input(shape=(d,))
#     # Decoder layers
#     y_hat = [None] * (K + 1)
#     y_hat[K] = y
#     for i in range(K - 1, 0, -1):
#         y_hat[i] = Dense(n_units[i - 1],
#                          activation=LeakyReLU(),
#                          W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y_hat[i + 1])
#     y_hat[0] = Dense(n_nodes, activation=LeakyReLU(),
#                      W_regularizer=Reg.l1_l2(l1=nu1, l2=nu2))(y_hat[1])

#     # Output
#     x_hat = y_hat[0]  # decoder's output is also the actual output
#     # Decoder Model
#     decoder = Model(input=y, output=x_hat)

#     return decoder 
Example 65
Project: autonomio   Author: autonomio   File: mlp.py    MIT License 4 votes vote down vote up
def mlp(X, Y, para):
    """Build, compile and train a dense multilayer perceptron.

    Args:
        X, Y: training inputs and targets, passed straight to ``model.fit``.
        para: dict of hyperparameters — layer/neuron counts, activations,
            dropout, weight-regularization settings, optimizer, loss,
            epochs, batch size, verbosity.

    Returns:
        (model, out): the fitted Keras model and its ``fit`` history.
    """
    # `==`, not `is`: identity comparison against a string literal only
    # works by accident of CPython string interning and raises a
    # SyntaxWarning on modern interpreters.
    if para['w_regularizer'] == 'auto':
        para['w_regularizer'] = [para['layers']]

    # Per-layer L1/L2 strengths; index 0 is the input layer.
    l1, l2 = check_w_reg(0, para['w_regularizer'], para['w_reg_values'])

    model = Sequential()
    model.add(Dense(para['neuron_count'][0],
                    input_dim=para['dims'],
                    activation=para['activation'],
                    W_regularizer=l1_l2(l1=l1, l2=l2)))
    model.add(Dropout(para['dropout']))

    # Hidden layers (the first was added above).
    j = 1
    for i in range(para['layers'] - 1):
        l1, l2 = check_w_reg(j, para['w_regularizer'], para['w_reg_values'])
        model.add(Dense(para['neuron_count'][i + 1],
                        activation=para['activation'],
                        W_regularizer=l1_l2(l1=l1, l2=l2)))
        model.add(Dropout(para['dropout']))
        j += 1

    # Output layer with its own regularization strengths.
    l1, l2 = check_w_reg(para['layers'], para['w_regularizer'], para['w_reg_values'])
    model.add(Dense(para['neuron_last'],
                    activation=para['activation_out'],
                    W_regularizer=l1_l2(l1=l1, l2=l2)))
    model.compile(loss=para['loss'],
                  optimizer=para['optimizer'],
                  metrics=['accuracy'])

    # Brief pause so prior console output doesn't interleave with Keras'
    # progress bar when verbose.
    if para['verbose'] >= 1:
        time.sleep(0.1)

    out = model.fit(X, Y, validation_split=para['validation_split'],
                    epochs=para['epoch'],
                    verbose=para['verbose'],
                    batch_size=para['batch_size'])

    return model, out
Example 66
Project: keras-image-captioning   Author: danieljl   File: models.py    MIT License 4 votes vote down vote up
def __init__(self,
                 learning_rate=None,
                 vocab_size=None,
                 embedding_size=None,
                 rnn_output_size=None,
                 dropout_rate=None,
                 bidirectional_rnn=None,
                 rnn_type=None,
                 rnn_layers=None,
                 l1_reg=None,
                 l2_reg=None,
                 initializer=None,
                 word_vector_init=None):
        """
        If an arg is None, it will get its value from config.active_config.
        """
        def _default(value, attr):
            # Fall back to the active config only when the caller passed
            # None, so falsy-but-valid values (dropout_rate=0.0, l1_reg=0)
            # are honored — the original `value or active_config().x`
            # pattern silently discarded them, contradicting the docstring.
            # Lookup stays lazy: active_config() is only touched when the
            # argument was omitted.
            return getattr(active_config(), attr) if value is None else value

        self._learning_rate = _default(learning_rate, 'learning_rate')
        self._vocab_size = _default(vocab_size, 'vocab_size')
        self._embedding_size = _default(embedding_size, 'embedding_size')
        self._rnn_output_size = _default(rnn_output_size, 'rnn_output_size')
        self._dropout_rate = _default(dropout_rate, 'dropout_rate')
        self._rnn_type = _default(rnn_type, 'rnn_type')
        self._rnn_layers = _default(rnn_layers, 'rnn_layers')
        self._word_vector_init = _default(word_vector_init, 'word_vector_init')

        self._initializer = _default(initializer, 'initializer')
        if self._initializer == 'vinyals_uniform':
            # Uniform init in [-0.08, 0.08] as in Vinyals et al.'s setup.
            self._initializer = RandomUniform(-0.08, 0.08)

        if bidirectional_rnn is None:
            self._bidirectional_rnn = active_config().bidirectional_rnn
        else:
            self._bidirectional_rnn = bidirectional_rnn

        l1_reg = _default(l1_reg, 'l1_reg')
        l2_reg = _default(l2_reg, 'l2_reg')
        self._regularizer = l1_l2(l1_reg, l2_reg)

        # Built lazily elsewhere.
        self._keras_model = None

        # Validate only after all defaults have been resolved.
        if self._vocab_size is None:
            raise ValueError('config.active_config().vocab_size cannot be '
                             'None! You should check your config or you can '
                             'explicitly pass the vocab_size argument.')

        if self._rnn_type not in ('lstm', 'gru'):
            raise ValueError('rnn_type must be either "lstm" or "gru"!')

        if self._rnn_layers < 1:
            raise ValueError('rnn_layers must be >= 1!')

        if self._word_vector_init is not None and self._embedding_size != 300:
            raise ValueError('If word_vector_init is not None, embedding_size '
                             'must be 300')
Example 67
Project: CNNArt   Author: thomaskuestner   File: network.py    Apache License 2.0 4 votes vote down vote up
def fCreateConv3D_InceptionBlock(filters):
    """Return a builder for a 3D Inception-style block.

    The returned function applies four parallel branches to its input —
    a 1x1x1 conv, a 1x1x1 bottleneck followed by a 3x3x3 conv, a 1x1x1
    bottleneck followed by a 5x5x5 conv, and a max-pool with a 1x1x1
    projection — and concatenates the results along axis 1.
    """
    l1_reg = 0
    l2_reg = 1e-6

    def _conv(tensor, n_filters, size):
        # Every branch shares stride 1, 'same' padding and L1/L2 weight
        # regularization; only the filter count and kernel size vary.
        return Conv3D(filters=n_filters,
                      kernel_size=size,
                      strides=(1, 1, 1),
                      padding='same',
                      kernel_regularizer=l1_l2(l1_reg, l2_reg))(tensor)

    def f(inputs):
        # Branch 1: plain 1x1x1 convolution.
        branch_1 = LeakyReLU()(_conv(inputs, filters[0], (1, 1, 1)))

        # Branch 2: 1x1x1 bottleneck (no activation) then a 3x3x3 conv.
        branch_3 = _conv(inputs, filters[0], (1, 1, 1))
        branch_3 = LeakyReLU()(_conv(branch_3, filters[2], (3, 3, 3)))

        # Branch 3: 1x1x1 bottleneck (no activation) then a 5x5x5 conv.
        branch_5 = _conv(inputs, filters[0], (1, 1, 1))
        branch_5 = LeakyReLU()(_conv(branch_5, filters[1], (5, 5, 5)))

        # Branch 4: stride-1 max pooling followed by a 1x1x1 projection.
        pooled = MaxPooling3D(pool_size=(3, 3, 3),
                              strides=(1, 1, 1),
                              padding='same')(inputs)
        branch_pool = LeakyReLU()(_conv(pooled, filters[0], (1, 1, 1)))

        # Join all branches; axis=1 presumably targets the channel axis of
        # channels-first data — confirm against the model's data format.
        return concatenate([branch_1, branch_3, branch_5, branch_pool],
                           axis=1)

    return f
Example 68
Project: CNNArt   Author: thomaskuestner   File: MSnetworks.py    Apache License 2.0 4 votes vote down vote up
def fCreateModel_FCN_MultiFM(patchSize, dr_rate=0.0, iPReLU=0,l1_reg=0, l2_reg=1e-6):
    """Fully-convolutional two-class classifier with multi-level feature maps.

    Total params: 1,420,549.  The dense layer is replaced by a
    convolutional layer with filters=2 for the two classes.  The feature
    map (FM) from the third down-scaled convolutional layer is upsampled
    by deconvolution and added to the FM from the second down-scaled
    layer; the combined FM goes through another filters=2 convolution.
    The two predictions are averaged as the final result.

    Args:
        patchSize: three spatial patch dimensions; the input is shaped
            (1, patchSize[0], patchSize[1], patchSize[2]) — channels-first.
        dr_rate: dropout rate applied before each 1x1x1 classifier conv.
        iPReLU: activation selector forwarded to the down-conv blocks.
        l1_reg, l2_reg: L1/L2 weight-regularization strengths.

    Returns:
        Uncompiled Keras Model mapping the patch to softmax class scores.
    """
    Strides = fgetStrides()
    kernelnumber = fgetKernelNumber()
    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))

    # Three conv + down-conv stages; `_keras_shape[1]` reads the channel
    # count of the previous output (channels-first layout).
    after_Conv_1 = fCreateVNet_Block(inp, kernelnumber[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_1 = fCreateVNet_DownConv_Block(after_Conv_1, after_Conv_1._keras_shape[1], Strides[0],
                                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    after_Conv_2 = fCreateVNet_Block(after_DownConv_1, kernelnumber[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_2 = fCreateVNet_DownConv_Block(after_Conv_2, after_Conv_2._keras_shape[1], Strides[1],
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    after_Conv_3 = fCreateVNet_Block(after_DownConv_2, kernelnumber[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_3 = fCreateVNet_DownConv_Block(after_Conv_3, after_Conv_3._keras_shape[1], Strides[2],
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    # Prediction head 1: fully convolutional over the FM from the deepest
    # level, reduced to one score per class by global average pooling.
    dropout_out1 = Dropout(dr_rate)(after_DownConv_3)
    fclayer1 = Conv3D(2,
                       kernel_size=(1,1,1),
                       kernel_initializer='he_normal',
                       weights=None,
                       padding='valid',
                       strides=(1, 1, 1),
                       kernel_regularizer=l1_l2(l1_reg, l2_reg),
                       )(dropout_out1)
    fclayer1 = GlobalAveragePooling3D()(fclayer1)
    
    # Prediction head 2: upsample the deepest FM and add it to the FM from
    # level 2.  NOTE(review): filters=97 appears hard-coded to match
    # after_DownConv_2's channel count — confirm against fgetKernelNumber().
    UpedFM_Level3 = Conv3DTranspose(filters=97, kernel_size=(3,3,1), strides=(2,2,1), padding='same')(after_DownConv_3)
    conbined_FM_Level23 = add([UpedFM_Level3, after_DownConv_2])    
    fclayer2 = Conv3D(2,
                       kernel_size=(1,1,1),
                       kernel_initializer='he_normal',
                       weights=None,
                       padding='valid',
                       strides=(1, 1, 1),
                       kernel_regularizer=l1_l2(l1_reg, l2_reg),
                       )(conbined_FM_Level23)
    fclayer2 = GlobalAveragePooling3D()(fclayer2)

    # Combine the two predictions by averaging, then softmax over classes.
    fcl_aver = average([fclayer1, fclayer2])
    predict = Activation('softmax')(fcl_aver)
    cnn_fcl_msfm = Model(inputs=inp, outputs=predict)
    return cnn_fcl_msfm 
Example 69
Project: X-CNN   Author: ernstlab   File: train_X-SCNN.py    MIT License 4 votes vote down vote up
def make_model(args, input_length, num_tracks, encoder_1=None, encoder_2=None):
	"""Build the siamese classifier, optionally reusing pre-trained encoders.

	Args:
		args: parsed options (pad, filter_len, shared_weights, dense_kernel,
			regularizer, bias, extra_dense, dense_dropout).
		input_length: window length of each input.
		num_tracks: number of tracks per window.
		encoder_1, encoder_2: optional pre-trained encoders placed in front
			of the left/right convolutional subnetworks.

	Returns:
		Keras Model mapping [input_left, input_right] to a sigmoid score.
	"""
	# Padding, when enabled, adds one filter length on each side.
	if args.pad:
		to_pad = 2*args.filter_len
	else:
		to_pad = 0
	# NOTE: the original created an extra `chip_input = Input(...)` here
	# that was never connected to the model; the dead graph input has been
	# removed.

	if args.shared_weights:
		# One subnetwork applied to both sides (shared weights).
		conv_model = make_subnetwork(args, input_length, num_tracks, encoder_1)

		# Create left and right inputs
		input_left = Input(shape=(input_length + to_pad, num_tracks))
		input_right = Input(shape=(input_length + to_pad, num_tracks))

		# Create outputs
		output_left = conv_model(input_left)
		output_right = conv_model(input_right)
	else:
		# Independent subnetworks for the two sides.
		conv_model1 = make_subnetwork(args, input_length, num_tracks, encoder_1)
		conv_model2 = make_subnetwork(args, input_length, num_tracks, encoder_2)

		# Create left and right inputs
		input_left = Input(shape=(input_length + to_pad, num_tracks))
		input_right = Input(shape=(input_length + to_pad, num_tracks))

		# Create outputs
		output_left = conv_model1(input_left)
		output_right = conv_model2(input_right)

	# Merge the two sides, then classify through one (or two) dense layers
	# with dropout and a final sigmoid unit.
	concatenated = concatenate([output_left, output_right])
	out = Dense(args.dense_kernel, 
		activation='relu', 
		kernel_regularizer=regularizers.l1_l2(args.regularizer),
		use_bias=args.bias)(concatenated)
	if args.extra_dense:
		out = Dense(args.dense_kernel,
			activation='relu',
			kernel_regularizer=regularizers.l1_l2(args.regularizer),
			use_bias=args.bias)(out)
	out = Dropout(args.dense_dropout)(out)
	out = Dense(1, 
		activation=K.sigmoid,
		kernel_regularizer=regularizers.l1_l2(args.regularizer),
		use_bias=args.bias)(out)

	# Make the final classifier and return it
	classification_model = Model([input_left, input_right], out)
	return classification_model
Example 70
Project: X-CNN   Author: ernstlab   File: train_X-SCNN.py    MIT License 4 votes vote down vote up
def train_autoencoder(args, data_pos, pos_idxs, side='both'):
	"""Train a 1x1-convolution autoencoder over windows; return the encoder.

	The autoencoder reduces the dimensionality of the data by finding the
	most common patterns in ChIP-seq tracks.  It does not look at spatial
	patterns: each window position is encoded independently by 1x1
	convolutions, so it works similarly to PCA, joining strongly
	correlated tracks into the same filter.

	Args:
		args: parsed options (autoencoder, regularizer, bias, test,
			ae_epochs, verbose).
		data_pos: array shaped (num_samples, ?, num_tracks, input_length).
		pos_idxs: sample indices forwarded to the batch generator.
		side: which side(s) of the pairs to draw samples from.

	Returns:
		The encoder Model (input window -> encoded representation).
	"""
	_, _, num_tracks, input_length = np.shape(data_pos)

	# Input placeholder for one window.
	input_data = Input(shape=(input_length, num_tracks))
	# "encoded" is the encoded representation of the input
	encoded = Conv1D(
		filters=args.autoencoder,
		kernel_regularizer=regularizers.l1_l2(args.regularizer),
		kernel_size=1,
		activation='relu',
		use_bias=args.bias)(input_data)
	# "decoded" is the lossy reconstruction of the input
	decoded = Conv1D(
		filters=num_tracks,
		kernel_regularizer=regularizers.l1_l2(args.regularizer),
		kernel_size=1,
		activation='relu',
		use_bias=args.bias)(encoded)

	# Full autoencoder (input -> reconstruction) and the encoder half.
	autoencoder = Model(inputs=input_data, outputs=decoded)
	encoder = Model(inputs=input_data, outputs=encoded)
	# compile the model using adadelta as an optimizer for best performance
	autoencoder.compile(optimizer='adadelta', loss='mean_squared_logarithmic_error')
	if args.test:
		# Smoke-test mode: skip training entirely.
		args.ae_epochs = 0
	# NOTE(review): the original computed `num_steps` here (doubled when
	# side == 'both') and a test-mode `num_samples = 1000`, but neither was
	# ever used — steps_per_epoch below may have been meant to use
	# num_steps.  The dead computation has been removed.

	# Train the model using a generator
	sys.stdout.write("*** Training autoencoder ***\n")
	autoencoder.fit_generator(
		generate_samples(args,
			data_pos=data_pos,
			pos_idxs=pos_idxs,
			input_length=input_length,
			ret_single=True,
			side=side),
		steps_per_epoch=min(20000,len(data_pos)),
		epochs=args.ae_epochs,
		verbose=int(args.verbose))

	return encoder
Example 71
Project: scholar_project   Author: fengjiran   File: simple_gan_mnist.py    Apache License 2.0 4 votes vote down vote up
def __init__(self,
                 latent_dim=100,
                 hidden_dim=1024,
                 image_shape=(28, 28),
                 batch_size=128,
                 epochs=100):
        """Network."""
        self.latent_dim = latent_dim
        self.hidden_dim = hidden_dim
        self.image_shape = image_shape
        self.batch_size = batch_size
        self.epochs = epochs

        generator_layer = [Dense(units=int(self.hidden_dim / 4),
                                 input_dim=int(self.latent_dim),
                                 kernel_regularizer=l1(1e-5)),
                           BatchNormalization(),
                           LeakyReLU(0.2),
                           Dense(units=int(self.hidden_dim / 2),
                                 kernel_regularizer=l1(1e-5)),
                           BatchNormalization(),
                           LeakyReLU(0.2),
                           Dense(units=self.hidden_dim,
                                 kernel_regularizer=l1(1e-5)),
                           BatchNormalization(),
                           LeakyReLU(0.2),
                           Dense(units=np.prod(self.image_shape),
                                 kernel_regularizer=l1(1e-5),
                                 activation='tanh')]

        discirminator_layer = [Dense(units=self.hidden_dim,
                                     input_dim=np.prod(self.image_shape),
                                     kernel_regularizer=l1_l2(1e-5, 1e-5)),
                               #    BatchNormalization(),
                               LeakyReLU(0.2),
                               Dropout(0.5),
                               Dense(units=int(self.hidden_dim / 2),
                                     kernel_regularizer=l1_l2(1e-5, 1e-5)),
                               #    BatchNormalization(),
                               LeakyReLU(0.2),
                               Dropout(0.5),
                               Dense(units=int(self.hidden_dim / 4),
                                     kernel_regularizer=l1_l2(1e-5, 1e-5)),
                               #    BatchNormalization(),
                               LeakyReLU(0.2),
                               Dropout(0.5),
                               Dense(units=1,
                                     kernel_regularizer=l1_l2(1e-5, 1e-5),
                                     activation='sigmoid')]

        self.generator = Sequential(generator_layer)
        self.discriminator = Sequential(discirminator_layer)

        print self.generator.summary()
        print self.discriminator.summary()

        self.gan_model = Sequential()
        self.gan_model.add(self.generator)
        self.discriminator.trainable = False
        self.gan_model.add(self.discriminator)