Python keras.regularizers.l2() Examples

The following are code examples showing how to use keras.regularizers.l2(), drawn from open source Python projects.
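As a quick orientation before the project examples, here is a minimal, self-contained sketch of the call; the layer sizes and the 0.01 penalty factor are arbitrary choices for illustration:

from keras import regularizers
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
# Penalize the squared magnitude of this layer's weights with factor 0.01
model.add(Dense(64, activation='relu', input_shape=(20,),
                kernel_regularizer=regularizers.l2(0.01)))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')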

Example 1
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0
def weather_l2(hidden_nums=100, l2=0.01):
    input_img = Input(shape=(37,))
    hn = Dense(hidden_nums, activation='relu')(input_img)
    hn = Dense(hidden_nums, activation='relu',
               kernel_regularizer=regularizers.l2(l2))(hn)
    out_u = Dense(37, activation='sigmoid',
                  name='ae_part')(hn)
    out_sig = Dense(37, activation='linear',
                    name='pred_part')(hn)
    out_both = concatenate([out_u, out_sig], axis=1, name='concatenate')

    # weather_model = Model(input_img, outputs=[out_ae, out_pred])
    mve_model = Model(input_img, outputs=[out_both])
    mve_model.compile(optimizer='adam', loss=mve_loss, loss_weights=[1.])

    return mve_model
Example 2
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_import(self):
        # Conv 1D
        model = Sequential()
        model.add(Conv1D(32, 10, kernel_regularizer=regularizers.l2(0.01),
                         bias_regularizer=regularizers.l2(0.01),
                         activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                         bias_constraint='max_norm', activation='relu', input_shape=(10, 1)))
        model.build()
        self.keras_param_test(model, 1, 9)
        # Conv 2D
        model = Sequential()
        model.add(Conv2D(32, (3, 3), kernel_regularizer=regularizers.l2(0.01),
                         bias_regularizer=regularizers.l2(0.01),
                         activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                         bias_constraint='max_norm', activation='relu', input_shape=(16, 16, 1)))
        model.build()
        self.keras_param_test(model, 1, 13)
        # Conv 3D
        model = Sequential()
        model.add(Conv3D(32, (3, 3, 3), kernel_regularizer=regularizers.l2(0.01),
                         bias_regularizer=regularizers.l2(0.01),
                         activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                         bias_constraint='max_norm', activation='relu', input_shape=(16, 16, 16, 1)))
        model.build()
        self.keras_param_test(model, 1, 17) 
Example 3
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_import(self):
        # Conv 1D
        model = Sequential()
        model.add(LocallyConnected1D(32, 3, kernel_regularizer=regularizers.l2(0.01),
                                     bias_regularizer=regularizers.l2(0.01),
                                     activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                                     bias_constraint='max_norm', activation='relu', input_shape=(16, 10)))
        model.build()
        self.keras_param_test(model, 1, 12)
        # Conv 2D
        model = Sequential()
        model.add(LocallyConnected2D(32, (3, 3), kernel_regularizer=regularizers.l2(0.01),
                                     bias_regularizer=regularizers.l2(0.01),
                                     activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                                     bias_constraint='max_norm', activation='relu', input_shape=(16, 16, 10)))
        model.build()
        self.keras_param_test(model, 1, 14)


# ********** Recurrent Layers ********** 
Example 4
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_import(self):
        model = Sequential()
        model.add(LSTM(64, return_sequences=True, input_shape=(10, 64)))
        model.add(SimpleRNN(32, return_sequences=True))
        model.add(GRU(10, kernel_regularizer=regularizers.l2(0.01),
                      bias_regularizer=regularizers.l2(0.01), recurrent_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                      bias_constraint='max_norm', recurrent_constraint='max_norm'))
        model.build()
        json_string = Model.to_json(model)
        with open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'w') as out:
            json.dump(json.loads(json_string), out, indent=4)
        sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'r')
        response = self.client.post(reverse('keras-import'), {'file': sample_file})
        response = json.loads(response.content)
        layerId = sorted(response['net'].keys())
        self.assertEqual(response['result'], 'success')
        self.assertGreaterEqual(len(response['net'][layerId[1]]['params']), 7)
        self.assertGreaterEqual(len(response['net'][layerId[3]]['params']), 7)
        self.assertGreaterEqual(len(response['net'][layerId[6]]['params']), 7)


# ********** Embedding Layers ********** 
Example 5
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_import(self):
        model = Sequential()
        model.add(BatchNormalization(center=True, scale=True, beta_regularizer=regularizers.l2(0.01),
                                     gamma_regularizer=regularizers.l2(0.01),
                                     beta_constraint='max_norm', gamma_constraint='max_norm',
                                     input_shape=(16, 10)))
        model.build()
        json_string = Model.to_json(model)
        with open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'w') as out:
            json.dump(json.loads(json_string), out, indent=4)
        sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'r')
        response = self.client.post(reverse('keras-import'), {'file': sample_file})
        response = json.loads(response.content)
        layerId = sorted(response['net'].keys())
        self.assertEqual(response['result'], 'success')
        self.assertEqual(response['net'][layerId[0]]['info']['type'], 'Scale')
        self.assertEqual(response['net'][layerId[1]]['info']['type'], 'BatchNorm')


# ********** Noise Layers ********** 
Example 6
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['BatchNorm'], 'l2': net['Scale']}
        net['l0']['connection']['output'].append('l1')
        # Test 1
        inp = data(net['l0'], '', 'l0')['l0']
        temp = batch_norm(net['l1'], [inp], 'l1', 'l2', net['l2'])
        model = Model(inp, temp['l2'])
        self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization')
        # Test 2
        net['l2']['params']['filler'] = 'VarianceScaling'
        net['l2']['params']['bias_filler'] = 'VarianceScaling'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = batch_norm(net['l1'], [inp], 'l1', 'l2', net['l2'])
        model = Model(inp, temp['l2'])
        self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization')
        # Test 3
        inp = data(net['l0'], '', 'l0')['l0']
        temp = batch_norm(net['l1'], [inp], 'l1', 'l0', net['l0'])
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization') 
Example 7
Project: ImageQA   Author: codedecde   File: wbw_att_imageqa_model2.py    MIT License
def get_params():
    parser = argparse.ArgumentParser(description='Short sample app')
    parser.add_argument('-lstm', action="store", default=150, dest="lstm_units", type=int)
    parser.add_argument('-xmaxlen', action="store", default=22, dest="xmaxlen", type=int)
    parser.add_argument('-dropout', action="store", default=0.1, dest="dropout", type=float)
    parser.add_argument('-epochs', action="store", default=20, dest="epochs", type=int)
    parser.add_argument('-batch', action="store", default=32, dest="batch_size", type=int)
    parser.add_argument('-lr', action="store", default=0.003, dest="lr", type=float)
    parser.add_argument('-l2', action="store", default=0.0003, dest="l2", type=float)
    # Note: argparse's type=bool treats any non-empty string (even "False") as True;
    # action="store_true" is the usual idiom for boolean flags.
    parser.add_argument('-load', action="store", default=False, dest="load_save", type=bool)
    parser.add_argument('-verbose', action="store", default=False, dest="verbose", type=bool)
    parser.add_argument('-local', action="store", default=False, dest="local", type=bool)
    opts = parser.parse_args(sys.argv[1:])
    print("###################################")
    print("LSTM Output Dimension : ", opts.lstm_units)
    print("Max Question Length   : ", opts.xmaxlen)
    print("Dropout               : ", opts.dropout)
    print("Num Epochs            : ", opts.epochs)
    print("Batch Size            : ", opts.batch_size)
    print("Learning Rate         : ", opts.lr)
    print("Regularization Factor : ", opts.l2)
    return opts 
Example 8
Project: ImageQA   Author: codedecde   File: wbw_att_imageqa_model.py    MIT License
def get_params():
    parser = argparse.ArgumentParser(description='Short sample app')
    parser.add_argument('-lstm', action="store", default=150, dest="lstm_units", type=int)
    parser.add_argument('-xmaxlen', action="store", default=22, dest="xmaxlen", type=int)
    parser.add_argument('-dropout', action="store", default=0.1, dest="dropout", type=float)
    parser.add_argument('-epochs', action="store", default=20, dest="epochs", type=int)
    parser.add_argument('-batch', action="store", default=32, dest="batch_size", type=int)
    parser.add_argument('-lr', action="store", default=0.003, dest="lr", type=float)
    parser.add_argument('-l2', action="store", default=0.0003, dest="l2", type=float)
    # Note: argparse's type=bool treats any non-empty string (even "False") as True;
    # action="store_true" is the usual idiom for boolean flags.
    parser.add_argument('-load', action="store", default=False, dest="load_save", type=bool)
    parser.add_argument('-verbose', action="store", default=False, dest="verbose", type=bool)
    parser.add_argument('-local', action="store", default=False, dest="local", type=bool)
    opts = parser.parse_args(sys.argv[1:])
    print("###################################")
    print("LSTM Output Dimension : ", opts.lstm_units)
    print("Max Question Length   : ", opts.xmaxlen)
    print("Dropout               : ", opts.dropout)
    print("Num Epochs            : ", opts.epochs)
    print("Batch Size            : ", opts.batch_size)
    print("Learning Rate         : ", opts.lr)
    print("Regularization Factor : ", opts.l2)
    return opts 
Example 9
Project: Mnist-practise   Author: hsmyy   File: mnist.py    MIT License
def CNN2(X_train, Y_train, X_test, Y_test, activation='relu'):
    batch_size = 64
    nb_classes = 10
    nb_epoch = 20
    np.random.seed(1337)
    X_train = X_train.reshape(60000,1, 28, 28)
    X_test = X_test.reshape(10000,1, 28, 28)
    X_train = X_train.astype("float32")
    X_test = X_test.astype("float32")
    #X_train /= 255
    #X_test /= 255
    print(X_train.shape, 'train samples')
    print(Y_train.shape, 'train labels')
    print(X_test.shape, 'test samples')

    Y_train = np_utils.to_categorical(Y_train, nb_classes)
    Y_test = np_utils.to_categorical(Y_test, nb_classes)

    model = Sequential()
    model.add(Convolution2D(4, 1, 5, 5, border_mode='valid'))
    model.add(Activation(activation))
    model.add(Convolution2D(8, 4, 3, 3, border_mode='valid'))
    model.add(Activation(activation))
    model.add(MaxPooling2D(poolsize=(2, 2)))
    model.add(Convolution2D(16, 8, 3, 3, border_mode='valid'))
    model.add(Activation(activation))
    model.add(MaxPooling2D(poolsize=(2,2)))
    model.add(Flatten())
    model.add(Dense(16 * 4 * 4, 128, init='normal'))
    model.add(Activation(activation))
    model.add(Dense(128, nb_classes, init='normal'))
    model.add(Activation('softmax'))
    sgd = SGD(l2=0.0, lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, shuffle=True, verbose=1, show_accuracy=True, validation_split=0.2)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size)
    print('Test score:', score) 
Example 10
Project: wdcnn_bearning_fault_diagnosis-master   Author: yfshich   File: main.py    MIT License
def wdcnn(filters, kernel_size, strides, conv_padding, pool_padding, pool_size, BatchNormal):
    """A WDCNN layer block.

    :param filters: number of convolution kernels, integer
    :param kernel_size: convolution kernel size, integer
    :param strides: stride, integer
    :param conv_padding: 'same' or 'valid'
    :param pool_padding: 'same' or 'valid'
    :param pool_size: pooling kernel size, integer
    :param BatchNormal: whether to apply batch normalization, boolean
    :return: model
    """
    # `model` is assumed to be a module-level Sequential defined elsewhere in main.py
    model.add(Conv1D(filters=filters, kernel_size=kernel_size, strides=strides,
                     padding=conv_padding, kernel_regularizer=l2(1e-4)))
    if BatchNormal:
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=pool_size, padding=pool_padding))
    return model

# Instantiate the Sequential model
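The wdcnn helper above appends layers to a model that lives at module scope. A minimal sketch of the assumed surrounding setup; the first-layer hyperparameters are illustrative placeholders, not taken from the project:

model = Sequential()
# A wide first convolution is typical for WDCNN; these values are placeholders
model.add(Conv1D(filters=16, kernel_size=64, strides=16, padding='same',
                 kernel_regularizer=l2(1e-4), input_shape=(2048, 1)))
model = wdcnn(filters=32, kernel_size=3, strides=1, conv_padding='same',
              pool_padding='valid', pool_size=2, BatchNormal=True)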
Example 11
Project: cnn-levelset   Author: wiseodd   File: localizer.py    MIT License
def __init__(self, model_path=None):
        if model_path is not None:
            self.model = self.load_model(model_path)
        else:
            # VGG16 last conv features
            inputs = Input(shape=(7, 7, 512))
            x = Convolution2D(128, 1, 1)(inputs)
            x = Flatten()(x)

            # Cls head
            h_cls = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
            h_cls = Dropout(p=0.5)(h_cls)
            cls_head = Dense(20, activation='softmax', name='cls')(h_cls)

            # Reg head
            h_reg = Dense(256, activation='relu', W_regularizer=l2(l=0.01))(x)
            h_reg = Dropout(p=0.5)(h_reg)
            reg_head = Dense(4, activation='linear', name='reg')(h_reg)

            # Joint model
            self.model = Model(input=inputs, output=[cls_head, reg_head]) 
Example 12
Project: ismir2018-artist   Author: jongpillee   File: run_gtzan_dnn.py    MIT License
def __init__(self):
		self.train_size = 443  # 15244
		self.valid_size = 197  # 1529
		self.test_size = 290  # 4332
		self.num_tags = 10
		self.batch_size = 16
		self.nb_epoch = 1000
		self.lr = [0.01] #0.03 #0.06 # 0.01
		self.lrdecay = 1e-6 # e-6
		self.gpu_use = 1 # 1
		self.trial = 10
		self.num_neurons = [1024]
		self.dense_num_min = 0
		self.dense_num_max = 1
		self.activ = 'relu'
		self.regul = 'l2(1e-7)' # e-7
		self.init = 'he_uniform'
		self.optimizer = 'sgd' # adam, sgd # model 7 adam
		self.patience = 8
		self.mean = 0
		self.std = 0
		self.calculateNorm = 1 
Example 13
Project: musical_genres_classification   Author: shaoeric   File: pre_process_mfcc.py    MIT License
def get_model():
    input_layer = Input(shape=(1, 1290, 40), name='mfcc_input')
    x = GaussianNoise(0.01)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu',kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01),data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='mfcc_output', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.2))(x)
    model = Model(inputs=input_layer, outputs=x)
    print(model.summary())
    return model 
Example 14
Project: musical_genres_classification   Author: shaoeric   File: final_model.py    MIT License
def get_mfcc_model():
    input_layer = Input(shape=(1, 1290, 40), name='mfcc_input')
    x = GaussianNoise(noise)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu',kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01),data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Flatten(name='mfcc_flatten')(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='mfcc_output', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.2))(x)
    model = Model(inputs=input_layer, outputs=x)

    return model 
Example 15
Project: DeepIC   Author: HeavenDuke   File: NaiveSPPNet.py    MIT License
def NaiveSPPNet(class_num):
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, border_mode = 'same', input_shape = (None, None, 3)))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, border_mode = 'same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, border_mode = 'same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Dropout(0.25))

    model.add(SpatialPyramidPooling([1, 2, 4]))
    model.add(Dense(units = class_num, kernel_regularizer = l2(l = 0.01)))
    model.add(Activation(activation = "sigmoid"))
    model.compile(optimizer = RMSprop(lr = 1e-3), loss = "binary_crossentropy", metrics = ['accuracy'])
    return model 
Example 16
Project: DeepIC   Author: HeavenDuke   File: ResSppNet.py    MIT License
def _conv_bn_relu(**conv_params):
    """Helper to build a conv -> BN -> relu block
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(regularizer_rate))

    def f(input):
        conv = Conv2D(filters = filters, kernel_size = kernel_size,
                      strides = strides, padding = padding,
                      kernel_initializer = kernel_initializer,
                      kernel_regularizer = kernel_regularizer)(input)
        return _bn_relu(conv)

    return f 
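Since _conv_bn_relu returns a closure, configuration and application happen in two calls. A usage sketch, assuming _bn_relu and regularizer_rate are defined elsewhere in ResSppNet.py; the tensor shape and filter values are illustrative:

inputs = Input(shape=(224, 224, 3))
# First call fixes the hyperparameters; the second applies the block to a tensor
x = _conv_bn_relu(filters=64, kernel_size=(7, 7), strides=(2, 2))(inputs)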
Example 17
Project: DeepIC   Author: HeavenDuke   File: ResSppNet.py    MIT License
def _bn_relu_conv(**conv_params):
    """Helper to build a BN -> relu -> conv block.
    This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(regularizer_rate))

    def f(input):
        activation = _bn_relu(input)
        return Conv2D(filters = filters, kernel_size = kernel_size,
                      strides = strides, padding = padding,
                      kernel_initializer = kernel_initializer,
                      kernel_regularizer = kernel_regularizer)(activation)

    return f 
Example 18
Project: DeepIC   Author: HeavenDuke   File: ResSppNet.py    MIT License
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters = residual_shape[CHANNEL_AXIS],
                          kernel_size = (1, 1),
                          strides = (stride_width, stride_height),
                          padding = "valid",
                          kernel_initializer = "he_normal",
                          kernel_regularizer = l2(regularizer_rate))(input)

    return add([shortcut, residual]) 
Example 19
Project: DeepIC   Author: HeavenDuke   File: ResSppNet.py    MIT License
def basic_block(filters, init_strides = (1, 1), is_first_block_of_first_layer = False):
    """Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.
    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
    """

    def f(input):

        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv1 = Conv2D(filters = filters, kernel_size = (3, 3),
                           strides = init_strides,
                           padding = "same",
                           kernel_initializer = "he_normal",
                           kernel_regularizer = l2(0.01))(input)
        else:
            conv1 = _bn_relu_conv(filters = filters, kernel_size = (3, 3),
                                  strides = init_strides)(input)

        residual = _bn_relu_conv(filters = filters, kernel_size = (3, 3))(conv1)
        return _shortcut(input, residual)

    return f 
Example 20
Project: Keras-DualPathNetworks   Author: titu1994   File: dual_path_network.py    Apache License 2.0
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the DPN
    Args:
        input: input tensor
        initial_conv_filters: number of filters for initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x 
Example 21
Project: Keras-DualPathNetworks   Author: titu1994   File: dual_path_network.py    Apache License 2.0
def _bn_relu_conv_block(input, filters, kernel=(3, 3), stride=(1, 1), weight_decay=5e-4):
    ''' Adds a Batchnorm-Relu-Conv block for DPN
    Args:
        input: input tensor
        filters: number of output filters
        kernel: convolution kernel size
        stride: stride of convolution
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(filters, kernel, padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=stride)(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return x 
Example 22
Project: EUSIPCO2017   Author: ronggong   File: keras_cnn_jordi_laosheng.py    GNU Affero General Public License v3.0
def createModel(model, reshape_dim, input_dim, num_filter, height_filter, width_filter, filter_density, pool_n_row, pool_n_col, dropout):
    model.add(Reshape(reshape_dim, input_shape=input_dim))
    model.add(
        Convolution2D(num_filter * filter_density, height_filter, width_filter, border_mode='valid', input_shape=reshape_dim, dim_ordering='th',
                      init='he_uniform', W_regularizer=l2(1e-5)))
    model.add(ELU())

    if pool_n_row == 'all' and pool_n_col == 'all':
        model.add(MaxPooling2D(pool_size=(model.output_shape[2], model.output_shape[3]), border_mode='valid',
                                 dim_ordering='th'))
    elif pool_n_row == 'all' and pool_n_col != 'all':
        model.add(MaxPooling2D(pool_size=(model.output_shape[2], pool_n_col), border_mode='valid',
                                 dim_ordering='th'))
    elif pool_n_row != 'all' and pool_n_col == 'all':
        model.add(MaxPooling2D(pool_size=(pool_n_row, model.output_shape[3]), border_mode='valid',
                                 dim_ordering='th'))
    else:
        model.add(MaxPooling2D(pool_size=(pool_n_row, pool_n_col), border_mode='valid', dim_ordering='th'))
    model.add(Dropout(dropout))
    model.add(Flatten())
    return model 
Example 23
Project: EUSIPCO2017   Author: ronggong   File: keras_cnn_jordi_danAll.py    GNU Affero General Public License v3.0
def createModel(model, reshape_dim, input_dim, num_filter, height_filter, width_filter, filter_density, pool_n_row,
                pool_n_col, dropout):
    model.add(Reshape(reshape_dim, input_shape=input_dim))
    model.add(
        Convolution2D(num_filter * filter_density, height_filter, width_filter, border_mode='valid',
                      input_shape=reshape_dim, dim_ordering='th',
                      init='he_uniform', W_regularizer=l2(1e-5)))
    model.add(ELU())

    if pool_n_row == 'all' and pool_n_col == 'all':
        model.add(MaxPooling2D(pool_size=(model.output_shape[2], model.output_shape[3]), border_mode='valid',
                               dim_ordering='th'))
    elif pool_n_row == 'all' and pool_n_col != 'all':
        model.add(MaxPooling2D(pool_size=(model.output_shape[2], pool_n_col), border_mode='valid',
                               dim_ordering='th'))
    elif pool_n_row != 'all' and pool_n_col == 'all':
        model.add(MaxPooling2D(pool_size=(pool_n_row, model.output_shape[3]), border_mode='valid',
                               dim_ordering='th'))
    else:
        model.add(MaxPooling2D(pool_size=(pool_n_row, pool_n_col), border_mode='valid', dim_ordering='th'))
    model.add(Dropout(dropout))
    model.add(Flatten())
    return model 
Example 24
Project: PNRL   Author: zhitao-wang   File: pnrl_r.py    GNU General Public License v3.0
def PNRLR_Model(V, dim_embeddings=128):
    input_source = Input(shape=(1,), name='input_source')
    input_pos = Input(shape=(1,), name='input_pos')
    input_neg = Input(shape=(1,), name='input_neg')
    context_input = Input(shape=(1,), name='context_input')
    shared_embedding = Embedding(V, output_dim=dim_embeddings, input_length=1)
    X_source = shared_embedding(input_source)
    X_pos = shared_embedding(input_pos)
    X_neg = shared_embedding(input_neg)
    Y = Embedding(V, output_dim=dim_embeddings, input_length=1)(context_input)

    XY = dot([X_source, Y], axes=-1)
    res = Reshape((1,), input_shape=(1, 1))(XY)
    nrl_output = Activation(activation='sigmoid', name='nrl_output')(res)
    model1 = Model(inputs=[input_source, context_input], outputs=[nrl_output])
    # optimizer1 = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    optimizer1 = rmsprop()
    model1.compile(loss="binary_crossentropy", optimizer=optimizer1)

    reshape_layer = Reshape((dim_embeddings,), name='reshape_layer')
    X_S = reshape_layer(X_source)
    X_P = reshape_layer(X_pos)
    X_N = reshape_layer(X_neg)
    XW = Dense(dim_embeddings, activation='linear', W_regularizer=l2(0.01), bias=False, name='BiW')(X_S)
    pos_output = dot([XW, X_P], axes=-1)
    neg_output = dot([XW, X_N], axes=-1)
    lp_output = concatenate([pos_output, neg_output], axis=-1, name='lp_output')
    model2 = Model(inputs=[input_source, input_pos, input_neg], outputs=[lp_output])
    optimizer2 = Adam()
    model2.compile(loss={'lp_output': ranking_loss_mm}, optimizer=optimizer2)
    return model1, model2
Example 25
Project: PIEPredict   Author: aras62   File: pie_predict.py    Apache License 2.0
def __init__(self,
                 num_hidden_units=256,
                 regularizer_val=0.0001,
                 activation='softsign',
                 embed_size=64,
                 embed_dropout=0):

        # Network parameters
        self._num_hidden_units = num_hidden_units
        self._regularizer_value = regularizer_val
        self._regularizer = regularizers.l2(regularizer_val)

        self._activation = activation
        self._embed_size = embed_size
        self._embed_dropout = embed_dropout

        # model parameters
        self._observe_length = 15
        self._predict_length = 15

        self._encoder_feature_size = 4
        self._decoder_feature_size = 4

        self._prediction_size = 4 
Example 26
Project: VisualNN   Author: angelhunt   File: layers_export.py    GNU General Public License v3.0
def regularization(layer, layer_in, layerId, tensor=True):
    l1 = layer['params']['l1']
    l2 = layer['params']['l2']
    out = {layerId: ActivityRegularization(l1=l1, l2=l2)}
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out 
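A hypothetical call showing the layer dict this export helper expects; the values and the 'l5' id are invented for illustration, and prev_tensor stands for the upstream Keras tensor:

layer = {'params': {'l1': 0.0, 'l2': 0.01}}
# layer_in is a list holding the upstream Keras tensor(s)
out = regularization(layer, [prev_tensor], 'l5')
reg_tensor = out['l5']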
Example 27
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        # Test 1
        img_input = Input((224, 224, 3))
        model = Conv2D(64, (3, 3), padding='same', dilation_rate=1, use_bias=True,
                       kernel_regularizer=regularizers.l1(), bias_regularizer='l1',
                       activity_regularizer='l1', kernel_constraint='max_norm',
                       bias_constraint='max_norm')(img_input)
        model = BatchNormalization(center=True, scale=True, beta_regularizer=regularizers.l2(0.01),
                                   gamma_regularizer=regularizers.l2(0.01),
                                   beta_constraint='max_norm', gamma_constraint='max_norm',)(model)
        model = Model(img_input, model)
        json_string = Model.to_json(model)
        with open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'w') as out:
            json.dump(json.loads(json_string), out, indent=4)
        sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'r')
        response = self.client.post(reverse('keras-import'), {'file': sample_file})
        response = json.loads(response.content)
        net = get_shapes(response['net'])
        response = self.client.post(reverse('keras-export'), {'net': json.dumps(net),
                                                              'net_name': ''})
        response = json.loads(response.content)
        self.assertEqual(response['result'], 'success')
        # Test 2
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'ide',
                                  'caffe_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['HDF5Data']}
        # Currently we can't determine shape of HDF5Data Layer
        response = self.client.post(reverse('keras-export'), {'net': json.dumps(net),
                                                              'net_name': ''})
        response = json.loads(response.content)
        self.assertEqual(response['result'], 'error')


# *********** Keras Backend Test ********** 
Example 28
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_import(self):
        model = Sequential()
        model.add(Dense(100, kernel_regularizer=regularizers.l2(0.01), bias_regularizer=regularizers.l2(0.01),
                        activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                        bias_constraint='max_norm', activation='relu', input_shape=(16,)))
        model.build()
        self.keras_param_test(model, 1, 3) 
Example 29
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_import(self):
        model = Sequential()
        model.add(SeparableConv2D(32, 3, depthwise_regularizer=regularizers.l2(0.01),
                                  pointwise_regularizer=regularizers.l2(0.01),
                                  bias_regularizer=regularizers.l2(0.01),
                                  activity_regularizer=regularizers.l2(0.01), depthwise_constraint='max_norm',
                                  bias_constraint='max_norm', pointwise_constraint='max_norm',
                                  activation='relu', input_shape=(16, 16, 1)))
        self.keras_param_test(model, 1, 12) 
Example 30
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_import(self):
        model = Sequential()
        model.add(Embedding(1000, 64, input_length=10, embeddings_regularizer=regularizers.l2(0.01),
                            embeddings_constraint='max_norm'))
        model.build()
        self.keras_param_test(model, 0, 7)


# ********** Merge Layers ********** 
Example 31
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Convolution']}
        # Conv 1D
        net['l1']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l1']
        net['l3']['params']['layer_type'] = '1D'
        net['l3']['shape']['input'] = net['l1']['shape']['output']
        net['l3']['shape']['output'] = [128, 12]
        inp = data(net['l1'], '', 'l1')['l1']
        temp = convolution(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Conv1D')
        # Conv 2D
        net['l0']['connection']['output'].append('l0')
        net['l3']['connection']['input'] = ['l0']
        net['l3']['params']['layer_type'] = '2D'
        net['l3']['shape']['input'] = net['l0']['shape']['output']
        net['l3']['shape']['output'] = [128, 226, 226]
        inp = data(net['l0'], '', 'l0')['l0']
        temp = convolution(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Conv2D')
        # Conv 3D
        net['l2']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l2']
        net['l3']['params']['layer_type'] = '3D'
        net['l3']['shape']['input'] = net['l2']['shape']['output']
        net['l3']['shape']['output'] = [128, 226, 226, 18]
        inp = data(net['l2'], '', 'l2')['l2']
        temp = convolution(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Conv3D') 
Example 32
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Pooling']}
        # Pool 1D
        net['l1']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l1']
        net['l3']['params']['layer_type'] = '1D'
        net['l3']['shape']['input'] = net['l1']['shape']['output']
        net['l3']['shape']['output'] = [12, 12]
        inp = data(net['l1'], '', 'l1')['l1']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling1D')
        # Pool 2D
        net['l0']['connection']['output'].append('l0')
        net['l3']['connection']['input'] = ['l0']
        net['l3']['params']['layer_type'] = '2D'
        net['l3']['shape']['input'] = net['l0']['shape']['output']
        net['l3']['shape']['output'] = [3, 226, 226]
        inp = data(net['l0'], '', 'l0')['l0']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling2D')
        # Pool 3D
        net['l2']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l2']
        net['l3']['params']['layer_type'] = '3D'
        net['l3']['shape']['input'] = net['l2']['shape']['output']
        net['l3']['shape']['output'] = [3, 226, 226, 18]
        inp = data(net['l2'], '', 'l2')['l2']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling3D')


# ********** Locally-connected Layers ********** 
Example 33
Project: Logo-Retrieval-in-Commercial-Plaza   Author: zhang-rongchen   File: model_Mobilenet.py    MIT License
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs) 
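A usage sketch for the wrapper (filter counts and the tensor x are illustrative): a stride of (2, 2) switches the padding to 'valid', everything else defaults to 'same', and every call carries the L2 weight decay:

x = DarknetConv2D(32, (3, 3))(x)                  # 'same' padding
x = DarknetConv2D(64, (3, 3), strides=(2, 2))(x)  # downsampling, 'valid' padding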
Example 34
Project: RFMLS-NEU   Author: neu-spiral   File: HomegrownModel.py    MIT License
def getHomegrownModel(slice_size=1024, classes=1000, cnn_stacks=3, fc_stacks=1, channels=128, dropout_flag=True, \
                        flt=[50, 50, 256, 80],k1=[1, 7], k2=[2, 7], batchnorm=False, dr=0.5,\
                        #optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False), \
                        loss='categorical_crossentropy'):
    """Original Homegrown model"""
    in_shp = [2, slice_size]
    model_nn = models.Sequential()
    model_nn.add(Reshape([1] + in_shp, input_shape=in_shp))
    model_nn.add(ZeroPadding2D((0, 2)))
    model_nn.add(Conv2D(flt[0], (k1[0], k1[1]), padding="valid", kernel_initializer="glorot_uniform", name="conv1"))
    if batchnorm:
        model_nn.add(keras.layers.BatchNormalization(momentum=0.9, name='bn_1'))
    model_nn.add(Activation('relu'))
    model_nn.add(ZeroPadding2D((0, 2)))
    model_nn.add(Conv2D(flt[1], (k2[0], k2[1]), padding="valid", kernel_initializer="glorot_uniform", name="conv2"))
    if batchnorm:
        model_nn.add(keras.layers.BatchNormalization(momentum=0.9, name='bn_2'))
    model_nn.add(Activation('relu'))
    model_nn.add(Flatten())
    model_nn.add(Dense(flt[2], kernel_initializer='he_normal', name="dense1"))
    if batchnorm:
        model_nn.add(keras.layers.BatchNormalization(momentum=0.9, name='bn_3'))
    model_nn.add(Activation('relu'))
    model_nn.add(Dropout(dr))
    model_nn.add(Dense(flt[3], kernel_initializer='he_normal', name="dense2"))
    if batchnorm:
        model_nn.add(keras.layers.BatchNormalization(momentum=0.9, name='bn_4'))
    model_nn.add(Activation('relu'))
    model_nn.add(Dropout(dr))
    model_nn.add(Dense(classes, kernel_initializer='he_normal', kernel_regularizer=l2(0.0001), name="dense3"))
    model_nn.add(Activation('softmax'))

    model_nn.summary()

    return model_nn
Example 35
Project: Anamoly-Detection   Author: msmsk05   File: auto_encoder.py    BSD 2-Clause "Simplified" License
def _build_model(self):
        model = Sequential()
        # Input layer
        model.add(Dense(
            self.hidden_neurons_[0], activation=self.hidden_activation,
            input_shape=(self.n_features_,),
            activity_regularizer=l2(self.l2_regularizer)))
        model.add(Dropout(self.dropout_rate))

        # Additional layers
        for i, hidden_neurons in enumerate(self.hidden_neurons_, 1):
            model.add(Dense(
                hidden_neurons,
                activation=self.hidden_activation,
                activity_regularizer=l2(self.l2_regularizer)))
            model.add(Dropout(self.dropout_rate))

        # Output layers
        model.add(Dense(self.n_features_, activation=self.output_activation,
                        activity_regularizer=l2(self.l2_regularizer)))

        # Compile model
        model.compile(loss=self.loss, optimizer=self.optimizer)
        print(model.summary())
        return model

    # noinspection PyUnresolvedReferences 
Example 36
Project: design_embeddings_jmd_2016   Author: IDEALLab   File: stacked_ae.py    MIT License
def train_ae(data, feature_dim, hidden_sizes, l, p=0, batch_size=100, activation='tanh', 
             activity_regularizer=None, weights=None, nb_epoch=1000, loss='mse', verbose=False):

    data_dim = data.shape[1]
    inputs = Input(shape=(data_dim,))
    
    sizes = [data_dim] + hidden_sizes + [feature_dim]
    n_layers = len(sizes) - 1
    
    # Encoder
    x = noise.GaussianDropout(p)(inputs)
    for i in range(n_layers):
        x = Dense(sizes[i+1], activation=activation, W_regularizer=l2(l))(x)
    
    # Decoder
    for i in range(n_layers):
        x = Dense(sizes[-i-2], activation=activation, W_regularizer=l2(l))(x)
    decoded = x
    
    model = Model(input=inputs, output=decoded)
    
    if weights is not None:
        model.set_weights(weights)
        
#    optimizer = Adagrad(lr=lr, epsilon=epsilon)
    optimizer = Adam()
    model.compile(loss=loss, optimizer=optimizer)
#    early_stopping = MyEarlyStopping(monitor='loss', patience=10, verbose=verbose, tol=1e-6)
    model.fit(data, data, batch_size=batch_size, nb_epoch=nb_epoch, verbose=verbose)#, callbacks=[early_stopping])
    
    if n_layers == 1:
        W_en = model.layers[-2].get_weights()
        W_de = model.layers[-1].get_weights()
    else:
        W_en = None
        W_de = None
        
    encode = K.function([model.layers[0].input, K.learning_phase()], [model.layers[-2].output])
    a = encode([data, 0])[0] # hidden layer's activation
    
    return a, W_en, W_de, model 
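A call sketch grounded in the signature above; the random data is a stand-in for real design samples:

import numpy as np

data = np.random.rand(500, 32)  # 500 samples, 32-dimensional features
a, W_en, W_de, model = train_ae(data, feature_dim=2, hidden_sizes=[64],
                                l=1e-4, nb_epoch=50, verbose=False)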
Example 37
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License
def fcn_2d(input_shape):
    inputs = Input(input_shape)

    conv1 = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), kernel_regularizer=l2(REG_lambda))(inputs)
#    bn1 = BatchNormalization()(conv1)
    act1 = Activation('relu')(conv1)
    pool1 = MaxPooling2D(pool_size=(3, 3), strides=(3, 3))(act1)

    conv2 = Conv2D(32, kernel_size=(6, 6), strides=(1, 1), kernel_regularizer=l2(REG_lambda))(pool1)
#    bn2 = BatchNormalization()(conv2)
    act2 = Activation('relu')(conv2)
    pool2 = MaxPooling2D(pool_size=(3, 3), strides=(3, 3))(act2)

    conv3 = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), kernel_regularizer=l2(REG_lambda))(pool2)
#    bn3 = BatchNormalization()(conv3)
    act3 = Activation('relu')(conv3)
    pool3 = MaxPooling2D(pool_size=(3, 3), strides=(3, 3))(act3)

    up1 = UpSampling2D(size=(3, 3))(pool3)
    deconv1 = Conv2DTranspose(32, 3)(up1)
    act4 = Activation('relu')(deconv1)

    up2 = UpSampling2D(size=(3, 3))(act4)
    deconv2 = Conv2DTranspose(16, 6)(up2)
    act5 = Activation('relu')(deconv2)

    up3 = UpSampling2D(size=(3, 3))(act5)
    deconv3 = Conv2DTranspose(nb_classes, 3)(up3)
    act6 = Activation('relu')(deconv3)
    deconv4 = Conv2DTranspose(nb_classes, 3)(act6)

    model = Model(inputs=inputs, outputs=deconv4)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss=softmax_sparse_crossentropy_ignoring_first_label,
                  optimizer=adam,
                  metrics=[sparse_accuracy])
    return model


# U-net model 
Example 38
Project: EUSIPCO2017   Author: Veleslavia   File: singlelayer.py    GNU Affero General Public License v3.0
def build_model(n_classes):

    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    m_sizes = [50, 70]
    n_sizes = [1, 3, 5]
    n_filters = [128, 64, 32]
    maxpool_const = 4

    layers = list()

    for m_i in m_sizes:
        for i, n_i in enumerate(n_sizes):
            x = Convolution2D(n_filters[i], m_i, n_i,
                              border_mode='same',
                              init='he_normal',
                              W_regularizer=l2(1e-5),
                              name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
            x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS, SEGMENT_DUR // maxpool_const), name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)
            x = Flatten(name=str(n_i)+'_'+str(m_i)+'_'+'flatten')(x)
            layers.append(x)

    x = merge(layers, mode='concat', concat_axis=channel_axis)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
    model = Model(melgram_input, x)

    return model 
Example 39
Project: isl-gaze-demo   Author: djpetti   File: network.py    MIT License
def _build_common(self):
    """ Build the network components that are common to all. """
    # L2 regularizer for weight decay.
    self._l2 = regularizers.l2(0.0005)

    leye = None
    reye = None
    face = None
    grid = None
    pose = None
    if self.__data_tensors:
      leye, reye, face, grid, pose = self.__data_tensors

    # Create inputs.
    self._left_eye_input = layers.Input(shape=self._eye_shape, tensor=leye,
                                        name="left_eye_input")
    self._right_eye_input = layers.Input(shape=self._eye_shape, tensor=reye,
                                         name="right_eye_input")
    self._face_input = layers.Input(shape=self._input_shape, tensor=face,
                                    name="face_input")
    self._grid_input = layers.Input(shape=(25, 25), tensor=grid,
                                    name="grid_input")
    self._pose_input = layers.Input(shape=(3,), tensor=pose, name="pose_input")

    # Add preprocessing layer.
    self._left_eye_node = self._left_eye_input
    self._right_eye_node = self._right_eye_input
    if self.__eye_preproc is not None:
      self._left_eye_node = self.__eye_preproc(self._left_eye_input)
      self._right_eye_node = self.__eye_preproc(self._right_eye_input) 
Example 40
Project: C3D-Action-Recognition   Author: lianggyu   File: models.py    MIT License
def c3d_model():
    input_shape = (112,112,16,3)
    weight_decay = 0.005
    nb_classes = 2

    inputs = Input(input_shape)
    x = Conv3D(64,(3,3,3),strides=(1,1,1),padding='same',
               activation='relu',kernel_regularizer=l2(weight_decay))(inputs)
    x = MaxPool3D((2,2,1),strides=(2,2,1),padding='same')(x)

    x = Conv3D(128,(3,3,3),strides=(1,1,1),padding='same',
               activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = MaxPool3D((2,2,2),strides=(2,2,2),padding='same')(x)

    x = Conv3D(128,(3,3,3),strides=(1,1,1),padding='same',
               activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = MaxPool3D((2,2,2),strides=(2,2,2),padding='same')(x)

    x = Conv3D(256,(3,3,3),strides=(1,1,1),padding='same',
               activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = MaxPool3D((2,2,2),strides=(2,2,2),padding='same')(x)

    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = Flatten()(x)
    x = Dense(2048,activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    x = Dense(2048,activation='relu',kernel_regularizer=l2(weight_decay))(x)
    x = Dropout(0.5)(x)
    x = Dense(nb_classes,kernel_regularizer=l2(weight_decay))(x)
    x = Activation('softmax')(x)

    model = Model(inputs, x)
    return model 
Example 41
Project: keras-wrn   Author: EricAlcaide   File: wrn.py    MIT License
def main_block(x, filters, n, strides, dropout):
	# Normal part
	x_res = Conv2D(filters, (3,3), strides=strides, padding="same")(x)# , kernel_regularizer=l2(5e-4)
	x_res = BatchNormalization()(x_res)
	x_res = Activation('relu')(x_res)
	x_res = Conv2D(filters, (3,3), padding="same")(x_res)
	# Alternative branch
	x = Conv2D(filters, (1,1), strides=strides)(x)
	# Merge Branches
	x = Add()([x_res, x])

	for i in range(n-1):
		# Residual connection
		x_res = BatchNormalization()(x)
		x_res = Activation('relu')(x_res)
		x_res = Conv2D(filters, (3,3), padding="same")(x_res)
		# Apply dropout if given (to the residual branch, not the skip path)
		if dropout: x_res = Dropout(dropout)(x_res)
		# Second part
		x_res = BatchNormalization()(x_res)
		x_res = Activation('relu')(x_res)
		x_res = Conv2D(filters, (3,3), padding="same")(x_res)
		# Merge branches
		x = Add()([x, x_res])

	# Inter block part
	x = BatchNormalization()(x)
	x = Activation('relu')(x)
	return x 
Example 42
Project: keras-yolo3   Author: bing0037   File: model.py    MIT License
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs) 
Example 43
Project: PiCamNN   Author: PiSimo   File: keras_darknet19.py    MIT License
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet weight regularizer for Convolution2D."""
    darknet_conv_kwargs = {'W_regularizer': l2(5e-4)}
    darknet_conv_kwargs.update(kwargs)
    return _DarknetConv2D(*args, **darknet_conv_kwargs) 
Example 44
Project: Keras_MedicalImgAI   Author: taoyilee   File: model_factory.py    MIT License
def get_model(class_names, base_weights=None, weights_path=None, image_dimension=224, color_mode='grayscale',
              weight_decay=1e-4, class_mode='multiclass', final_activation="softmax"):
    """
    Create model for transfer learning

    Arguments:
    class_names - list of str
    weights_path - str

    Returns:
    model - Keras model
    """
    if weights_path is not None:
        base_weights = None
    base_model = DenseNet121(include_top=False, weights=base_weights, pooling="None")
    x = base_model.output
    x = GlobalAveragePooling2D()(x)

    # dense layers for different class
    predictions = []
    if class_mode == 'multiclass':
        print(f"** Final layer activation function is {final_activation}")
        prediction = Dense(4096, kernel_regularizer=regularizers.l2(weight_decay), name="fc_hidden_layer1")(x)
        predictions = Dense(len(class_names), activation=final_activation, name="fc_output_layer",
                            kernel_regularizer=regularizers.l2(weight_decay))(prediction)
    elif class_mode == 'multibinary':
        for i, class_name in enumerate(class_names):
            prediction = Dense(1024, kernel_regularizer=regularizers.l2(weight_decay))(x)
            prediction = Dense(1, kernel_regularizer=regularizers.l2(weight_decay), activation="sigmoid",
                               name=class_name)(prediction)
            predictions.append(prediction)

    model = Model(inputs=base_model.input, outputs=predictions)
    if weights_path is not None:
        model.load_weights(weights_path)
    model.base_model = base_model
    return model 
Example 45
Project: merck   Author: RuwanT   File: custom_networks.py    MIT License
def merck_net(input_shape=(128,)):
    """
    The recommended network presented in the paper: Junshui Ma et al., Deep Neural Nets as a Method for Quantitative
    Structure Activity Relationships
    URL: http://www.cs.toronto.edu/~gdahl/papers/deepQSARJChemInfModel2015.pdf
    :param input_shape: dim of input features
    :return: a keras model
    """

    from keras import models
    from keras.layers import Dense
    from keras.layers.advanced_activations import LeakyReLU
    from keras.layers.normalization import BatchNormalization
    from keras.layers import Dropout
    from keras.layers.noise import GaussianNoise
    from keras.regularizers import l2

    # TODO: is kernel_regularizer=l2(0.0001) the best way to add weight cost strength?

    model = models.Sequential()

    model.add(Dense(4000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
    model.add(Dropout(0.25))

    model.add(Dense(2000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
    model.add(Dropout(0.25))

    model.add(Dense(1000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
    model.add(Dropout(0.25))

    model.add(Dense(1000, activation='relu', input_shape=input_shape, kernel_regularizer=l2(0.0001)))
    model.add(Dropout(0.10))

    model.add(Dense(1, activation=None, use_bias=True, kernel_regularizer=l2(0.0001)))

    # model.summary()

    return model 
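A usage sketch for the function above; the optimizer and loss are assumptions (the paper trains a regression objective), not part of the snippet:

model = merck_net(input_shape=(128,))
model.compile(optimizer='sgd', loss='mse')  # regression on molecular descriptors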
Example 46
Project: Pix2Pose   Author: kirumang   File: resnet50_mod.py    MIT License
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a', kernel_regularizer=l2(0.0001))(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size,
               padding='same', name=conv_name_base + '2b', kernel_regularizer=l2(0.0001))(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(0.0001))(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu', name='act' + str(stage) + block + '_branch')(x)
    return x 
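A hedged usage sketch: the residual add requires the input channel count to equal filters3, so a 256-channel dummy input pairs with filters=[64, 64, 256]. It assumes the layer imports at the top of the project file (Conv2D, BatchNormalization, Activation, layers, l2, K) are in scope.

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(56, 56, 256))  # channels_last; 256 == filters3
out = identity_block(inp, 3, [64, 64, 256], stage=2, block='b')
Model(inp, out).summary()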
Example 47
Project: ICASSP2019_TCN   Author: DSIP-UPatras   File: models.py    MIT License 5 votes vote down vote up
def TCCNet(input_shape, classes, residual_blocks=4, tcn_layers=2,
    filters=[[32, 32], [32, 32], [64, 64], [128, 128]], filters_size=3,
    n_dropout=0.5, n_l2=0.0005, classify_as='aot', masking=False):
    """
        Arguments:
            input_shape     : array-like, dimensions of data input (height, width, depth)
            classes         : integer, number of classification labels
            residual_blocks : integer, (see 'tcn.py')
            tcn_layers      : integer, (see 'tcn.py')
            filters         : array-like, (see 'tcn.py')
            filters_size    : integer or array-like, (see 'tcn.py')
            n_dropout       : float, amount of dropout
            n_l2            : float, amount of l_2 regularization
            classify_as     : string, one of {'aot', 'att'} corresponding to Average over Time
                              and Attention; any other value leaves the output head undefined
            masking         : boolean, (see 'tcn.py')
    """

    kernel_init = initializers.glorot_normal(seed=0)
    kernel_regl = regularizers.l2(n_l2)

    model_tcn = TCN(input_shape, residual_blocks=residual_blocks, tcn_layers=tcn_layers,
                filters=filters, filters_size=filters_size,
                dropout_rate=n_dropout, weight_decay=n_l2, seed=0, masking=masking)
    x_input = model_tcn.layers[0].output
    x = model_tcn.layers[-1].output

    if classify_as == 'aot':
        x = MeanOverTime()(x)
        y = Dense(classes, activation='softmax', kernel_regularizer=kernel_regl, kernel_initializer=kernel_init)(x)
    elif classify_as == 'att':
        x = AttentionWithContext(bias=False)(x)
        y = Dense(classes, activation='softmax', kernel_regularizer=kernel_regl, kernel_initializer=kernel_init)(x)

    model = Model(x_input, y)

    return model 
Example 48
Project: seriesnet   Author: kristpapadopoulos   File: seriesnet.py    MIT License 5 votes vote down vote up
def DC_CNN_Block(nb_filter, filter_length, dilation, l2_layer_reg):
    def f(input_):
        
        residual =    input_
        
        layer_out =   Conv1D(filters=nb_filter, kernel_size=filter_length, 
                      dilation_rate=dilation, 
                      activation='linear', padding='causal', use_bias=False,
                      kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, 
                      seed=42), kernel_regularizer=l2(l2_layer_reg))(input_)
                    
        layer_out =   Activation('selu')(layer_out)
        
        skip_out =    Conv1D(1,1, activation='linear', use_bias=False, 
                      kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, 
                      seed=42), kernel_regularizer=l2(l2_layer_reg))(layer_out)
        
        network_in =  Conv1D(1,1, activation='linear', use_bias=False, 
                      kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, 
                      seed=42), kernel_regularizer=l2(l2_layer_reg))(layer_out)
                      
        network_out = Add()([residual, network_in])
        
        return network_out, skip_out
    
    return f 
Example 49
Project: seriesnet   Author: kristpapadopoulos   File: seriesnet.py    MIT License 5 votes vote down vote up
def DC_CNN_Model(length):
    
    input = Input(shape=(length,1))
    
    l1a, l1b = DC_CNN_Block(32,2,1,0.001)(input)    
    l2a, l2b = DC_CNN_Block(32,2,2,0.001)(l1a) 
    l3a, l3b = DC_CNN_Block(32,2,4,0.001)(l2a)
    l4a, l4b = DC_CNN_Block(32,2,8,0.001)(l3a)
    l5a, l5b = DC_CNN_Block(32,2,16,0.001)(l4a)
    l6a, l6b = DC_CNN_Block(32,2,32,0.001)(l5a)
    l6b = Dropout(0.8)(l6b) #dropout used to limit influence of earlier data
    l7a, l7b = DC_CNN_Block(32,2,64,0.001)(l6a)
    l7b = Dropout(0.8)(l7b) #dropout used to limit influence of earlier data

    l8 =   Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b])
    
    l9 =   Activation('relu')(l8)
           
    l21 =  Conv1D(1,1, activation='linear', use_bias=False, 
           kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=42),
           kernel_regularizer=l2(0.001))(l9)

    model = Model(inputs=input, outputs=l21)
    
    adam = optimizers.Adam(lr=0.00075, beta_1=0.9, beta_2=0.999, epsilon=None, 
                           decay=0.0, amsgrad=False)

    model.compile(loss='mae', optimizer=adam, metrics=['mse'])
    
    return model 
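A hedged usage sketch fitting the forecaster on dummy data; the shapes follow from the causal, length-preserving convolutions above (one prediction per timestep):

import numpy as np

model = DC_CNN_Model(length=128)
X = np.random.rand(8, 128, 1)  # 8 dummy input sequences
y = np.random.rand(8, 128, 1)  # one target value per timestep
model.fit(X, y, epochs=1, verbose=0)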
Example 50
Project: multi-object-tracking   Author: jguoaj   File: model.py    GNU General Public License v3.0 5 votes vote down vote up
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs) 
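A hedged usage sketch: every call inherits the l2(5e-4) weight decay and Darknet's padding rule unless the caller overrides them (Conv2D and l2 are assumed imported, as in the project file):

from keras.layers import Input

inp = Input(shape=(416, 416, 3))
x = DarknetConv2D(32, (3, 3))(inp)                # padding='same'
x = DarknetConv2D(64, (3, 3), strides=(2, 2))(x)  # padding='valid' (downsampling)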
Example 51
Project: solder_joint_detection   Author: lx-onism   File: model.py    MIT License 5 votes vote down vote up
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs) 
Example 52
Project: AIX360   Author: IBM   File: resnet_keras_model.py    Apache License 2.0 5 votes vote down vote up
def resnet_layer(inputs,
                 num_filters=16,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True):
    """2D Convolution-Batch Normalization-Activation stack builder
    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)
    # Returns
        x (tensor): tensor as input to the next layer
    """
    conv = Conv2D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(1e-4))

    x = inputs
    if conv_first:
        x = conv(x)
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
    else:
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
        x = conv(x)
    return x 
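A hedged usage sketch stacking two of these layers (filter counts are illustrative; Conv2D, BatchNormalization, Activation, and l2 are assumed imported as in the project file):

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(32, 32, 3))
x = resnet_layer(inputs)                        # conv-bn-relu with 16 filters
x = resnet_layer(x, num_filters=32, strides=2)  # downsampling stage
Model(inputs, x).summary()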
Example 53
Project: vision-web-service   Author: sherlockchou86   File: model.py    MIT License 5 votes vote down vote up
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs) 
Example 54
Project: musical_genres_classification   Author: shaoeric   File: pre_process_mfcc.py    MIT License 5 votes vote down vote up
def get_model():
    input_layer = Input(shape=(1, 130, 40), name='mfcc_input')
    x = GaussianNoise(0.08)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu',
               kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='mfcc_output', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.2))(x)
    model = Model(inputs=input_layer, outputs=x)
    # model.compile(optimizer=optimizers.sgd(lr=1e-4, decay=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
    print(model.summary())
    return model 
Example 55
Project: musical_genres_classification   Author: shaoeric   File: attention_logbank.py    MIT License 5 votes vote down vote up
def get_model():
    regularize = 0.01
    input_layer = Input(shape=(1, 120, 120), name='attention_logbank_input')
    x = GaussianNoise(0.03)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu',kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize),data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(1, 3), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Reshape(target_shape=(32, 25))(x)
    x = Dropout(0.3)(x)
    x = CuDNNGRU(34, return_sequences=True)(x)
    x = Attention(x.shape[1], name='attention')(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='attention_logbank_output', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize))(x)
    model = Model(inputs=input_layer, outputs=x)
    print(model.summary())
    return model 
Example 56
Project: musical_genres_classification   Author: shaoeric   File: final_model.py    MIT License 5 votes vote down vote up
def get_mfcc_model():
    input_layer = Input(shape=(1, 130, 40), name='mfcc_input')
    x = GaussianNoise(0.08)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu',
               kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.01))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=30, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=16, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(0.01), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='mfcc_output', kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(0.2))(x)
    model = Model(inputs=input_layer, outputs=x)
    # model.compile(optimizer=optimizers.sgd(lr=1e-4, decay=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])

    return model 
Example 57
Project: musical_genres_classification   Author: shaoeric   File: final_model.py    MIT License 5 votes vote down vote up
def get_logfbank_model():
    regularize = 0.01
    input_layer = Input(shape=(1, 120, 120), name='attention_logbank_input')
    x = GaussianNoise(0.03)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu',
               kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(1, 3), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Reshape(target_shape=(32, 25))(x)
    x = Dropout(0.3)(x)
    x = CuDNNGRU(34, return_sequences=True)(x)
    x = Attention(x.shape[1], name='attention')(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='attention_logbank_output', kernel_initializer='glorot_normal',
              kernel_regularizer=regularizers.l2(regularize))(x)
    model = Model(inputs=input_layer, outputs=x)
    return model 
Example 58
Project: musical_genres_classification   Author: shaoeric   File: final_model.py    MIT License 5 votes vote down vote up
def get_logfbank_model():
    regularize = 0.01
    noise = 0.03  # assumed noise level: the snippet does not define it; 0.03 matches the sibling log-fbank model above
    input_layer = Input(shape=(1, 1197, 120), name='attention_logbank_input')
    x = GaussianNoise(noise)(input_layer)
    x = Conv2D(data_format='channels_first', filters=16, kernel_size=3, padding='same', activation='relu',
               kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l2(regularize))(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='valid', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=(3, 1), padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Conv2D(filters=32, kernel_size=3, padding='same', activation='relu', kernel_initializer='glorot_normal',
               kernel_regularizer=regularizers.l2(regularize), data_format='channels_first')(x)
    x = MaxPooling2D(pool_size=2, padding='same', data_format='channels_first')(x)
    x = BatchNormalization()(x)
    x = Reshape(target_shape=(17, 32 * 15))(x)
    x = Dropout(0.3)(x)
    x = CuDNNGRU(34, return_sequences=True)(x)
    x = Attention(x.shape[1], name='attention')(x)
    x = Dropout(0.3)(x)
    x = Dense(10, activation='softmax', name='attention_logbank_output', kernel_initializer='glorot_normal',
              kernel_regularizer=regularizers.l2(regularize))(x)
    model = Model(inputs=input_layer, outputs=x)
    return model 
Example 59
Project: knowledgeflow   Author: 3rduncle   File: lcd.py    MIT License 5 votes vote down vote up
def buildConvolution(self, name):
        filters = self.params.get('filters')
        nb_filter = self.params.get('nb_filter')
        assert filters
        assert nb_filter
        convs = []
        for fsz in filters:
            layer_name = '%s-conv-%d' % (name, fsz)
            conv = Convolution1D(
                nb_filter=nb_filter,
                filter_length=fsz,
                border_mode='valid',
                #activation='relu',
                subsample_length=1,
                init='glorot_uniform',
                #init=init,
                #init=lambda shape, name: initializations.uniform(shape, scale=0.01, name=name),
                W_constraint=maxnorm(self.params.get('w_maxnorm')),
                b_constraint=maxnorm(self.params.get('b_maxnorm')),
                #W_regularizer=regularizers.l2(self.params.get('w_l2')),
                #b_regularizer=regularizers.l2(self.params.get('b_l2')),
                #input_shape=(self.q_length, self.wdim),
                name=layer_name
            )
            convs.append(conv)
        self.layers['%s-convolution' % name] = convs 
Example 60
Project: knowledgeflow   Author: 3rduncle   File: lcd.py    MIT License 5 votes vote down vote up
def buildConvolution(self, name):
        filters = self.params.get('filters')
        nb_filter = self.params.get('nb_filter')
        assert filters
        assert nb_filter
        convs = []
        for fsz in filters:
            layer_name = '%s-conv-%d' % (name, fsz)
            conv = Convolution1D(
                nb_filter=nb_filter,
                filter_length=fsz,
                border_mode='valid',
                #activation='relu',
                subsample_length=1,
                init='glorot_uniform',
                #init=init,
                #init=lambda shape, name: initializations.uniform(shape, scale=0.01, name=name),
                W_constraint=maxnorm(self.params.get('w_maxnorm')),
                b_constraint=maxnorm(self.params.get('b_maxnorm')),
                #W_regularizer=regularizers.l2(self.params.get('w_l2')),
                #b_regularizer=regularizers.l2(self.params.get('b_l2')),
                #input_shape=(self.q_length, self.wdim),
                name=layer_name
            )
            convs.append(conv)
        self.layers['%s-convolution' % name] = convs 
Example 61
Project: DeepLearn   Author: GauravBh1010tt   File: eval_fnc.py    MIT License 5 votes vote down vote up
def prepare_model(ninputs=9600, n_feats=45,nclass=4,n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500
    #out_neurons2 = 20
    #out_neurons2 = 10
    m1 = Dense(out_neurons1, input_dim=ninputs, activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100,activation='sigmoid')(m1)
    #m1 = Dropout(0.2)(m1)
    #m1 = Dense(4, activation='sigmoid')(m1)
    
    #m2 = Dense(input_dim=n_feats, output_dim=n_feats,activation='relu')(inp2)
    m2 = Dense(50,activation='relu')(inp2)
    #m2=Dense(4,activation='relu')(m2)
    
    m3 = Dense(500, input_dim=n_tfidf, activation='relu',\
                    kernel_regularizer=regularizers.l2(reg))(inp3)
    
    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)
    #m3 = Dropout(0.4)(m3)
    #m3 = Dense(4, activation='softmax')(m3)
    
    
    #m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
    #m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)
    
    m = Merge(mode='concat')([m1,m2,m3])
    
    #mul = Multiply()([m1,m2])
    #add = Abs()([m1,m2])
    #m = Merge(mode='concat')([mul,add])
    
    score = Dense(output_dim=nclass,activation='softmax')(m)
    model = Model([inp1,inp2,inp3],score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model 
Example 62
Project: DeepLearn   Author: GauravBh1010tt   File: eval_fnc.py    MIT License 5 votes vote down vote up
def prepare_model2(ninputs=9600, n_feats=45,nclass=4,n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500
    #out_neurons2 = 20
    #out_neurons2 = 10
    m1 = Dense(out_neurons1, input_dim=ninputs, activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.2)(m1)
    m1 = Dense(100,activation='sigmoid')(m1)
    #m1 = Dropout(0.2)(m1)
    #m1 = Dense(4, activation='sigmoid')(m1)
    
    m2 = Dense(input_dim=n_feats, output_dim=n_feats,activation='relu')(inp2)
    # note: this Dense is also applied to inp2, so the layer above does not feed it
    m2 = Dense(4,activation='relu')(inp2)
    #m2=Dense(4,activation='relu')(m2)
    
    m3 = Dense(500, input_dim=n_tfidf, activation='relu',\
                    kernel_regularizer=regularizers.l2(reg))(inp3)
    
    m3 = Dropout(0.4)(m3)
    m3 = Dense(50, activation='relu')(m3)
    #m3 = Dropout(0.4)(m3)
    #m3 = Dense(4, activation='softmax')(m3)
    
    
    #m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
    #m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)
    
    m = Merge(mode='concat')([m1,m2,m3])
    
    #mul = Multiply()([m1,m2])
    #add = Abs()([m1,m2])
    #m = Merge(mode='concat')([mul,add])
    
    score = Dense(output_dim=nclass,activation='softmax')(m)
    model = Model([inp1,inp2,inp3],score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model 
Example 63
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0 5 votes vote down vote up
def BNConv(nb_filter, nb_row, nb_col, w_decay, subsample=(1, 1), border_mode="same"):
    def f(input):
        conv = Convolution2D(nb_filter=nb_filter, nb_row=nb_row, nb_col=nb_col, subsample=subsample,
                      border_mode=border_mode, activation="relu",
                      W_regularizer=l2(w_decay) if w_decay else None, init="he_normal")(input)
        return BatchNormalization(mode=0, axis=1)(conv)
    return f 
Example 64
Project: Keras-DualPathNetworks   Author: titu1994   File: dual_path_network.py    Apache License 2.0 5 votes vote down vote up
def _grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    ''' Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=strides,
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = Activation('relu')(x)
        return x

    for c in range(cardinality):
        # bind c via a default argument so each Lambda keeps its own group index,
        # and return a tensor slice for either data format
        x = Lambda(lambda z, c=c: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last' else
                   z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=strides,
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    group_merge = BatchNormalization(axis=channel_axis)(group_merge)
    group_merge = Activation('relu')(group_merge)
    return group_merge 
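A hedged usage sketch: with a 64-channel input and cardinality 4, each group convolves 64 / 4 = 16 channels (the module's own imports are assumed in scope):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 32, 64))
out = _grouped_convolution_block(inp, grouped_channels=16, cardinality=4, strides=1)
Model(inp, out).summary()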
Example 65
Project: CAPTCHA-breaking   Author: lllcho   File: test_regularizers.py    MIT License 5 votes vote down vote up
def test_W_reg(self):
        for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
            model = create_model(weight_reg=reg)
            model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
            model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
            model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0) 
Example 66
Project: PIEPredict   Author: aras62   File: pie_intent.py    Apache License 2.0 5 votes vote down vote up
def __init__(self,
                 num_hidden_units=128,
                 regularizer_val=0.001,
                 activation='tanh',
                 lstm_dropout=0.4,
                 lstm_recurrent_dropout=0.2,
                 convlstm_num_filters=64,
                 convlstm_kernel_size=2):

        # Network parameters
        self._num_hidden_units = num_hidden_units
        #self._bias_initializer = 'zeros' # 'zeros' or 'ones'
        #self._output_activation = 'sigmoid'
        self.reg_value = regularizer_val
        self._kernel_regularizer = regularizers.l2(regularizer_val)
        self._recurrent_regularizer = regularizers.l2(regularizer_val)
        self._bias_regularizer = regularizers.l2(regularizer_val)
        self._activation = activation

        # Encoder
        self._lstm_dropout = lstm_dropout
        self._lstm_recurrent_dropout = lstm_recurrent_dropout

        # conv unit parameters
        self._convlstm_num_filters = convlstm_num_filters
        self._convlstm_kernel_size = convlstm_kernel_size

        #self._encoder_dense_output_size = 1 # set this only for single lstm unit
        self._encoder_input_size = 4  # decided on run time according to data

        self._decoder_dense_output_size = 1
        self._decoder_input_size = 4  # decided on run time according to data

        # Data properties
        #self._batch_size = 128  # this will be set at train time

        self._model_name = 'convlstm_encdec' 
Example 67
Project: ImageDataGenerator-MultiLabel   Author: you359   File: train.py    MIT License 4 votes vote down vote up
def train(train_data_dir, validation_data_dir, model_path):
    # Pre-Trained CNN Model using imagenet dataset for pre-trained weights
    # base_model = Xception(input_shape=(img_width, img_height, 3), weights='imagenet', include_top=False)
    base_model = InceptionV3(input_shape=(img_width, img_height, 3), include_top=False)

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    predictions = layers.Dense(nb_classes, activation='sigmoid')(x)

    # add your top layer block to your base model
    model = Model(base_model.input, predictions)
    print(model.summary())

    for layer in model.layers:
        layer.trainable = True
        # caveat: assigning kernel_regularizer to an already-built layer does not
        # add the l2 penalty to model.losses; it only takes effect if the model
        # is rebuilt from its config
        layer.kernel_regularizer = l2(0.05)

    model.compile(optimizer='nadam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       horizontal_flip=True)

    validation_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                        target_size=(img_width, img_height),
                                                        batch_size=batch_size,
                                                        class_mode='multi_categorical')

    validation_generator = validation_datagen.flow_from_directory(validation_data_dir,
                                                                  target_size=(img_width, img_height),
                                                                  batch_size=batch_size,
                                                                  class_mode='multi_categorical')

    # save weights of best training epoch: monitor either val_loss or val_acc
    final_acc_weights_path = os.path.join(os.path.abspath(model_path), 'model_acc_weights.h5')
    final_loss_weights_path = os.path.join(os.path.abspath(model_path), 'model_loss_weights.h5')

    callbacks_list = [
        ModelCheckpoint(final_acc_weights_path, monitor='val_acc', verbose=1, save_best_only=True),
        ModelCheckpoint(final_loss_weights_path, monitor='val_loss', verbose=1, save_best_only=True),
        # EarlyStopping(monitor='val_loss', patience=15, verbose=0),
        TensorBoard(log_dir='graph/train', histogram_freq=0, write_graph=True)
    ]

    # fine-tune the model
    model.fit_generator(train_generator,
                        epochs=nb_epoch,
                        validation_data=validation_generator,
                        callbacks=callbacks_list)

    # save model
    model_json = model.to_json()
    with open(os.path.join(os.path.abspath(model_path), 'model.json'), 'w') as json_file:
        json_file.write(model_json) 
Example 68
Project: ImageQA   Author: codedecde   File: wbw_att_imageqa_model2.py    MIT License 4 votes vote down vote up
def build_model(opts, verbose=False):

    # LSTM Output Dimension
    k = 2 * opts.lstm_units

    # Question Length
    L = opts.xmaxlen

    question_input_layer = Input(shape=(L,), dtype='int32', name="Question Input Layer")
    image_input_layer = Input(shape=(100352, ), dtype='float32', name="Image Input Layer")
    image_reshaped_layer = Reshape((196, 512), input_shape=(100352,))(image_input_layer)

    ## TODO : Get GLoVe matrix for the given vocabulary
    ##        or port existing indices to indices from Dictionary.txt
    # # Initial Embedding (Initialise using GloVe)
    # initEmbeddings = np.load(opts.embeddings_file_path)
    # emb_layer = Embedding(initEmbeddings.shape[0], 
    #                         initEmbeddings.shape[1],
    #                         input_length = L,
    #                         weights = [initEmbeddings],
    #                         name = "Embedding Layer") (question_input_layer)

    emb_layer = Embedding(13747, 
                            300,
                            input_length = L,
                            name = "Embedding Layer") (question_input_layer)
    emb_layer = Dropout(0.1, name="Dropout Embeddings")(emb_layer)

    # ## Masking Layer (May not be supported by downstream layers)
    # emb_layer = Masking(mask_value = 0., input_shape=(L, 300))(emb_layer)

    LSTMEncoding = Bidirectional(LSTM(opts.lstm_units,
                                    return_sequences = True, 
                                    name="LSTM Layer")) (emb_layer)

    LSTMEncoding = Dropout(0.1, name="Dropout LSTM Layer")(LSTMEncoding)

    h_n = Lambda(get_H_n, output_shape=(k,), name = "h_n")(LSTMEncoding)

    Y = image_reshaped_layer
    Y = TimeDistributed(Dense(k, W_regularizer = l2(0.01)))(Y)

    Y_h = merge([Y, LSTMEncoding], mode="concat", concat_axis=1)
    r = WBWAttentionLayer(k, return_sequences = False, unroll = True)(Y_h)

    r = Dense(k, W_regularizer = l2(0.01))(r) 
    h_n = Dense(k, W_regularizer = l2(0.01))(h_n)

    h_star = Activation('tanh')(merge([r, h_n]))

    output_layer = Dense(1000, activation='softmax', name="Output Layer")(h_star)

    model = Model(input = [image_input_layer, question_input_layer], output = output_layer)
    model.summary()
    model.compile(loss='categorical_crossentropy', optimizer=Adam(opts.lr), metrics=['accuracy'])
    print("Model Compiled")

    return model

## Data Generator 
Example 69
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 4 votes vote down vote up
def cnn_3d(input_shape):

    model = Sequential()
    model.add(Conv3D(16, kernel_size=(3, 3, 20), strides=(1, 1, 10), padding='valid', kernel_regularizer=l2(REG_lambda), input_shape=input_shape))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv3D(16, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 3)))

    model.add(Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv3D(32, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 3)))

    model.add(Conv3D(64, kernel_size=(2, 2, 2), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Conv3D(64, kernel_size=(2, 2, 2), strides=(1, 1, 1), padding='same', kernel_regularizer=l2(REG_lambda)))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))

    model.add(Flatten())
    model.add(Dense(128))
#    model.add(BatchNormalization())
    model.add(Activation(activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    return model


# 2D-CNN model 
Example 70
Project: 3DGCN   Author: blackmints   File: model.py    MIT License 4 votes vote down vote up
def model_3DGCN(hyper):
    # Kipf adjacency, neighborhood mixing
    num_atoms = hyper["num_atoms"]
    num_features = hyper["num_features"]
    units_conv = hyper["units_conv"]
    units_dense = hyper["units_dense"]
    num_layers = hyper["num_layers"]
    std = hyper["data_std"]
    loss = hyper["loss"]
    task = hyper["task"]
    pooling = hyper["pooling"]
    outputs = hyper["outputs"]

    atoms = Input(name='atom_inputs', shape=(num_atoms, num_features))
    adjms = Input(name='adjm_inputs', shape=(num_atoms, num_atoms))
    dists = Input(name='coor_inputs', shape=(num_atoms, num_atoms, 3))

    sc, vc = GraphEmbed()([atoms, dists])

    for _ in range(num_layers):
        sc_s = GraphSToS(units_conv, activation='relu')(sc)
        sc_v = GraphVToS(units_conv, activation='relu')([vc, dists])

        vc_s = GraphSToV(units_conv, activation='tanh')([sc, dists])
        vc_v = GraphVToV(units_conv, activation='tanh')(vc)

        sc = GraphConvS(units_conv, pooling='sum', activation='relu')([sc_s, sc_v, adjms])
        vc = GraphConvV(units_conv, pooling='sum', activation='tanh')([vc_s, vc_v, adjms])

    sc, vc = GraphGather(pooling=pooling)([sc, vc])
    sc_out = Dense(units_dense, activation='relu', kernel_regularizer=l2(0.005))(sc)
    sc_out = Dense(units_dense, activation='relu', kernel_regularizer=l2(0.005))(sc_out)

    vc_out = TimeDistributed(Dense(units_dense, activation='relu', kernel_regularizer=l2(0.005)))(vc)
    vc_out = TimeDistributed(Dense(units_dense, activation='relu', kernel_regularizer=l2(0.005)))(vc_out)
    vc_out = Flatten()(vc_out)

    out = Concatenate(axis=-1)([sc_out, vc_out])

    if task == "regression":
        out = Dense(outputs, activation='linear', name='output')(out)
        model = Model(inputs=[atoms, adjms, dists], outputs=out)
        model.compile(optimizer=Adam(lr=0.001), loss=loss, metrics=[std_mae(std=std), std_rmse(std=std)])
    elif task == "binary":
        out = Dense(outputs, activation='sigmoid', name='output')(out)
        model = Model(inputs=[atoms, adjms, dists], outputs=out)
        model.compile(optimizer=Adam(lr=0.001), loss=loss)
    elif task == "classification":
        out = Dense(outputs, activation='softmax', name='output')(out)
        model = Model(inputs=[atoms, adjms, dists], outputs=out)
        model.compile(optimizer=Adam(lr=0.001), loss=loss)
    else:
        raise ValueError("Unsupported task on model generation.")

    return model 
Example 71
Project: ismir2018-artist   Author: jongpillee   File: model.py    MIT License 4 votes vote down vote up
def model_basic(num_frame,num_sing):
	pos_anchor = Input(shape = (num_frame,128))

	# item model **audio**
	conv1 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn1 = BatchNormalization()
	activ1 = Activation('relu')
	MP1 = MaxPool1D(pool_size=4)
	conv2 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn2 = BatchNormalization()
	activ2 = Activation('relu')
	MP2 = MaxPool1D(pool_size=4)
	conv3 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn3 = BatchNormalization()
	activ3 = Activation('relu')
	MP3 = MaxPool1D(pool_size=4)
	conv4 = Conv1D(128,2,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn4 = BatchNormalization()
	activ4 = Activation('relu')
	MP4 = MaxPool1D(pool_size=2)
	conv5 = Conv1D(256,1,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn5 = BatchNormalization()
	activ5 = Activation('relu')
	drop1 = Dropout(0.5)

	item_sem = GlobalAvgPool1D()
	
	# pos anchor
	pos_anchor_conv1 = conv1(pos_anchor)
	pos_anchor_bn1 = bn1(pos_anchor_conv1)
	pos_anchor_activ1 = activ1(pos_anchor_bn1)
	pos_anchor_MP1 = MP1(pos_anchor_activ1)
	pos_anchor_conv2 = conv2(pos_anchor_MP1)
	pos_anchor_bn2 = bn2(pos_anchor_conv2)
	pos_anchor_activ2 = activ2(pos_anchor_bn2)
	pos_anchor_MP2 = MP2(pos_anchor_activ2)
	pos_anchor_conv3 = conv3(pos_anchor_MP2)
	pos_anchor_bn3 = bn3(pos_anchor_conv3)
	pos_anchor_activ3 = activ3(pos_anchor_bn3)
	pos_anchor_MP3 = MP3(pos_anchor_activ3)
	pos_anchor_conv4 = conv4(pos_anchor_MP3)
	pos_anchor_bn4 = bn4(pos_anchor_conv4)
	pos_anchor_activ4 = activ4(pos_anchor_bn4)
	pos_anchor_MP4 = MP4(pos_anchor_activ4)
	pos_anchor_conv5 = conv5(pos_anchor_MP4)
	pos_anchor_bn5 = bn5(pos_anchor_conv5)
	pos_anchor_activ5 = activ5(pos_anchor_bn5)
	pos_anchor_sem = item_sem(pos_anchor_activ5)

	output = Dense(num_sing, activation='softmax')(pos_anchor_sem)
	model = Model(inputs = pos_anchor, outputs = output)
	return model 
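A hedged usage sketch (the frame count and artist count below are illustrative):

model = model_basic(num_frame=129, num_sing=500)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()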
Example 72
Project: EUSIPCO2017   Author: Veleslavia   File: multilayer.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def build_model(n_classes):

    if K.image_dim_ordering() == 'th':
        input_shape = (1, N_MEL_BANDS, SEGMENT_DUR)
        channel_axis = 1
    else:
        input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1)
        channel_axis = 3
    melgram_input = Input(shape=input_shape)

    maxpool_const = 4
    m_sizes = [5, 80]
    n_sizes = [1, 3, 5]
    n_filters = [128, 64, 32]

    layers = list()

    for m_i in m_sizes:
        for i, n_i in enumerate(n_sizes):
            x = Convolution2D(n_filters[i], m_i, n_i,
                              border_mode='same',
                              init='he_normal',
                              W_regularizer=l2(1e-5),
                              name=str(n_i)+'_'+str(m_i)+'_'+'conv')(melgram_input)
            x = BatchNormalization(axis=channel_axis, mode=0, name=str(n_i)+'_'+str(m_i)+'_'+'bn')(x)
            x = ELU()(x)
            x = MaxPooling2D(pool_size=(N_MEL_BANDS/maxpool_const, SEGMENT_DUR/maxpool_const),
                             name=str(n_i)+'_'+str(m_i)+'_'+'pool')(x)
            layers.append(x)

    x = merge(layers, mode='concat', concat_axis=channel_axis)

    x = Dropout(0.25)(x)
    x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5), border_mode='same', name='conv2')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn2')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)

    x = Dropout(0.25)(x)
    x = Convolution2D(128, 3, 3, init='he_normal', W_regularizer=l2(1e-5), border_mode='same', name='conv3')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn3')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(x)

    x = Flatten(name='flatten')(x)
    x = Dropout(0.5)(x)
    x = Dense(256, init='he_normal', W_regularizer=l2(1e-5), name='fc1')(x)
    x = ELU()(x)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, init='he_normal', W_regularizer=l2(1e-5), activation='softmax', name='prediction')(x)
    model = Model(melgram_input, x)

    return model 
Example 73
Project: Intelligent_Arm_Project   Author: TeamLimb   File: tiny_yolo2.py    MIT License 4 votes vote down vote up
def tiny_yolo2():
    my_input = Input(shape=(416, 416, 3))
    output = Conv2D(16, (3, 3),
                    strides=(1, 1),
                    padding='same',
                    use_bias=False,
                    kernel_initializer='glorot_normal',
                    kernel_regularizer=regularizers.l2(0.01))(my_input)
    output = BatchNormalization(beta_regularizer=regularizers.l2(0.01), gamma_regularizer=regularizers.l2(0.01))(output)
    output = LeakyReLU(alpha=0.1)(output)
    output = MaxPooling2D(pool_size=(2, 2))(output)

    # Layer 2 - 5
    for i in range(0, 4):
        output = Conv2D(32 * (2 ** i), (3, 3),
                        strides=(1, 1),
                        padding='same',
                        use_bias=False,
                        kernel_initializer='glorot_normal',
                        kernel_regularizer=regularizers.l2(0.01))(output)
        output = BatchNormalization(beta_regularizer=regularizers.l2(0.01), gamma_regularizer=regularizers.l2(0.01))(output)
        output = LeakyReLU(alpha=0.1)(output)
        output = MaxPooling2D(pool_size=(2, 2))(output)

    # Layer 6
    output = Conv2D(512, (3, 3),
                    strides=(1, 1),
                    padding='same',
                    use_bias=False,
                    kernel_initializer='glorot_normal',
                    kernel_regularizer=regularizers.l2(0.01))(output)
    output = BatchNormalization(beta_regularizer=regularizers.l2(0.01), gamma_regularizer=regularizers.l2(0.01))(output)
    output = LeakyReLU(alpha=0.1)(output)
    output = MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(output)

    # Layer 7 - 8
    for _ in range(0, 2):
        output = Conv2D(1024, (3, 3),
                        strides=(1, 1),
                        padding='same',
                        use_bias=False,
                        kernel_initializer='glorot_normal',
                        kernel_regularizer=regularizers.l2(0.01))(output)
        output = BatchNormalization(beta_regularizer=regularizers.l2(0.01), gamma_regularizer=regularizers.l2(0.01))(output)
        output = LeakyReLU(alpha=0.1)(output)

    # Layer 9
    output = Conv2D(5 * (4 + 1 + 80), (1, 1),
                    strides=(1, 1),
                    padding='same',
                    kernel_initializer='glorot_normal',
                    kernel_regularizer=regularizers.l2(0.01))(output)
    output = Activation('linear')(output)

    return Model(my_input, output) 
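A hedged usage sketch: five stride-2 poolings reduce 416 x 416 inputs to a 13 x 13 grid, and the head emits 5 anchors x (4 box coordinates + 1 objectness + 80 class scores) = 425 channels per cell.

model = tiny_yolo2()
model.summary()  # final output shape: (None, 13, 13, 425)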
Example 74
Project: Efficient_Augmentation   Author: mkuchnik   File: norb_resnet.py    MIT License 4 votes vote down vote up
def resnet_layer(inputs,
                 num_filters=16,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True):
    """2D Convolution-Batch Normalization-Activation stack builder

    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)

    # Returns
        x (tensor): tensor as input to the next layer
    """
    conv = Conv2D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(1e-4))

    x = inputs
    if conv_first:
        x = conv(x)
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
    else:
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
        x = conv(x)
    return x 
Example 75
Project: Efficient_Augmentation   Author: mkuchnik   File: cifar10_resnet.py    MIT License 4 votes vote down vote up
def resnet_layer(inputs,
                 num_filters=16,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True):
    """2D Convolution-Batch Normalization-Activation stack builder

    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)

    # Returns
        x (tensor): tensor as input to the next layer
    """
    conv = Conv2D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(1e-4))

    x = inputs
    if conv_first:
        x = conv(x)
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
    else:
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
        x = conv(x)
    return x 
Example 76
Project: Pix2Pose   Author: kirumang   File: resnet50_mod.py    MIT License 4 votes vote down vote up
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """A block that has a conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        strides: Strides for the first conv layer in the block.
    # Returns
        Output tensor for the block.
    Note that from stage 3, the first conv layer at the main path
    has strides=(2, 2), and the shortcut has strides=(2, 2) as well.
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), strides=strides,
               name=conv_name_base + '2a', kernel_regularizer=l2(0.0001))(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, padding='same',
               name=conv_name_base + '2b', kernel_regularizer=l2(0.0001))(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(0.0001))(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    shortcut = Conv2D(filters3, (1, 1), strides=strides,
                      name=conv_name_base + '1', kernel_regularizer=l2(0.0001))(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x 
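A hedged usage sketch pairing conv_block with the identity_block from Example 46 (filter sizes are illustrative; the module's own imports are assumed in scope):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(56, 56, 64))
x = conv_block(inp, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
Model(inp, x).summary()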
Example 77
Project: lmtc-eurlex57k   Author: iliaschalkidis   File: document_classification.py    Apache License 2.0 4 votes vote down vote up
def _compile_bigrus_attention(self, shape, n_hidden_layers, hidden_units_size, dropout_rate, word_dropout_rate, lr):
        """
        Compiles a Hierarchical RNN based on the given parameters
        :param hidden_units_size: size of hidden units, as a list
        :param dropout_rate: The percentage of inputs to dropout
        :param word_dropout_rate: The percentage of timesteps to dropout
        :param lr: learning rate
        :return: Nothing
        """

        # Document Feature Representation
        if self.elmo:
            document_inputs = Input(shape=(1, ), dtype='string', name='document_inputs')
            document_elmos = ElmoEmbeddingLayer()(document_inputs)
            document_inputs2 = Input(shape=(None,), name='document_inputs2')
            self.pretrained_embeddings = self.PretrainedEmbedding()
            document_embs = self.pretrained_embeddings(document_inputs2)
            doc_embs = concatenate([document_embs, document_elmos])

        else:
            document_inputs = Input(shape=(None,), name='document_inputs')
            self.pretrained_embeddings = self.PretrainedEmbedding()
            doc_embs = self.pretrained_embeddings(document_inputs)

        # Apply variational dropout
        drop_doc_embs = SpatialDropout1D(dropout_rate, name='feature_dropout')(doc_embs)
        encodings = TimestepDropout(word_dropout_rate, name='word_dropout')(drop_doc_embs)

        # Bi-GRUs over token embeddings
        for i in range(n_hidden_layers):
            if self._cuDNN:
                grus = Bidirectional(CuDNNGRU(hidden_units_size, return_sequences=True), name='bidirectional_grus_{}'.format(i))(encodings)
            else:
                grus = Bidirectional(GRU(hidden_units_size, activation="tanh", recurrent_activation='sigmoid',
                                         return_sequences=True), name='bidirectional_grus_{}'.format(i))(encodings)
            grus = Camouflage(mask_value=0.0)([grus, encodings])
            if i == 0:
                encodings = SpatialDropout1D(dropout_rate)(grus)
            else:
                encodings = add([grus, encodings])
                encodings = SpatialDropout1D(dropout_rate)(encodings)

        # Attention over BI-GRU (context-aware) embeddings
        if self._attention_mechanism == 'maxpooling':
            doc_encoding = GlobalMaxPooling1D(name='max_pooling')(encodings)
        elif self._attention_mechanism == 'attention':
            # Mask encodings before attention
            grus_outputs = SymmetricMasking(mask_value=0, name='masking')([encodings, encodings])
            doc_encoding = Attention(kernel_regularizer=l2(), bias_regularizer=l2(), name='self_attention')(grus_outputs)
        losses = 'binary_crossentropy' \
            if self._decision_type == 'multi_label' else 'categorical_crossentropy'
        loss_weights = None

        # Final output (projection) layer
        outputs = Dense(self.n_classes, activation='sigmoid' if self._decision_type == 'multi_label' else 'softmax', name='outputs')(doc_encoding)

        # Wrap up model + Compile with optimizer and loss function
        self.model = Model(inputs=document_inputs if not self.elmo else [document_inputs, document_inputs2],
                           outputs=[outputs])
        self.model.compile(optimizer=Adam(lr=lr, clipvalue=5.0), loss=losses, loss_weights=loss_weights) 
Example 78
Project: Document-Classifier-LSTM   Author: AlexGidiotis   File: classifier.py    MIT License 4 votes vote down vote up
def build_model(nb_classes,
	word_index,
	embedding_dim,
	seq_length,
	stamp):
	"""
	"""

	embedding_matrix, nb_words = prepare_embeddings(word_index)

	input_layer = Input(shape=(seq_length,),
		dtype='int32')

	embedding_layer = Embedding(input_dim=nb_words+1,
		output_dim=embedding_dim,
		input_length=seq_length,
		weights=[embedding_matrix],
		embeddings_regularizer=regularizers.l2(0.00),  # factor 0.0: penalty disabled
		trainable=True)(input_layer)

	
	drop1 = SpatialDropout1D(0.3)(embedding_layer)

	lstm_1 = Bidirectional(LSTM(128, name='blstm_1',
		activation='tanh',
		recurrent_activation='hard_sigmoid',
		recurrent_dropout=0.0,
		dropout=0.5,
		kernel_initializer='glorot_uniform',
		return_sequences=True),
		merge_mode='concat')(drop1)
	lstm_1 = BatchNormalization()(lstm_1)

	att_layer = AttentionWithContext()(lstm_1)

	drop3 = Dropout(0.5)(att_layer)
	
	predictions = Dense(nb_classes, activation='sigmoid')(drop3)

	model = Model(inputs=input_layer, outputs=predictions)

	adam = Adam(lr=0.001,
		decay=0.0)

	model.compile(loss='binary_crossentropy',
		optimizer=adam,
		metrics=[f1_score])

	model.summary()
	print(stamp)


	# Save the model.
	model_json = model.to_json()
	with open(stamp + ".json", "w") as json_file:
		json_file.write(model_json)


	return model 
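
A hypothetical usage sketch (tokenizer, prepare_embeddings, and f1_score are assumed to come from the rest of the project; the argument values here are illustrative only):

# Sketch only: word_index would come from a Keras Tokenizer fitted
# on the training corpus, and 'run1' is an arbitrary file stamp.
model = build_model(nb_classes=10,
	word_index=tokenizer.word_index,
	embedding_dim=200,
	seq_length=1000,
	stamp='run1')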
Example 79
Project: DeepLearn   Author: GauravBh1010tt   File: eval_fnc.py    MIT License 4 votes vote down vote up
def prepare_model1(ninputs=9600, n_feats=45,nclass=4,n_tfidf=10001):
    inp1 = Input(shape=(ninputs,))
    inp2 = Input(shape=(n_feats,))
    inp3 = Input(shape=(n_tfidf,))
    reg = 0.00005
    out_neurons1 = 500
    #out_neurons2 = 20
    #out_neurons2 = 10
    m1 = Dense(out_neurons1, activation='sigmoid',
               kernel_regularizer=regularizers.l2(0.00000001))(inp1)
    m1 = Dropout(0.5)(m1)
    m1 = Dense(100, activation='sigmoid')(m1)
    m1 = Dropout(0.5)(m1)
    
    m2 = Dense(n_feats, activation='relu')(inp2)
    m2 = Dense(30, activation='relu')(m2)
    
    
    m3 = Dense(500, activation='relu',
               kernel_regularizer=regularizers.l2(reg))(inp3)
    
    m3 = Dropout(0.6)(m3)
    m3 = Dense(100, activation='relu')(m3)
    m3 = Dropout(0.4)(m3)
    m3 = Dense(4, activation='softmax')(m3)
    
    
    #m1 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='sigmoid')(m1)
    #m2 = Dense(input_dim=ninputs, output_dim=out_neurons2,activation='softmax')(m2)
    
    m = concatenate([m1, m2, m3])
    
    #mul = Multiply()([m1,m2])
    #add = Abs()([m1,m2])
    #m = Merge(mode='concat')([mul,add])
    
    score = Dense(nclass, activation='softmax')(m)
    model = Model([inp1,inp2,inp3],score)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
    
    """
    Set up and compile the model architecture (Logistic regression)
    
    print 'changed'
    out_neurons1 = 500
    lrmodel = Sequential()
    lrmodel.add(Dense(input_dim=ninputs, output_dim=out_neurons1,activation='sigmoid'\
                      ,kernel_regularizer=regularizers.l2(0.00000001)))
    lrmodel.add(Dropout(0.5))
    #lrmodel.add(Dense(out_neurons2))
    #lrmodel.add(Dropout(0.5))
    lrmodel.add(Dense(output_dim=nclass))
    
    #lrmodel.add(Dense(input_dim=ninputs, output_dim=nclass))
    #lrmodel.add(Dropout(0.3))
    lrmodel.add(Activation('softmax'))
    lrmodel.compile(loss='categorical_crossentropy', optimizer='adam')
    return lrmodel
    """ 
Example 80
Project: Quagga   Author: HPI-Information-Systems   File: model.py    GNU General Public License v3.0 4 votes vote down vote up
def init_lstm_model(self):
        model = Sequential()
        model.add(Masking(mask_value=0.0,
                          input_shape=(None, len(self.features))))
        model.add(GaussianNoise(0.3))
        model.add(GRU(units=len(self.features),
                      # input_shape=(None, len(self.features)),
                      return_sequences=True,
                      kernel_initializer='glorot_uniform',
                      recurrent_initializer='orthogonal',
                      bias_initializer='zeros',
                      # kernel_regularizer=l2(0.1),
                      bias_regularizer=l2(0.2),
                      # recurrent_regularizer=l2(0.4),
                      # activity_regularizer=l2(0.4),
                      activation='tanh',
                      recurrent_activation='hard_sigmoid',
                      # dropout=0.2,
                      # recurrent_dropout=0.1,
                      name='rnn_layer1'))
        # model.add(LSTM(units=10,
        #                return_sequences=True,
        #                kernel_initializer='he_uniform',
        #                recurrent_initializer='orthogonal',
        #                kernel_regularizer=l2(0.01),
        #                recurrent_regularizer=l2(0.01),
        #                activation='tanh',
        #                dropout=0.3,
        #                # recurrent_dropout=0.1,
        #                name='rnn_layer2'))
        # model.add(Dense(len(self.labels),
        #                 kernel_initializer='he_uniform',
        #                 activity_regularizer=l2(0.05),
        #                 bias_regularizer=l2(0.01),
        #                 kernel_regularizer=l2(0.01),
        #                 activation='softmax',
        #                 name='output'))
        model.add(GRU(units=len(self.label_encoder.classes_),
                      return_sequences=True,
                      kernel_initializer='glorot_uniform',
                      recurrent_initializer='orthogonal',
                      bias_initializer='zeros',
                      # kernel_regularizer=l2(0.2),
                      # bias_regularizer=l2(0.2),
                      # recurrent_regularizer=l2(0.2),
                      # activity_regularizer=l2(0.2),
                      recurrent_activation='hard_sigmoid',
                      activation='softmax',
                      name='output'))

        model.compile(loss='categorical_crossentropy',  # 'msle',
                      optimizer='Adam',  # 'RMSprop',  # opts.Adadelta(),
                      sample_weight_mode='temporal',
                      metrics=['accuracy'])

        model.summary()

        self.init_weights = model.get_weights()
        self.model = model
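
Every active regularizer registers its penalty tensor on the model, so the single bias_regularizer=l2(0.2) on 'rnn_layer1' above can be verified after building, e.g.:

# One entry is expected in model.losses: the l2(0.2) bias penalty
# from 'rnn_layer1' (all the other regularizers are commented out).
print(len(model.losses))  # -> 1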