Python keras.regularizers.l1() Examples

The following code examples show how to use keras.regularizers.l1(). They are drawn from open source Python projects.

Example 1
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input2'], 'l1': net['InnerProduct']}
        net['l0']['connection']['output'].append('l1')
        # Test 1
        inp = data(net['l0'], '', 'l0')['l0']
        temp = dense(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Dense')
        # Test 2
        net['l1']['params']['weight_filler'] = 'glorot_normal'
        net['l1']['params']['bias_filler'] = 'glorot_normal'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = dense(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Dense') 
Example 2
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['ReLU']}
        # Test 1
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        temp = activation(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation')
        # Test 2
        net['l1']['params']['negative_slope'] = 1
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        temp = activation(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'LeakyReLU') 
Example 3
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Deconvolution']}
        net['l0']['connection']['output'].append('l1')
        # Test 1
        inp = data(net['l0'], '', 'l0')['l0']
        temp = deconvolution(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Conv2DTranspose')
        # Test 2
        net['l1']['params']['weight_filler'] = 'xavier'
        net['l1']['params']['bias_filler'] = 'xavier'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = deconvolution(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[2].__class__.__name__, 'Conv2DTranspose') 
Example 4
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input3'], 'l1': net['Embed']}
        net['l0']['connection']['output'].append('l1')
        # Test 1
        inp = data(net['l0'], '', 'l0')['l0']
        temp = embed(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Embedding')
        # Test 2
        net['l1']['params']['input_length'] = None
        net['l1']['params']['weight_filler'] = 'VarianceScaling'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = embed(net['l1'], [inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Embedding')


# ********** Merge Layers Test ********** 
Example 5
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['BatchNorm'], 'l2': net['Scale']}
        net['l0']['connection']['output'].append('l1')
        # Test 1
        inp = data(net['l0'], '', 'l0')['l0']
        temp = batch_norm(net['l1'], [inp], 'l1', 'l2', net['l2'])
        model = Model(inp, temp['l2'])
        self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization')
        # Test 2
        net['l2']['params']['filler'] = 'VarianceScaling'
        net['l2']['params']['bias_filler'] = 'VarianceScaling'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = batch_norm(net['l1'], [inp], 'l1', 'l2', net['l2'])
        model = Model(inp, temp['l2'])
        self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization')
        # Test 3
        inp = data(net['l0'], '', 'l0')['l0']
        temp = batch_norm(net['l1'], [inp], 'l1', 'l0', net['l0'])
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'BatchNormalization') 
Example 6
Project: keras-anomaly-detection   Author: chen0040   File: feedforward.py    MIT License
def create_model(self, input_dim):
        encoding_dim = 14
        input_layer = Input(shape=(input_dim,))

        encoder = Dense(encoding_dim, activation="tanh",
                        activity_regularizer=regularizers.l1(10e-5))(input_layer)
        encoder = Dense(encoding_dim // 2, activation="relu")(encoder)

        decoder = Dense(encoding_dim // 2, activation='tanh')(encoder)
        decoder = Dense(input_dim, activation='relu')(decoder)

        model = Model(inputs=input_layer, outputs=decoder)
        model.compile(optimizer='adam',
                      loss='mean_squared_error',
                      metrics=['accuracy'])

        return model 
Example 7
Project: fy2015-replication   Author: educational-technology-collective   File: train_lstm.py    MIT License
def droprate_lstm_l1reg_train(X, y, hidden_size=HIDDEN_SIZE, l1_lambda=0.01):
    """
    Construct an LSTM model with a single dropout layer after the input layer to predict the type I dropout rate (see paper) from features in every week.
    Fit the model with training data.
    :param X: a numpy array of features, has shape (N, n_week, n_feature)
    :param y: a numpy array of labels, has shape (N, 1)
    :param hidden_size: an integer of hidden layer size.
    :param l1_lambda: the L1 regularization coefficient applied to the LSTM kernel and activity regularizers.
    :return: model: a fitted LSTM model as keras.models.Sequential
    """
    model = Sequential()
    model.add(LSTM(hidden_size, input_shape=(X.shape[1], X.shape[2]),
                   kernel_regularizer=regularizers.l1(l1_lambda), activity_regularizer=regularizers.l1(l1_lambda)))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
    print(model.summary())
    model.fit(X, y, epochs=EPOCHS, batch_size=BATCH_SIZE)
    return model 
Example 8
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def dense_model(Inputs, nclasses, l1Reg=0, dropoutRate=0.25):
    """
    Dense matrix, defaults similar to 2016 DeepCSV training
    """
    x = Dense(200, activation='relu', kernel_initializer='lecun_uniform', name='fc1_relu', W_regularizer=l1(l1Reg))(Inputs)
    x = Dropout(dropoutRate)(x)
    x = Dense(200, activation='relu', kernel_initializer='lecun_uniform', name='fc2_relu', W_regularizer=l1(l1Reg))(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(200, activation='relu', kernel_initializer='lecun_uniform', name='fc3_relu', W_regularizer=l1(l1Reg))(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(200, activation='relu', kernel_initializer='lecun_uniform', name='fc4_relu', W_regularizer=l1(l1Reg))(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(200, activation='relu', kernel_initializer='lecun_uniform', name='fc5_relu', W_regularizer=l1(l1Reg))(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', name = 'output_softmax')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model 
Example 9
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def three_layer_model_constraint(Inputs, nclasses, l1Reg=0, h5fName=None):
    """
    Two hidden layers model
    """
    h5f = h5py.File(h5fName)
    x = Dense(64, activation='relu', kernel_initializer='lecun_uniform', 
              name='fc1_relu', W_regularizer=l1(l1Reg), 
              kernel_constraint = zero_some_weights(binary_tensor=h5f['fc1_relu'][()].tolist()))(Inputs)
    x = Dense(32, activation='relu', kernel_initializer='lecun_uniform', 
              name='fc2_relu', W_regularizer=l1(l1Reg), 
              kernel_constraint = zero_some_weights(binary_tensor=h5f['fc2_relu'][()].tolist()))(x)
    x = Dense(32, activation='relu', kernel_initializer='lecun_uniform', 
              name='fc3_relu', W_regularizer=l1(l1Reg), 
              kernel_constraint = zero_some_weights(binary_tensor=h5f['fc3_relu'][()].tolist()))(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', 
                        name='output_softmax', W_regularizer=l1(l1Reg), 
                        kernel_constraint = zero_some_weights(binary_tensor=h5f['output_softmax'][()].tolist()))(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model 
Example 10
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def three_layer_model_tanh_constraint(Inputs, nclasses, l1Reg=0, h5fName=None):
    """
    Two hidden layers model
    """
    h5f = h5py.File(h5fName)
    x = Dense(64, activation='tanh', kernel_initializer='lecun_uniform', 
              name='fc1_tanh', W_regularizer=l1(l1Reg), 
              kernel_constraint = zero_some_weights(binary_tensor=h5f['fc1_tanh'][()].tolist()))(Inputs)
    x = Dense(32, activation='tanh', kernel_initializer='lecun_uniform', 
              name='fc2_tanh', W_regularizer=l1(l1Reg), 
              kernel_constraint = zero_some_weights(binary_tensor=h5f['fc2_tanh'][()].tolist()))(x)
    x = Dense(32, activation='tanh', kernel_initializer='lecun_uniform', 
              name='fc3_tanh', W_regularizer=l1(l1Reg), 
              kernel_constraint = zero_some_weights(binary_tensor=h5f['fc3_tanh'][()].tolist()))(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', 
                        name='output_softmax', W_regularizer=l1(l1Reg), 
                        kernel_constraint = zero_some_weights(binary_tensor=h5f['output_softmax'][()].tolist()))(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model 
Example 11
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def conv1d_model(Inputs, nclasses, l1Reg=0):
    """
    Conv1D model, kernel size 4
    """
    x = Conv1D(filters=8, kernel_size=4, strides=1, padding='same',
               kernel_initializer='he_normal', use_bias=True, name='conv1_relu',
               activation = 'relu', W_regularizer=l1(l1Reg))(Inputs)
    x = Conv1D(filters=4, kernel_size=4, strides=2, padding='same',
               kernel_initializer='he_normal', use_bias=True, name='conv2_relu',
               activation = 'relu', W_regularizer=l1(l1Reg))(x)
    x = Conv1D(filters=2, kernel_size=4, strides=3, padding='same',
               kernel_initializer='he_normal', use_bias=True, name='conv3_relu',
               activation = 'relu', W_regularizer=l1(l1Reg))(x)
    x = Flatten()(x)
    x = Dense(32, activation='relu', kernel_initializer='lecun_uniform', 
              name='fc1_relu', W_regularizer=l1(l1Reg))(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', 
                        name='output_softmax', W_regularizer=l1(l1Reg))(x)
    model = Model(inputs=Inputs, outputs=predictions)
    print(model.summary())
    return model 
Example 12
Project: applications   Author: geomstats   File: core_test.py    MIT License
def test_activity_regularization():
    layer = layers.ActivityRegularization(l1=0.01, l2=0.01)

    # test in functional API
    x = layers.Input(shape=(3,))
    z = layers.Dense(2)(x)
    y = layer(z)
    model = Model(x, y)
    model.compile('rmsprop', 'mse')

    model.predict(np.random.random((2, 3)))

    # test serialization
    model_config = model.get_config()
    model = Model.from_config(model_config)
    model.compile('rmsprop', 'mse') 
Example 13
Project: applications   Author: geomstats   File: recurrent_test.py    MIT License
def test_regularizer(layer_class):
    layer = layer_class(units, return_sequences=False, weights=None,
                        input_shape=(timesteps, embedding_dim),
                        kernel_regularizer=regularizers.l1(0.01),
                        recurrent_regularizer=regularizers.l1(0.01),
                        bias_regularizer='l2')
    layer.build((None, None, embedding_dim))
    assert len(layer.losses) == 3
    assert len(layer.cell.losses) == 3

    layer = layer_class(units, return_sequences=False, weights=None,
                        input_shape=(timesteps, embedding_dim),
                        activity_regularizer='l2')
    assert layer.activity_regularizer
    x = K.variable(np.ones((num_samples, timesteps, embedding_dim)))
    layer(x)
    assert len(layer.cell.get_losses_for(x)) == 0
    assert len(layer.get_losses_for(x)) == 1 
Example 14
Project: VisualNN   Author: angelhunt   File: layers_export.py    GNU General Public License v3.0
def regularization(layer, layer_in, layerId, tensor=True):
    l1 = layer['params']['l1']
    l2 = layer['params']['l2']
    out = {layerId: ActivityRegularization(l1=l1, l2=l2)}
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out 
Example 15
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        # Test 1
        img_input = Input((224, 224, 3))
        model = Conv2D(64, (3, 3), padding='same', dilation_rate=1, use_bias=True,
                       kernel_regularizer=regularizers.l1(), bias_regularizer='l1',
                       activity_regularizer='l1', kernel_constraint='max_norm',
                       bias_constraint='max_norm')(img_input)
        model = BatchNormalization(center=True, scale=True, beta_regularizer=regularizers.l2(0.01),
                                   gamma_regularizer=regularizers.l2(0.01),
                                   beta_constraint='max_norm', gamma_constraint='max_norm',)(model)
        model = Model(img_input, model)
        json_string = Model.to_json(model)
        with open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'w') as out:
            json.dump(json.loads(json_string), out, indent=4)
        sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'r')
        response = self.client.post(reverse('keras-import'), {'file': sample_file})
        response = json.loads(response.content)
        net = get_shapes(response['net'])
        response = self.client.post(reverse('keras-export'), {'net': json.dumps(net),
                                                              'net_name': ''})
        response = json.loads(response.content)
        self.assertEqual(response['result'], 'success')
        # Test 2
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'ide',
                                  'caffe_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['HDF5Data']}
        # Currently we can't determine shape of HDF5Data Layer
        response = self.client.post(reverse('keras-export'), {'net': json.dumps(net),
                                                              'net_name': ''})
        response = json.loads(response.content)
        self.assertEqual(response['result'], 'error')


# *********** Keras Backend Test ********** 
Example 16
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_import(self):
        model = Sequential()
        model.add(ActivityRegularization(l1=2, input_shape=(10,)))
        model.build()
        self.keras_type_test(model, 0, 'Regularization') 
Example 17
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['ELU']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'ELU') 
Example 18
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['ThresholdedReLU']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'ThresholdedReLU') 
Example 19
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Sigmoid']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 20
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['TanH']}
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 21
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Softmax']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 22
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Softplus']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 23
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Softsign']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 24
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['HardSigmoid']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 25
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Linear']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = activation(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Activation') 
Example 26
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input3'], 'l1': net['Dropout']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = dropout(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Dropout') 
Example 27
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Reshape']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = reshape(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Reshape') 
Example 28
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input2'], 'l1': net['Permute']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = permute(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Permute') 
Example 29
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input3'], 'l1': net['RepeatVector']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = repeat_vector(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'RepeatVector') 
Example 30
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input3'], 'l1': net['Regularization']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = regularization(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'ActivityRegularization') 
Example 31
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input2'], 'l1': net['Masking']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = masking(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Masking')


# ********** Vision Layers Test ********** 
Example 32
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Upsample']}
        # Upsample 1D
        net['l1']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l1']
        net['l3']['params']['layer_type'] = '1D'
        inp = data(net['l1'], '', 'l1')['l1']
        temp = upsample(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling1D')
        # Upsample 2D
        net['l0']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l0']
        net['l3']['params']['layer_type'] = '2D'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = upsample(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling2D')
        # Upsample 3D
        net['l2']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l2']
        net['l3']['params']['layer_type'] = '3D'
        inp = data(net['l2'], '', 'l2')['l2']
        temp = upsample(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling3D') 
Example 33
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Pooling']}
        # Pool 1D
        net['l1']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l1']
        net['l3']['params']['layer_type'] = '1D'
        net['l3']['shape']['input'] = net['l1']['shape']['output']
        net['l3']['shape']['output'] = [12, 12]
        inp = data(net['l1'], '', 'l1')['l1']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling1D')
        # Pool 2D
        net['l0']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l0']
        net['l3']['params']['layer_type'] = '2D'
        net['l3']['shape']['input'] = net['l0']['shape']['output']
        net['l3']['shape']['output'] = [3, 226, 226]
        inp = data(net['l0'], '', 'l0')['l0']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling2D')
        # Pool 3D
        net['l2']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l2']
        net['l3']['params']['layer_type'] = '3D'
        net['l3']['shape']['input'] = net['l2']['shape']['output']
        net['l3']['shape']['output'] = [3, 226, 226, 18]
        inp = data(net['l2'], '', 'l2')['l2']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling3D')


# ********** Locally-connected Layers ********** 
Example 34
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Input2'], 'l3': net['LocallyConnected']}
        # LocallyConnected 1D
        net['l1']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l1']
        net['l3']['params']['layer_type'] = '1D'
        inp = data(net['l1'], '', 'l1')['l1']
        temp = locally_connected(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'LocallyConnected1D')
        # LocallyConnected 2D
        net['l0']['connection']['output'].append('l3')
        net['l0']['shape']['output'] = [3, 10, 10]
        net['l3']['connection']['input'] = ['l0']
        net['l3']['params']['layer_type'] = '2D'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = locally_connected(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'LocallyConnected2D')


# ********** Recurrent Layers Test ********** 
Example 35
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input2'], 'l1': net['RNN']}
        net['l0']['connection']['output'].append('l1')
        # net = get_shapes(net)
        inp = data(net['l0'], '', 'l0')['l0']
        net = recurrent(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'SimpleRNN') 
Example 36
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input2'], 'l1': net['GRU']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = recurrent(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'GRU')


# ********** Embed Layer Test ********* 
Example 37
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Eltwise']}
        net['l0']['connection']['output'].append('l1')
        # Test 1
        inp = data(net['l0'], '', 'l0')['l0']
        temp = eltwise(net['l1'], [inp, inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Multiply')
        # Test 2
        net['l1']['params']['layer_type'] = 'Sum'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = eltwise(net['l1'], [inp, inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Add')
        # Test 3
        net['l1']['params']['layer_type'] = 'Average'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = eltwise(net['l1'], [inp, inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Average')
        # Test 4
        net['l1']['params']['layer_type'] = 'Dot'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = eltwise(net['l1'], [inp, inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Dot')
        # Test 5
        net['l1']['params']['layer_type'] = 'Maximum'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = eltwise(net['l1'], [inp, inp], 'l1')
        model = Model(inp, temp['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Maximum') 
Example 38
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Concat']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = concat(net['l1'], [inp, inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'Concatenate')


# ********** Noise Layers Test ********** 
Example 39
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['GaussianNoise']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = gaussian_noise(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'GaussianNoise') 
Example 40
Project: VisualNN   Author: angelhunt   File: test_views.py    GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['AlphaDropout']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = alpha_dropout(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'AlphaDropout')


# ********** Normalisation Layers Test ********** 
Example 41
Project: CAPTCHA-breaking   Author: lllcho   File: test_regularizers.py    MIT License
def test_W_reg(self):
        for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
            model = create_model(weight_reg=reg)
            model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
            model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
            model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0) 
Example 42
Project: 4Dsurvival   Author: UK-Digital-Heart-Project   File: trainDL.py    GNU General Public License v3.0
def DL_single_run(xtr, ytr, units1, units2, dro, lr, l1r, alpha, batchsize, numepochs):
    #Data preparation: create X, E and TM where X=input vector, E=censoring status and T=survival time. Apply formatting (X and T as 'float32', E as 'int32')
    X_tr, E_tr, TM_tr = prepare_data(xtr, ytr[:,0,np.newaxis], ytr[:,1])

    #Arrange data into minibatches (based on specified batch size), and within each minibatch, sort in descending order of survival/censoring time (see explanation of Cox PH loss function definition)
    X_tr, E_tr, TM_tr, _ = sort4minibatches(X_tr, E_tr, TM_tr, batchsize)
    
    #before defining network architecture, clear current computation graph (if one exists), and specify input dimensionality
    K.clear_session()
    inpshape = xtr.shape[1]
    
    #Define Network Architecture
    inputvec= Input(shape=(inpshape,))
    x       = Dropout(dro, input_shape=(inpshape,))(inputvec)
    x       = Dense(units=int(units1), activation='relu', activity_regularizer=l1(l1r))(x)
    encoded = Dense(units=int(units2), activation='relu', name='encoded')(x)
    riskpred= Dense(units=1,  activation='linear', name='predicted_risk')(encoded)
    z       = Dense(units=int(units1),  activation='relu')(encoded)
    decoded = Dense(units=inpshape, activation='linear', name='decoded')(z)

    model = Model(inputs=inputvec, outputs=[decoded,riskpred])
    model.summary()
    
    #Model compilation
    optimdef = Adam(lr = lr)
    model.compile(loss=[keras.losses.mean_squared_error, _negative_log_likelihood], loss_weights=[alpha,1-alpha], optimizer=optimdef, metrics={'decoded':keras.metrics.mean_squared_error})
    
    #Run model
    mlog = model.fit(X_tr, [X_tr,E_tr], batch_size=batchsize, epochs=numepochs, shuffle=False, verbose=1)

    return mlog 
Example 43
Project: malware-prediction-rnn   Author: mprhode   File: RNN.py    Apache License 2.0
def __generate_regulariser(self, l1_value, l2_value):
		""" Returns keras l1/l2 regulariser"""
		if l1_value and l2_value:
			return l1_l2(l1=l1_value, l2=l2_value)
		elif l1_value and not l2_value:
			return l1(l1_value)
		elif l2_value:
			return l2(l2_value)
		else:
			return None 
Example 44
Project: workspace_2017   Author: nwiizo   File: test_regularizers.py    MIT License
def test_W_reg():
    (X_train, Y_train), (X_test, Y_test), test_ids = get_data()
    for reg in [regularizers.l1(),
                regularizers.l2(),
                regularizers.l1l2()]:
        model = create_model(weight_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        assert len(model.losses) == 1
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
        model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0) 
Example 45
Project: dream2016_dm   Author: lishen   File: dm_resnet.py    GNU General Public License v3.0
def l1l2_penalty_reg(alpha=1.0, l1_ratio=0.5):
        '''Calculate L1 and L2 penalties for a Keras layer
        This follows the same formulation as in the R package glmnet and Sklearn
        Args:
            alpha ([float]): amount of regularization.
            l1_ratio ([float]): portion of L1 penalty. Setting to 1.0 equals 
                    Lasso.
        '''
        if l1_ratio == .0:
            return l2(alpha)
        elif l1_ratio == 1.:
            return l1(alpha)
        else:
            return l1l2(l1_ratio*alpha, 1./2*(1 - l1_ratio)*alpha) 
Example 46
Project: DeepPINK   Author: younglululu   File: run_withKnockoff_all.py    GNU General Public License v2.0
def build_DNN(p, coeff=0):

    input = Input(name='input', shape=(p, 2))
    show_layer_info('Input', input)

    local1 = LocallyConnected1D(filterNum, 1, use_bias=bias, kernel_initializer=Constant(value=0.1))(input)
    show_layer_info('LocallyConnected1D', local1)

    local2 = LocallyConnected1D(1, 1, use_bias=bias, kernel_initializer='glorot_normal')(local1)
    show_layer_info('LocallyConnected1D', local2)

    flat = Flatten()(local2)
    show_layer_info('Flatten', flat)

    dense1 = Dense(p, activation=activation, use_bias=bias, kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l1(coeff))(flat)
    show_layer_info('Dense', dense1)

    dense2 = Dense(p, activation=activation, use_bias=bias, kernel_initializer='glorot_normal', kernel_regularizer=regularizers.l1(coeff))(dense1)
    show_layer_info('Dense', dense2)

    out_ = Dense(1, activation='sigmoid', kernel_initializer='glorot_normal')(dense2)
    show_layer_info('Dense', out_)

    model = Model(inputs=input, outputs=out_)
    # model.compile(loss='mse', optimizer='adam')
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model 
Example 47
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def two_layer_model(Inputs, nclasses, l1Reg=0):
    """
    One hidden layer model
    """
    x = Dense(32, activation='relu', kernel_initializer='lecun_uniform', 
              name='fc1_relu', W_regularizer=l1(l1Reg))(Inputs)
    predictions = Dense(nclasses, activation='sigmoid', kernel_initializer='lecun_uniform', 
                        name = 'output_sigmoid', W_regularizer=l1(l1Reg))(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model 
Example 48
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def two_layer_model_constraint(Inputs, nclasses, l1Reg=0, h5fName=None):
    """
    One hidden layer model
    """
    h5f = h5py.File(h5fName)
    x = Dense(32, activation='relu', kernel_initializer='lecun_uniform', 
              name='fc1_relu', W_regularizer=l1(l1Reg), 
              kernel_constraint = zero_some_weights(binary_tensor=h5f['fc1_relu'][()].tolist()))(Inputs)
    predictions = Dense(nclasses, activation='sigmoid', kernel_initializer='lecun_uniform', 
                        name = 'output_sigmoid', W_regularizer=l1(l1Reg), 
                        kernel_constraint = zero_some_weights(binary_tensor=h5f['output_sigmoid'][()].tolist()))(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model 
Example 49
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def three_layer_model(Inputs, nclasses, l1Reg=0):
    """
    Two hidden layers model
    """
    x = Dense(64, activation='relu', kernel_initializer='lecun_uniform', 
              name='fc1_relu', W_regularizer=l1(l1Reg))(Inputs)
    x = Dense(32, activation='relu', kernel_initializer='lecun_uniform', 
              name='fc2_relu', W_regularizer=l1(l1Reg))(x)
    x = Dense(32, activation='relu', kernel_initializer='lecun_uniform', 
              name='fc3_relu', W_regularizer=l1(l1Reg))(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', 
                        name='output_softmax', W_regularizer=l1(l1Reg))(x)
    model = Model(inputs=Inputs, outputs=predictions)
    return model 
Example 50
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def three_layer_model_batch_norm(Inputs, nclasses, l1Reg=0):
    """
    Two hidden layers model
    """
    x = Dense(64, kernel_initializer='lecun_uniform', 
              name='fc1_relu', W_regularizer=l1(l1Reg))(Inputs)
    x = BatchNormalization(epsilon=1e-6, momentum=0.9, name='bn1')(x)
    x = Activation(activation='relu', name='relu1')(x)
              
    x = Dense(32, kernel_initializer='lecun_uniform', 
              name='fc2_relu', W_regularizer=l1(l1Reg))(x)
    x = BatchNormalization(epsilon=1e-6, momentum=0.9, name='bn2')(x)
    x = Activation(activation='relu', name='relu2')(x)
    
    x = Dense(32, kernel_initializer='lecun_uniform', 
              name='fc3_relu', W_regularizer=l1(l1Reg))(x)
    x = BatchNormalization(epsilon=1e-6, momentum=0.9, name='bn3')(x)
    x = Activation(activation='relu', name='relu3')(x)
    
    x = Dense(nclasses, kernel_initializer='lecun_uniform', 
                        name='output_softmax', W_regularizer=l1(l1Reg))(x)
    x = BatchNormalization(epsilon=1e-6, momentum=0.9, name='bn4')(x)
    predictions = Activation(activation='softmax', name='softmax')(x)

    model = Model(inputs=Inputs, outputs=predictions)
    return model 
Example 51
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def conv1d_model_constraint(Inputs, nclasses, l1Reg=0, h5fName=None):
    """
    Conv1D model, kernel size 4
    """
    h5f = h5py.File(h5fName)
    x = Conv1D(filters=8, kernel_size=4, strides=1, padding='same',
               kernel_initializer='he_normal', use_bias=True, name='conv1_relu',
               activation = 'relu', W_regularizer=l1(l1Reg), 
               kernel_constraint = zero_some_weights(binary_tensor=h5f['conv1_relu'][()].tolist()))(Inputs)
    x = Conv1D(filters=8, kernel_size=4, strides=1, padding='same',
               kernel_initializer='he_normal', use_bias=True, name='conv2_relu',
               activation = 'relu', W_regularizer=l1(l1Reg), 
               kernel_constraint = zero_some_weights(binary_tensor=h5f['conv2_relu'][()].tolist()))(x)
    x = Conv1D(filters=8, kernel_size=4, strides=1, padding='same',
               kernel_initializer='he_normal', use_bias=True, name='conv3_relu',
               activation = 'relu', W_regularizer=l1(l1Reg), 
               kernel_constraint = zero_some_weights(binary_tensor=h5f['conv3_relu'][()].tolist()))(x)
    x = Flatten()(x)
    x = Dense(32, activation='relu', kernel_initializer='lecun_uniform', 
              name='fc1_relu', W_regularizer=l1(l1Reg), 
              kernel_constraint = zero_some_weights(binary_tensor=h5f['fc1_relu'][()].tolist()))(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', 
                        name='output_softmax', W_regularizer=l1(l1Reg), 
                        kernel_constraint = zero_some_weights(binary_tensor=h5f['output_softmax'][()].tolist()))(x)
    model = Model(inputs=Inputs, outputs=predictions)
    print(model.summary())
    return model 
Example 52
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def lstm_model(Inputs, nclasses, l1Reg=0,l1RegR=0):
    """
    Basic LSTM model
    """
    x = LSTM(16,return_sequences=False,  kernel_regularizer=l1(l1Reg),recurrent_regularizer=l1(l1RegR),activation='relu',kernel_initializer='lecun_uniform',name='lstm_lstm')(Inputs)
    #x = Flatten()(x)
    x = Dropout(0.1)(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', name='rnn_densef')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    print(model.summary())
    return model 
Example 53
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def gru_model(Inputs, nclasses, l1Reg=0,l1RegR=0):
    """                                                                                                                                                                                                                                                                         
    Basic GRU model                                                                                                                                                                                                                                                             
    """
    x = GRU(20,kernel_regularizer=l1(l1Reg),recurrent_regularizer=l1(l1RegR),activation='relu', recurrent_activation='sigmoid', name='gru_selu',)(Inputs)
    #x = GRU(20,kernel_regularizer=l1(l1Reg),recurrent_regularizer=l1(l1RegR),activation='selu', recurrent_activation='hard_sigmoid', name='gru_selu',)(Inputs)
    x = Dense(20,kernel_regularizer=l1(l1Reg),activation='relu', kernel_initializer='lecun_uniform', name='dense_relu')(x)
    x = Dropout(0.1)(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', name='rnn_densef')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    print(model.summary())
    return model 
Example 54
Project: keras-training   Author: hls-fpga-machine-learning   File: models.py    GNU General Public License v3.0
def gru_model_constraint(Inputs, nclasses, l1Reg=0,l1RegR=0,h5fName=None):
    """                                                                                                                                                                                                                                                                         
    Basic GRU  model                                                                                                                                                                                                                                                            
    """
    h5f = h5py.File(h5fName)
    x = GRU(20,kernel_regularizer=l1(l1Reg),recurrent_regularizer=l1(l1RegR),activation='selu',recurrent_activation='hard_sigmoid',name='gru_selu',recurrent_constraint = zero_some_weights(binary_tensor=h5f['gru_selu'][()].tolist()))(Inputs)
    x = Dense(20,kernel_regularizer=l1(l1Reg),activation='relu', kernel_initializer='lecun_uniform',kernel_constraint = zero_some_weights(binary_tensor=h5f['dense_relu'][()].tolist()), name='dense_relu')(x)
    x = Dropout(0.1)(x)
    predictions = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', kernel_constraint = zero_some_weights(binary_tensor=h5f['rnn_densef'][()].tolist()), name='rnn_densef')(x)
    model = Model(inputs=Inputs, outputs=predictions)
    print(model.summary())
    return model 
Example 55
Project: applications   Author: geomstats   File: regularizers_test.py    MIT License
def test_kernel_regularization():
    x_train, y_train = get_data()
    for reg in [regularizers.l1(),
                regularizers.l2(),
                regularizers.l1_l2()]:
        model = create_model(kernel_regularizer=reg)
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        assert len(model.losses) == 1
        model.train_on_batch(x_train, y_train) 
Example 56
Project: applications   Author: geomstats   File: regularizers_test.py    MIT License
def test_activity_regularization():
    x_train, y_train = get_data()
    for reg in [regularizers.l1(), regularizers.l2()]:
        model = create_model(activity_regularizer=reg)
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        assert len(model.losses) == 1
        model.train_on_batch(x_train, y_train) 
Example 57
Project: applications   Author: geomstats   File: regularizers_test.py    MIT License
def test_regularization_shared_layer():
    dense_layer = Dense(num_classes,
                        kernel_regularizer=regularizers.l1(),
                        activity_regularizer=regularizers.l1())

    model = create_multi_input_model_from(dense_layer, dense_layer)
    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    assert len(model.losses) == 6 
Example 58
Project: applications   Author: geomstats   File: regularizers_test.py    MIT License
def test_regularization_shared_model():
    dense_layer = Dense(num_classes,
                        kernel_regularizer=regularizers.l1(),
                        activity_regularizer=regularizers.l1())

    input_tensor = Input(shape=(data_dim,))
    dummy_model = Model(input_tensor, dense_layer(input_tensor))

    model = create_multi_input_model_from(dummy_model, dummy_model)
    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    assert len(model.losses) == 6 
Example 59
Project: applications   Author: geomstats   File: regularizers_test.py    MIT License 5 votes vote down vote up
def test_regularization_shared_layer_in_different_models():
    shared_dense = Dense(num_classes,
                         kernel_regularizer=regularizers.l1(),
                         activity_regularizer=regularizers.l1())
    models = []
    for _ in range(2):
        input_tensor = Input(shape=(data_dim,))
        unshared_dense = Dense(num_classes, kernel_regularizer=regularizers.l1())
        out = unshared_dense(shared_dense(input_tensor))
        models.append(Model(input_tensor, out))

    model = create_multi_input_model_from(*models)
    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    assert len(model.losses) == 8 
Example 60
Project: applications   Author: geomstats   File: layers_test.py    MIT License 5 votes vote down vote up
def test_maxout_dense():
    layer_test(legacy_layers.MaxoutDense,
               kwargs={'output_dim': 3},
               input_shape=(3, 2))

    layer_test(legacy_layers.MaxoutDense,
               kwargs={'output_dim': 3,
                       'W_regularizer': regularizers.l2(0.01),
                       'b_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.l2(0.01),
                       'W_constraint': constraints.MaxNorm(1),
                       'b_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2)) 
Example 61
Project: applications   Author: geomstats   File: core_test.py    MIT License 5 votes vote down vote up
def test_dense():
    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 4, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(None, None, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 4, 5, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3,
                       'kernel_regularizer': regularizers.l2(0.01),
                       'bias_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.L1L2(l1=0.01, l2=0.01),
                       'kernel_constraint': constraints.MaxNorm(1),
                       'bias_constraint': constraints.max_norm(1)},
               input_shape=(3, 2))

    layer = layers.Dense(3,
                         kernel_regularizer=regularizers.l1(0.01),
                         bias_regularizer='l1')
    layer.build((None, 4))
    assert len(layer.losses) == 2 
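The last assertion also shows that regularizers can be passed by string alias ('l1'). In Keras 2 the alias resolves through regularizers.get to the factory with its default factor:

from keras import regularizers

reg = regularizers.get('l1')  # equivalent to regularizers.l1(), i.e. an L1 factor of 0.01
print(type(reg).__name__)     # L1L2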
Example 62
Project: applications   Author: geomstats   File: recurrent_test.py    MIT License 5 votes vote down vote up
def test_from_config(layer_class):
    stateful_flags = (False, True)
    for stateful in stateful_flags:
        l1 = layer_class(units=1, stateful=stateful)  # note: this local l1 is a layer instance, not keras.regularizers.l1
        l2 = layer_class.from_config(l1.get_config())
        assert l1.get_config() == l2.get_config() 
Example 63
Project: toxic_comments   Author: Donskov7   File: models.py    MIT License 5 votes vote down vote up
def _get_regularizer(regularizer_name, weight):
    if regularizer_name is None:
        return None
    if regularizer_name == 'l1':
        return l1(weight)
    if regularizer_name == 'l2':
        return l2(weight)
    if regularizer_name == 'l1_l2':
        return l1_l2(weight)  # positional: sets only the l1 factor
    return None 
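One subtlety, assuming the usual from keras.regularizers import l1, l2, l1_l2 at the top of the module: passing weight positionally to l1_l2 sets only the l1 factor, so the helper above leaves l2 at its 0.01 default. A short illustration:

reg_a = _get_regularizer('l1', 1e-4)     # l1(1e-4)
reg_b = _get_regularizer('l1_l2', 1e-4)  # l1 factor 1e-4, l2 factor still 0.01
reg_c = l1_l2(l1=1e-4, l2=1e-4)          # keywords set both factors explicitly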
Example 64
Project: Advanced-ML-techniques   Author: AlexGidiotis   File: multilabel.py    MIT License 5 votes vote down vote up
def build_model(num_features,
	num_classes,
	embedding_dims,
	maxlen):
	"""
	"""

	input_layer = Input(shape=(maxlen,),
		dtype='int32')


	embeddings = Embedding(num_features,
		embedding_dims,
		input_length=maxlen,
		embeddings_regularizer=regularizers.l1(7e-7))(input_layer)

	avg_layer = GlobalAveragePooling1D()(embeddings)
	predictions = Dense(num_classes, activation='sigmoid')(avg_layer)

	model = Model(inputs=input_layer, outputs=predictions)
	model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=[f1_score])

	model.summary()

	return model 
Example 65
Project: Advanced-ML-techniques   Author: AlexGidiotis   File: hierarchical.py    MIT License 5 votes vote down vote up
def build_model(num_features,
	num_classes,
	embedding_dims,
	maxlen,
	max_sentence_len):
	"""
	"""

	input_layer = Input(shape=(maxlen,max_sentence_len,),
		dtype='int32')
	sentence_input = Input(shape=(max_sentence_len,),
		dtype='int32')

	embeddings = Embedding(num_features,
		embedding_dims,
		input_length=max_sentence_len,
		embeddings_regularizer=regularizers.l1(1e-6))(sentence_input)

	avg_layer = GlobalAveragePooling1D()(embeddings)
	sentEncoder = Model(inputs=sentence_input,
		outputs=avg_layer)
	sentEncoder.summary()
	textEncoder = TimeDistributed(sentEncoder)(input_layer)

	global_avg_layer = Flatten()(textEncoder)

	global_avg_layer = Dropout(0.5)(global_avg_layer)
	predictions = Dense(num_classes, 
		activation='sigmoid',
		kernel_regularizer=regularizers.l1(1e-5))(global_avg_layer)

	model = Model(inputs=input_layer,
		outputs=predictions)
	model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=[f1_score])

	model.summary()

	return model 
Example 66
Project: keras-contrib   Author: keras-team   File: test_core.py    MIT License 5 votes vote down vote up
def test_cosinedense_reg_constraint():
    layer_test(core.CosineDense,
               kwargs={'units': 3,
                       'kernel_regularizer': regularizers.l2(0.01),
                       'bias_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.l2(0.01),
                       'kernel_constraint': constraints.MaxNorm(1),
                       'bias_constraint': constraints.MaxNorm(1)},
               input_shape=(3, 2)) 
Example 67
Project: plastering   Author: plastering   File: ir2tagsets.py    MIT License 5 votes vote down vote up
def get_mlp_model(self, data_dim, output_classes):
        model = Sequential()
        model.add(Dense(64,
                        input_shape=(data_dim,),
                        #bias_regularizer=regularizers.l1(0.0001),
                        #kernel_regularizer=regularizers.l1(0.001),
                        #activity_regularizer=regularizers.l1(0.001),
                        #kernel_constraint=max_norm(3),
                        activation='relu'))
        """
        model.add(Dropout(0.1))
        model.add(Dense(64,
                        input_shape=(data_dim,),
                        #bias_regularizer=regularizers.l1(0.0001),
                        #kernel_regularizer=regularizers.l1(0.001),
                        #activity_regularizer=regularizers.l1(0.001),
                        kernel_constraint=max_norm(3),
                        activation='relu'))
        """
        model.add(Dropout(0.1))
        model.add(Dense(output_classes,
                        #bias_regularizer=regularizers.l1(0.0001),
                        #kernel_regularizer=regularizers.l1(0.0001),
                        #activity_regularizer=regularizers.l2(0.01),
                        #kernel_constraint=max_norm(3),
                        activation='sigmoid'))
        #model.compile(optimizer='sgd',
        model.compile(optimizer='rmsprop',
                      loss='binary_crossentropy',
                      )
        return model 
Example 68
Project: allen-ai-science-qa   Author: arranger1044   File: utils.py    GNU General Public License v3.0 5 votes vote down vote up
def get_regularizer(lambda_l1=None, lambda_l2=None):
    regularizer = None
    if lambda_l1 is None and lambda_l2 is not None:
        regularizer = l2(l=lambda_l2)
    elif lambda_l1 is not None and lambda_l2 is None:
        regularizer = l1(l=lambda_l1)
    elif lambda_l1 is not None and lambda_l2 is not None:
        regularizer = l1l2(l1=lambda_l1, l2=lambda_l2)
    return regularizer 
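This helper targets the Keras 1 API: l1l2 was renamed l1_l2 in Keras 2, while the l= keyword of l1 and l2 survives there. A sketch of the same logic in Keras 2 spelling:

from keras.regularizers import l1, l2, l1_l2

def get_regularizer_v2(lambda_l1=None, lambda_l2=None):
    if lambda_l1 is None and lambda_l2 is not None:
        return l2(lambda_l2)
    if lambda_l1 is not None and lambda_l2 is None:
        return l1(lambda_l1)
    if lambda_l1 is not None and lambda_l2 is not None:
        return l1_l2(l1=lambda_l1, l2=lambda_l2)
    return None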
Example 69
Project: deep_learning   Author: jarvisqi   File: face.py    MIT License 5 votes vote down vote up
def build_model():

    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), padding='valid',
                     input_shape=(image_row, image_row, 3), activation='relu'))
    #  kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.001)))
    model.add(Conv2D(42, kernel_size=(3, 3), activation='relu'))
    #  kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.001)))
    # A pooling layer usually follows the convolution layers: pooling reduces the dimensionality of the convolutional feature maps and improves the results (less prone to overfitting).
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    # kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.001)))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))

    print('Compiling model...')
    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.summary()

    return model 
Example 70
Project: ae-review-resources   Author: fdavidcl   File: autoencoder.py    Mozilla Public License 2.0 5 votes vote down vote up
def build(self):
        # "encoded" is the encoded representation of the input
        activity_reg = KLregularizer(-0.7, 0.2) if self.sparse else regularizers.l1(0.)
        kernel_reg = regularizers.l2(0.02) if self.weight_decay else regularizers.l1(0.)
        
        # this is our input placeholder
        input_img = Input(shape=(self.input_dim,))
        
        encoded = Dense(self.encoding_dim, activation=self.activation
                        , activity_regularizer = activity_reg
                        , kernel_regularizer = kernel_reg
                        , name = "encoded")(input_img)

        # "decoded" is the lossy reconstruction of the input
        decoded = Dense(self.input_dim, activation=('linear' if self.activation == "linear" else 'sigmoid')
                        , name = "decoded")(encoded)

        # this model maps an input to its reconstruction
        self.model = Model(input_img, decoded)

        # this model maps an input to its encoded representation
        self.encoder = Model(input_img, encoded)

        # create a placeholder for an encoded (encoding_dim-dimensional) input
        encoded_input = Input(shape=(self.encoding_dim,))
        # retrieve the last layer of the autoencoder model
        decoder_layer = self.model.layers[-1]
        # create the decoder model
        self.decoder = Model(encoded_input, decoder_layer(encoded_input)) 
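KLregularizer is defined in the ae-review-resources project; its exact arguments are project-specific. A sketch of a KL-divergence sparsity penalty in the same spirit, written against the Keras 2 Regularizer interface (rho and beta below are assumed names):

from keras import backend as K
from keras.regularizers import Regularizer

class KLSparsity(Regularizer):
    def __init__(self, rho=0.05, beta=1.0):
        self.rho = rho    # target mean activation
        self.beta = beta  # penalty strength

    def __call__(self, x):
        # mean activation per unit, clipped away from 0 and 1 for numerical stability
        rho_hat = K.clip(K.mean(x, axis=0), K.epsilon(), 1.0 - K.epsilon())
        kl = (self.rho * K.log(self.rho / rho_hat)
              + (1.0 - self.rho) * K.log((1.0 - self.rho) / (1.0 - rho_hat)))
        return self.beta * K.sum(kl)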
Example 71
Project: jamespy_py3   Author: jskDr   File: kkeras_util.py    MIT License 5 votes vote down vote up
def __init__(self, X_shape_1, alpha):
		in_model = Input(shape=(X_shape_1,))
		out_model = Dense(1, activation='linear', W_regularizer=l1(alpha))(in_model)
		
		super().__init__(input = in_model, output=out_model)
		
		self.compile(optimizer='adadelta', loss='mse', metrics=['accuracy']) 
Example 72
Project: jamespy_py3   Author: jskDr   File: kkeras_util.py    MIT License 5 votes vote down vote up
def __init__(self, X_shape_1, n_h_nodes, alpha):
		in_model = Input(shape=(X_shape_1,))
		hidden_l = Dense(n_h_nodes, activation='relu', W_regularizer=l1(alpha))(in_model)
		out_model = Dense(1, activation='linear', W_regularizer=l1(alpha))(hidden_l)
		
		super().__init__(input = in_model, output=out_model)
		
		self.compile(optimizer='adadelta', loss='mse', metrics=['accuracy']) 
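Both constructors above use the Keras 1 names (W_regularizer, Model(input=..., output=...)). A Keras 2 sketch of the same one-layer linear model, with hypothetical values for X_shape_1 and alpha:

from keras.layers import Dense, Input
from keras.models import Model
from keras.regularizers import l1

X_shape_1, alpha = 4, 1e-3
in_model = Input(shape=(X_shape_1,))
out_model = Dense(1, activation='linear', kernel_regularizer=l1(alpha))(in_model)
model = Model(inputs=in_model, outputs=out_model)
model.compile(optimizer='adadelta', loss='mse', metrics=['accuracy'])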
Example 73
Project: deep-learning-keras   Author: arnaudvl   File: dnn.py    MIT License 4 votes vote down vote up
def init_model(self):
        """
        Set up and compile deep neural net architecture.
        """
        self._set_architecture_type()
        self._get_default_values_architecture()
        ilayer = 0
        model_dnn = Sequential()
        
        if self.X_train is not None:
            input_shape = self.X_train.shape[1]
        else:
            input_shape = self.X.shape[1]
        
        # set input layer
        model_dnn.add(Dense(self.architecture[ilayer]['hidden_units'],
                            kernel_initializer=self.architecture[ilayer]['kernel_initializer'],
                            bias_initializer=self.architecture[ilayer]['bias_initializer'],
                            kernel_regularizer=l1(self.architecture[ilayer]['kernel_regularizer_l1']),
                            input_dim=input_shape))
        if self.architecture[ilayer]['batchnorm']:
            model_dnn.add(BatchNormalization())
        model_dnn.add(Activation(self.architecture[ilayer]['activation']))
        model_dnn.add(Dropout(self.architecture[ilayer]['dropout']))
        
        # add hidden layers
        while ilayer < len(self.architecture) - 1:
            ilayer += 1
            
            model_dnn.add(Dense(self.architecture[ilayer]['hidden_units'],
                                kernel_initializer=self.architecture[ilayer]['kernel_initializer'],
                                bias_initializer=self.architecture[ilayer]['bias_initializer'],
                                kernel_regularizer=l1(self.architecture[ilayer]['kernel_regularizer_l1'])))
            if self.architecture[ilayer]['batchnorm']:
                model_dnn.add(BatchNormalization())
            model_dnn.add(Activation(self.architecture[ilayer]['activation']))
            model_dnn.add(Dropout(self.architecture[ilayer]['dropout']))
        
        # add output layer
        n_classes = self._num_classes()
        if self.output_layer == 'sigmoid':
            if n_classes == 2:
                model_dnn.add(Dense(1, activation=self.output_layer))  # output layer
            else:
                raise ValueError('sigmoid output layer suitable for binary classification, '
                                 'but %i classes detected.' % n_classes)
        else:
            model_dnn.add(Dense(n_classes, activation=self.output_layer))  # output layer

        # compile model
        model_dnn.compile(optimizer=self._get_optimizer(), metrics=self.metrics, loss=self.loss_function)
        
        if not self._print_summary:
            model_dnn.summary()  # display model architecture (first call only)
            self._print_summary = True
        
        return model_dnn 
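For reference, a hypothetical self.architecture value consistent with the keys init_model reads (the exact format lives elsewhere in the project; this is an assumption):

architecture = [
    {'hidden_units': 128, 'kernel_initializer': 'glorot_uniform',
     'bias_initializer': 'zeros', 'kernel_regularizer_l1': 1e-5,
     'batchnorm': True, 'activation': 'relu', 'dropout': 0.2},
    {'hidden_units': 64, 'kernel_initializer': 'glorot_uniform',
     'bias_initializer': 'zeros', 'kernel_regularizer_l1': 1e-5,
     'batchnorm': False, 'activation': 'relu', 'dropout': 0.1},
]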
Example 74
Project: WordNetEmbeddings   Author: nlx-group   File: vector_generator.py    MIT License 4 votes vote down vote up
def nn_dimensionality_reduction(vec_dim, emb_matrix, mode):
    epochs = 2
    batch_size = 100

    inp = Input(shape=(len(emb_matrix),))
    encoded = Dense(vec_dim, activation='relu', use_bias=True, activity_regularizer=regularizers.l1(10e-5))(inp)  # encoded representation of the input (note: 10e-5 is 1e-4, not 1e-5)
    # encoded = Dense(vec_dim, activation='tanh', use_bias=False, activity_regularizer=regularizers.l1(10e-5))(inp)  # encoded representation of the input
    # encoded = Dense(vec_dim, activation='relu', use_bias=False,)(inp)                              # encoded representation of the input
    decoded = Dense(len(emb_matrix), activation='sigmoid', trainable=True)(encoded)  # lossy reconstruction of the input

    model = Model(inp, decoded)  # maps an input to its reconstruction
    model.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['acc'])
    model.summary()

    encoder = Model(inp, encoded)  # To access the mid-layers

    if mode == "1Hot":
        model.fit(np.identity(len(emb_matrix)), emb_matrix, batch_size=batch_size, epochs=epochs, verbose=1,
                  callbacks=None,
                  validation_split=0.0, validation_data=None, shuffle=True,
                  class_weight=None, sample_weight=None, initial_epoch=0)
        encoded_inp = encoder.predict(np.identity(len(emb_matrix)))

    elif mode == "encoder":
        model.fit(emb_matrix, emb_matrix, batch_size=batch_size, epochs=epochs, verbose=1, callbacks=None,
                  validation_split=0.0, validation_data=None, shuffle=True,
                  class_weight=None, sample_weight=None, initial_epoch=0)
        encoded_inp = encoder.predict(emb_matrix)

    # To save the model
    model.save(os.getcwd() + '/data/output/model_' + mode)
    print("    The model is saved...")

    # To save layer1 output
    array_writer(encoded_inp, "layer1_output", "bin")

    # To save the weights (no bias)
    print("    Extracting the weights ...")
    weights = []
    biases = []
    for i in range(1, len(model.layers)):  # skip layer 0: the Input layer holds no weights or biases
        layer = model.layers[i]
        temp = np.array(layer.get_weights()[0], dtype=np.float16)  # weights
        weights.append(temp)
        if len(layer.get_weights())>1:
            temp = np.array(layer.get_weights()[1], dtype=np.float16)  # biases
        else:
            temp = []
        biases.append(temp)

    array_writer(weights, "weights_" + mode, "bin")
    array_writer(biases, "biases_" + mode, "bin")

    return weights[0] 
Example 75
Project: RPGOne   Author: RTHMaK   File: adaptive_recurrence.py    Apache License 2.0 4 votes vote down vote up
def call(self, inputs, mask=None):
        encoded_question, current_memory, encoded_knowledge = inputs
        # We need to create a tensor which doesn't have the encoding_dim dimension. So that this Layer is
        # independent of the dimension of the input tensors, we just sum over the last dimension to remove it.
        # We only use this to create variables, nothing else.
        memory_cell = K.sum(current_memory, -1)
        # This is a boolean mask, holding whether a particular sample has halted.
        batch_mask = tf.cast(tf.ones_like(memory_cell, name='batch_mask'), tf.bool)
        # This counts the number of memory steps per sample.
        hop_counter = tf.zeros_like(memory_cell, name='hop_counter')
        # This accumulates the halting probabilities.
        halting_accumulator = tf.zeros_like(memory_cell, name='halting_accumulator')
        # This also accumulates the halting probabilities, with the difference being that if an
        # emitted probability causes a particular sample to go over 1 - epsilon, this accumulates
        # that value, but the halting_accumulator does not. This variable is _only_ used in the
        # halting condition of the loop.
        halting_accumulator_for_comparison = tf.zeros_like(memory_cell,
                                                           name='halting_acc_for_comparison')
        # This accumulates the weighted memory vectors at each memory step. The memory is weighted by the
        # halting probability and added to this accumulator.
        memory_accumulator = tf.zeros_like(current_memory, name='memory_accumulator')
        # We need the attended_knowledge from the last memory network step, so we create a dummy variable to
        # input to the while_loop, as tensorflow requires the input signature to match the output signature.
        attended_knowledge_loop_placeholder = tf.zeros_like(current_memory, name='attended_knowledge_placeholder')

        # Add the ponder cost variable as a regulariser to the loss function.
        ponder_cost = l1(self.ponder_cost_strength)
        self.add_loss(ponder_cost(hop_counter))
        # This actually does the computation of self.adaptive_memory_hop,
        # checking the condition at every step to see if it should stop.

        # The while loop has to produce as many variables as it has inputs - we only need the last two.
        *_, current_memory, attended_knowledge = \
            tf.while_loop(cond=self.halting_condition, body=self.adaptive_memory_hop,
                          loop_vars=[batch_mask,
                                     halting_accumulator,
                                     halting_accumulator_for_comparison,
                                     hop_counter,
                                     encoded_question,
                                     current_memory,
                                     encoded_knowledge,
                                     memory_accumulator,
                                     attended_knowledge_loop_placeholder
                                    ])

        return [current_memory, attended_knowledge] 
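Worth noting: here l1() is used outside any layer. A regularizer instance is just a callable mapping a tensor to a scalar penalty, which is why it can be fed to self.add_loss as a ponder cost. A standalone sketch:

import numpy as np
from keras import backend as K
from keras.regularizers import l1

reg = l1(0.01)  # callable: x -> 0.01 * sum(|x|)
x = K.variable(np.array([1.0, -2.0, 3.0]))
print(K.eval(reg(x)))  # ~0.06; for a non-negative hop counter this is just strength * total hops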
Example 76
Project: aitom   Author: xulabs   File: auto_classifier_model.py    GNU General Public License v3.0 4 votes vote down vote up
def auto_classifier_model(img_shape, encoding_dim=128, NUM_CHANNELS=1, num_of_class=2):

    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)
    mask_shape = (None, num_of_class)

    # Use relu activations in the hidden layers to guarantee non-negative outputs for the max
    # pooling layers. As long as the output layer has a linear activation, the network can still
    # accommodate negative image intensities: they are simply shifted back via the bias term.
    input_img = Input(shape=input_shape[1:])
    mask = Input(shape=mask_shape[1:])
    x = input_img

    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)

    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)

    encoder_conv_shape = [_.value for _ in x.get_shape()]  # x.get_shape() returns a list of tensorflow.python.framework.tensor_shape.Dimension objects
    x = Flatten()(x)
    encoded = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(x)
    encoder = Model(inputs=input_img, outputs=encoded)

    x = BatchNormalization()(x)
    x = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(num_of_class, activation='softmax')(x)
    
    prob = x
    # classifier output
    classifier = Model(inputs=input_img, outputs=prob)

    input_img_decoder = Input(shape=encoder.output_shape[1:])
    x = input_img_decoder
    x = Dense(np.prod(encoder_conv_shape[1:]), activation='relu')(x)
    x = Reshape(encoder_conv_shape[1:])(x)

    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)

    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = Convolution3D(1, (3, 3, 3), activation='linear', padding='same')(x)

    decoded = x
    # autoencoder output
    decoder = Model(inputs=input_img_decoder, outputs=decoded)

    
    autoencoder = Sequential()
    for l in encoder.layers:
        autoencoder.add(l)
    for l in decoder.layers:
        autoencoder.add(l)

    decoded = autoencoder(input_img)


    auto_classifier = Model(inputs=input_img, outputs=[decoded, prob])
    auto_classifier.summary()
    return auto_classifier 
Example 77
Project: deep_intent   Author: AutonomyLab   File: recurrent_encoder_classifier.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def classifier_model():
    inputs = Input(shape=(10, 128, 128, 3))
    conv_1 = ConvLSTM2D(filters=32,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding="same",
                        return_sequences=True,
                        recurrent_dropout=0.5,
                        kernel_regularizer=regularizers.l1(0.001))(inputs)
    conv_1 = TimeDistributed(LeakyReLU(alpha=0.2))(conv_1)
    conv_1 = TimeDistributed(Dropout(0.5))(conv_1)

    conv_2 = ConvLSTM2D(filters=64,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding="same",
                        return_sequences=True,
                        recurrent_dropout=0.5,
                        kernel_regularizer=regularizers.l1(0.001))(conv_1)
    conv_2 = TimeDistributed(BatchNormalization())(conv_2)
    conv_2 = TimeDistributed(LeakyReLU(alpha=0.2))(conv_2)
    conv_2 = TimeDistributed(Dropout(0.5))(conv_2)

    conv_3 = ConvLSTM2D(filters=128,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding="same",
                        return_sequences=True,
                        recurrent_dropout=0.5,
                        kernel_regularizer=regularizers.l1(0.001))(conv_2)
    conv_3 = TimeDistributed(BatchNormalization())(conv_3)
    conv_3 = TimeDistributed(LeakyReLU(alpha=0.2))(conv_3)
    conv_3 = TimeDistributed(Dropout(0.5))(conv_3)

    conv_4 = ConvLSTM2D(filters=256,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding="same",
                        return_sequences=True,
                        recurrent_dropout=0.5,
                        kernel_regularizer=regularizers.l1(0.001))(conv_3)
    conv_4 = TimeDistributed(BatchNormalization())(conv_4)
    conv_4 = TimeDistributed(LeakyReLU(alpha=0.2))(conv_4)
    conv_4 = TimeDistributed(Dropout(0.5))(conv_4)

    flat_1 = TimeDistributed(Flatten())(conv_4)
    dense_1 = TimeDistributed(Dense(units=1024, activation='tanh'))(flat_1)
    dense_2 = TimeDistributed(Dense(units=6, activation='sigmoid'))(dense_1)

    model = Model(inputs=inputs, outputs=dense_2)

    return model 
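Note that kernel_regularizer on ConvLSTM2D penalizes only the input-to-state convolution kernel; the state-to-state weights take a separate recurrent_regularizer, as in Example 54. A sketch of one such layer with both penalties:

from keras import regularizers
from keras.layers import ConvLSTM2D

conv = ConvLSTM2D(filters=32, kernel_size=(3, 3), strides=(2, 2),
                  padding='same', return_sequences=True,
                  kernel_regularizer=regularizers.l1(0.001),     # input-to-state kernel
                  recurrent_regularizer=regularizers.l1(0.001))  # state-to-state kernel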
Example 78
Project: deep_intent   Author: AutonomyLab   File: classifier_early_attn.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def classifier_model():
    inputs = Input(shape=(10, 128, 128, 3))
    conv_1 = ConvLSTM2D(filters=32,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding="same",
                        return_sequences=True,
                        recurrent_dropout=0.5,
                        kernel_regularizer=regularizers.l1(0.001))(inputs)
    conv_1 = TimeDistributed(LeakyReLU(alpha=0.2))(conv_1)
    conv_1 = TimeDistributed(Dropout(0.5))(conv_1)

    conv_2 = ConvLSTM2D(filters=64,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding="same",
                        return_sequences=True,
                        recurrent_dropout=0.5,
                        kernel_regularizer=regularizers.l1(0.001))(conv_1)
    conv_2 = TimeDistributed(BatchNormalization())(conv_2)
    conv_2 = TimeDistributed(LeakyReLU(alpha=0.2))(conv_2)
    conv_2 = TimeDistributed(Dropout(0.5))(conv_2)

    conv_3 = ConvLSTM2D(filters=128,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding="same",
                        return_sequences=True,
                        recurrent_dropout=0.5,
                        kernel_regularizer=regularizers.l1(0.001))(conv_2)
    conv_3 = TimeDistributed(BatchNormalization())(conv_3)
    conv_3 = TimeDistributed(LeakyReLU(alpha=0.2))(conv_3)
    conv_3 = TimeDistributed(Dropout(0.5))(conv_3)

    conv_4 = ConvLSTM2D(filters=256,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding="same",
                        return_sequences=True,
                        recurrent_dropout=0.5,
                        kernel_regularizer=regularizers.l1(0.001))(conv_3)
    conv_4 = TimeDistributed(BatchNormalization())(conv_4)
    conv_4 = TimeDistributed(LeakyReLU(alpha=0.2))(conv_4)
    conv_4 = TimeDistributed(Dropout(0.5))(conv_4)

    flat_1 = TimeDistributed(Flatten())(conv_4)
    dense_1 = TimeDistributed(Dense(units=1024, activation='tanh'))(flat_1)
    dense_2 = TimeDistributed(Dense(units=6, activation='sigmoid'))(dense_1)

    model = Model(inputs=inputs, outputs=dense_2)

    return model