Python keras.layers.noise.GaussianNoise() Examples

The following are 8 code examples of keras.layers.noise.GaussianNoise(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the module keras.layers.noise.
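GaussianNoise adds zero-mean Gaussian noise to its input during training and is inactive at inference time, which makes it useful as a simple regularizer against overfitting. A minimal sketch, assuming the keras.layers.noise module path used throughout this page (newer Keras releases also expose the layer directly as keras.layers.GaussianNoise):

from keras.models import Sequential
from keras.layers import Dense
from keras.layers.noise import GaussianNoise

# Corrupt a 20-feature input with noise of stddev 0.1; the noise is only
# applied during training, not at inference time.
model = Sequential()
model.add(GaussianNoise(0.1, input_shape=(20,)))
model.add(Dense(64, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')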
Example #1
Source File: riveal.py    From rivuletpy with BSD 3-Clause "New" or "Revised" License
def makecnn(in_shape, K):
    # Two conv blocks, each regularized with GaussianNoise and GaussianDropout,
    # followed by a small dense head with a linear output. (K is unused.)
    model = Sequential()
    model.add(
        Convolution2D(
            32, 3, 3, border_mode='same', input_shape=in_shape[1:]))
    model.add(SReLU())
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))
    model.add(GaussianNoise(1))
    model.add(GaussianDropout(0.4))
    model.add(Convolution2D(32, 3, 3, border_mode='same'))
    model.add(SReLU())
    model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering='tf'))
    model.add(GaussianNoise(1))
    model.add(GaussianDropout(0.4))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(SReLU())
    model.add(Dense(64))
    # model.add(SReLU())
    model.add(Dense(1))
    model.add(Activation('linear'))
    return model 
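A hypothetical call, assuming channels-last 32x32 RGB patches (the leading None is the batch dimension stripped by in_shape[1:]):

# Hypothetical usage; shapes are illustrative, not from the rivuletpy project.
model = makecnn(in_shape=(None, 32, 32, 3), K=2)
model.compile(optimizer='adam', loss='mse')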
Example #2
Source File: noise_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_GaussianNoise():
    layer_test(noise.GaussianNoise,
               kwargs={'stddev': 1.},
               input_shape=(3, 2, 3)) 
Example #3
Source File: DNGR.py    From DNGR-Keras with MIT License
def model(data, hidden_layers, hidden_neurons, output_file, validation_split=0.9):
	# Note: despite its name, validation_split is the fraction of rows used
	# for training; the remaining rows are held out for validation.
	train_n = int(validation_split * len(data))
	batch_size = 50
	train_data = data[:train_n,:]
	val_data = data[train_n:,:]

	input_sh = Input(shape=(data.shape[1],))
	encoded = noise.GaussianNoise(0.2)(input_sh)
	for i in range(hidden_layers):
		encoded = Dense(hidden_neurons[i], activation='relu')(encoded)
		encoded = noise.GaussianNoise(0.2)(encoded)

	decoded = Dense(hidden_neurons[-2], activation='relu')(encoded)
	for j in range(hidden_layers-3,-1,-1):
		decoded = Dense(hidden_neurons[j], activation='relu')(decoded)
	decoded = Dense(data.shape[1], activation='sigmoid')(decoded)

	autoencoder = Model(input=input_sh, output=decoded)
	autoencoder.compile(optimizer='adadelta', loss='mse')

	checkpointer = ModelCheckpoint(filepath='data/bestmodel' + output_file + ".hdf5", verbose=1, save_best_only=True)
	earlystopper = EarlyStopping(monitor='val_loss', patience=15, verbose=1)

	train_generator = DataGenerator(batch_size)
	train_generator.fit(train_data, train_data)
	val_generator = DataGenerator(batch_size)
	val_generator.fit(val_data, val_data)

	autoencoder.fit_generator(train_generator,
		samples_per_epoch=len(train_data),
		nb_epoch=100,
		validation_data=val_generator,
		nb_val_samples=len(val_data),
		max_q_size=batch_size,
		callbacks=[checkpointer, earlystopper])
	enco = Model(input=input_sh, output=encoded)
	enco.compile(optimizer='adadelta', loss='mse')
	reprsn = enco.predict(data)
	return reprsn 
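The function above targets the Keras 1 API (Model(input=..., output=...), fit_generator with samples_per_epoch). A hedged sketch of the same training call under Keras 2 naming, assuming DataGenerator yields (inputs, targets) batches:

# Keras 2 equivalents of the deprecated keyword arguments used above.
autoencoder.fit_generator(train_generator,
    steps_per_epoch=len(train_data) // batch_size,  # was samples_per_epoch
    epochs=100,                                     # was nb_epoch
    validation_data=val_generator,
    validation_steps=len(val_data) // batch_size,   # was nb_val_samples
    max_queue_size=batch_size,                      # was max_q_size
    callbacks=[checkpointer, earlystopper])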
Example #4
Source File: adabn.py    From ddan with MIT License
def _build_model(self, arch, activations, nfeatures, droprate, noise, optimizer):

        self.layers = [Input(shape=(nfeatures,))]

        for i, nunits in enumerate(arch):

            if isinstance(nunits, int):
                self.layers += [Dense(nunits, activation='linear')(self.layers[-1])]

            elif nunits == 'noise':
                self.layers += [GaussianNoise(noise)(self.layers[-1])]

            elif nunits == 'bn':
                self.layers += [BatchNormalization()(self.layers[-1])]
            
            elif nunits == 'abn':
                self.layers += [AdaBN()(self.layers[-1])]

            elif nunits == 'drop':
                self.layers += [Dropout(droprate)(self.layers[-1])]

            elif nunits == 'act':
                if activations == 'prelu':
                    self.layers += [PReLU()(self.layers[-1])]
                elif activations == 'elu':
                    self.layers += [ELU()(self.layers[-1])]
                elif activations == 'leakyrelu':
                    self.layers += [LeakyReLU()(self.layers[-1])]
                else:
                    self.layers += [Activation(activations)(self.layers[-1])]

            else:
                print('Unrecognised layer {}, type: {}'.format(nunits, type(nunits)))

        self.layers += [Dense(1, activation='sigmoid')(self.layers[-1])]

        self.model = Model(self.layers[0], self.layers[-1])
        self.model.compile(loss='binary_crossentropy', optimizer=optimizer) 
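The arch argument mixes integer layer widths with string tokens ('noise', 'bn', 'abn', 'drop', 'act') that pick the layer inserted at each position. A hypothetical call, assuming net is an instance of the enclosing class (all values illustrative):

# Two 64-unit blocks, each followed by Gaussian noise, batch normalization,
# an activation, and dropout.
arch = [64, 'noise', 'bn', 'act', 'drop',
        64, 'noise', 'bn', 'act', 'drop']
net._build_model(arch, activations='prelu', nfeatures=100,
                 droprate=0.5, noise=0.1, optimizer='adam')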
Example #5
Source File: shallow_weight.py    From DeepLearning-OCR with Apache License 2.0
def build_shallow_weight(channels, width, height, output_size, nb_classes):
	# input
	inputs = Input(shape=(channels, height, width))
	# 1 conv
	conv1_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu', 
		W_regularizer=l2(0.01))(inputs)
	bn1 = BatchNormalization(mode=0, axis=1)(conv1_1)
	pool1 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn1)
	gn1 = GaussianNoise(0.5)(pool1)
	drop1 = SpatialDropout2D(0.5)(gn1)
	# 2 conv
	conv2_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu',
		W_regularizer=l2(0.01))(drop1)
	bn2 = BatchNormalization(mode=0, axis=1)(conv2_1)
	pool2 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn2)
	gn2 = GaussianNoise(0.5)(pool2)
	drop2 = SpatialDropout2D(0.5)(gn2)
	# 3 conv
	conv3_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu',
		W_regularizer=l2(0.01))(drop2)
	bn3 = BatchNormalization(mode=0, axis=1)(conv3_1)
	pool3 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn3)
	gn3 = GaussianNoise(0.5)(pool3)
	drop3 = SpatialDropout2D(0.5)(gn3)
	# 4 conv
	conv4_1 = Convolution2D(8, 3, 3, border_mode='same', activation='relu',
		W_regularizer=l2(0.01))(drop3)
	bn4 = BatchNormalization(mode=0, axis=1)(conv4_1)
	pool4 = MaxPooling2D(pool_size=(2,2), strides=(2,2))(bn4)
	gn4 = GaussianNoise(0.5)(pool4)
	drop4 = SpatialDropout2D(0.5)(gn4)
	# flaten
	flat = Flatten()(drop4)
	# 1 dense
	dense1 = Dense(8, activation='relu', W_regularizer=l2(0.1))(flat)
	bn6 = BatchNormalization(mode=0, axis=1)(dense1)
	drop6 = Dropout(0.5)(bn6)
	# output
	out = []
	for i in range(output_size):
		out.append(Dense(nb_classes, activation='softmax')(drop6))
	if output_size > 1:
		merged_out = merge(out, mode='concat')
		shaped_out = Reshape((output_size, nb_classes))(merged_out)
		sample_weight_mode = 'temporal'
	else:
		shaped_out = out
		sample_weight_mode = None
	model = Model(input=[inputs], output=shaped_out)
	model.summary()
	model.compile(loss='categorical_crossentropy',
				  optimizer='adam',
				  metrics=[categorical_accuracy_per_sequence],
				  sample_weight_mode = sample_weight_mode
				  )

	return model 
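A hypothetical call for a 4-character sequence-recognition task over 36 symbol classes (values are illustrative, not from the DeepLearning-OCR repository):

# Grayscale 48x128 inputs, 4 output positions, 36 classes per position.
model = build_shallow_weight(channels=1, width=128, height=48,
                             output_size=4, nb_classes=36)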
Example #6
Source File: regularize.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def Regularize(layer, params,
               shared_layers=False,
               name='',
               apply_noise=True,
               apply_batch_normalization=True,
               apply_prelu=True,
               apply_dropout=True,
               apply_l2=True,
               trainable=True):
    """
    Apply the regularization specified in parameters to the layer
    :param layer: Layer to regularize
    :param params: Params specifying the regularizations to apply
    :param shared_layers: Boolean indicating if we want to get the used layers for applying to a shared-layers model.
    :param name: Name prepended to regularizer layer
    :param apply_noise: If False, noise won't be applied, independently of params
    :param apply_dropout: If False, dropout won't be applied, independently of params
    :param apply_prelu: If False, prelu won't be applied, independently of params
    :param apply_batch_normalization: If False, batch normalization won't be applied, independently of params
    :param apply_l2: If False, L2 normalization won't be applied, independently of params
    :param trainable: Whether the regularization layers created here are trainable
    :return: Regularized layer
    """
    shared_layers_list = []

    if apply_noise and params.get('USE_NOISE', False):
        shared_layers_list.append(GaussianNoise(params.get('NOISE_AMOUNT', 0.01), name=name + '_gaussian_noise', trainable=trainable))

    if apply_batch_normalization and params.get('USE_BATCH_NORMALIZATION', False):
        if params.get('WEIGHT_DECAY'):
            l2_gamma_reg = l2(params['WEIGHT_DECAY'])
            l2_beta_reg = l2(params['WEIGHT_DECAY'])
        else:
            l2_gamma_reg = None
            l2_beta_reg = None

        bn_mode = params.get('BATCH_NORMALIZATION_MODE', 0)

        shared_layers_list.append(BatchNormalization(mode=bn_mode,
                                                     gamma_regularizer=l2_gamma_reg,
                                                     beta_regularizer=l2_beta_reg,
                                                     name=name + '_batch_normalization',
                                                     trainable=trainable))

    if apply_prelu and params.get('USE_PRELU', False):
        shared_layers_list.append(PReLU(name=name + '_PReLU', trainable=trainable))

    if apply_dropout and params.get('DROPOUT_P', 0) > 0:
        shared_layers_list.append(Dropout(params.get('DROPOUT_P', 0.5), name=name + '_dropout', trainable=trainable))

    if apply_l2 and params.get('USE_L2', False):
        shared_layers_list.append(Lambda(L2_norm, name=name + '_L2_norm', trainable=trainable))

    # Apply all the previously built shared layers
    for l in shared_layers_list:
        layer = l(layer)
    result = layer

    # Return result or shared layers too
    if shared_layers:
        return result, shared_layers_list
    return result 
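A hedged usage sketch: the params dict keys consulted above decide which regularization layers get stacked onto the input tensor. Key names come from the function; the values and surrounding tensors are assumptions:

# Illustrative params; keys are the ones Regularize reads, values assumed.
params = {'USE_NOISE': True, 'NOISE_AMOUNT': 0.01,
          'USE_BATCH_NORMALIZATION': True, 'WEIGHT_DECAY': 1e-4,
          'USE_PRELU': False, 'DROPOUT_P': 0.5, 'USE_L2': False}
hidden = Dense(256)(inputs)                       # some preceding tensor
hidden = Regularize(hidden, params, name='enc_1')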
Example #7
Source File: regularize.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def Regularize(layer, params,
               shared_layers=False,
               name='',
               apply_noise=True,
               apply_batch_normalization=True,
               apply_prelu=True,
               apply_dropout=True,
               apply_l2=True):
    """
    Apply the regularization specified in parameters to the layer
    :param layer: Layer to regularize
    :param params: Params specifying the regularizations to apply
    :param shared_layers: Boolean indicating if we want to get the used layers for applying to a shared-layers model.
    :param name: Name prepended to regularizer layer
    :param apply_noise: If False, noise won't be applied, independently of params
    :param apply_dropout: If False, dropout won't be applied, independently of params
    :param apply_prelu: If False, prelu won't be applied, independently of params
    :param apply_batch_normalization: If False, batch normalization won't be applied, independently of params
    :param apply_l2: If False, l2 normalization won't be applied, independently of params
    :return: Regularized layer
    """
    shared_layers_list = []

    if apply_noise and params.get('USE_NOISE', False):
        shared_layers_list.append(GaussianNoise(params.get('NOISE_AMOUNT', 0.01), name=name + '_gaussian_noise'))

    if apply_batch_normalization and params.get('USE_BATCH_NORMALIZATION', False):
        if params.get('WEIGHT_DECAY'):
            l2_gamma_reg = l2(params['WEIGHT_DECAY'])
            l2_beta_reg = l2(params['WEIGHT_DECAY'])
        else:
            l2_gamma_reg = None
            l2_beta_reg = None

        bn_mode = params.get('BATCH_NORMALIZATION_MODE', 0)

        shared_layers_list.append(BatchNormalization(mode=bn_mode,
                                                     gamma_regularizer=l2_gamma_reg,
                                                     beta_regularizer=l2_beta_reg,
                                                     name=name + '_batch_normalization'))

    if apply_prelu and params.get('USE_PRELU', False):
        shared_layers_list.append(PReLU(name=name + '_PReLU'))

    if apply_dropout and params.get('DROPOUT_P', 0) > 0:
        shared_layers_list.append(Dropout(params.get('DROPOUT_P', 0.5), name=name + '_dropout'))

    if apply_l2 and params.get('USE_L2', False):
        shared_layers_list.append(Lambda(L2_norm, name=name + '_L2_norm'))

    # Apply all the previously built shared layers
    for l in shared_layers_list:
        layer = l(layer)
    result = layer

    # Return result or shared layers too
    if shared_layers:
        return result, shared_layers_list
    return result 
Example #8
Source File: BMM_attention_model.py    From BMM_attentional_CNN with GNU General Public License v3.0
def minst_attention(inc_noise=False, attention=True):
    #make layers
    inputs = Input(shape=(1,image_size,image_size),name='input')

    conv_1a = Convolution2D(32, 3, 3,activation='relu',name='conv_1')
    maxp_1a = MaxPooling2D((3, 3), strides=(2,2),name='convmax_1')
    norm_1a = crosschannelnormalization(name="convpool_1")
    zero_1a = ZeroPadding2D((2,2),name='convzero_1')

    conv_2a = Convolution2D(32,3,3,activation='relu',name='conv_2')
    maxp_2a = MaxPooling2D((3, 3), strides=(2,2),name='convmax_2')
    norm_2a = crosschannelnormalization(name="convpool_2")
    zero_2a = ZeroPadding2D((2,2),name='convzero_2')

    dense_1a = Lambda(global_average_pooling,output_shape=global_average_pooling_shape,name='dense_1')
    dense_2a = Dense(10, activation = 'softmax', init='uniform',name='dense_2')

    #make actual model
    if inc_noise:
        inputs_noise = noise.GaussianNoise(2.5)(inputs)
        input_pad = ZeroPadding2D((1,1),input_shape=(1,image_size,image_size),name='input_pad')(inputs_noise)
    else:
        input_pad = ZeroPadding2D((1,1),input_shape=(1,image_size,image_size),name='input_pad')(inputs)

    conv_1 = conv_1a(input_pad)
    conv_1 = maxp_1a(conv_1)
    conv_1 = norm_1a(conv_1)
    conv_1 = zero_1a(conv_1)

    conv_2_x = conv_2a(conv_1)
    conv_2 = maxp_2a(conv_2_x)
    conv_2 = norm_2a(conv_2)
    conv_2 = zero_2a(conv_2)
    conv_2 = Dropout(0.5)(conv_2)

    dense_1 = dense_1a(conv_2)
    dense_2 = dense_2a(dense_1)

    conv_shape1 = Lambda(change_shape1,output_shape=(32,),name='chg_shape')(conv_2_x)
    find_att = dense_2a(conv_shape1)

    if attention:
        find_att = Lambda(attention_control,output_shape=att_shape,name='att_con')([find_att,dense_2])
    else:
        find_att = Lambda(no_attention_control,output_shape=att_shape,name='att_con')([find_att,dense_2])

    zero_3a = ZeroPadding2D((1,1),name='convzero_3')(find_att)
    apply_attention  = Merge(mode='mul',name='attend')([zero_3a,conv_1])

    conv_3 = conv_2a(apply_attention)
    conv_3 = maxp_2a(conv_3)
    conv_3 = norm_2a(conv_3)
    conv_3 = zero_2a(conv_3)

    dense_3 = dense_1a(conv_3)
    dense_4 = dense_2a(dense_3)

    model = Model(input=inputs,output=dense_4)

    return model
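A hypothetical call; the source file defines image_size at module level (28 for MNIST would be the natural value), along with the helper Lambdas used above:

# Build the attention model with input noise enabled (illustrative usage).
model = minst_attention(inc_noise=True, attention=True)
model.compile(optimizer='sgd', loss='categorical_crossentropy',
              metrics=['accuracy'])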