Python keras.layers.GaussianNoise() Examples

The following are 14 code examples of keras.layers.GaussianNoise(), drawn from open-source projects. The source file, project, and license are noted above each example.
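A minimal, self-contained sketch of the layer (assuming Keras 2.x): GaussianNoise adds zero-centered Gaussian noise to its input during training only, which makes it a cheap regularizer and data-augmentation layer; at inference it acts as the identity.

import numpy as np
from keras.models import Sequential
from keras.layers import GaussianNoise, Dense

model = Sequential()
model.add(GaussianNoise(stddev=0.1, input_shape=(20,)))  # noise applied only in training
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.fit(np.random.rand(32, 20), np.random.rand(32, 1), epochs=1, verbose=0)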
Example #1
Source File: example.py    From CTCModel with MIT License
def create_network(nb_features, nb_labels, padding_value):

    # Define the network architecture
    input_data = Input(name='input', shape=(None, nb_features)) # nb_features = image height

    masking = Masking(mask_value=padding_value)(input_data)
    noise = GaussianNoise(0.01)(masking)
    blstm = Bidirectional(LSTM(128, return_sequences=True, dropout=0.1))(noise)
    blstm = Bidirectional(LSTM(128, return_sequences=True, dropout=0.1))(blstm)
    blstm = Bidirectional(LSTM(128, return_sequences=True, dropout=0.1))(blstm)

    dense = TimeDistributed(Dense(nb_labels + 1, name="dense"))(blstm)
    outrnn = Activation('softmax', name='softmax')(dense)

    network = CTCModel([input_data], [outrnn])
    network.compile(Adam(lr=0.0001))

    return network 
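Hypothetical usage of the factory above (the argument values are placeholders, not from the CTCModel project); the small stddev of 0.01 keeps the perturbation mild relative to normalized image features:

network = create_network(nb_features=64, nb_labels=26, padding_value=255)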
Example #2
Source File: gan.py    From GAN-Sandbox with MIT License
def discriminator_network(x):
    def add_common_layers(y):
        y = layers.advanced_activations.LeakyReLU()(y)
        y = layers.Dropout(0.25)(y)
        return y

    x = layers.GaussianNoise(stddev=0.2)(x)

    x = layers.Conv2D(64, kernel_size, **conv_layer_keyword_args)(x)
    x = add_common_layers(x)

    x = layers.Conv2D(128, kernel_size, **conv_layer_keyword_args)(x)
    x = add_common_layers(x)

    x = layers.Flatten()(x)

    x = layers.Dense(1024)(x)
    x = add_common_layers(x)

    return layers.Dense(1, activation='sigmoid')(x) 
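This snippet leans on module-level `kernel_size` and `conv_layer_keyword_args` defined elsewhere in gan.py; a plausible (hypothetical) definition, following the DCGAN convention of downsampling with strided convolutions:

kernel_size = 5
conv_layer_keyword_args = {'strides': 2, 'padding': 'same'}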
Example #3
Source File: models.py    From asr-study with MIT License
def graves2006(num_features=26, num_hiddens=100, num_classes=28, std=.6):
    """ Implementation of Graves' model
    Reference:
        [1] Graves, Alex, et al. "Connectionist temporal classification:
        labelling unsegmented sequence data with recurrent neural networks."
        Proceedings of the 23rd international conference on Machine learning.
        ACM, 2006.
    """

    x = Input(name='inputs', shape=(None, num_features))
    o = x

    o = GaussianNoise(std)(o)
    o = Bidirectional(LSTM(num_hiddens,
                      return_sequences=True,
                      consume_less='gpu'))(o)
    o = TimeDistributed(Dense(num_classes))(o)

    return ctc_model(x, o) 
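`ctc_model` is a helper defined elsewhere in asr-study; a plausible sketch (hypothetical, not the project's exact code) that wraps the network with a CTC loss in a Lambda layer, the same pattern spelled out in Example #5 below:

from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model

def ctc_model(x, y_pred):
    # extra inputs required by K.ctc_batch_cost
    labels = Input(name='labels', shape=[None], dtype='int32')
    input_length = Input(name='input_length', shape=[1], dtype='int32')
    label_length = Input(name='label_length', shape=[1], dtype='int32')
    # y_pred is expected to hold per-frame class scores
    loss = Lambda(lambda args: K.ctc_batch_cost(*args),
                  output_shape=(1,), name='ctc')([labels, y_pred,
                                                  input_length, label_length])
    return Model(inputs=[x, labels, input_length, label_length], outputs=loss)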
Example #4
Source File: train.py    From DeepAA with MIT License
def CNN(input_shape=None, classes=1000):
    inputs = Input(shape=input_shape)

    # Block 1
    x = GaussianNoise(0.3)(inputs)
    x = CBRD(x, 64)
    x = CBRD(x, 64)
    x = MaxPooling2D()(x)

    # Block 2
    x = CBRD(x, 128)
    x = CBRD(x, 128)
    x = MaxPooling2D()(x)

    # Block 3
    x = CBRD(x, 256)
    x = CBRD(x, 256)
    x = CBRD(x, 256)
    x = MaxPooling2D()(x)

    # Classification block
    x = Flatten(name='flatten')(x)
    x = DBRD(x, 4096)
    x = DBRD(x, 4096)
    x = Dense(classes, activation='softmax', name='predictions')(x)

    model = Model(inputs=inputs, outputs=x)

    return model 
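`CBRD` and `DBRD` are convenience blocks defined elsewhere in train.py; a plausible (hypothetical) reconstruction, reading the names as Conv/Dense + BatchNorm + ReLU + Dropout:

from keras.layers import Conv2D, Dense, BatchNormalization, Activation, Dropout

def CBRD(x, filters, kernel_size=3, droprate=0.25):
    x = Conv2D(filters, kernel_size, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return Dropout(droprate)(x)

def DBRD(x, units, droprate=0.5):
    x = Dense(units)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return Dropout(droprate)(x)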
Example #5
Source File: model.py    From KerasDeepSpeech with GNU Affero General Public License v3.0
def graves(input_dim=26, rnn_size=512, output_dim=29, std=0.6):
    """ Implementation of Graves 2006 model

    Architecture:
        Gaussian Noise on input
        BiDirectional LSTM

    Reference:
        ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
    """

    K.set_learning_phase(1)
    input_data = Input(name='the_input', shape=(None, input_dim))
    # x = BatchNormalization(axis=-1)(input_data)

    x = GaussianNoise(std)(input_data)
    x = Bidirectional(LSTM(rnn_size,
                      return_sequences=True,
                      implementation=0))(x)
    y_pred = TimeDistributed(Dense(output_dim, activation='softmax'))(x)

    # Input of labels and other CTC requirements
    labels = Input(name='the_labels', shape=[None,], dtype='int32')
    input_length = Input(name='input_length', shape=[1], dtype='int32')
    label_length = Input(name='label_length', shape=[1], dtype='int32')

    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred,
                                                                       labels,
                                                                       input_length,
                                                                       label_length])


    model = Model(inputs=[input_data, labels, input_length, label_length], outputs=[loss_out])

    return model 
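`ctc_lambda_func` is defined elsewhere in model.py; a plausible sketch (hypothetical) that matches the argument order used above and delegates to Keras' built-in CTC cost:

from keras import backend as K

def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)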
Example #6
Source File: main.py    From Sound_event_detection with MIT License
def supervised_train(task_name,sed_model_name,augmentation):
	""""
	Training with only weakly-supervised learning
	Args:
		task_name: string
			the name of the task
		sed_model_name:	string
			the name of the model
		augmentation:	bool
			whether to add Gaussian noise Layer
	Return:

	"""
	LOG.info('config preparation for %s'%sed_model_name)
	#prepare for training
	train_sed=trainer.trainer(task_name,sed_model_name,False)
	
	#create the model using the model structure prepared in [train_sed]
	creat_model_sed=train_sed.model_struct.graph()
	LEN=train_sed.data_loader.LEN
	DIM=train_sed.data_loader.DIM
	inputs=Input((LEN,DIM))

	#add a Gaussian noise layer
	if augmentation:
		inputs_t=GaussianNoise(0.15)(inputs)
	else:
		inputs_t=inputs
	outs=creat_model_sed(inputs_t,False)

	#the model used for training
	models=Model(inputs,outs)

	LOG.info('------------start training------------')
	train_sed.train(extra_model=models,train_mode='supervised')

	#predict results for validation set and test set
	train_sed.save_at_result()	#audio tagging result
	train_sed.save_sed_result()	#event detection result 
Example #7
Source File: UNIT.py    From GAN-MRI with GNU General Public License v3.0
def modelSharedEncoder(self, name):
        input = Input(shape=self.latent_dim)

        x = self.resblk(input, 256)
        z = GaussianNoise(stddev=1)(x, training=True)

        return Model(inputs=input, outputs=z, name=name) 
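Note the `training=True` flag: it forces the noise on even at inference time, turning the layer into a stochastic sampler rather than a train-only regularizer (the VAE-style trick UNIT relies on). A minimal check (assuming Keras 2.x):

import numpy as np
from keras.layers import Input, GaussianNoise
from keras.models import Model

inp = Input(shape=(4,))
out = GaussianNoise(stddev=1.0)(inp, training=True)  # always on
m = Model(inp, out)
print(m.predict(np.zeros((1, 4))))  # non-zero: noise applied even in predict()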
Example #8
Source File: dann.py    From ddan with MIT License
def _build(self, input_layer, arch, activations, noise, droprate, l2reg):
        print('Building network layers...')
        network = [input_layer]
        for nunits in arch:
            print(nunits)
            if isinstance(nunits, int):
                network += [Dense(nunits, activation='linear', kernel_regularizer=l1_l2(l1=0.01, l2=l2reg))(network[-1])]

            elif nunits == 'noise':
                network += [GaussianNoise(noise)(network[-1])]

            elif nunits == 'bn':
                network += [BatchNormalization()(network[-1])]

            elif nunits == 'drop':
                network += [Dropout(droprate)(network[-1])]

            elif nunits == 'act':
                if activations == 'prelu':
                    network += [PReLU()(network[-1])]
                elif activations == 'leakyrelu':
                    network += [LeakyReLU()(network[-1])]
                elif activations == 'elu':
                    network += [ELU()(network[-1])]
                else:
                    print('Activation({})'.format(activations))
                    network += [Activation(activations)(network[-1])]
        return network 
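The `arch` argument is a small token language: integers append Dense layers, while the strings 'noise', 'bn', 'drop' and 'act' splice in GaussianNoise, BatchNormalization, Dropout and activation layers at that position. A hypothetical call (assuming `dann` is an instance of the surrounding class):

from keras.layers import Input

inp = Input(shape=(30,))
layers = dann._build(inp, arch=[128, 'bn', 'act', 'noise', 64, 'act'],
                     activations='relu', noise=0.1, droprate=0.5, l2reg=1e-4)
features = layers[-1]  # tensor after the final activation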
Example #9
Source File: ddcn.py    From ddan with MIT License
def _build_model(self, nfeatures, architecture, supervised, confusion, confusion_incr, confusion_max, 
        activations, noise, droprate, mmd_layer_idx, optimizer):

        self.inp_a = tf.placeholder(tf.float32, shape=(None, nfeatures))
        self.inp_b = tf.placeholder(tf.float32, shape=(None, nfeatures))
        self.labels_a = tf.placeholder(tf.float32, shape=(None, 1))

        nlayers = len(architecture)
        layers_a = [self.inp_a]
        layers_b = [self.inp_b]

        for i, nunits in enumerate(architecture):

            print(nunits, '(MMD)' if i in mmd_layer_idx else '')

            if isinstance(nunits, int):
                shared_layer = Dense(nunits, activation='linear')
            elif nunits == 'noise':
                shared_layer = GaussianNoise(noise)
            elif nunits == 'bn':
                shared_layer = BatchNormalization()
            elif nunits == 'drop':
                shared_layer = Dropout(droprate)
            elif nunits == 'act':
                if activations == 'prelu':
                    shared_layer = PReLU()
                elif activations == 'elu':
                    shared_layer = ELU()
                elif activations == 'leakyrelu':
                    shared_layer = LeakyReLU()
                else:
                    shared_layer = Activation(activations)

            layers_a += [shared_layer(layers_a[-1])]
            layers_b += [shared_layer(layers_b[-1])] 
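Because each `shared_layer` object is applied to both towers, the two streams are computed with identical weights; this is standard Keras weight sharing. A minimal illustration:

from keras.layers import Input, Dense

shared = Dense(8)
a = Input(shape=(4,))
b = Input(shape=(4,))
ya = shared(a)
yb = shared(b)  # same weights as ya: one Dense layer, two call sites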
Example #10
Source File: layers_export.py    From Fabrik with GNU General Public License v3.0
def gaussian_noise(layer, layer_in, layerId, tensor=True):
    stddev = layer['params']['stddev']
    out = {layerId: GaussianNoise(stddev=stddev)}
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out 
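Hypothetical usage of the exporter (the spec dict mirrors the `layer['params']['stddev']` access above):

from keras.layers import Input

inp = Input(shape=(16, 1))
spec = {'params': {'stddev': 0.1}}
out = gaussian_noise(spec, [inp], 'l1')
print(out['l1'])  # a GaussianNoise output tensor wired to `inp`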
Example #11
Source File: test_views.py    From Fabrik with GNU General Public License v3.0
def test_keras_import(self):
        model = Sequential()
        model.add(GaussianNoise(stddev=0.1, input_shape=(16, 1)))
        model.build()
        self.keras_param_test(model, 0, 1) 
Example #12
Source File: test_views.py    From Fabrik with GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['GaussianNoise']}
        net['l0']['connection']['output'].append('l1')
        inp = data(net['l0'], '', 'l0')['l0']
        net = gaussian_noise(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'GaussianNoise') 
Example #13
Source File: main.py    From Sound_event_detection with MIT License
def semi_train(task_name,sed_model_name,at_model_name,augmentation):
	""""
	Training with semi-supervised learning (Guiding learning)
	Args:
		task_name: string
			the name of the task
                sed_model_name: string
			the name of the the PS-model
		at_model_name: string
			the name of the the PT-model
                augmentation: bool
			whether to add Gaussian noise to the input of the PT-model
	Return:

        """
	#prepare for training of the PS-model
	LOG.info('config preparation for %s'%sed_model_name)
	train_sed=trainer.trainer(task_name,sed_model_name,False)

	#prepare for training of the PT-model
	LOG.info('config preparation for %s'%at_model_name)
	train_at=trainer.trainer(task_name,at_model_name,False)

	#connect the outputs of the two models to produce a model for end-to-end learning
	creat_model_at=train_at.model_struct.graph()
	creat_model_sed=train_sed.model_struct.graph()
	LEN=train_sed.data_loader.LEN
	DIM=train_sed.data_loader.DIM	
	inputs=Input((LEN,DIM))

	#add Gaussian noise
	if augmentation:
		at_inputs=GaussianNoise(0.15)(inputs)
	else:
		at_inputs=inputs

	at_out=creat_model_at(at_inputs,False)
	sed_out=creat_model_sed(inputs,False)
	out=concatenate([at_out,sed_out],axis=-1)
	models=Model(inputs,out)

	#start training (all intermediate files are saved in the PS-model dir)
	LOG.info('------------start training------------')	
	train_sed.train(models)

	#copy the final model to the PT-model dir from the PS-model dir
	shutil.copyfile(train_sed.best_model_path,train_at.best_model_path) 

	#predict results for validation set and test set (the PT-model)
	LOG.info('------------result of %s------------'%at_model_name)
	train_at.save_at_result()	#audio tagging result

	#predict results for validation set and test set (the PS-model)
	LOG.info('------------result of %s------------'%sed_model_name)
	train_sed.save_at_result()	#audio tagging result
	train_sed.save_sed_result()	#event detection result 
Example #14
Source File: deepcoral.py    From ddan with MIT License
def _build_model(self, nfeatures, architecture, supervised, confusion, confusion_incr, confusion_max, 
        activations, noise, droprate, coral_layer_idx, optimizer):

        self.inp_a = tf.placeholder(tf.float32, shape=(None, nfeatures))
        self.inp_b = tf.placeholder(tf.float32, shape=(None, nfeatures))
        self.labels_a = tf.placeholder(tf.float32, shape=(None, 1))
        self.lr = tf.placeholder(tf.float32, [], name='lr')

        nlayers = len(architecture)
        layers_a = [self.inp_a]
        layers_b = [self.inp_b]

        for i, nunits in enumerate(architecture):

            print(nunits, '(CORAL)' if i in coral_layer_idx else '')

            if isinstance(nunits, int):
                shared_layer = Dense(nunits, activation='linear')
            elif nunits == 'noise':
                shared_layer = GaussianNoise(noise)
            elif nunits == 'bn':
                shared_layer = BatchNormalization()
            elif nunits == 'drop':
                shared_layer = Dropout(droprate)
            elif nunits == 'act':
                if activations == 'prelu':
                    shared_layer = PReLU()
                elif activations == 'elu':
                    shared_layer = ELU()
                elif activations == 'leakyrelu':
                    shared_layer = LeakyReLU()
                else:
                    shared_layer = Activation(activations)

            layers_a += [shared_layer(layers_a[-1])]
            layers_b += [shared_layer(layers_b[-1])]