Python keras.regularizers.get() Examples
The following are 30
code examples of keras.regularizers.get().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
keras.regularizers
, or try the search function.
Example #1
Source File: layers.py From keras-utilities with MIT License | 6 votes |
def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             bias=True, return_attention=False, **kwargs):
    """Configure an attention-with-context layer.

    Regularizers/constraints may be identifiers or instances; they are
    resolved through the keras registries. Remaining kwargs go to Layer.
    """
    self.supports_masking = True
    self.return_attention = return_attention
    # Fixed initializer for the attention weights.
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.u_regularizer = regularizers.get(u_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.u_constraint = constraints.get(u_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
Example #2
Source File: capsule.py From keras-contrib with MIT License | 6 votes |
def __init__(self, num_capsule, dim_capsule, routings=3, share_weights=True,
             initializer='glorot_uniform', activation=None, regularizer=None,
             constraint=None, **kwargs):
    """Configure a Capsule layer: capsule count/size, routing iterations and
    the weight initializer/regularizer/constraint (resolved via keras registries)."""
    super(Capsule, self).__init__(**kwargs)
    self.num_capsule = num_capsule
    self.dim_capsule = dim_capsule
    self.routings = routings
    self.share_weights = share_weights
    # String identifiers are looked up; instances pass straight through.
    self.activation = activations.get(activation)
    self.regularizer = regularizers.get(regularizer)
    self.initializer = initializers.get(initializer)
    self.constraint = constraints.get(constraint)
Example #3
Source File: pelu.py From keras-contrib with MIT License | 6 votes |
def __init__(self, alpha_initializer='ones', alpha_regularizer=None,
             alpha_constraint=None, beta_initializer='ones',
             beta_regularizer=None, beta_constraint=None,
             shared_axes=None, **kwargs):
    """Configure a PELU activation layer (per-parameter alpha/beta weights)."""
    super(PELU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    self.beta_initializer = initializers.get(beta_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    # Normalise shared_axes to None or a list of axis indices.
    if shared_axes is None:
        self.shared_axes = None
    elif isinstance(shared_axes, (list, tuple)):
        self.shared_axes = list(shared_axes)
    else:
        self.shared_axes = [shared_axes]
Example #4
Source File: my_layers.py From Attention-Based-Aspect-Extraction with Apache License 2.0 | 6 votes |
def __init__(self, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None, bias=True, **kwargs):
    """Keras layer implementing a content-attention mechanism.

    Supports masking. Regularizers/constraints are resolved through the
    keras registries; remaining kwargs are forwarded to the base Layer.
    """
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(Attention, self).__init__(**kwargs)
Example #5
Source File: my_layers.py From Attention-Based-Aspect-Extraction with Apache License 2.0 | 6 votes |
def __init__(self, input_dim, output_dim, init='uniform', input_length=None,
             W_regularizer=None, activity_regularizer=None, W_constraint=None,
             weights=None, dropout=0., **kwargs):
    """Configure a weighted aspect-embedding layer."""
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.init = initializers.get(init)
    self.input_length = input_length
    self.dropout = dropout
    self.W_constraint = constraints.get(W_constraint)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    # A fractional dropout rate requires the learning-phase flag so it can be
    # switched off at inference time.
    if 0. < self.dropout < 1.:
        self.uses_learning_phase = True
    self.initial_weights = weights
    # Pin the input spec before delegating to the base Layer.
    kwargs['input_shape'] = (self.input_length,)
    kwargs['input_dtype'] = K.floatx()
    super(WeightedAspectEmb, self).__init__(**kwargs)
Example #6
Source File: attention_with_context.py From DeepResearch with MIT License | 6 votes |
def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """Configure an attention-with-context layer (mask-aware)."""
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')
    # Resolve the three regularizers and three constraints via the registries.
    for attr, value in (('W_regularizer', W_regularizer),
                        ('u_regularizer', u_regularizer),
                        ('b_regularizer', b_regularizer)):
        setattr(self, attr, regularizers.get(value))
    for attr, value in (('W_constraint', W_constraint),
                        ('u_constraint', u_constraint),
                        ('b_constraint', b_constraint)):
        setattr(self, attr, constraints.get(value))
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
Example #7
Source File: attention.py From Document-Classifier-LSTM with MIT License | 6 votes |
def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """Configure an attention-with-context layer.

    NOTE: unlike sibling implementations, this variant does not set
    ``supports_masking`` — behavior preserved from the original.
    """
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.u_regularizer = regularizers.get(u_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.u_constraint = constraints.get(u_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
Example #8
Source File: core.py From keras-contrib with MIT License | 6 votes |
def __init__(self, units, kernel_initializer='glorot_uniform', activation=None,
             weights=None, kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, use_bias=True, **kwargs):
    """Configure a CosineDense layer (Dense with cosine-normalised kernel)."""
    # Keras 1 compatibility: map input_dim to input_shape for the base Layer.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.activation = activations.get(activation)
    self.units = units
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.initial_weights = weights
    super(CosineDense, self).__init__(**kwargs)
Example #9
Source File: recurrent.py From keras_bn_library with MIT License | 6 votes |
def __init__(self, output_dim, init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid', W_regularizer=None,
             U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure a QRNN recurrent layer (Keras-1 style API)."""
    self.output_dim = output_dim
    # Keras 1 uses the `initializations` module rather than `initializers`.
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W = dropout_W
    self.dropout_U = dropout_U
    self.stateful = False
    # Non-zero dropout must be disabled at inference time.
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(QRNN, self).__init__(**kwargs)
Example #10
Source File: ChainCRF.py From elmo-bilstm-cnn-crf with Apache License 2.0 | 6 votes |
def __init__(self, init='glorot_uniform', U_regularizer=None,
             b_start_regularizer=None, b_end_regularizer=None,
             U_constraint=None, b_start_constraint=None,
             b_end_constraint=None, weights=None, **kwargs):
    """Configure a linear-chain CRF output layer."""
    super(ChainCRF, self).__init__(**kwargs)
    self.init = initializers.get(init)
    # Transition matrix U plus boundary biases for start/end positions.
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_start_regularizer = regularizers.get(b_start_regularizer)
    self.b_end_regularizer = regularizers.get(b_end_regularizer)
    self.U_constraint = constraints.get(U_constraint)
    self.b_start_constraint = constraints.get(b_start_constraint)
    self.b_end_constraint = constraints.get(b_end_constraint)
    self.initial_weights = weights
    self.supports_masking = True
    self.uses_learning_phase = True
    # Expects (batch, time, num_classes) inputs.
    self.input_spec = [InputSpec(ndim=3)]
Example #11
Source File: ChainCRF.py From naacl18-multitask_argument_mining with Apache License 2.0 | 6 votes |
def __init__(self, init='glorot_uniform', U_regularizer=None,
             b_start_regularizer=None, b_end_regularizer=None,
             U_constraint=None, b_start_constraint=None,
             b_end_constraint=None, weights=None, **kwargs):
    """Configure a linear-chain CRF output layer (Keras-1 style API)."""
    self.supports_masking = True
    self.uses_learning_phase = True
    # Expects (batch, time, num_classes) inputs.
    self.input_spec = [InputSpec(ndim=3)]
    self.init = initializations.get(init)
    # Transition matrix U plus boundary biases for start/end positions.
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_start_regularizer = regularizers.get(b_start_regularizer)
    self.b_end_regularizer = regularizers.get(b_end_regularizer)
    self.U_constraint = constraints.get(U_constraint)
    self.b_start_constraint = constraints.get(b_start_constraint)
    self.b_end_constraint = constraints.get(b_end_constraint)
    self.initial_weights = weights
    super(ChainCRF, self).__init__(**kwargs)
Example #12
Source File: FFNN.py From dts with MIT License | 6 votes |
def evaluate(self, inputs, fn_inverse=None, fn_plot=None):
    """Evaluate the model on `inputs` and return one value per compiled metric.

    inputs: either (X, y) or (X, conditions, y).
    fn_inverse: optional inverse transform applied to both y and y_hat
        (e.g. to undo scaling) before metrics are computed.
    fn_plot: optional callback receiving [y, y_hat] for visualisation.
    Returns a list of scalar metric values.
    """
    # Distinguish the two accepted input layouts by unpacking arity.
    # The original used a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit and any genuine bug; only the unpacking
    # mismatch (ValueError) should be caught here.
    try:
        X, y = inputs
        inputs = X
    except ValueError:
        X, conditions, y = inputs
        inputs = [X, conditions]
    y_hat = self.predict(inputs)
    # Undo any preprocessing so metrics are computed in the original scale.
    if fn_inverse is not None:
        y_hat = fn_inverse(y_hat)
        y = fn_inverse(y)
    if fn_plot is not None:
        fn_plot([y, y_hat])
    results = []
    for m in self.model.metrics:
        # Metrics may be registered by name (string) or as callables.
        if isinstance(m, str):
            results.append(K.eval(K.mean(get(m)(y, y_hat))))
        else:
            results.append(K.eval(K.mean(m(y, y_hat))))
    return results
Example #13
Source File: rnn_feature.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License | 6 votes |
def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """Configure an attention-with-context layer; kwargs go to the base Layer."""
    self.supports_masking = True
    # Glorot-uniform initialisation for the attention parameters.
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.u_regularizer = regularizers.get(u_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.u_constraint = constraints.get(u_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
Example #14
Source File: recurrent.py From keras_bn_library with MIT License | 6 votes |
def __init__(self, output_dim, init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid', W_regularizer=None,
             U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure the VAE decoder LSTM (Keras-1 style API)."""
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W = dropout_W
    self.dropout_U = dropout_U
    # Non-zero dropout must be disabled at inference time.
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(DecoderVaeLSTM, self).__init__(**kwargs)
Example #15
Source File: models.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License | 6 votes |
def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """Configure an attention-with-context layer (mask-aware)."""
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')
    # Regularizers first, then constraints, both resolved via the registries.
    self.W_regularizer = regularizers.get(W_regularizer)
    self.u_regularizer = regularizers.get(u_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.u_constraint = constraints.get(u_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
Example #16
Source File: models.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License | 6 votes |
def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             bias=True, **kwargs):
    """Configure an attention-with-context layer; kwargs go to the base Layer."""
    self.supports_masking = True
    self.init = initializers.get('glorot_uniform')
    for attr, value in (('W_regularizer', W_regularizer),
                        ('u_regularizer', u_regularizer),
                        ('b_regularizer', b_regularizer)):
        setattr(self, attr, regularizers.get(value))
    for attr, value in (('W_constraint', W_constraint),
                        ('u_constraint', u_constraint),
                        ('b_constraint', b_constraint)):
        setattr(self, attr, constraints.get(value))
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
Example #17
Source File: rhn.py From deep-models with Apache License 2.0 | 6 votes |
def __init__(self, output_dim, L, init='glorot_uniform',
             inner_init='orthogonal', activation='tanh',
             inner_activation='hard_sigmoid', W_regularizer=None,
             U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    """Configure a Recurrent Highway Network layer with recurrence depth L."""
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W = dropout_W
    self.dropout_U = dropout_U
    self.L = L
    # Non-zero dropout must be disabled at inference time.
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(RHN, self).__init__(**kwargs)
Example #18
Source File: normalizations.py From se_relativisticgan with MIT License | 6 votes |
def __init__(self, axis=-1, momentum=0.99, center=True, scale=True,
             epsilon=1e-3, r_max_value=3., d_max_value=5., t_delta=1e-3,
             weights=None, beta_initializer='zero', gamma_initializer='one',
             moving_mean_initializer='zeros',
             moving_variance_initializer='ones', gamma_regularizer=None,
             beta_regularizer=None, beta_constraint=None,
             gamma_constraint=None, **kwargs):
    """Configure a Batch Renormalization layer.

    r_max_value/d_max_value/t_delta control the renormalization clipping
    schedule; the remaining arguments mirror BatchNormalization.
    """
    self.supports_masking = True
    self.axis = axis
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.momentum = momentum
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.initial_weights = weights
    # Renormalization clipping parameters.
    self.r_max_value = r_max_value
    self.d_max_value = d_max_value
    self.t_delta = t_delta
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(moving_variance_initializer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
    super(BatchRenormalization, self).__init__(**kwargs)
Example #19
Source File: normalizations.py From se_relativisticgan with MIT License | 6 votes |
def __init__(self, axis=None, epsilon=1e-3, center=True, scale=True,
             beta_initializer='zeros', gamma_initializer='ones',
             beta_regularizer=None, gamma_regularizer=None,
             beta_constraint=None, gamma_constraint=None, **kwargs):
    """Configure an InstanceNormalization layer (axis=None normalises over
    all spatial dimensions per sample)."""
    super(InstanceNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    self.axis = axis
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    # Resolve beta/gamma options through the keras registries.
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
Example #20
Source File: lstm2ntm.py From NTM-Keras with MIT License | 6 votes |
def __init__(self, output_dim, memory_dim=128, memory_size=20,
             controller_output_dim=100, location_shift_range=1,
             num_read_head=1, num_write_head=1, init='glorot_uniform',
             inner_init='orthogonal', forget_bias_init='one',
             activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, R_regularizer=None,
             b_regularizer=None, W_y_regularizer=None, W_xi_regularizer=None,
             W_r_regularizer=None, dropout_W=0., dropout_U=0., **kwargs):
    """Configure a Neural Turing Machine recurrent layer.

    Initializers/activations are resolved through the Keras-1 registries,
    regularizers through ``regularizers.get``.
    """
    self.output_dim = output_dim
    # Memory/controller geometry. The original accepted these parameters but
    # never stored them, silently discarding caller settings; keep them.
    self.memory_dim = memory_dim
    self.memory_size = memory_size
    self.controller_output_dim = controller_output_dim
    self.location_shift_range = location_shift_range
    self.num_read_head = num_read_head
    self.num_write_head = num_write_head
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    # Per-weight regularizers that were previously dropped as well.
    self.R_regularizer = regularizers.get(R_regularizer)
    self.W_y_regularizer = regularizers.get(W_y_regularizer)
    self.W_xi_regularizer = regularizers.get(W_xi_regularizer)
    self.W_r_regularizer = regularizers.get(W_r_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    # Non-zero dropout must be disabled at inference time.
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(NTM, self).__init__(**kwargs)
Example #21
Source File: attention.py From deephlapan with GNU General Public License v2.0 | 6 votes |
def __init__(self, W_regularizer=None, b_regularizer=None,
             W_constraint=None, b_constraint=None, bias=True,
             return_attention=False, **kwargs):
    """Configure an attention layer; optionally return attention scores."""
    self.supports_masking = True
    self.return_attention = return_attention
    self.init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    super(Attention, self).__init__(**kwargs)
Example #22
Source File: models.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License | 6 votes |
def on_epoch_end(self, epoch, logs=None):
    """Every `period` epochs, keep the best monitored value and snapshot the
    model weights in memory (no checkpoint file is written)."""
    logs = logs or {}
    self.epochs_since_last_save += 1
    # Only act every `period` epochs.
    if self.epochs_since_last_save < self.period:
        return
    self.epochs_since_last_save = 0
    current = logs.get(self.monitor)
    if current is None:
        warnings.warn('Can pick best model only with %s available, '
                      'skipping.' % (self.monitor), RuntimeWarning)
        return
    if self.monitor_op(current, self.best):
        if self.verbose > 0:
            print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
                  ' storing weights.'
                  % (epoch + 1, self.monitor, self.best, current))
        # Remember the improvement and the in-memory weight snapshot.
        self.best = current
        self.best_epochs = epoch + 1
        self.best_weights = self.model.get_weights()
    elif self.verbose > 0:
        print('\nEpoch %05d: %s did not improve' % (epoch + 1, self.monitor))
Example #23
Source File: instance_normalization.py From Coloring-greyscale-images with MIT License | 6 votes |
def __init__(self, axis=None, epsilon=1e-3, center=True, scale=True,
             beta_initializer='zeros', gamma_initializer='ones',
             beta_regularizer=None, gamma_regularizer=None,
             beta_constraint=None, gamma_constraint=None, **kwargs):
    """Configure an InstanceNormalization layer."""
    super(InstanceNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    # Normalisation geometry and numerical-stability epsilon.
    self.axis = axis
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
Example #24
Source File: norm.py From deep_complex_networks with MIT License | 6 votes |
def __init__(self, epsilon=1e-4, axis=-1, beta_init='zeros',
             gamma_init='ones', gamma_regularizer=None,
             beta_regularizer=None, **kwargs):
    """Configure a LayerNormalization layer over the given axis."""
    self.supports_masking = True
    self.beta_init = initializers.get(beta_init)
    self.gamma_init = initializers.get(gamma_init)
    self.epsilon = epsilon
    self.axis = axis
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    super(LayerNormalization, self).__init__(**kwargs)
Example #25
Source File: attention_decoder.py From keras-monotonic-attention with GNU Affero General Public License v3.0 | 5 votes |
def get_initial_state(self, inputs):
    """Build the decoder's initial state from the encoder outputs.

    Returns [y0, s0, t0]: initial token ids (start_token for every sample),
    the initial hidden state s0 projected from the first encoder step, and a
    zero time counter. When self.is_monotonic, an initial attention tensor
    alpha0 of shape (samples, time, 1) is appended.
    """
    if isinstance(inputs, list):
        assert len(inputs) == 2  # inputs == [encoder_outputs, y_true]
        encoder_outputs = inputs[0]
    else:
        encoder_outputs = inputs
    memory_shape = K.shape(encoder_outputs)
    # apply the matrix on the first time step to get the initial s0.
    s0 = activations.tanh(K.dot(encoder_outputs[:, 0], self.W_s))
    # One start token / zero step-counter per sample in the batch.
    y0 = K.zeros((memory_shape[0],), dtype='int64') + self.start_token
    t0 = K.zeros((memory_shape[0],), dtype='int64')
    initial_states = [y0, s0, t0]
    if self.is_monotonic:
        # initial attention has form: [1, 0, 0, ..., 0] for each sample in batch
        alpha0 = K.ones((memory_shape[0], 1))
        # NOTE(review): K.switch receives a callable "then" branch and a tensor
        # "else" branch; whether both forms are accepted is backend-dependent —
        # confirm against the Keras backend in use.
        alpha0 = K.switch(K.greater(memory_shape[1], 1), lambda: K.concatenate([alpha0, K.zeros((memory_shape[0], memory_shape[1] - 1))], axis=-1), alpha0)
        # like energy, attention is stored in shape (samples, time, 1)
        alpha0 = K.expand_dims(alpha0, -1)
        initial_states.append(alpha0)
    return initial_states
Example #26
Source File: bayesian_dense.py From bayesian_dense with MIT License | 5 votes |
def __init__(self, output_dim, init='glorot_uniform', init_sigma=lambda shape, name:init_uniform(shape, -10, -5, name=name), activation='linear',
             weights=None, W_regularizer=None, b_regularizer=None,
             W_sigma_regularizer=None, b_sigma_regularizer=None,
             activity_regularizer=None, bias=True, input_dim=None, **kwargs):
    """Configure a Bayesian dense layer with mean and log-sigma weights."""
    self.init = initializations.get(init)
    # init_sigma initialises the (log-)sigma weights; the default draws
    # uniformly from [-10, -5].
    self.init_sigma = init_sigma
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    # Stochastic weights: sampling happens only in the training phase.
    self.uses_learning_phase = True
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_sigma_regularizer = regularizers.get(W_sigma_regularizer)
    self.b_sigma_regularizer = regularizers.get(b_sigma_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(BayesianDense, self).__init__(**kwargs)
Example #27
Source File: cells.py From recurrentshop with MIT License | 5 votes |
def __init__(self, units=None, activation='tanh',
             recurrent_activation='hard_sigmoid', use_bias=True,
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal', bias_initializer='zeros',
             kernel_regularizer=None, recurrent_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, recurrent_constraint=None,
             bias_constraint=None, **kwargs):
    """Configure an extended RNN cell; `units` maps to the legacy output_dim."""
    # Accept either `units` or the legacy `output_dim` kwarg, but require one.
    if units is None:
        assert 'output_dim' in kwargs, 'Missing argument: units'
    else:
        kwargs['output_dim'] = units
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    super(ExtendedRNNCell, self).__init__(**kwargs)
Example #28
Source File: cosineconvolution2d.py From keras-contrib with MIT License | 5 votes |
def __init__(self, filters, kernel_size, kernel_initializer='glorot_uniform',
             activation=None, weights=None, padding='valid', strides=(1, 1),
             data_format=None, kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, use_bias=True, **kwargs):
    """Configure a 2D cosine-normalised convolution layer.

    Raises ValueError for a padding mode other than valid/same/full.
    """
    if data_format is None:
        data_format = K.image_data_format()
    if padding not in {'valid', 'same', 'full'}:
        raise ValueError('Invalid border mode for CosineConvolution2D:', padding)
    self.filters = filters
    self.kernel_size = kernel_size
    # Cache the kernel height/width separately for build().
    self.nb_row, self.nb_col = self.kernel_size
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.activation = activations.get(activation)
    self.padding = padding
    self.strides = tuple(strides)
    self.data_format = normalize_data_format(data_format)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    # Expects (batch, rows, cols, channels) / channels-first 4D inputs.
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(CosineConvolution2D, self).__init__(**kwargs)
Example #29
Source File: batch_renorm.py From BatchRenormalization with MIT License | 5 votes |
def __init__(self, axis=-1, momentum=0.99, center=True, scale=True,
             epsilon=1e-3, r_max_value=3., d_max_value=5., t_delta=1e-3,
             weights=None, beta_initializer='zero', gamma_initializer='one',
             moving_mean_initializer='zeros',
             moving_variance_initializer='ones', gamma_regularizer=None,
             beta_regularizer=None, beta_constraint=None,
             gamma_constraint=None, **kwargs):
    """Configure a Batch Renormalization layer.

    Raises NotImplementedError for non-default axis on the TensorFlow
    backend, and warns that this implementation deviates from the paper.
    """
    if axis != -1 and K.backend() == 'tensorflow':
        raise NotImplementedError('There is currently a bug '
                                  'when using batch renormalisation and '
                                  'the TensorFlow backend.')
    warnings.warn('This implementation of BatchRenormalization is inconsistent with the '
                  'original paper and therefore results may not be similar ! '
                  'For discussion on the inconsistency of this implementation, '
                  'refer here : https://github.com/keras-team/keras-contrib/issues/17')
    self.supports_masking = True
    self.axis = axis
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.momentum = momentum
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.initial_weights = weights
    # Renormalization clipping parameters.
    self.r_max_value = r_max_value
    self.d_max_value = d_max_value
    self.t_delta = t_delta
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.moving_mean_initializer = initializers.get(moving_mean_initializer)
    self.moving_variance_initializer = initializers.get(moving_variance_initializer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
    super(BatchRenormalization, self).__init__(**kwargs)
Example #30
Source File: qrnn.py From embedding-as-service with MIT License | 5 votes |
def __init__(self, units, window_size=2, stride=1, return_sequences=False,
             go_backwards=False, stateful=False, unroll=False,
             activation='tanh', kernel_initializer='uniform',
             bias_initializer='zero', kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, dropout=0,
             use_bias=True, input_dim=None, input_length=None, **kwargs):
    """Configure a Quasi-RNN layer (convolution window + recurrent pooling)."""
    # RNN bookkeeping flags.
    self.return_sequences = return_sequences
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.unroll = unroll
    self.units = units
    self.window_size = window_size
    # The temporal convolution stride is stored in 2D-conv form.
    self.strides = (stride, 1)
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.dropout = dropout
    self.supports_masking = True
    # Expects (batch, time, features) inputs.
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(QRNN, self).__init__(**kwargs)