Python tensorflow.python.keras.initializers.get() Examples
The following are 19 code examples of tensorflow.python.keras.initializers.get(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or check out all of the available functions and classes of the module tensorflow.python.keras.initializers.
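All of the examples rely on the same resolution behavior: initializers.get() turns whatever a layer constructor receives, whether a string identifier, a config dict, a callable, or None, into something the layer can use when it later creates its weights. A minimal sketch of that behavior (the calls below are illustrative, not taken from the projects that follow):

from tensorflow.python.keras import initializers

# A string identifier resolves to an initializer instance.
init = initializers.get('glorot_uniform')

# A config dict is deserialized into a configured instance.
init = initializers.get({'class_name': 'RandomNormal',
                         'config': {'mean': 0.0, 'stddev': 0.01}})

# A callable (including an already-built instance) passes through unchanged.
assert initializers.get(init) is init

# None passes through as None, so "no initializer" needs no special casing.
assert initializers.get(None) is None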

Example #1
Source Project: AliNet Author: nju-websoft File: alinet_layer.py License: MIT License
def __init__(self, input_dim, output_dim, dropout_rate=0.0, activation='tanh',
             kernel_initializer='glorot_uniform', bias_initializer='zeros'):
    super(HighwayLayer, self).__init__()
    # Resolve string identifiers into callables/instances up front.
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.dropout_rate = dropout_rate
    self.shape = (input_dim, output_dim)
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.kernel = None
    self.bias = None
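The resolved initializer objects are typically consumed later, when the layer creates its variables. A hypothetical sketch of how HighwayLayer might do so in build() (an assumption for illustration; the original build() is not shown on this page):

# Hypothetical build() body, not from the AliNet source: the stored
# initializer objects are handed to add_weight when variables are created.
def build(self, input_shape):
    self.kernel = self.add_weight('kernel', shape=self.shape,
                                  initializer=self.kernel_initializer)
    self.bias = self.add_weight('bias', shape=(self.output_dim,),
                                initializer=self.bias_initializer)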
Example #2
Source Project: Time-series-prediction Author: LongxingTan File: wavenet_layer.py License: MIT License
def __init__(self, units, activation=None, kernel_initializer='glorot_uniform',
             kernel_regularizer=None, kernel_constraint=None, use_bias=False,
             bias_initializer="zeros", trainable=True, name=None):
    super(Dense3D, self).__init__(trainable=trainable, name=name)
    self.units = units
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.use_bias = use_bias
    # Note: unlike the kernel arguments, the bias identifier is stored raw
    # rather than being resolved through initializers.get().
    self.bias_initializer = bias_initializer
Example #3
Source Project: Time-series-prediction Author: LongxingTan File: wavenet_layer.py License: MIT License
def __init__(self, filters, kernel_size, strides=1, padding='valid',
             dilation_rate=1, activation=None, causal=True,
             kernel_initializer='glorot_uniform', trainable=True, name=None):
    super(ConvTime, self).__init__(trainable=trainable, name=name)
    self.filters = filters
    self.kernel_size = kernel_size
    self.strides = strides
    self.padding = padding
    self.dilation_rate = dilation_rate
    self.activation = activations.get(activation)
    self.causal = causal
    self.kernel_initializer = initializers.get(kernel_initializer)
Example #4
Source Project: tf-encrypted Author: tf-encrypted File: dense.py License: Apache License 2.0
def __init__(
    self,
    units,
    activation=None,
    use_bias=True,
    kernel_initializer="glorot_uniform",
    bias_initializer="zeros",
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    **kwargs,
):
    super(Dense, self).__init__(**kwargs)
    self.units = int(units)
    self.activation_identifier = activation
    self.activation = activations.get(self.activation_identifier)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)

    # Not implemented arguments
    default_args_check(kernel_regularizer, "kernel_regularizer", "Dense")
    default_args_check(bias_regularizer, "bias_regularizer", "Dense")
    default_args_check(activity_regularizer, "activity_regularizer", "Dense")
    default_args_check(kernel_constraint, "kernel_constraint", "Dense")
    default_args_check(bias_constraint, "bias_constraint", "Dense")
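default_args_check is a tf-encrypted helper used to reject standard Keras arguments that the encrypted layer does not yet implement. A plausible minimal version (an assumption for illustration, not the project's actual code):

# Hypothetical stand-in for tf-encrypted's default_args_check: fail loudly
# when a caller passes a value for an argument that is not yet supported.
def default_args_check(value, argument_name, layer_name):
    if value is not None:
        raise NotImplementedError(
            "`{}` is not yet supported by the {} layer".format(
                argument_name, layer_name)
        )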
Example #5
Source Project: online-normalization Author: Cerebras File: online_norm.py License: BSD 3-Clause "New" or "Revised" License
def __init__(self, alpha_fwd=0.999, alpha_bkw=0.99, axis=1, epsilon=1e-5,
             stream_mu_initializer='zeros', stream_var_initializer='ones',
             u_ctrl_initializer='zeros', v_ctrl_initializer='zeros',
             trainable=True, name=None, **kwargs):
    super(Norm, self).__init__(trainable=trainable, name=name, **kwargs)

    # set up mixed precision
    self.dtype_policy = self._mixed_precision_policy \
        if self._mixed_precision_policy.name == "infer_float32_vars" \
        else self._dtype
    if isinstance(self.dtype_policy, Policy):
        self.mixed_precision = True
        self.fp_type = tf.float32  # full precision
        self.mp_type = tf.float16  # reduced precision
    else:
        self.mixed_precision = False
        self.fp_type = self._dtype if self._dtype else tf.float32  # full precision
        self.mp_type = self.fp_type  # reduced precision

    assert axis == 1, 'kernel requires channels_first data_format'
    self.axis = axis
    self.norm_ax = None
    self.epsilon = epsilon
    self.alpha_fwd = alpha_fwd
    self.alpha_bkw = alpha_bkw

    self.stream_mu_initializer = initializers.get(stream_mu_initializer)
    self.stream_var_initializer = initializers.get(stream_var_initializer)
    self.u_ctrl_initializer = initializers.get(u_ctrl_initializer)
    self.v_ctrl_initializer = initializers.get(v_ctrl_initializer)
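In the Online Normalization scheme (Chiley et al., 2019), alpha_fwd and alpha_bkw are the decay factors of the exponentially decaying averages used for the forward-pass statistics and the backward-pass gradient control, respectively. The stream_* and *_ctrl arguments seed those running estimates, which is why they default to 'zeros' and 'ones' rather than to weight-style initializers.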
Example #6
Source Project: AliNet Author: nju-websoft File: alinet_layer.py License: MIT License
def __init__(self, shape, kernel_initializer='glorot_uniform'):
    super(InputLayer, self).__init__()
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.init_embeds = self.add_weight('embedding', shape=shape,
                                       dtype='float32', trainable=True)
Example #7
Source Project: AliNet Author: nju-websoft File: layers.py License: MIT License
def __init__(self, input_dim, output_dim, adj, num_features_nonzero,
             dropout_rate=0.0, is_sparse_inputs=False, activation=None,
             use_bias=True, kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_regularizer='l2',
             bias_regularizer='l2', activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    super(GraphConvolution, self).__init__()
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.kernels = list()
    self.bias = None
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.is_sparse_inputs = is_sparse_inputs
    self.num_features_nonzero = num_features_nonzero
    self.adjs = [tf.SparseTensor(indices=am[0], values=am[1],
                                 dense_shape=am[2]) for am in adj]
    self.dropout_rate = dropout_rate
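Each entry of adj is expected to be an (indices, values, dense_shape) triple ready to be wrapped in a tf.SparseTensor. An illustrative value (an assumption, not taken from the AliNet code) for a single 3-node adjacency matrix with edges 0→1 and 1→2:

# Illustrative adjacency input for the layer above (assumed shape/content).
adj = [(
    [[0, 1], [1, 2]],  # indices of the nonzero entries
    [1.0, 1.0],        # corresponding values (edge weights)
    [3, 3],            # dense_shape of the full matrix
)]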
Example #8
Source Project: AliNet Author: nju-websoft File: layers.py License: MIT License
def __init__(self, shape, kernel_initializer='glorot_uniform'):
    super(InputLayer, self).__init__()
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.init_embeds = self.add_weight('embedding', shape=shape,
                                       dtype='float32', trainable=True)
    # self.init_embeds = tf.nn.l2_normalize(self.init_embeds, 1)
Example #9
Source Project: AliNet Author: nju-websoft File: layers.py License: MIT License
def __init__(self, input_dim, output_dim, adj, num_features_nonzero,
             dropout_rate=0.0, num_base=-1, is_sparse_inputs=False,
             featureless=False, activation=None, use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer="l2", bias_regularizer=None,
             kernel_constraint=None, bias_constraint=None, **kwargs):
    super(RGraphConvolutionLayer, self).__init__()
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.bias = None
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.is_sparse_inputs = is_sparse_inputs
    self.featureless = featureless
    self.num_features_nonzero = num_features_nonzero
    self.support = len(adj)
    self.adj_list = [tf.SparseTensor(indices=adj[i][0], values=adj[i][1],
                                     dense_shape=adj[i][2])
                     for i in range(len(adj))]
    self.dropout_rate = dropout_rate
    self.num_bases = num_base
    self.W = list()
Example #10
Source Project: pinn Author: PML-UCF File: physics.py License: MIT License
def __init__(self, kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(StressIntensityRange, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
Example #11
Source Project: pinn Author: PML-UCF File: physics.py License: MIT License
def __init__(self, kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(ParisLaw, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
Example #12
Source Project: pinn Author: PML-UCF File: physics.py License: MIT License
def __init__(self, kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(SNCurve, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
Example #13
Source Project: pinn Author: PML-UCF File: physics.py License: MIT License
def __init__(self, kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(WalkerModel, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
Example #14
Source Project: pinn Author: PML-UCF File: core.py License: MIT License
def __init__(self, kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, table_shape=(1, 4, 4, 1), **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(TableInterpolation, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.table_shape = table_shape
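Examples #10 through #14 share one constructor pattern: a legacy input_dim keyword is folded into input_shape before calling the parent constructor (mirroring what tf.keras.layers.Dense does for old-style Sequential usage), and the kernel arguments are routed through the get() helpers so that strings, config dicts, instances, and None are all accepted.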
Example #15
Source Project: nlp-architect Author: NervanaSystems File: temporal_convolutional_network.py License: Apache License 2.0
def build(self, input_shape):
    """Build `Layer`"""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    self.input_spec = InputSpec(shape=input_shape)

    if not self.layer.built:
        self.layer.build(input_shape)
        self.layer.built = False

        if not hasattr(self.layer, "kernel"):
            raise ValueError(
                "`WeightNorm` must wrap a layer that"
                " contains a `kernel` for weights"
            )

        # The kernel's filter or unit dimension is -1
        self.layer_depth = int(self.layer.kernel.shape[-1])
        self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))

        self.layer.v = self.layer.kernel
        self.layer.g = self.layer.add_variable(
            name="g",
            shape=(self.layer_depth,),
            initializer=initializers.get("ones"),
            dtype=self.layer.kernel.dtype,
            trainable=True,
        )

        with ops.control_dependencies(
                [self.layer.g.assign(self._init_norm(self.layer.v))]):
            self._compute_weights()

        self.layer.built = True

    super(WeightNorm, self).build()
    self.built = True

# pylint: disable=arguments-differ
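Weight normalization (Salimans and Kingma, 2016) reparameterizes the wrapped kernel as w = g * v / ||v||. A minimal sketch of the two helpers referenced above (an assumption about their bodies for illustration, not the nlp-architect source):

# Hypothetical helper bodies for the WeightNorm wrapper above.
def _compute_weights(self):
    # Recombine direction v and magnitude g into the effective kernel.
    self.layer.kernel = tf.nn.l2_normalize(
        self.layer.v, axis=self.norm_axes) * self.layer.g

def _init_norm(self, weights):
    # Data-dependent-free initialization: set g to the norm of the kernel.
    return tf.sqrt(tf.reduce_sum(tf.square(weights), axis=self.norm_axes))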
Example #16
Source Project: tf-encrypted Author: tf-encrypted File: convolutional.py License: Apache License 2.0
def __init__(
    self,
    filters,
    kernel_size,
    strides=(1, 1),
    padding="valid",
    data_format=None,
    dilation_rate=(1, 1),
    activation=None,
    use_bias=True,
    kernel_initializer="glorot_uniform",
    bias_initializer="zeros",
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    **kwargs,
):
    super(Conv2D, self).__init__(**kwargs)
    self.rank = 2
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(
        kernel_size, self.rank, "kernel_size"
    )
    if self.kernel_size[0] != self.kernel_size[1]:
        raise NotImplementedError(
            "TF Encrypted currently only supports the same "
            "kernel size along the height and the width. "
            "You gave: {}".format(self.kernel_size)
        )
    self.strides = conv_utils.normalize_tuple(strides, self.rank, "strides")
    self.padding = conv_utils.normalize_padding(padding).upper()
    self.data_format = conv_utils.normalize_data_format(data_format)
    if activation is not None:
        logger.info(
            "Performing an activation before a pooling layer can result "
            "in unnecessary performance loss. Check model definition in "
            "case of missed optimization."
        )
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)

    # Not implemented arguments
    default_args_check(dilation_rate, "dilation_rate", "Conv2D")
    default_args_check(kernel_regularizer, "kernel_regularizer", "Conv2D")
    default_args_check(bias_regularizer, "bias_regularizer", "Conv2D")
    default_args_check(activity_regularizer, "activity_regularizer", "Conv2D")
    default_args_check(kernel_constraint, "kernel_constraint", "Conv2D")
    default_args_check(bias_constraint, "bias_constraint", "Conv2D")
Example #17
Source Project: AliNet Author: nju-websoft File: alinet_layer.py License: MIT License
def __init__(self, input_dim, output_dim, adj, nodes_num, num_features_nonzero,
             alpha=0.0, dropout_rate=0.0, is_sparse_inputs=False,
             featureless=False, activation=None, use_bias=True,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer='l2', bias_regularizer='l2',
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, coef_dropout=0, **kwargs):
    super(AliNetGraphAttentionLayer, self).__init__()
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.is_sparse_inputs = is_sparse_inputs
    self.featureless = featureless
    self.num_features_nonzero = num_features_nonzero
    self.adjs = [tf.SparseTensor(indices=adj[0][0], values=adj[0][1],
                                 dense_shape=adj[0][2])]
    self.dropout_rate = dropout_rate
    self.coef_drop = coef_dropout
    self.nodes_num = nodes_num
    self.alpha = alpha
    self.kernel, self.kernel1, self.kernel2 = None, None, None
    self.mapping = None
    self.bias = None
Example #18
Source Project: rgat Author: babylonhealth File: relational_graph_convolution.py License: Apache License 2.0
def __init__(self, units, relations, kernel_basis_size=None, activation=None,
             use_bias=False, batch_normalisation=False,
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, feature_dropout=None, support_dropout=None,
             name='relational_graph_conv', **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(RelationalGraphConv, self).__init__(
        activity_regularizer=regularizers.get(activity_regularizer),
        name=name, **kwargs)

    self.units = int(units)
    self.relations = int(relations)
    self.kernel_basis_size = (int(kernel_basis_size)
                              if kernel_basis_size is not None else None)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.batch_normalisation = batch_normalisation
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.feature_dropout = feature_dropout
    self.support_dropout = support_dropout
    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)

    self.dense_layer = rgat_layers.BasisDecompositionDense(
        units=self.units * self.relations,
        basis_size=self.kernel_basis_size,
        coefficients_size=self.relations,
        use_bias=False,
        kernel_initializer=self.kernel_initializer,
        kernel_regularizer=self.kernel_regularizer,
        kernel_constraint=self.kernel_constraint,
        name=name + '_basis_decomposition_dense',
        **kwargs)

    if self.batch_normalisation:
        self.batch_normalisation_layer = tf.layers.BatchNormalization()
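The BasisDecompositionDense sub-layer applies the basis decomposition of Schlichtkrull et al. (2018): each relation's kernel is a learned linear combination of kernel_basis_size shared basis matrices, so the parameter count does not grow by a full independent kernel per relation.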
Example #19
Source Project: nlp-architect Author: NervanaSystems File: temporal_convolutional_network.py License: Apache License 2.0
def build_network_graph(self, x, last_timepoint=False):
    """
    Given the input placeholder x, build the entire TCN graph

    Args:
        x: Input placeholder
        last_timepoint: Whether or not to select only the last timepoint to output

    Returns:
        output of the TCN
    """
    # loop and define multiple residual blocks
    with tf.variable_scope("tcn"):
        for i in range(self.n_hidden_layers):
            dilation_size = 2 ** i
            in_channels = self.n_features_in if i == 0 else self.hidden_sizes[i - 1]
            out_channels = self.hidden_sizes[i]
            with tf.variable_scope("residual_block_" + str(i)):
                x = self._residual_block(
                    x,
                    in_channels,
                    out_channels,
                    dilation_size,
                    (self.kernel_size - 1) * dilation_size,
                )
                x = tf.nn.relu(x)
            self.layer_activations.append(x)
        self.sequence_output = x

        # get outputs
        if not last_timepoint:
            prediction = self.sequence_output
        else:
            # last time point size (batch_size, hidden_sizes_encoder)
            width = self.sequence_output.shape[1].value
            lt = tf.squeeze(
                tf.slice(self.sequence_output, [0, width - 1, 0], [-1, 1, -1]),
                axis=1,
            )
            prediction = tf.layers.Dense(
                1,
                kernel_initializer=tf.initializers.random_normal(0, 0.01),
                bias_initializer=tf.initializers.random_normal(0, 0.01),
            )(lt)
    return prediction