Python keras.initializers.Ones() Examples
The following are 7 code examples of keras.initializers.Ones(), each taken from an open-source project; the source file and license are noted above each example. You may also want to check out all other available functions and classes of the keras.initializers module.
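Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the most common use of the initializer: passing it as the kernel_initializer or bias_initializer of a built-in layer.

from keras.initializers import Ones
from keras.layers import Dense
from keras.models import Sequential

# Both the kernel and the bias of this layer start out as all-ones.
model = Sequential([
    Dense(4, input_shape=(3,),
          kernel_initializer=Ones(),
          bias_initializer=Ones()),
])
print(model.get_weights())  # [ones((3, 4)), ones((4,))]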
Example #1
Source File: models.py From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License
def __init__(self, output_dim, **kwargs):
    self.output_dim = output_dim
    super(ScaleLayer, self).__init__(**kwargs)

def build(self, input_shape):
    # Create a trainable weight variable for this layer.
    # Note: the original assigned to `self.weights`, which clashes with the
    # read-only `Layer.weights` property in Keras 2; renamed here.
    self.scale_weights = self.add_weight(
        name='weights',
        shape=(input_shape[1], self.output_dim),
        initializer=Ones(),
        trainable=True)
    super(ScaleLayer, self).build(input_shape)  # Be sure to call this at the end

def call(self, x):
    return x * self.scale_weights

def compute_output_shape(self, input_shape):
    return (input_shape[0], self.output_dim)
Example #2
Source File: layer_normalization.py From keras-utility-layer-collection with MIT License
def build(self, input_shape):
    self._g = self.add_weight(
        name='gain',
        shape=(input_shape[-1],),
        initializer=Ones(),
        trainable=True)
    self._b = self.add_weight(
        name='bias',
        shape=(input_shape[-1],),
        initializer=Zeros(),
        trainable=True)
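The build() above only creates the parameters. For context, here is a sketch of how a complete layer of this shape typically applies them; the call() body is an assumed completion in the style of standard layer normalization, not code from keras-utility-layer-collection.

from keras import backend as K
from keras.initializers import Ones, Zeros
from keras.layers import Layer

class SimpleLayerNorm(Layer):
    """Hypothetical completion: normalize over the last axis, then
    rescale with the trainable gain and shift with the trainable bias."""
    def __init__(self, eps=1e-6, **kwargs):
        self.eps = eps
        super(SimpleLayerNorm, self).__init__(**kwargs)

    def build(self, input_shape):
        self._g = self.add_weight(name='gain', shape=(input_shape[-1],),
                                  initializer=Ones(), trainable=True)
        self._b = self.add_weight(name='bias', shape=(input_shape[-1],),
                                  initializer=Zeros(), trainable=True)
        super(SimpleLayerNorm, self).build(input_shape)

    def call(self, x):
        mean = K.mean(x, axis=-1, keepdims=True)
        std = K.std(x, axis=-1, keepdims=True)
        return self._g * (x - mean) / (std + self.eps) + self._b

Initializing the gain to ones and the bias to zeros means the layer starts out as plain normalization with no rescaling, which is why the Ones/Zeros pair recurs in Examples #3 through #5 below.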
Example #3
Source File: layers.py From BERT with Apache License 2.0
def build(self, input_shape):
    self.gamma = self.add_weight(name='gamma',
                                 shape=input_shape[-1:],
                                 initializer=Ones(),
                                 trainable=True)
    self.beta = self.add_weight(name='beta',
                                shape=input_shape[-1:],
                                initializer=Zeros(),
                                trainable=True)
    super().build(input_shape)
Example #4
Source File: layers.py From BERT-keras with GNU General Public License v3.0
def build(self, input_shape):
    self.gamma = self.add_weight(name='gamma',
                                 shape=input_shape[-1:],
                                 initializer=Ones(),
                                 trainable=True)
    self.beta = self.add_weight(name='beta',
                                shape=input_shape[-1:],
                                initializer=Zeros(),
                                trainable=True)
    super().build(input_shape)
Example #5
Source File: core.py From transformer-keras with Apache License 2.0
def build(self, input_shape):
    self.gamma = self.add_weight(name='gamma',
                                 shape=input_shape[-1:],
                                 initializer=Ones(),
                                 trainable=True)
    self.beta = self.add_weight(name='beta',
                                shape=input_shape[-1:],
                                initializer=Zeros(),
                                trainable=True)
    super(LayerNormalization, self).build(input_shape)
Example #6
Source File: nested_lstm.py From Nested-LSTM with MIT License
def build(self, input_shape):
    input_dim = input_shape[-1]
    self.kernels = []
    self.biases = []

    for i in range(self.depth):
        if i == 0:
            input_kernel = self.add_weight(shape=(input_dim, self.units * 4),
                                           name='input_kernel_%d' % (i + 1),
                                           initializer=self.kernel_initializer,
                                           regularizer=self.kernel_regularizer,
                                           constraint=self.kernel_constraint)
            hidden_kernel = self.add_weight(shape=(self.units, self.units * 4),
                                            name='kernel_%d' % (i + 1),
                                            initializer=self.recurrent_initializer,
                                            regularizer=self.recurrent_regularizer,
                                            constraint=self.recurrent_constraint)
            kernel = K.concatenate([input_kernel, hidden_kernel], axis=0)
        else:
            kernel = self.add_weight(shape=(self.units * 2, self.units * 4),
                                     name='kernel_%d' % (i + 1),
                                     initializer=self.recurrent_initializer,
                                     regularizer=self.recurrent_regularizer,
                                     constraint=self.recurrent_constraint)
        self.kernels.append(kernel)

    if self.use_bias:
        if self.unit_forget_bias:
            def bias_initializer(_, *args, **kwargs):
                # The units * 4 bias covers the input, forget, cell, and
                # output gate blocks; only the forget-gate block starts at one.
                return K.concatenate([
                    self.bias_initializer((self.units,), *args, **kwargs),
                    initializers.Ones()((self.units,), *args, **kwargs),
                    self.bias_initializer((self.units * 2,), *args, **kwargs),
                ])
        else:
            bias_initializer = self.bias_initializer

        for i in range(self.depth):
            bias = self.add_weight(shape=(self.units * 4,),
                                   name='bias_%d' % (i + 1),
                                   initializer=bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)
            self.biases.append(bias)
    else:
        self.biases = None

    self.built = True
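The nested bias_initializer closure implements the usual unit-forget-bias trick: starting the forget gate at one so the cell remembers by default. A quick check of that concatenation (a sketch assuming a Keras 2-style backend; the Zeros() standing in for self.bias_initializer is illustrative):

from keras import backend as K
from keras import initializers

units = 3
bias_init = initializers.Zeros()  # stands in for self.bias_initializer

# Mirrors the closure above: input gate, then forget gate (ones),
# then cell and output gates.
bias = K.concatenate([
    bias_init((units,)),
    initializers.Ones()((units,)),
    bias_init((units * 2,)),
])
print(K.eval(bias))  # [0. 0. 0. 1. 1. 1. 0. 0. 0. 0. 0. 0.]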
Example #7
Source File: graph_emb.py From ccg2lambda with Apache License 2.0
def tp1_node_update(graph_node_embs, node_rel, node_rel_weight, max_nodes,
                    max_bi_relations, embed_dim, label):
    """
    graph_node_embs has shape (batch_size, max_nodes per graph, embed_dim feats).
    """
    dense_dim = embed_dim
    x = gather_layer([graph_node_embs, node_rel])
    logging.debug('After gather3 shape: {0}'.format(x.shape))
    x = Reshape((max_nodes * max_bi_relations, 2 * embed_dim))(x)
    x = TimeDistributed(
        Dense(
            dense_dim,
            kernel_initializer=initializers.Ones(),
            bias_initializer=initializers.Zeros(),
            name=label + '_dense1'))(x)
    # TODO: re-enable the batch normalization.
    # x = BatchNormalization(axis=2, name=label + '_bn1')(x)
    x = Activation('relu')(x)
    x = TimeDistributed(
        Dense(
            dense_dim,
            kernel_initializer=initializers.Ones(),
            bias_initializer=initializers.Zeros(),
            name=label + '_dense2'))(x)
    # x = BatchNormalization(axis=2, name=label + '_bn2')(x)
    x = Activation('relu')(x)
    normalizer = Reshape((max_nodes * max_bi_relations,))(node_rel_weight)
    normalizer = RepeatVector(dense_dim)(normalizer)
    normalizer = Permute((2, 1))(normalizer)
    x = Multiply()([x, normalizer])
    x = Reshape((max_nodes, max_bi_relations, dense_dim))(x)
    x = Lambda(
        lambda xin: K.sum(xin, axis=2),
        output_shape=(None, max_nodes * max_bi_relations, dense_dim),
        name=label + '_integrate')(x)
    return x
    # TODO: Dense use_bias=True
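The RepeatVector/Permute pair in the middle of this function is a broadcast: it turns per-slot weights of shape (batch, max_nodes * max_bi_relations) into a (batch, slots, dense_dim) tensor so they can scale the dense features elementwise via Multiply(). A toy demonstration (the sizes here are illustrative, not from ccg2lambda):

import numpy as np
from keras.layers import Input, Permute, RepeatVector
from keras.models import Model

# Toy sizes: 2 relation slots, dense_dim = 3.
w_in = Input(shape=(2,))
w = RepeatVector(3)(w_in)   # (batch, 2) -> (batch, 3, 2)
w = Permute((2, 1))(w)      # (batch, 3, 2) -> (batch, 2, 3)
m = Model(w_in, w)
print(m.predict(np.array([[10., 20.]])))
# [[[10. 10. 10.]
#   [20. 20. 20.]]]

Each slot weight is now repeated across all dense_dim feature columns, matching the shape of x before the Multiply().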