Python tensorflow.keras.initializers.get() Examples
The following are 30 code examples of tensorflow.keras.initializers.get().
You may also want to check out all available functions/classes of the module tensorflow.keras.initializers, or try the search function.
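initializers.get() is the Keras identifier-resolution helper: it accepts a string name, a {'class_name': ..., 'config': ...} dict, an Initializer instance, or None, and returns the corresponding Initializer object (None is passed through). A minimal sketch of the accepted forms, assuming TensorFlow 2.x; the variable names are ours:

import tensorflow as tf
from tensorflow.keras import initializers

# A string name resolves to an instance of the matching Initializer class.
init = initializers.get('glorot_uniform')
print(type(init).__name__)  # GlorotUniform

# A config dict (as produced by initializers.serialize) is deserialized.
init = initializers.get({'class_name': 'RandomNormal',
                         'config': {'mean': 0.0, 'stddev': 0.05}})

# An Initializer instance is returned unchanged.
init = initializers.get(tf.keras.initializers.Zeros())

# None is passed through as None.
assert initializers.get(None) is None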
Example #1
Source File: custom_activation.py From Echo with MIT License
def call(self, inputs):
    def brelu(x):
        # get shape of X, we are interested in the last axis, which is constant
        shape = K.int_shape(x)
        # last axis
        dim = shape[-1]
        # half of the last axis (+1 if necessary)
        dim2 = dim // 2
        if dim % 2 != 0:
            dim2 += 1
        # multiplier will be a tensor of alternated +1 and -1
        multiplier = K.ones((dim2,))
        multiplier = K.stack([multiplier, -multiplier], axis=-1)
        if dim % 2 != 0:
            multiplier = multiplier[:-1]
        # adjust multiplier shape to the shape of x
        multiplier = K.reshape(multiplier, tuple(1 for _ in shape[:-1]) + (-1,))
        return multiplier * tf.nn.relu(multiplier * x)

    return Lambda(brelu)(inputs)
Example #2
Source File: layers.py From neuron with GNU General Public License v3.0
def build(self, input_shape):
    # Create mean and count
    # These are weights because just maintaining variables don't get saved with the model,
    # and we'd like to have these numbers saved when we save the model.
    # But we need to make sure that the weights are untrainable.
    self.mean = self.add_weight(name='mean',
                                shape=input_shape[1:],
                                initializer='zeros',
                                trainable=False)
    self.count = self.add_weight(name='count',
                                 shape=[1],
                                 initializer='zeros',
                                 trainable=False)

    # self.mean = K.zeros(input_shape[1:], name='mean')
    # self.count = K.variable(0.0, name='count')
    super(MeanStream, self).build(input_shape)  # Be sure to call this somewhere!
Example #3
Source File: graph_conv.py From spektral with MIT License
def __init__(self,
             channels,
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super().__init__(activity_regularizer=activity_regularizer, **kwargs)
    self.channels = channels
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.supports_masking = False
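Example #3 shows the idiom nearly all of these layers share: accept the initializer as a constructor argument in any form initializers.get() understands, resolve it once in __init__, and pass the resolved object to add_weight() in build(). A minimal, self-contained sketch of that pattern (the ToyDense layer below is illustrative, not taken from spektral):

import tensorflow as tf
from tensorflow.keras import initializers


class ToyDense(tf.keras.layers.Layer):
    """Illustrative layer using the initializers.get() idiom."""

    def __init__(self, units, kernel_initializer='glorot_uniform', **kwargs):
        super().__init__(**kwargs)
        self.units = units
        # A string name, a config dict, or an Initializer instance all work here.
        self.kernel_initializer = initializers.get(kernel_initializer)

    def build(self, input_shape):
        # The resolved Initializer goes straight into add_weight().
        self.kernel = self.add_weight(name='kernel',
                                      shape=(int(input_shape[-1]), self.units),
                                      initializer=self.kernel_initializer,
                                      trainable=True)
        super().build(input_shape)

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)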
Example #4
Source File: base.py From megnet with BSD 3-Clause "New" or "Revised" License
def __init__(self,
             activation: OptStrOrCallable = None,
             use_bias: bool = True,
             kernel_initializer: OptStrOrCallable = 'glorot_uniform',
             bias_initializer: OptStrOrCallable = 'zeros',
             kernel_regularizer: OptStrOrCallable = None,
             bias_regularizer: OptStrOrCallable = None,
             activity_regularizer: OptStrOrCallable = None,
             kernel_constraint: OptStrOrCallable = None,
             bias_constraint: OptStrOrCallable = None,
             **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    self.activation = activations.get(activation)  # noqa
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    super().__init__(**kwargs)
Example #5
Source File: diff_pool.py From spektral with MIT License
def __init__(self,
             k,
             channels=None,
             return_mask=False,
             activation=None,
             kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             kernel_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.k = k
    self.channels = channels
    self.return_mask = return_mask
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
Example #6
Source File: condenser.py From TensorNetwork with Apache License 2.0
def __init__(self,
             exp_base: int,
             num_nodes: int,
             use_bias: Optional[bool] = True,
             activation: Optional[Text] = None,
             kernel_initializer: Optional[Text] = 'glorot_uniform',
             bias_initializer: Optional[Text] = 'zeros',
             **kwargs) -> None:
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(DenseCondenser, self).__init__(**kwargs)

    self.exp_base = exp_base
    self.num_nodes = num_nodes
    self.nodes = []
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
Example #7
Source File: layers.py From deepchem with MIT License
def build(self, input_shape):
    self.W_list = []
    self.b_list = []
    init = initializers.get(self.init)
    prev_layer_size = self.n_embedding
    for i, layer_size in enumerate(self.layer_sizes):
        self.W_list.append(init([prev_layer_size, layer_size]))
        self.b_list.append(backend.zeros(shape=[layer_size]))
        prev_layer_size = layer_size
    self.W_list.append(init([prev_layer_size, self.n_outputs]))
    self.b_list.append(backend.zeros(shape=[self.n_outputs]))
    self.built = True
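The DeepChem build() methods rely on the fact that initializers.get() returns an Initializer instance, which is itself callable with a shape and yields a weight tensor. A quick illustration of that behaviour (the variable names are ours):

from tensorflow.keras import initializers

init = initializers.get('glorot_uniform')  # GlorotUniform instance
w = init([128, 64])                        # calling it with a shape returns a tensor
print(w.shape)                             # (128, 64)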
Example #8
Source File: global_pool.py From spektral with MIT License
def __init__(self,
             channels,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.channels = channels
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
Example #9
Source File: dense.py From TensorNetwork with Apache License 2.0
def __init__(self,
             output_dim: int,
             decomp_size: int,
             use_bias: Optional[bool] = True,
             activation: Optional[Text] = None,
             kernel_initializer: Optional[Text] = 'glorot_uniform',
             bias_initializer: Optional[Text] = 'zeros',
             **kwargs) -> None:
    # Allow specification of input_dim instead of input_shape,
    # for compatibility with Keras layers that support this
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(DenseDecomp, self).__init__(**kwargs)

    self.output_dim = output_dim
    self.decomp_size = decomp_size
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
Example #10
Source File: layers.py From deepchem with MIT License
def build(self, input_shape):
    self.W_list = []
    self.b_list = []
    self.dropouts = []
    init = initializers.get(self.init)
    prev_layer_size = self.n_graph_feat
    for layer_size in self.layer_sizes:
        self.W_list.append(init([prev_layer_size, layer_size]))
        self.b_list.append(backend.zeros(shape=[layer_size]))
        if self.dropout is not None and self.dropout > 0.0:
            self.dropouts.append(Dropout(rate=self.dropout))
        else:
            self.dropouts.append(None)
        prev_layer_size = layer_size
    self.W_list.append(init([prev_layer_size, self.n_outputs]))
    self.b_list.append(backend.zeros(shape=[self.n_outputs]))
    if self.dropout is not None and self.dropout > 0.0:
        self.dropouts.append(Dropout(rate=self.dropout))
    else:
        self.dropouts.append(None)
    self.built = True
Example #11
Source File: topk_pool.py From spektral with MIT License
def __init__(self,
             ratio,
             return_mask=False,
             sigmoid_gating=False,
             kernel_initializer='glorot_uniform',
             kernel_regularizer=None,
             kernel_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.ratio = ratio
    self.return_mask = return_mask
    self.sigmoid_gating = sigmoid_gating
    self.gating_op = K.sigmoid if self.sigmoid_gating else K.tanh
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
Example #12
Source File: layers.py From neuron with GNU General Public License v3.0
def call(self, inputx):
    if not inputx.dtype in [tf.complex64, tf.complex128]:
        print('Warning: inputx is not complex. Converting.', file=sys.stderr)
        # if inputx is float, this will assume 0 imag channel
        inputx = tf.cast(inputx, tf.complex64)

    # get the right fft
    if self.ndims == 1:
        fft = tf.fft
    elif self.ndims == 2:
        fft = tf.fft2d
    else:
        fft = tf.fft3d

    perm_dims = [0, self.ndims + 1] + list(range(1, self.ndims + 1))
    invert_perm_ndims = [0] + list(range(2, self.ndims + 2)) + [1]

    perm_inputx = K.permute_dimensions(inputx, perm_dims)  # [batch_size, nb_features, *vol_size]
    fft_inputx = fft(perm_inputx)
    return K.permute_dimensions(fft_inputx, invert_perm_ndims)
Example #13
Source File: UniRepModel.py From tape-neurips2019 with MIT License
def convert_sequence_vocab(self, sequence, sequence_lengths):
    PFAM_TO_UNIREP_ENCODED = {encoding: UNIREP_VOCAB.get(aa, 23)
                              for aa, encoding in PFAM_VOCAB.items()}

    def to_uniprot_unirep(seq, seqlens):
        new_seq = np.zeros_like(seq)

        for pfam_encoding, unirep_encoding in PFAM_TO_UNIREP_ENCODED.items():
            new_seq[seq == pfam_encoding] = unirep_encoding

        # add start/stop
        new_seq = np.pad(new_seq, [[0, 0], [1, 1]], mode='constant')
        new_seq[:, 0] = UNIREP_VOCAB['<START>']
        new_seq[np.arange(new_seq.shape[0]), seqlens + 1] = UNIREP_VOCAB['<STOP>']

        return new_seq

    new_sequence = tf.py_func(to_uniprot_unirep, [sequence, sequence_lengths], sequence.dtype)
    new_sequence.set_shape([sequence.shape[0], sequence.shape[1] + 2])

    return new_sequence
Example #14
Source File: entangler.py From TensorNetwork with Apache License 2.0
def __init__(self,
             output_dim: int,
             num_legs: int,
             num_levels: int,
             use_bias: Optional[bool] = True,
             activation: Optional[Text] = None,
             kernel_initializer: Optional[Text] = 'glorot_uniform',
             bias_initializer: Optional[Text] = 'zeros',
             **kwargs) -> None:
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    assert num_legs >= 2, (
        f'Need at least 2 legs to create Entangler but got {num_legs} legs')
    assert num_levels >= 1, (
        f'Need at least 1 level to create Entangler but got {num_levels} levels')

    super(DenseEntangler, self).__init__(**kwargs)

    self.output_dim = output_dim
    self.num_legs = num_legs
    self.num_levels = num_levels
    self.nodes = []
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
Example #15
Source File: layers.py From neuron with GNU General Public License v3.0
def __init__(self,
             filters,
             kernel_size,
             strides=(1, 1, 1),
             padding='valid',
             data_format=None,
             activation=None,
             use_bias=True,
             kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    super(LocallyConnected3D, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 3, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    if self.padding != 'valid':
        raise ValueError('Invalid border mode for LocallyConnected3D '
                         '(only "valid" is supported): ' + padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=5)
Example #16
Source File: layers.py From neuron with GNU General Public License v3.0
def call(self, x):
    # get new mean and count
    this_bs_int = K.shape(x)[0]
    new_mean, new_count = _mean_update(self.mean, self.count, x, self.cap)

    # update op
    updates = [(self.count, new_count), (self.mean, new_mean)]
    self.add_update(updates, x)

    # prep for broadcasting :(
    p = tf.concat((K.reshape(this_bs_int, (1,)), K.shape(self.mean)), 0)
    z = tf.ones(p)

    # the first few 1000 should not matter that much towards this cost
    return K.minimum(1., new_count / self.cap) * (z * K.expand_dims(new_mean, 0))
Example #17
Source File: qlayers.py From qkeras with Apache License 2.0
def get_auto_range_constraint_initializer(quantizer, constraint, initializer):
    """Get value range automatically for quantizer.

    Arguments:
      quantizer: A quantizer class in quantizers.py.
      constraint: A tf.keras constraint.
      initializer: A tf.keras initializer.

    Returns:
      a tuple (constraint, initializer), where
        constraint is clipped by the Clip class in this file, based on the
          value range of quantizer.
        initializer is the initializer constrained by the value range of quantizer.
    """
    if quantizer is not None:
        # let's use now symmetric clipping function
        max_value = max(1, quantizer.max()) if hasattr(quantizer, "max") else 1.0
        min_value = quantizer.min() if hasattr(quantizer, "min") else -1.0

        if constraint:
            constraint = constraints.get(constraint)
        constraint = Clip(-max_value, max_value, constraint, quantizer)

        initializer = initializers.get(initializer)
        if initializer and initializer.__class__.__name__ not in ["Ones", "Zeros"]:
            # we want to get the max value of the quantizer that depends
            # on the distribution and scale
            if not (hasattr(quantizer, "alpha") and
                    isinstance(quantizer.alpha, six.string_types)):
                initializer = QInitializer(
                    initializer, use_scale=True, quantizer=quantizer)
    return constraint, initializer
Example #18
Source File: qlayers.py From qkeras with Apache License 2.0
def get_config(self):
    return {
        "initializer": self.initializer,
        "use_scale": self.use_scale,
        "quantizer": self.quantizer,
    }


#
# Because it may be hard to get serialization from activation functions,
# we may be replacing their instantiation by QActivation in the future.
#
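get_config() is the other half of the contract that initializers.get() serves: anything a layer stores in its config must be something get() can rebuild when the model is deserialized. A minimal sketch of that round trip using stock Keras initializers (exact dict contents may vary across TF versions):

from tensorflow.keras import initializers

init = initializers.get('glorot_uniform')

# serialize() produces the dict a layer would store in get_config().
config = initializers.serialize(init)  # e.g. {'class_name': 'GlorotUniform', 'config': {...}}

# get() accepts that dict and rebuilds an equivalent Initializer.
restored = initializers.get(config)
assert type(restored).__name__ == 'GlorotUniform'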
Example #19
Source File: qlayers.py From qkeras with Apache License 2.0
def __init__(self, min_value=0.0, max_value=1.0, constraint=None, quantizer=None):
    """Initializes Clip constraint class."""

    self.min_value = min_value
    self.max_value = max_value
    self.constraint = constraints.get(constraint)
    # Don't wrap yourself
    if isinstance(self.constraint, Clip):
        self.constraint = None
    self.quantizer = get_quantizer(quantizer)
Example #20
Source File: layers.py From deepchem with MIT License
def build(self, input_shape):
    """Constructs learnable weights for this layer."""
    init = initializers.get(self.init)
    inner_init = initializers.get(self.inner_init)
    self.W = init((self.input_dim, 4 * self.output_dim))
    self.U = inner_init((self.output_dim, 4 * self.output_dim))
    self.b = tf.Variable(
        np.hstack((np.zeros(self.output_dim), np.ones(self.output_dim),
                   np.zeros(self.output_dim), np.zeros(self.output_dim))),
        dtype=tf.float32)
    self.built = True
Example #21
Source File: mpo.py From TensorNetwork with Apache License 2.0
def __init__(self,
             output_dim: int,
             num_nodes: int,
             bond_dim: int,
             use_bias: Optional[bool] = True,
             activation: Optional[Text] = None,
             kernel_initializer: Optional[Text] = 'glorot_uniform',
             bias_initializer: Optional[Text] = 'zeros',
             **kwargs) -> None:
    # Allow specification of input_dim instead of input_shape,
    # for compatibility with Keras layers that support this
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    assert num_nodes > 2, 'Need at least 3 nodes to create MPO.'

    super(DenseMPO, self).__init__(**kwargs)

    self.output_dim = output_dim
    self.num_nodes = num_nodes
    self.bond_dim = bond_dim
    self.nodes = []
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
Example #22
Source File: group_norm.py From 3d-brain-tumor-segmentation with Apache License 2.0
def __init__(self,
             groups=8,
             axis=-1,
             epsilon=1e-5,
             center=True,
             scale=True,
             beta_initializer='zeros',
             gamma_initializer='ones',
             beta_regularizer=None,
             gamma_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             **kwargs):
    """Initializes one group normalization layer.

    References:
      - [Group Normalization](https://arxiv.org/abs/1803.08494)
    """
    super(GroupNormalization, self).__init__(**kwargs)
    self.supports_masking = True
    self.groups = groups
    self.axis = axis
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = initializers.get(beta_initializer)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
Example #23
Source File: set2set.py From megnet with BSD 3-Clause "New" or "Revised" License
def __init__(self,
             T=3,
             n_hidden=512,
             activation=None,
             activation_lstm='tanh',
             recurrent_activation='hard_sigmoid',
             kernel_initializer='glorot_uniform',
             recurrent_initializer='orthogonal',
             bias_initializer='zeros',
             use_bias=True,
             unit_forget_bias=True,
             kernel_regularizer=None,
             recurrent_regularizer=None,
             bias_regularizer=None,
             kernel_constraint=None,
             recurrent_constraint=None,
             bias_constraint=None,
             **kwargs):
    super().__init__(**kwargs)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.activation_lstm = activations.get(activation_lstm)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.unit_forget_bias = unit_forget_bias
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.T = T
    self.n_hidden = n_hidden
Example #24
Source File: FRN.py From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self,
             epsilon=1e-6,
             beta_initializer='zeros',
             gamma_initializer='ones',
             tau_initializers='zeros',
             beta_regularizer=None,
             gamma_regularizer=None,
             tau_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             tau_constraint=None,
             **kwargs):
    super(FRN, self).__init__(**kwargs)
    self.supports_masking = True
    self.epsilon = epsilon
    self.beta_initializer = initializers.get(beta_initializer)
    self.tau_initializer = initializers.get(tau_initializers)
    self.gamma_initializer = initializers.get(gamma_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.tau_regularizer = regularizers.get(tau_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    self.gamma_constraint = constraints.get(gamma_constraint)
    self.tau_constraint = constraints.get(tau_constraint)
    self.tau = None
    self.gamma = None
    self.beta = None
    self.axis = -1
Example #25
Source File: Refinenet.py From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, weights=None, axis=-1, momentum=0.9,
             beta_init='zero', gamma_init='one', **kwargs):
    self.momentum = momentum
    self.axis = axis
    self.beta_init = initializers.get(beta_init)
    self.gamma_init = initializers.get(gamma_init)
    self.initial_weights = weights
    super(Scale, self).__init__(**kwargs)
Example #26
Source File: custom_activation.py From Echo with MIT License
def __init__(
    self,
    alpha_initializer="zeros",
    b_initializer="zeros",
    S=1,
    alpha_regularizer=None,
    b_regularizer=None,
    alpha_constraint=None,
    b_constraint=None,
    shared_axes=None,
    **kwargs
):
    super(APL, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    self.b_initializer = initializers.get(b_initializer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.b_constraint = constraints.get(b_constraint)
    if shared_axes is None:
        self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
        self.shared_axes = [shared_axes]
    else:
        self.shared_axes = list(shared_axes)
    self.S = S
    self.alpha_arr = []
    self.b_arr = []
Example #27
Source File: layers.py From deepchem with MIT License
def __init__(self,
             n_graph_feat=30,
             n_outputs=30,
             max_atoms=50,
             layer_sizes=[100],
             init='glorot_uniform',
             activation='relu',
             dropout=None,
             **kwargs):
    """DAG vector gathering layer

    Parameters
    ----------
    n_graph_feat: int, optional
      Number of features for each atom.
    n_outputs: int, optional
      Number of features for each molecule.
    max_atoms: int, optional
      Maximum number of atoms in molecules.
    layer_sizes: list of int, optional
      List of hidden layer size(s): length of this list represents the number
      of hidden layers, and each element is the width of the corresponding hidden layer.
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied.
    dropout: float, optional
      Dropout probability in the hidden layer(s).
    """
    super(DAGGather, self).__init__(**kwargs)
    self.init = init  # Set weight initialization
    self.activation = activation  # Get activations
    self.activation_fn = activations.get(activation)
    self.layer_sizes = layer_sizes
    self.dropout = dropout
    self.max_atoms = max_atoms
    self.n_graph_feat = n_graph_feat
    self.n_outputs = n_outputs
Example #28
Source File: layers.py From deepchem with MIT License
def __init__(self,
             output_dim,
             input_dim,
             init_fn='glorot_uniform',
             inner_init_fn='orthogonal',
             activation_fn='tanh',
             inner_activation_fn='hard_sigmoid',
             **kwargs):
    """
    Parameters
    ----------
    output_dim: int
      Dimensionality of output vectors.
    input_dim: int
      Dimensionality of input vectors.
    init_fn: str
      TensorFlow initialization to use for W.
    inner_init_fn: str
      TensorFlow initialization to use for U.
    activation_fn: str
      TensorFlow activation to use for output.
    inner_activation_fn: str
      TensorFlow activation to use for inner steps.
    """
    super(LSTMStep, self).__init__(**kwargs)
    self.init = init_fn
    self.inner_init = inner_init_fn
    self.output_dim = output_dim
    # No other forget biases supported right now.
    self.activation = activation_fn
    self.inner_activation = inner_activation_fn
    self.activation_fn = activations.get(activation_fn)
    self.inner_activation_fn = activations.get(inner_activation_fn)
    self.input_dim = input_dim
Example #29
Source File: layers.py From deepchem with MIT License
def build(self, input_shape):
    """Construct internal trainable weights."""
    init = initializers.get(self.init)  # Set weight initialization

    self.W_AA = init([self.n_atom_input_feat, self.n_hidden_AA])
    self.b_AA = backend.zeros(shape=[self.n_hidden_AA])
    self.W_PA = init([self.n_pair_input_feat, self.n_hidden_PA])
    self.b_PA = backend.zeros(shape=[self.n_hidden_PA])
    self.W_A = init([self.n_hidden_A, self.n_atom_output_feat])
    self.b_A = backend.zeros(shape=[self.n_atom_output_feat])

    if self.update_pair:
        self.W_AP = init([self.n_atom_input_feat * 2, self.n_hidden_AP])
        self.b_AP = backend.zeros(shape=[self.n_hidden_AP])
        self.W_PP = init([self.n_pair_input_feat, self.n_hidden_PP])
        self.b_PP = backend.zeros(shape=[self.n_hidden_PP])
        self.W_P = init([self.n_hidden_P, self.n_pair_output_feat])
        self.b_P = backend.zeros(shape=[self.n_pair_output_feat])
    self.built = True
Example #30
Source File: layers.py From deepchem with MIT License
def __init__(self,
             batch_size,
             n_input=128,
             gaussian_expand=False,
             init='glorot_uniform',
             activation='tanh',
             epsilon=1e-3,
             momentum=0.99,
             **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    n_input: int, optional
      number of features for each input molecule
    gaussian_expand: boolean, optional
      Whether to expand each dimension of atomic features by gaussian histogram
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
    super(WeaveGather, self).__init__(**kwargs)
    self.n_input = n_input
    self.batch_size = batch_size
    self.gaussian_expand = gaussian_expand
    self.init = init  # Set weight initialization
    self.activation = activation  # Get activations
    self.activation_fn = activations.get(activation)
    self.epsilon = epsilon
    self.momentum = momentum