Python tensorflow.keras.activations.get() Examples

The following are 30 code examples of tensorflow.keras.activations.get(), collected from open-source projects. Each example notes its source file, project, and license. You may also want to check out the other functions and classes of the module tensorflow.keras.activations.
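As a quick orientation (a minimal sketch, not taken from any of the projects below): activations.get() accepts a string name, a callable, or None, and always returns a callable.

import tensorflow as tf
from tensorflow.keras import activations

relu = activations.get('relu')        # string name -> built-in function
tanh = activations.get(tf.math.tanh)  # callable -> returned unchanged
linear = activations.get(None)        # None -> linear (identity) activation
print(relu(tf.constant([-1.0, 2.0])).numpy())  # [0. 2.]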
Example #1
Source File: activations.py    From megnet with BSD 3-Clause "New" or "Revised" License
def get(identifier: OptStrOrCallable = None) -> Callable[..., Any]:
    """
    Get an activation function by identifier.

    Args:
        identifier (str or callable): the identifier of the activation

    Returns:
        callable: the activation function

    """
    try:
        return keras_get(identifier)
    except ValueError:
        if isinstance(identifier, str):
            return deserialize(identifier, custom_objects=globals())
        else:
            raise ValueError(f'Could not interpret: {identifier}')
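Hypothetical usage of the wrapper above, assuming the module's globals() define a custom activation such as megnet's softplus2:

act = get('relu')       # resolved directly by keras_get
act = get('softplus2')  # keras_get raises ValueError; falls back to deserialize
act = get(None)         # keras_get returns the linear activation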
Example #2
Source File: condenser.py    From TensorNetwork with Apache License 2.0
def __init__(self,
               exp_base: int,
               num_nodes: int,
               use_bias: Optional[bool] = True,
               activation: Optional[Text] = None,
               kernel_initializer: Optional[Text] = 'glorot_uniform',
               bias_initializer: Optional[Text] = 'zeros',
               **kwargs) -> None:

    # Allow specification of input_dim instead of input_shape,
    # for compatibility with Keras layers that support this.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(DenseCondenser, self).__init__(**kwargs)

    self.exp_base = exp_base
    self.num_nodes = num_nodes
    self.nodes = []
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer) 
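This constructor, like most that follow, resolves activation and initializer identifiers once in __init__ and applies them later in call(). A minimal, self-contained sketch of the pattern (TinyDense is hypothetical, not part of TensorNetwork):

import tensorflow as tf
from tensorflow.keras import activations, initializers

class TinyDense(tf.keras.layers.Layer):

  def __init__(self, units, activation=None, **kwargs):
    super().__init__(**kwargs)
    self.units = units
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get('glorot_uniform')

  def build(self, input_shape):
    self.kernel = self.add_weight(
        name='kernel',
        shape=(int(input_shape[-1]), self.units),
        initializer=self.kernel_initializer)

  def call(self, inputs):
    return self.activation(tf.matmul(inputs, self.kernel))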
Example #3
Source File: diff_pool.py    From spektral with MIT License
def __init__(self,
                 k,
                 channels=None,
                 return_mask=False,
                 activation=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):

        super().__init__(**kwargs)
        self.k = k
        self.channels = channels
        self.return_mask = return_mask
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint) 
Example #4
Source File: dense.py    From TensorNetwork with Apache License 2.0
def __init__(self,
               output_dim: int,
               decomp_size: int,
               use_bias: Optional[bool] = True,
               activation: Optional[Text] = None,
               kernel_initializer: Optional[Text] = 'glorot_uniform',
               bias_initializer: Optional[Text] = 'zeros',
               **kwargs) -> None:

    # Allow specification of input_dim instead of input_shape,
    # for compatibility with Keras layers that support this
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(DenseDecomp, self).__init__(**kwargs)

    self.output_dim = output_dim
    self.decomp_size = decomp_size

    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer) 
Example #5
Source File: UniRepModel.py    From tape-neurips2019 with MIT License
def convert_sequence_vocab(self, sequence, sequence_lengths):
        PFAM_TO_UNIREP_ENCODED = {encoding: UNIREP_VOCAB.get(aa, 23) for aa, encoding in PFAM_VOCAB.items()}

        def to_uniprot_unirep(seq, seqlens):
            new_seq = np.zeros_like(seq)

            for pfam_encoding, unirep_encoding in PFAM_TO_UNIREP_ENCODED.items():
                new_seq[seq == pfam_encoding] = unirep_encoding

            # add start/stop
            new_seq = np.pad(new_seq, [[0, 0], [1, 1]], mode='constant')
            new_seq[:, 0] = UNIREP_VOCAB['<START>']
            new_seq[np.arange(new_seq.shape[0]), seqlens + 1] = UNIREP_VOCAB['<STOP>']

            return new_seq

        new_sequence = tf.py_func(to_uniprot_unirep, [sequence, sequence_lengths], sequence.dtype)
        new_sequence.set_shape([sequence.shape[0], sequence.shape[1] + 2])

        return new_sequence 
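A toy NumPy illustration of the remapping above, with hypothetical two-token vocabularies (the real PFAM_VOCAB and UNIREP_VOCAB are much larger):

import numpy as np

PFAM_VOCAB = {'A': 0, 'C': 1}
UNIREP_VOCAB = {'A': 5, 'C': 7, '<START>': 1, '<STOP>': 2}
table = {enc: UNIREP_VOCAB.get(aa, 23) for aa, enc in PFAM_VOCAB.items()}

seq = np.array([[0, 1, 0]])  # one PFAM-encoded sequence
seqlens = np.array([3])
new_seq = np.zeros_like(seq)
for pfam_enc, unirep_enc in table.items():
    new_seq[seq == pfam_enc] = unirep_enc
new_seq = np.pad(new_seq, [[0, 0], [1, 1]], mode='constant')
new_seq[:, 0] = UNIREP_VOCAB['<START>']
new_seq[np.arange(new_seq.shape[0]), seqlens + 1] = UNIREP_VOCAB['<STOP>']
print(new_seq)  # [[1 5 7 5 2]]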
Example #6
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    self.W_list = []
    self.b_list = []
    init = initializers.get(self.init)
    prev_layer_size = self.n_embedding
    for i, layer_size in enumerate(self.layer_sizes):
      self.W_list.append(init([prev_layer_size, layer_size]))
      self.b_list.append(backend.zeros(shape=[layer_size]))
      prev_layer_size = layer_size
    self.W_list.append(init([prev_layer_size, self.n_outputs]))
    self.b_list.append(backend.zeros(shape=[self.n_outputs]))
    self.built = True 
Example #7
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    self.W_list = []
    self.b_list = []
    self.dropouts = []
    init = initializers.get(self.init)
    prev_layer_size = self.n_graph_feat
    for layer_size in self.layer_sizes:
      self.W_list.append(init([prev_layer_size, layer_size]))
      self.b_list.append(backend.zeros(shape=[layer_size]))
      if self.dropout is not None and self.dropout > 0.0:
        self.dropouts.append(Dropout(rate=self.dropout))
      else:
        self.dropouts.append(None)
      prev_layer_size = layer_size
    self.W_list.append(init([prev_layer_size, self.n_outputs]))
    self.b_list.append(backend.zeros(shape=[self.n_outputs]))
    if self.dropout is not None and self.dropout > 0.0:
      self.dropouts.append(Dropout(rate=self.dropout))
    else:
      self.dropouts.append(None)
    self.built = True 
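The W_list/b_list/dropouts built above are consumed by a forward pass along the lines of this hedged sketch (not deepchem's exact call(), but consistent with the weights created here):

import tensorflow as tf

def dense_stack(x, W_list, b_list, dropouts, activation_fn, training=False):
  for W, b, dropout in zip(W_list, b_list, dropouts):
    x = activation_fn(tf.matmul(x, W) + b)
    if dropout is not None:
      x = dropout(x, training=training)
  return x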
Example #8
Source File: gin_conv.py    From spektral with MIT License
def __init__(self,
                 channels,
                 epsilon=None,
                 mlp_hidden=None,
                 mlp_activation='relu',
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(aggregate='sum',
                         activation=activation,
                         use_bias=use_bias,
                         kernel_initializer=kernel_initializer,
                         bias_initializer=bias_initializer,
                         kernel_regularizer=kernel_regularizer,
                         bias_regularizer=bias_regularizer,
                         activity_regularizer=activity_regularizer,
                         kernel_constraint=kernel_constraint,
                         bias_constraint=bias_constraint,
                         **kwargs)
        self.channels = self.output_dim = channels
        self.epsilon = epsilon
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.mlp_activation = activations.get(mlp_activation) 
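A hedged sketch (not spektral's actual build()) of how the stored mlp_hidden and mlp_activation are typically consumed: hidden layers use mlp_activation, the output layer uses the main activation.

from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

def make_mlp(channels, mlp_hidden, mlp_activation, activation):
  layers = [Dense(units, activation=mlp_activation) for units in mlp_hidden]
  layers.append(Dense(channels, activation=activation))
  return Sequential(layers)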
Example #9
Source File: base.py    From spektral with MIT License
def __init__(self,
                 input_dim_1=None,
                 activation=None,
                 **kwargs):

        super(MinkowskiProduct, self).__init__(**kwargs)
        self.input_dim_1 = input_dim_1
        self.activation = activations.get(activation) 
Example #10
Source File: mincut_pool.py    From spektral with MIT License
def __init__(self,
                 k,
                 mlp_hidden=None,
                 mlp_activation='relu',
                 return_mask=False,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super().__init__(**kwargs)
        self.k = k
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.mlp_activation = mlp_activation
        self.return_mask = return_mask
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint) 
Example #11
Source File: edge_conv.py    From spektral with MIT License
def __init__(self,
                 channels,
                 mlp_hidden=None,
                 mlp_activation='relu',
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(aggregate='sum',
                         activation=activation,
                         use_bias=use_bias,
                         kernel_initializer=kernel_initializer,
                         bias_initializer=bias_initializer,
                         kernel_regularizer=kernel_regularizer,
                         bias_regularizer=bias_regularizer,
                         activity_regularizer=activity_regularizer,
                         kernel_constraint=kernel_constraint,
                         bias_constraint=bias_constraint,
                         **kwargs)
        self.channels = self.output_dim = channels
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.mlp_activation = activations.get(mlp_activation) 
Example #12
Source File: arma_conv.py    From spektral with MIT License
def __init__(self,
                 channels,
                 order=1,
                 iterations=1,
                 share_weights=False,
                 gcn_activation='relu',
                 dropout_rate=0.0,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(channels,
                         activation=activation,
                         use_bias=use_bias,
                         kernel_initializer=kernel_initializer,
                         bias_initializer=bias_initializer,
                         kernel_regularizer=kernel_regularizer,
                         bias_regularizer=bias_regularizer,
                         activity_regularizer=activity_regularizer,
                         kernel_constraint=kernel_constraint,
                         bias_constraint=bias_constraint,
                         **kwargs)
        self.iterations = iterations
        self.order = order
        self.share_weights = share_weights
        self.gcn_activation = activations.get(gcn_activation)
        self.dropout_rate = dropout_rate 
Example #13
Source File: appnp.py    From spektral with MIT License
def __init__(self,
                 channels,
                 alpha=0.2,
                 propagations=1,
                 mlp_hidden=None,
                 mlp_activation='relu',
                 dropout_rate=0.0,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(channels,
                         activation=activation,
                         use_bias=use_bias,
                         kernel_initializer=kernel_initializer,
                         bias_initializer=bias_initializer,
                         kernel_regularizer=kernel_regularizer,
                         bias_regularizer=bias_regularizer,
                         activity_regularizer=activity_regularizer,
                         kernel_constraint=kernel_constraint,
                         bias_constraint=bias_constraint,
                         **kwargs)
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.alpha = alpha
        self.propagations = propagations
        self.mlp_activation = activations.get(mlp_activation)
        self.dropout_rate = dropout_rate 
Example #14
Source File: base.py    From spektral with MIT License
def __init__(self,
                 trainable_kernel=False,
                 activation=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):

        super().__init__(**kwargs)
        self.trainable_kernel = trainable_kernel
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint) 
Example #15
Source File: entangler.py    From TensorNetwork with Apache License 2.0
def __init__(self,
               output_dim: int,
               num_legs: int,
               num_levels: int,
               use_bias: Optional[bool] = True,
               activation: Optional[Text] = None,
               kernel_initializer: Optional[Text] = 'glorot_uniform',
               bias_initializer: Optional[Text] = 'zeros',
               **kwargs) -> None:

    # Allow specification of input_dim instead of input_shape,
    # for compatibility with Keras layers that support this.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    assert (
        num_legs >=
        2), f'Need at least 2 legs to create Entangler but got {num_legs} legs'
    assert (
        num_levels >= 1
    ), f'Need at least 1 level to create Entangler but got {num_levels} levels'

    super(DenseEntangler, self).__init__(**kwargs)

    self.output_dim = output_dim
    self.num_legs = num_legs
    self.num_levels = num_levels
    self.nodes = []
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer) 
Example #16
Source File: mpo.py    From TensorNetwork with Apache License 2.0
def __init__(self,
               output_dim: int,
               num_nodes: int,
               bond_dim: int,
               use_bias: Optional[bool] = True,
               activation: Optional[Text] = None,
               kernel_initializer: Optional[Text] = 'glorot_uniform',
               bias_initializer: Optional[Text] = 'zeros',
               **kwargs) -> None:

    # Allow specification of input_dim instead of input_shape,
    # for compatibility with Keras layers that support this
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    assert num_nodes > 2, 'Need at least 3 nodes to create MPO.'

    super(DenseMPO, self).__init__(**kwargs)

    self.output_dim = output_dim
    self.num_nodes = num_nodes
    self.bond_dim = bond_dim
    self.nodes = []
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer) 
Example #17
Source File: set2set.py    From megnet with BSD 3-Clause "New" or "Revised" License
def __init__(self,
                 T=3,
                 n_hidden=512,
                 activation=None,
                 activation_lstm='tanh',
                 recurrent_activation='hard_sigmoid',
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 use_bias=True,
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super().__init__(**kwargs)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.activation_lstm = activations.get(activation_lstm)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.T = T
        self.n_hidden = n_hidden 
Example #18
Source File: layers.py    From deepchem with MIT License
def __init__(self,
               output_dim,
               input_dim,
               init_fn='glorot_uniform',
               inner_init_fn='orthogonal',
               activation_fn='tanh',
               inner_activation_fn='hard_sigmoid',
               **kwargs):
    """
    Parameters
    ----------
    output_dim: int
      Dimensionality of output vectors.
    input_dim: int
      Dimensionality of input vectors.
    init_fn: str
      TensorFlow initialization to use for W.
    inner_init_fn: str
      TensorFlow initialization to use for U.
    activation_fn: str
      TensorFlow activation to use for output.
    inner_activation_fn: str
      TensorFlow activation to use for inner steps.
    """

    super(LSTMStep, self).__init__(**kwargs)
    self.init = init_fn
    self.inner_init = inner_init_fn
    self.output_dim = output_dim
    # No other forget biases supported right now.
    self.activation = activation_fn
    self.inner_activation = inner_activation_fn
    self.activation_fn = activations.get(activation_fn)
    self.inner_activation_fn = activations.get(inner_activation_fn)
    self.input_dim = input_dim 
Example #19
Source File: keras.py    From spektral with MIT License
def deserialize_kwarg(key, attr):
    if key.endswith('_initializer'):
        return initializers.get(attr)
    if key.endswith('_regularizer'):
        return regularizers.get(attr)
    if key.endswith('_constraint'):
        return constraints.get(attr)
    if key == 'activation':
        return activations.get(attr)
    return attr
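Hypothetical usage: string-valued kwargs are deserialized into Keras objects according to their key.

kwargs = {'kernel_initializer': 'glorot_uniform',
          'bias_regularizer': 'l2',
          'activation': 'relu'}
deserialized = {k: deserialize_kwarg(k, v) for k, v in kwargs.items()}
# deserialized['activation'] is now the callable tf.keras.activations.relu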
Example #20
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    n_hidden = self.n_hidden
    init = initializers.get(self.init)
    self.Wz = init([n_hidden, n_hidden])
    self.Wr = init([n_hidden, n_hidden])
    self.Wh = init([n_hidden, n_hidden])
    self.Uz = init([n_hidden, n_hidden])
    self.Ur = init([n_hidden, n_hidden])
    self.Uh = init([n_hidden, n_hidden])
    self.bz = backend.zeros(shape=(n_hidden,))
    self.br = backend.zeros(shape=(n_hidden,))
    self.bh = backend.zeros(shape=(n_hidden,))
    self.built = True 
Example #21
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    n_pair_features = self.n_pair_features
    n_hidden = self.n_hidden
    init = initializers.get(self.init)
    self.W = init([n_pair_features, n_hidden * n_hidden])
    self.b = backend.zeros(shape=(n_hidden * n_hidden,))
    self.built = True 
Example #22
Source File: layers.py    From deepchem with MIT License
def __init__(self,
               n_graph_feat=30,
               n_outputs=30,
               max_atoms=50,
               layer_sizes=[100],
               init='glorot_uniform',
               activation='relu',
               dropout=None,
               **kwargs):
    """DAG vector gathering layer

    Parameters
    ----------
    n_graph_feat: int, optional
      Number of features for each atom.
    n_outputs: int, optional
      Number of features for each molecule.
    max_atoms: int, optional
      Maximum number of atoms in molecules.
    layer_sizes: list of int, optional
      List of hidden layer size(s): the length of this list gives the number
      of hidden layers, and each element is the width of the corresponding
      hidden layer.
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied.
    dropout: float, optional
      Dropout probability in the hidden layer(s).
    """
    super(DAGGather, self).__init__(**kwargs)
    self.init = init  # Save the weight-initializer identifier
    self.activation = activation  # Save the activation identifier
    self.activation_fn = activations.get(activation)
    self.layer_sizes = layer_sizes
    self.dropout = dropout
    self.max_atoms = max_atoms
    self.n_graph_feat = n_graph_feat
    self.n_outputs = n_outputs 
Example #23
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    """"Construct internal trainable weights."""
    self.W_list = []
    self.b_list = []
    self.dropouts = []
    init = initializers.get(self.init)
    prev_layer_size = self.n_inputs
    for layer_size in self.layer_sizes:
      self.W_list.append(init([prev_layer_size, layer_size]))
      self.b_list.append(backend.zeros(shape=[layer_size]))
      if self.dropout is not None and self.dropout > 0.0:
        self.dropouts.append(Dropout(rate=self.dropout))
      else:
        self.dropouts.append(None)
      prev_layer_size = layer_size
    self.W_list.append(init([prev_layer_size, self.n_outputs]))
    self.b_list.append(backend.zeros(shape=[self.n_outputs]))
    if self.dropout is not None and self.dropout > 0.0:
      self.dropouts.append(Dropout(rate=self.dropout))
    else:
      self.dropouts.append(None)
    self.built = True 
Example #24
Source File: layers.py    From deepchem with MIT License
def __init__(self,
               n_embedding=30,
               n_outputs=100,
               layer_sizes=[100],
               output_activation=True,
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    n_outputs: int, optional
      Number of features for each molecule (output)
    layer_sizes: list of int, optional (default=[100])
      Structure of hidden layer(s)
    output_activation: bool, optional
      Whether to apply the activation to the output layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
    super(DTNNGather, self).__init__(**kwargs)
    self.n_embedding = n_embedding
    self.n_outputs = n_outputs
    self.layer_sizes = layer_sizes
    self.output_activation = output_activation
    self.init = init  # Save the weight-initializer identifier
    self.activation = activation  # Save the activation identifier
    self.activation_fn = activations.get(activation)
Example #25
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    init = initializers.get(self.init)
    self.W_cf = init([self.n_embedding, self.n_hidden])
    self.W_df = init([self.n_distance, self.n_hidden])
    self.W_fc = init([self.n_hidden, self.n_embedding])
    self.b_cf = backend.zeros(shape=[self.n_hidden])
    self.b_df = backend.zeros(shape=[self.n_hidden])
    self.built = True 
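A hedged, simplified sketch of how these weights might be combined in a DTNN interaction step (neighbor indexing omitted; not deepchem's exact call()):

import tensorflow as tf

def dtnn_step(atom_features, distance, W_cf, b_cf, W_df, b_df, W_fc, act):
  atom_hidden = tf.matmul(atom_features, W_cf) + b_cf  # (n, n_hidden)
  dist_hidden = tf.matmul(distance, W_df) + b_df       # (n, n_hidden)
  return act(tf.matmul(atom_hidden * dist_hidden, W_fc))  # (n, n_embedding)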
Example #26
Source File: layers.py    From deepchem with MIT License
def __init__(self,
               n_embedding=30,
               n_distance=100,
               n_hidden=60,
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    n_distance: int, optional
      Granularity of the distance matrix
    n_hidden: int, optional
      Number of nodes in hidden layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
    super(DTNNStep, self).__init__(**kwargs)
    self.n_embedding = n_embedding
    self.n_distance = n_distance
    self.n_hidden = n_hidden
    self.init = init  # Save the weight-initializer identifier
    self.activation = activation  # Save the activation identifier
    self.activation_fn = activations.get(activation)
Example #27
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    init = initializers.get(self.init)
    self.embedding_list = init([self.periodic_table_length, self.n_embedding])
    self.built = True 
Example #28
Source File: layers.py    From deepchem with MIT License
def __init__(self,
               batch_size,
               n_input=128,
               gaussian_expand=False,
               init='glorot_uniform',
               activation='tanh',
               epsilon=1e-3,
               momentum=0.99,
               **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    n_input: int, optional
      number of features for each input molecule
    gaussian_expand: bool, optional
      Whether to expand each dimension of atomic features by a Gaussian histogram
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
    super(WeaveGather, self).__init__(**kwargs)
    self.n_input = n_input
    self.batch_size = batch_size
    self.gaussian_expand = gaussian_expand
    self.init = init  # Save the weight-initializer identifier
    self.activation = activation  # Save the activation identifier
    self.activation_fn = activations.get(activation)
    self.epsilon = epsilon
    self.momentum = momentum 
Example #29
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    """ Construct internal trainable weights."""
    init = initializers.get(self.init)  # Set weight initialization

    self.W_AA = init([self.n_atom_input_feat, self.n_hidden_AA])
    self.b_AA = backend.zeros(shape=[self.n_hidden_AA])

    self.W_PA = init([self.n_pair_input_feat, self.n_hidden_PA])
    self.b_PA = backend.zeros(shape=[self.n_hidden_PA])

    self.W_A = init([self.n_hidden_A, self.n_atom_output_feat])
    self.b_A = backend.zeros(shape=[self.n_atom_output_feat])

    if self.update_pair:
      self.W_AP = init([self.n_atom_input_feat * 2, self.n_hidden_AP])
      self.b_AP = backend.zeros(shape=[self.n_hidden_AP])

      self.W_PP = init([self.n_pair_input_feat, self.n_hidden_PP])
      self.b_PP = backend.zeros(shape=[self.n_hidden_PP])

      self.W_P = init([self.n_hidden_P, self.n_pair_output_feat])
      self.b_P = backend.zeros(shape=[self.n_pair_output_feat])
    self.built = True 
Example #30
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    """Constructs learnable weights for this layer."""
    init = initializers.get(self.init)
    inner_init = initializers.get(self.inner_init)
    self.W = init((self.input_dim, 4 * self.output_dim))
    self.U = inner_init((self.output_dim, 4 * self.output_dim))

    self.b = tf.Variable(
        np.hstack((np.zeros(self.output_dim), np.ones(self.output_dim),
                   np.zeros(self.output_dim), np.zeros(self.output_dim))),
        dtype=tf.float32)
    self.built = True
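The hstack above packs the biases of the four LSTM gates into a single vector; initializing only the second quarter to ones is the standard unit-forget-bias trick (assuming the i, f, c, o gate ordering used by Keras):

import numpy as np

output_dim = 3
b = np.hstack((np.zeros(output_dim), np.ones(output_dim),
               np.zeros(output_dim), np.zeros(output_dim)))
print(b.reshape(4, output_dim))  # row 1 (the forget gate) is all ones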