Python keras.activations.get() Examples

The following are 30 code examples of keras.activations.get(), drawn from open-source projects; each example notes its original source file, project, and license. You may also want to look at the other functions and classes available in the keras.activations module.
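Before the examples, here is a minimal sketch of how activations.get() resolves its argument, assuming the standalone keras package (Keras 2.x) that these projects use; behaviour in newer tf.keras releases may differ slightly. The assertions mirror the activations_test.py examples further down.

from keras import activations

# A string name resolves to the built-in activation of the same name.
assert activations.get('relu') == activations.relu

# None falls back to the identity (linear) activation.
assert activations.get(None) == activations.linear

# Callables are returned as-is, which is how layers accept custom functions.
def double(x):
    return x * 2

assert activations.get(double) == double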
Example #1
Source File: FFNN.py    From dts with MIT License
def evaluate(self, inputs, fn_inverse=None, fn_plot=None):
        try:
            X, y = inputs
            inputs = X
        except:
            X, conditions, y = inputs
            inputs = [X, conditions]

        y_hat = self.predict(inputs)

        if fn_inverse is not None:
            y_hat = fn_inverse(y_hat)
            y = fn_inverse(y)

        if fn_plot is not None:
            fn_plot([y, y_hat])

        results = []
        for m in self.model.metrics:
            if isinstance(m, str):
                results.append(K.eval(K.mean(get(m)(y, y_hat))))
            else:
                results.append(K.eval(K.mean(m(y, y_hat))))
        return results 
Example #2
Source File: cifar10_cnn_capsule.py    From DeepLearning_Wavelet-LSTM with MIT License
def __init__(self,
                 num_capsule,
                 dim_capsule,
                 routings=3,
                 share_weights=True,
                 activation='squash',
                 **kwargs):
        super(Capsule, self).__init__(**kwargs)
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.share_weights = share_weights
        if activation == 'squash':
            self.activation = squash
        else:
            self.activation = activations.get(activation) 
Example #3
Source File: recurrent.py    From keras_bn_library with MIT License
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
		self.output_dim = output_dim
		self.init = initializations.get(init)
		self.inner_init = initializations.get(inner_init)
		self.forget_bias_init = initializations.get(forget_bias_init)
		self.activation = activations.get(activation)
		self.inner_activation = activations.get(inner_activation)
		self.W_regularizer = regularizers.get(W_regularizer)
		self.U_regularizer = regularizers.get(U_regularizer)
		self.b_regularizer = regularizers.get(b_regularizer)
		self.dropout_W = dropout_W
		self.dropout_U = dropout_U
		self.stateful = False

		if self.dropout_W or self.dropout_U:
			self.uses_learning_phase = True
		super(QRNN, self).__init__(**kwargs) 
Example #4
Source File: recurrent.py    From keras_bn_library with MIT License
def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):

		self.output_dim = output_dim
		self.init = initializations.get(init)
		self.inner_init = initializations.get(inner_init)
		self.forget_bias_init = initializations.get(forget_bias_init)
		self.activation = activations.get(activation)
		self.inner_activation = activations.get(inner_activation)
		self.W_regularizer = regularizers.get(W_regularizer)
		self.U_regularizer = regularizers.get(U_regularizer)
		self.b_regularizer = regularizers.get(b_regularizer)
		self.dropout_W, self.dropout_U = dropout_W, dropout_U

		if self.dropout_W or self.dropout_U:
			self.uses_learning_phase = True
		super(DecoderVaeLSTM, self).__init__(**kwargs) 
Example #5
Source File: fm_keras.py    From KDDCup2019_admin with MIT License
def __init__(self, feature_num,
    			feature_size,
                 embedding_size,
                 output_dim=1,
                 activation=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(FMLayer, self).__init__(**kwargs)

        self.output_dim = output_dim
        self.embedding_size = embedding_size
        self.activation = activations.get(activation)
        self.input_spec = InputSpec(ndim=2)
        self.feature_num = feature_num
        self.feature_size = feature_size 
Example #6
Source File: activations_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_get_fn():
    """Activations has a convenience "get" function. All paths of this
    function are tested here, although the behaviour in some instances
    seems potentially surprising (e.g. situation 3)
    """

    # 1. Default returns linear
    a = activations.get(None)
    assert a == activations.linear

    # 2. Passing in a layer raises a warning
    layer = Dense(32)
    with pytest.warns(UserWarning):
        a = activations.get(layer)

    # 3. Callables return themselves for some reason
    a = activations.get(lambda x: 5)
    assert a(None) == 5

    # 4. Anything else is not a valid argument
    with pytest.raises(ValueError):
        a = activations.get(6) 
Example #7
Source File: cifar10_cnn_capsule.py    From DeepLearning_Wavelet-LSTM with MIT License
def __init__(self,
                 num_capsule,
                 dim_capsule,
                 routings=3,
                 share_weights=True,
                 activation='squash',
                 **kwargs):
        super(Capsule, self).__init__(**kwargs)
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.share_weights = share_weights
        if activation == 'squash':
            self.activation = squash
        else:
            self.activation = activations.get(activation) 
Example #8
Source File: activations_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_get_fn():
    """Activations has a convenience "get" function. All paths of this
    function are tested here, although the behaviour in some instances
    seems potentially surprising (e.g. situation 3)
    """

    # 1. Default returns linear
    a = activations.get(None)
    assert a == activations.linear

    # 2. Passing in a layer raises a warning
    layer = Dense(32)
    with pytest.warns(UserWarning):
        a = activations.get(layer)

    # 3. Callables return themselves for some reason
    a = activations.get(lambda x: 5)
    assert a(None) == 5

    # 4. Anything else is not a valid argument
    with pytest.raises(ValueError):
        a = activations.get(6) 
Example #9
Source File: cifar10_cnn_capsule.py    From DeepLearning_Wavelet-LSTM with MIT License
def __init__(self,
                 num_capsule,
                 dim_capsule,
                 routings=3,
                 share_weights=True,
                 activation='squash',
                 **kwargs):
        super(Capsule, self).__init__(**kwargs)
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.share_weights = share_weights
        if activation == 'squash':
            self.activation = squash
        else:
            self.activation = activations.get(activation) 
Example #10
Source File: activations_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_get_fn():
    """Activations has a convenience "get" function. All paths of this
    function are tested here, although the behaviour in some instances
    seems potentially surprising (e.g. situation 3)
    """

    # 1. Default returns linear
    a = activations.get(None)
    assert a == activations.linear

    # 2. Passing in a layer raises a warning
    layer = Dense(32)
    with pytest.warns(UserWarning):
        a = activations.get(layer)

    # 3. Callables return themselves for some reason
    a = activations.get(lambda x: 5)
    assert a(None) == 5

    # 4. Anything else is not a valid argument
    with pytest.raises(ValueError):
        a = activations.get(6) 
Example #11
Source File: activations_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_get_fn():
    """Activations has a convenience "get" function. All paths of this
    function are tested here, although the behaviour in some instances
    seems potentially surprising (e.g. situation 3)
    """

    # 1. Default returns linear
    a = activations.get(None)
    assert a == activations.linear

    # 2. Passing in a layer raises a warning
    layer = Dense(32)
    with pytest.warns(UserWarning):
        a = activations.get(layer)

    # 3. Callables return themselves for some reason
    a = activations.get(lambda x: 5)
    assert a(None) == 5

    # 4. Anything else is not a valid argument
    with pytest.raises(ValueError):
        a = activations.get(6) 
Example #12
Source File: cifar10_cnn_capsule.py    From DeepLearning_Wavelet-LSTM with MIT License
def __init__(self,
                 num_capsule,
                 dim_capsule,
                 routings=3,
                 share_weights=True,
                 activation='squash',
                 **kwargs):
        super(Capsule, self).__init__(**kwargs)
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.share_weights = share_weights
        if activation == 'squash':
            self.activation = squash
        else:
            self.activation = activations.get(activation) 
Example #13
Source File: lstm2ntm.py    From NTM-Keras with MIT License
def __init__(self, output_dim, memory_dim=128, memory_size=20,
                 controller_output_dim=100, location_shift_range=1,
                 num_read_head=1, num_write_head=1,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 W_regularizer=None, U_regularizer=None, R_regularizer=None,
                 b_regularizer=None, W_y_regularizer=None,
                 W_xi_regularizer=None, W_r_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(NTM, self).__init__(**kwargs) 
Example #14
Source File: layers.py    From research with BSD 3-Clause "New" or "Revised" License
def __init__(self, output_dim, output_length, control_dim=2,
               init='glorot_uniform', inner_init='orthogonal',
               activation='tanh',
               W_regularizer=None, U_regularizer=None, b_regularizer=None,
               dropout_W=0., dropout_U=0., **kwargs):
      self.output_dim = output_dim
      self.output_length = output_length
      self.init = initializations.get(init)
      self.inner_init = initializations.get(inner_init)
      self.activation = activations.get(activation)
      self.W_regularizer = regularizers.get(W_regularizer)
      self.U_regularizer = regularizers.get(U_regularizer)
      self.b_regularizer = regularizers.get(b_regularizer)
      self.dropout_W, self.dropout_U = dropout_W, dropout_U
      self.control_dim = control_dim

      if self.dropout_W or self.dropout_U:
          self.uses_learning_phase = True
      super(CondDreamyRNN, self).__init__(**kwargs) 
Example #15
Source File: cifar10_cnn_capsule.py    From DeepLearning_Wavelet-LSTM with MIT License
def __init__(self,
                 num_capsule,
                 dim_capsule,
                 routings=3,
                 share_weights=True,
                 activation='squash',
                 **kwargs):
        super(Capsule, self).__init__(**kwargs)
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.share_weights = share_weights
        if activation == 'squash':
            self.activation = squash
        else:
            self.activation = activations.get(activation) 
Example #16
Source File: rhn.py    From deep-models with Apache License 2.0
def __init__(self, output_dim, L,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    self.L = L

    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(RHN, self).__init__(**kwargs) 
Example #17
Source File: core.py    From keras-contrib with MIT License
def __init__(self, units, kernel_initializer='glorot_uniform',
                 activation=None, weights=None,
                 kernel_regularizer=None, bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None,
                 use_bias=True, **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.activation = activations.get(activation)
        self.units = units

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.use_bias = use_bias
        self.initial_weights = weights
        super(CosineDense, self).__init__(**kwargs) 
Example #18
Source File: capsule.py    From keras-contrib with MIT License
def __init__(self,
                 num_capsule,
                 dim_capsule,
                 routings=3,
                 share_weights=True,
                 initializer='glorot_uniform',
                 activation=None,
                 regularizer=None,
                 constraint=None,
                 **kwargs):
        super(Capsule, self).__init__(**kwargs)
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.share_weights = share_weights

        self.activation = activations.get(activation)
        self.regularizer = regularizers.get(regularizer)
        self.initializer = initializers.get(initializer)
        self.constraint = constraints.get(constraint) 
Example #19
Source File: layers.py    From bert4keras with Apache License 2.0
def __init__(
        self,
        center=True,
        scale=True,
        epsilon=None,
        conditional=False,
        hidden_units=None,
        hidden_activation='linear',
        hidden_initializer='glorot_uniform',
        **kwargs
    ):
        super(LayerNormalization, self).__init__(**kwargs)
        self.center = center
        self.scale = scale
        self.conditional = conditional
        self.hidden_units = hidden_units
        self.hidden_activation = activations.get(hidden_activation)
        self.hidden_initializer = initializers.get(hidden_initializer)
        self.epsilon = epsilon or 1e-12 
Example #20
Source File: layers.py    From bert4keras with Apache License 2.0
def __init__(
        self,
        heads,
        head_size,
        key_size=None,
        use_bias=True,
        attention_scale=True,
        kernel_initializer='glorot_uniform',
        **kwargs
    ):
        super(MultiHeadAttention, self).__init__(**kwargs)
        self.heads = heads
        self.head_size = head_size
        self.out_dim = heads * head_size
        self.key_size = key_size or head_size
        self.use_bias = use_bias
        self.attention_scale = attention_scale
        self.kernel_initializer = initializers.get(kernel_initializer) 
Example #21
Source File: spatial_gru.py    From MatchZoo with Apache License 2.0
def __init__(
        self,
        units: int = 10,
        activation: str = 'tanh',
        recurrent_activation: str = 'sigmoid',
        kernel_initializer: str = 'glorot_uniform',
        recurrent_initializer: str = 'orthogonal',
        direction: str = 'lt',
        **kwargs
    ):
        """:class:`SpatialGRU` constructor."""
        super().__init__(**kwargs)
        self._units = units
        self._activation = activations.get(activation)
        self._recurrent_activation = activations.get(recurrent_activation)

        self._kernel_initializer = initializers.get(kernel_initializer)
        self._recurrent_initializer = initializers.get(recurrent_initializer)
        self._direction = direction 
Example #22
Source File: attentive_convlstm.py    From sam with MIT License
def __init__(self, nb_filters_in, nb_filters_out, nb_filters_att, nb_rows, nb_cols,
                 init='normal', inner_init='orthogonal', attentive_init='zero',
                 activation='tanh', inner_activation='sigmoid',
                 W_regularizer=None, U_regularizer=None,
                 weights=None, go_backwards=False,
                 **kwargs):
        self.nb_filters_in = nb_filters_in
        self.nb_filters_out = nb_filters_out
        self.nb_filters_att = nb_filters_att
        self.nb_rows = nb_rows
        self.nb_cols = nb_cols
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.attentive_init = initializations.get(attentive_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.initial_weights = weights
        self.go_backwards = go_backwards

        self.W_regularizer = W_regularizer
        self.U_regularizer = U_regularizer
        self.input_spec = [InputSpec(ndim=5)]

        super(AttentiveConvLSTM, self).__init__(**kwargs) 
Example #23
Source File: GraphEmbedding.py    From conv_qsar_fast with MIT License
def __init__(self, output_dim, inner_dim, depth = 2, init_output='uniform', 
			activation_output='softmax', init_inner='identity',
			activation_inner='linear', scale_output=0.01, padding=False, **kwargs):
		if depth < 1:
			quit('Cannot use GraphFP with depth zero')
		self.init_output = initializations.get(init_output)
		self.activation_output = activations.get(activation_output)
		self.init_inner = initializations.get(init_inner)
		self.activation_inner = activations.get(activation_inner)
		self.output_dim = output_dim
		self.inner_dim = inner_dim
		self.depth = depth
		self.scale_output = scale_output
		self.padding = padding

		self.initial_weights = None
		self.input_dim = 4 # each entry is a 3D N_atom x N_atom x N_feature tensor
		if self.input_dim:
			kwargs['input_shape'] = (None, None, None,) # 3D tensor for each input
		#self.input = K.placeholder(ndim = 4)
		super(GraphFP, self).__init__(**kwargs) 
Example #24
Source File: GraphEmbedding_sumAfter.py    From conv_qsar_fast with MIT License
def __init__(self, output_dim, inner_dim, depth = 2, init_output='uniform', 
			activation_output='softmax', init_inner='identity',
			activation_inner='linear', scale_output=0.01, padding=False, **kwargs):
		if depth < 1:
			quit('Cannot use GraphFP with depth zero')
		self.init_output = initializations.get(init_output)
		self.activation_output = activations.get(activation_output)
		self.init_inner = initializations.get(init_inner)
		self.activation_inner = activations.get(activation_inner)
		self.output_dim = output_dim
		self.inner_dim = inner_dim
		self.depth = depth
		self.scale_output = scale_output
		self.padding = padding

		self.initial_weights = None
		self.input_dim = 4 # each entry is a 3D N_atom x N_atom x N_feature tensor
		if self.input_dim:
			kwargs['input_shape'] = (None, None, None,) # 3D tensor for each input
		#self.input = K.placeholder(ndim = 4)
		super(GraphFP, self).__init__(**kwargs) 
Example #25
Source File: layers.py    From research with BSD 3-Clause "New" or "Revised" License
def __init__(self, output_dim, output_length,
               init='glorot_uniform', inner_init='orthogonal',
               activation='tanh',
               W_regularizer=None, U_regularizer=None, b_regularizer=None,
               dropout_W=0., dropout_U=0., **kwargs):
      self.output_dim = output_dim
      self.output_length = output_length
      self.init = initializations.get(init)
      self.inner_init = initializations.get(inner_init)
      self.activation = activations.get(activation)
      self.W_regularizer = regularizers.get(W_regularizer)
      self.U_regularizer = regularizers.get(U_regularizer)
      self.b_regularizer = regularizers.get(b_regularizer)
      self.dropout_W, self.dropout_U = dropout_W, dropout_U

      if self.dropout_W or self.dropout_U:
          self.uses_learning_phase = True
      super(DreamyRNN, self).__init__(**kwargs) 
Example #26
Source File: SparseFullyConnectedLayer.py    From NeuralResponseRanking with MIT License
def __init__(self, output_dim, init='glorot_uniform', activation='relu',weights=None,
            W_regularizer=None, b_regularizer=None, activity_regularizer=None,
            W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        self.W_initializer = initializers.get(init)
        self.b_initializer = initializers.get('zeros')
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(SparseFullyConnectedLayer, self).__init__(**kwargs) 
Example #27
Source File: tied_embeddings.py    From embedding-as-service with MIT License
def __init__(self, tied_to=None,
                 activation=None,
                 **kwargs):
        super(TiedEmbeddingsTransposed, self).__init__(**kwargs)
        self.tied_to = tied_to
        self.activation = activations.get(activation) 
Example #28
Source File: activations_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_serialization():
    all_activations = ['softmax', 'relu', 'elu', 'tanh',
                       'sigmoid', 'hard_sigmoid', 'linear',
                       'softplus', 'softsign', 'selu']
    for name in all_activations:
        fn = activations.get(name)
        ref_fn = getattr(activations, name)
        assert fn == ref_fn
        config = activations.serialize(fn)
        fn = activations.deserialize(config)
        assert fn == ref_fn 
Example #29
Source File: generic_utils_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_custom_objects_scope():

    def custom_fn():
        pass

    class CustomClass(object):
        pass

    with custom_object_scope({'CustomClass': CustomClass,
                              'custom_fn': custom_fn}):
        act = activations.get('custom_fn')
        assert act == custom_fn
        cl = regularizers.get('CustomClass')
        assert cl.__class__ == CustomClass 
Example #30
Source File: extras.py    From keras-transformer with MIT License
def call(self, inputs, **kwargs):
        main_input, embedding_matrix = inputs
        input_shape_tensor = K.shape(main_input)
        last_input_dim = K.int_shape(main_input)[-1]
        emb_input_dim, emb_output_dim = K.int_shape(embedding_matrix)
        projected = K.dot(K.reshape(main_input, (-1, last_input_dim)),
                          self.projection)
        if self.add_biases:
            projected = K.bias_add(projected, self.biases,
                                   data_format='channels_last')
        if 0 < self.projection_dropout < 1:
            projected = K.in_train_phase(
                lambda: K.dropout(projected, self.projection_dropout),
                projected,
                training=kwargs.get('training'))
        attention = K.dot(projected, K.transpose(embedding_matrix))
        if self.scaled_attention:
            # scaled dot-product attention, described in
            # "Attention is all you need" (https://arxiv.org/abs/1706.03762)
            sqrt_d = K.constant(math.sqrt(emb_output_dim), dtype=K.floatx())
            attention = attention / sqrt_d
        result = K.reshape(
            self.activation(attention),
            (input_shape_tensor[0],
             input_shape_tensor[1],
             emb_input_dim))
        return result