Python keras.regularizers.get() Examples

The following code examples show how to use keras.regularizers.get(). They are drawn from open-source Python projects.
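
regularizers.get() normalizes whatever identifier a layer constructor receives into a regularizer callable (or None), which is why the __init__ methods below can pass their arguments straight through it. A minimal sketch of that lookup behavior, assuming the standalone Keras 2 API:

from keras import regularizers

# None passes through unchanged -- the default for most regularizer arguments below.
assert regularizers.get(None) is None

# A string identifier resolves to a ready-made regularizer instance.
l2_default = regularizers.get('l2')

# An existing Regularizer instance (or any other callable) is returned as-is,
# so a layer accepts 'l2' and regularizers.l2(1e-4) interchangeably.
l2_custom = regularizers.l2(1e-4)
assert regularizers.get(l2_custom) is l2_custom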

Example 1
Project: deep-models   Author: LaurentMazare   File: rhn.py    Apache License 2.0    6 votes
def __init__(self, output_dim, L,
             init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    self.L = L

    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(RHN, self).__init__(**kwargs) 
Example 2
Project: phoneticSimilarity   Author: ronggong   File: attentionWithContext.py    GNU Affero General Public License v3.0    6 votes
def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 bias=True, return_attention=False, **kwargs):

        self.supports_masking = True
        self.return_attention = return_attention
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs) 
Example 3
Project: cdc   Author: ckbjimmy   File: Attention.py    MIT License    6 votes
def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 bias=True, **kwargs):

        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs) 
Example 4
Project: 3DGCN   Author: blackmints   File: layer.py    MIT License    6 votes
def __init__(self,
                 filters,
                 pooling='sum',
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 bias_initializer='zeros',
                 activation=None,
                 **kwargs):
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.filters = filters
        self.pooling = pooling

        super(GraphConvS, self).__init__(**kwargs) 
Example 5
Project: cbc_networks   Author: saralajew   File: reasoning_layers.py    BSD 3-Clause "New" or "Revised" License    6 votes
def __init__(self,
                 n_classes,
                 n_replicas=1,
                 reasoning_initializer='zeros',
                 reasoning_regularizer=None,
                 use_component_probabilities=False,
                 component_probabilities_initializer='zeros',
                 component_probabilities_regularizer=None,
                 component_probabilities_constraint=None,
                 **kwargs):
        super(Reasoning, self).__init__(**kwargs)
        self.n_classes = n_classes
        self.n_replicas = n_replicas

        self.reasoning_initializer = initializers.get(reasoning_initializer)
        self.reasoning_regularizer = regularizers.get(reasoning_regularizer)

        self.use_component_probabilities = use_component_probabilities
        self.component_probabilities_initializer = initializers.get(
            component_probabilities_initializer)
        self.component_probabilities_regularizer = regularizers.get(
            component_probabilities_regularizer)
        self.component_probabilities_constraint = constraints.get(
            component_probabilities_constraint) 
Example 6
Project: ICASSP2019_TCN   Author: DSIP-UPatras   File: custom_layers.py    MIT License    6 votes
def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 bias=True,
                 return_attention=False, **kwargs):

        self.supports_masking = True
        self.return_attention = return_attention
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs) 
Example 7
Project: Document-Classifier-LSTM   Author: AlexGidiotis   File: attention.py    MIT License    6 votes
def __init__(self,
                 W_regularizer=None, u_regularizer=None, b_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 bias=True, **kwargs):


        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(AttentionWithContext, self).__init__(**kwargs) 
Example 8
Project: dialectal_arabic_segmenter   Author: qcri   File: ChainCRF.py    GNU Lesser General Public License v3.0    6 votes
def __init__(self, init='glorot_uniform',
                 U_regularizer=None, b_start_regularizer=None, b_end_regularizer=None,
                 U_constraint=None, b_start_constraint=None, b_end_constraint=None,
                 weights=None,
                 **kwargs):
        self.supports_masking = True
        self.uses_learning_phase = True
        self.input_spec = [InputSpec(ndim=3)]
        self.init = initializers.get(init)

        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_start_regularizer = regularizers.get(b_start_regularizer)
        self.b_end_regularizer = regularizers.get(b_end_regularizer)
        self.U_constraint = constraints.get(U_constraint)
        self.b_start_constraint = constraints.get(b_start_constraint)
        self.b_end_constraint = constraints.get(b_end_constraint)

        self.initial_weights = weights

        super(ChainCRF, self).__init__(**kwargs) 
Example 9
Project: musical_genres_classification   Author: shaoeric   File: Attention.py    MIT License    6 votes
def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs) 
Example 10
Project: spektral   Author: danielegrattarola   File: base.py    MIT License    6 votes
def __init__(self,
                 trainable_kernel=False,
                 activation=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(InnerProduct, self).__init__(**kwargs)
        self.trainable_kernel = trainable_kernel
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint) 
Example 11
Project: spektral   Author: danielegrattarola   File: pooling.py    MIT License    6 votes
def __init__(self, ratio,
                 return_mask=False,
                 sigmoid_gating=False,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super().__init__(**kwargs)
        self.ratio = ratio  # Ratio of nodes to keep in each graph
        self.return_mask = return_mask
        self.sigmoid_gating = sigmoid_gating
        self.gating_op = K.sigmoid if self.sigmoid_gating else K.tanh
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint) 
Example 12
Project: spektral   Author: danielegrattarola   File: pooling.py    MIT License    6 votes
def __init__(self, ratio,
                 return_mask=False,
                 sigmoid_gating=False,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.ratio = ratio  # Ratio of nodes to keep in each graph
        self.return_mask = return_mask
        self.sigmoid_gating = sigmoid_gating
        self.gating_op = K.sigmoid if self.sigmoid_gating else K.tanh
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint) 
Example 13
Project: spektral   Author: danielegrattarola   File: pooling.py    MIT License    6 votes
def __init__(self, channels=32,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.channels = channels
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint) 
Example 14
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License    6 votes
def __init__(self,
                 channels,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(channels, **kwargs)
        self.channels = channels
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = False 
Example 15
Project: keras_extension   Author: k1414st   File: graph.py    MIT License    6 votes
def __init__(self,
                 cell,
                 n_layers,
                 return_sequences=False,
                 output_sequence_axis=-1,
                 return_state=False,
                 activation='sigmoid',
                 **kwargs):
        super(GraphRRNN, self).__init__(**kwargs)
        self.cell = cell
        self.n_layers = n_layers
        self.return_sequences = return_sequences
        self.output_sequence_axis = output_sequence_axis
        self.return_state = return_state
        self.activation = activations.get(activation)
        self.grnn_layer = \
            GraphRNN(cell=cell,
                     return_state=True,
                     activation=activation,
                     **kwargs) 
Example 16
Project: keras_extension   Author: k1414st   File: layer.py    MIT License    6 votes
def __init__(self,
                 units,
                 use_node_weight=True,
                 activation='sigmoid',
                 use_bias=False,
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        super(MultiGraphConv, self).__init__(
            bias_initializer, bias_regularizer, bias_constraint,
            kernel_initializer, kernel_regularizer, kernel_constraint,
            **kwargs)
        self.units = units
        self.use_node_weight = use_node_weight
        self.activation = activations.get(activation)
        self.use_bias = use_bias 
Example 17
Project: keras_extension   Author: k1414st   File: layer.py    MIT License    6 votes
def __init__(self,
                 cell,
                 return_state=False,
                 activation='sigmoid',
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        super(GraphRNN, self).__init__(
            bias_initializer, bias_regularizer, bias_constraint,
            kernel_initializer, kernel_regularizer, kernel_constraint,
            **kwargs)
        self.cell = cell
        self.return_state = return_state
        self.activation = activations.get(activation) 
Example 18
Project: keras_extension   Author: k1414st   File: mac.py    MIT License    6 votes
def __init__(self,
                 attention_activation='softmax',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(ControlUnit, self).__init__(**kwargs)
        self.attention_activation = activations.get(attention_activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint) 
Example 19
Project: keras_extension   Author: k1414st   File: mac.py    MIT License    6 votes
def __init__(self,
                 attention_activation='softmax',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(ReadUnit, self).__init__(**kwargs)
        self.attention_activation = activations.get(attention_activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint) 
Example 20
Project: keras-mobilenet   Author: rcmalli   File: depthwise_conv2d.py    MIT License    5 votes
def __init__(self, filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 depth_multiplier=1,
                 activation=None,
                 use_bias=True,
                 depthwise_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 depthwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 depthwise_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(DepthwiseConv2D, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            activation=activation,
            use_bias=use_bias,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            bias_constraint=bias_constraint,
            **kwargs)

        self.depth_multiplier = depth_multiplier
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint) 
Example 21
Project: cdc   Author: ckbjimmy   File: Attention.py    MIT License    5 votes
def __init__(self,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(Attention, self).__init__(**kwargs) 
Example 22
Project: AI_Competition   Author: Decalogue   File: attention.py    MIT License    5 votes
def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.
        Example:
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
        """
        self.supports_masking = True
        # self.init = initializations.get('glorot_uniform')
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs) 
Example 23
Project: AI_Competition   Author: Decalogue   File: attention.py    MIT License    5 votes
def __init__(self, step_dim, hid_size,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        """
        Keras Layer that implements an Attention mechanism according to other vector.
        Supports Masking.
        # Input shape, list of
            2D tensor with shape: `(samples, features_1)`.
            3D tensor with shape: `(samples, steps, features_2)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param kwargs:
        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.
        Example:
            en = LSTM(64, return_sequences=False)(input)
            de = LSTM(64, return_sequences=True)(input2)
            output = JoinAttention(64, 20)([en, de])
        """
        self.supports_masking = True
        # self.init = initializations.get('glorot_uniform')
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.step_dim = step_dim
        self.hid_size = hid_size
        super(JoinAttention, self).__init__(**kwargs) 
Example 24
Project: kaggle-carvana-2017   Author: killthekitten   File: mobile_net_fixed.py    MIT License    5 votes
def __init__(self,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 depth_multiplier=1,
                 data_format=None,
                 activation=None,
                 use_bias=True,
                 depthwise_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 depthwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 depthwise_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(DepthwiseConv2D, self).__init__(
            filters=None,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            activation=activation,
            use_bias=use_bias,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            bias_constraint=bias_constraint,
            **kwargs)
        self.depth_multiplier = depth_multiplier
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint)
        self.bias_initializer = initializers.get(bias_initializer) 
Example 25
Project: FasterRCNN_KERAS   Author: akshaylamba   File: FixedBatchNormalization.py    Apache License 2.0    5 votes
def __init__(self, epsilon=1e-3, axis=-1,
                 weights=None, beta_init='zero', gamma_init='one',
                 gamma_regularizer=None, beta_regularizer=None, **kwargs):

        self.supports_masking = True
        self.beta_init = initializers.get(beta_init)
        self.gamma_init = initializers.get(gamma_init)
        self.epsilon = epsilon
        self.axis = axis
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.initial_weights = weights
        super(FixedBatchNormalization, self).__init__(**kwargs) 
Example 26
Project: 3DGCN   Author: blackmints   File: layer.py    MIT License    5 votes
def __init__(self,
                 filters,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 bias_initializer='zeros',
                 activation=None,
                 **kwargs):
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.filters = filters

        super(GraphSToS, self).__init__(**kwargs) 
Example 27
Project: 3DGCN   Author: blackmints   File: layer.py    MIT License    5 votes
def __init__(self,
                 filters,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 bias_initializer='zeros',
                 activation=None,
                 **kwargs):
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.filters = filters

        super(GraphSToV, self).__init__(**kwargs) 
Example 28
Project: 3DGCN   Author: blackmints   File: layer.py    MIT License    5 votes
def __init__(self,
                 filters,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 bias_initializer='zeros',
                 activation=None,
                 **kwargs):
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.filters = filters

        super(GraphVToV, self).__init__(**kwargs) 
Example 29
Project: 3DGCN   Author: blackmints   File: layer.py    MIT License    5 votes
def __init__(self,
                 filters,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 bias_initializer='zeros',
                 activation=None,
                 **kwargs):
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.filters = filters

        super(GraphVToS, self).__init__(**kwargs) 
Example 30
Project: 3DGCN   Author: blackmints   File: layer.py    MIT License    5 votes
def __init__(self,
                 pooling="sum",
                 system="cartesian",
                 activation=None,
                 **kwargs):
        self.activation = activations.get(activation)
        self.pooling = pooling
        self.system = system

        super(GraphGather, self).__init__(**kwargs) 
Example 31
Project: fancy-cnn   Author: textclf   File: convolutions.py    MIT License    5 votes
def __init__(self, nb_filter, nb_row, nb_col,
        init='glorot_uniform', activation='linear', weights=None,
        border_mode='valid', subsample=(1, 1),
        W_regularizer=None, b_regularizer=None, activity_regularizer=None, 
        W_constraint=None, b_constraint=None, **kwargs):
    
        if border_mode not in {'valid', 'full', 'same'}:
            raise Exception('Invalid border mode for TimeDistributedConvolution2D:', border_mode)

        self.nb_filter = nb_filter
        self.nb_row = nb_row
        self.nb_col = nb_col
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.border_mode = border_mode
        self.subsample = tuple(subsample)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        self.initial_weights = weights
        super(TimeDistributedConvolution2D,self).__init__(**kwargs) 
Example 32
Project: dense_tensor   Author: bstriner   File: dense_tensor.py    MIT License    5 votes
def __init__(self, units,
                 activation='linear',
                 weights=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 bias_initializer='uniform',
                 bias_regularizer=None,
                 bias_constraint=None,
                 activity_regularizer=None,
                 bias=True,
                 input_dim=None,
                 factorization=simple_tensor_factorization(),
                 **kwargs):
        self.activation = activations.get(activation)
        self.units = units
        self.input_dim = input_dim
        self.factorization = factorization

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_initializer = get_initializer(kernel_initializer)
        self.bias_initializer = get_initializer(bias_initializer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(DenseTensor, self).__init__(**kwargs) 
Example 33
Project: cbc_networks   Author: saralajew   File: component_input.py    BSD 3-Clause "New" or "Revised" License    5 votes
def __init__(self,
                 shape,
                 initializer='zeros',
                 regularizer=None,
                 constraint=None,
                 **kwargs):
        super(AddComponents, self).__init__(**kwargs)
        self.shape = tuple(shape)
        self.initializer = initializers.get(initializer)
        self.regularizer = regularizers.get(regularizer)
        self.constraint = constraints.get(constraint) 
Example 34
Project: 360_aware_saliency   Author: MikhailStartsev   File: gaussian_prior.py    GNU General Public License v3.0    5 votes
def __init__(self, nb_gaussian, init='normal', weights=None,
                 W_regularizer=None, activity_regularizer=None,
                 W_constraint=None, **kwargs):
        self.nb_gaussian = nb_gaussian
        self.init = initializations.get(init, dim_ordering='th')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)

        self.input_spec = [InputSpec(ndim=4)]
        self.initial_weights = weights
        super(LearningPrior, self).__init__(**kwargs) 
Example 35
Project: lmtc-eurlex57k   Author: iliaschalkidis   File: attention.py    Apache License 2.0    5 votes
def __init__(self,
                 kernel_regularizer=None, bias_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True,
                 return_attention=False,
                 **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param kwargs:

        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
        The dimensions are inferred based on the output shape of the RNN.


        Note: The layer has been tested with Keras 1.x

        Example:
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
            # next add a Dense layer (for classification/regression) or whatever...

        """
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(kernel_regularizer)
        self.b_regularizer = regularizers.get(bias_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.return_attention = return_attention
        super(Attention, self).__init__(**kwargs) 
Example 36
Project: lmtc-eurlex57k   Author: iliaschalkidis   File: attention.py    Apache License 2.0    5 votes
def __init__(self, kernel_regularizer=None, bias_regularizer=None, return_attention=False,
                 **kwargs):

        self.W_regularizer = regularizers.get(kernel_regularizer)
        self.b_regularizer = regularizers.get(bias_regularizer)
        self.init = initializers.get('he_normal')
        self.supports_masking = True
        self.return_attention = return_attention
        super(LabelDrivenAttention, self).__init__(**kwargs) 
Example 37
Project: lmtc-eurlex57k   Author: iliaschalkidis   File: attention.py    Apache License 2.0    5 votes
def __init__(self, kernel_regularizer=None, bias_regularizer=None,
                 return_attention=False, n_classes=4271, **kwargs):

        self.W_regularizer = regularizers.get(kernel_regularizer)
        self.b_regularizer = regularizers.get(bias_regularizer)
        self.init = initializers.get('he_normal')
        self.supports_masking = True
        self.return_attention = return_attention
        self.n_classes = n_classes
        super(LabelWiseAttention, self).__init__(**kwargs) 
Example 38
Project: DeepLearn   Author: GauravBh1010tt   File: layers.py    MIT License    5 votes
def __init__(self, inp_size, out_size, activation='tanh', **kwargs):
        super(ntn, self).__init__(**kwargs)
        self.k = out_size
        self.d = inp_size
        self.activation = activations.get(activation)
        self.test_out = 0 
Example 39
Project: DeepLearn   Author: GauravBh1010tt   File: layers.py    MIT License    5 votes
def __init__(self, v_dim, kernel_regularizer=None, **kwargs):
        self.v_dim = v_dim
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        super(Similarity, self).__init__(**kwargs) 
Example 40
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0    5 votes
def __init__(self, nb_filter, filter_length,
                 init='uniform', activation='linear', weights=None,
                 border_mode='valid', subsample_length=1,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, input_dim=None, input_length=None, tied_to=None,
                 **kwargs):

        if border_mode not in {'valid', 'same'}:
            raise Exception('Invalid border mode for Convolution1D:', border_mode)

        self.tied_to = tied_to
        self.nb_filter = nb_filter #TODO may have to change this and the one below...
        self.filter_length = tied_to.filter_length
        self.init = initializations.get(init, dim_ordering='th')
        self.activation = activations.get(activation)
        assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
        self.border_mode = border_mode
        self.subsample_length = subsample_length

        self.subsample = (subsample_length, 1)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=3)]
        self.initial_weights = tied_to.initial_weights
        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        super(Convolution1D_tied, self).__init__(**kwargs) 
Example 41
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0    5 votes
def __init__(self, nb_filter, nb_row, nb_col,
                 init='glorot_uniform', activation='linear', weights=None,
                 border_mode='valid', subsample=(1, 1), dim_ordering='default',
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, tied_to=None, **kwargs):
        if dim_ordering == 'default':
            dim_ordering = K.image_dim_ordering()
        if border_mode not in {'valid', 'same'}:
            raise Exception('Invalid border mode for Convolution2D:', border_mode)
        self.tied_to = tied_to
        self.nb_filter = nb_filter
        self.nb_row = tied_to.nb_row
        self.nb_col = tied_to.nb_col
        self.init = initializations.get(init, dim_ordering=dim_ordering)
        self.activation = activations.get(activation)
        assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
        self.border_mode = border_mode
        self.subsample = tuple(subsample)
        assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
        self.dim_ordering = dim_ordering

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=4)]
        self.initial_weights = tied_to.initial_weights
        super(Convolution2D_tied, self).__init__(**kwargs) 
Example 42
Project: spektral   Author: danielegrattarola   File: base.py    MIT License    5 votes
def __init__(self,
                 input_dim_1=None,
                 activation=None,
                 activity_regularizer=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(MinkowskiProduct, self).__init__(**kwargs)
        self.input_dim_1 = input_dim_1
        self.activation = activations.get(activation)
        self.activity_regularizer = regularizers.get(activity_regularizer) 
Example 43
Project: spektral   Author: danielegrattarola   File: pooling.py    MIT License    5 votes
def __init__(self,
                 k,
                 h=None,
                 return_mask=True,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super().__init__(**kwargs)
        self.k = k
        self.h = h
        self.return_mask = return_mask
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint) 
Example 44
Project: spektral   Author: danielegrattarola   File: pooling.py    MIT License    5 votes
def __init__(self,
                 attn_kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 attn_kernel_regularizer=None,
                 attn_kernel_constraint=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)
        self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)
        self.attn_kernel_constraint = constraints.get(attn_kernel_constraint) 
Example 45
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License    5 votes
def __init__(self,
                 channels,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super().__init__(**kwargs)
        self.channels = channels
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = False 
Example 46
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License    5 votes
def __init__(self,
                 channels,
                 aggregate_method='mean',
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(channels, **kwargs)
        self.channels = channels
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = False
        if aggregate_method == 'sum':
            self.aggregate_op = tf.segment_sum
        elif aggregate_method == 'mean':
            self.aggregate_op = tf.segment_mean
        elif aggregate_method == 'max':
            self.aggregate_op = tf.segment_max
        elif aggregate_method == 'min':
            self.aggregate_op = tf.segment_min
        elif aggregate_method == 'prod':
            self.aggregate_op = tf.segment_prod
        else:
            raise ValueError('Possible aggregation methods: sum, mean, max, min, '
                             'prod') 
Example 47
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License    5 votes
def dense_layer(self,
                    x,
                    units,
                    name,
                    activation=None,
                    use_bias=True,
                    kernel_initializer='glorot_uniform',
                    bias_initializer='zeros',
                    kernel_regularizer=None,
                    bias_regularizer=None,
                    kernel_constraint=None,
                    bias_constraint=None):
        input_dim = K.int_shape(x)[-1]
        kernel = self.add_weight(shape=(input_dim, units),
                                 name=name + '_kernel',
                                 initializer=kernel_initializer,
                                 regularizer=kernel_regularizer,
                                 constraint=kernel_constraint)
        bias = self.add_weight(shape=(units,),
                               name=name + '_bias',
                               initializer=bias_initializer,
                               regularizer=bias_regularizer,
                               constraint=bias_constraint)
        act = activations.get(activation)
        output = K.dot(x, kernel)
        if use_bias:
            output = K.bias_add(output, bias)
        output = act(output)
        return output 
Example 48
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License    5 votes
def __init__(self,
                 channels,
                 order=1,
                 iterations=1,
                 share_weights=False,
                 gcn_activation='relu',
                 dropout_rate=0.0,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(channels, **kwargs)
        self.channels = channels
        self.iterations = iterations
        self.order = order
        self.share_weights = share_weights
        self.activation = activations.get(activation)
        self.gcn_activation = activations.get(gcn_activation)
        self.dropout_rate = dropout_rate
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = False 
Example 49
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License    5 votes
def __init__(self,
                 channels,
                 alpha=0.2,
                 propagations=1,
                 mlp_hidden=None,
                 mlp_activation='relu',
                 dropout_rate=0.0,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(channels, **kwargs)
        self.channels = channels
        self.mlp_hidden = mlp_hidden if mlp_hidden else []
        self.alpha = alpha
        self.propagations = propagations
        self.mlp_activation = activations.get(mlp_activation)
        self.activation = activations.get(activation)
        self.dropout_rate = dropout_rate
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint) 
Example 50
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License    5 votes
def __init__(self,
                 channels,
                 mlp_channels=16,
                 n_hidden_layers=0,
                 epsilon=None,
                 mlp_activation='relu',
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super().__init__(channels, **kwargs)
        self.channels = channels
        self.channels_hid = mlp_channels
        self.extra_hidden_layers = n_hidden_layers
        self.epsilon = epsilon
        self.hidden_activation = activations.get(mlp_activation)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = False 
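Constructors like the ones above are normally paired with a get_config() that reverses the lookups via serialize(), so the layer can be saved and rebuilt by from_config(). A hedged sketch of that counterpart (field names chosen to mirror the constructor above, not copied from the project):

from keras import regularizers, constraints

def get_config(self):
    # serialize() is the inverse of get(): it turns resolved objects back
    # into JSON-friendly identifiers/configs.
    config = {
        'channels': self.channels,
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer': regularizers.serialize(self.activity_regularizer),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint),
    }
    base_config = super().get_config()
    return dict(list(base_config.items()) + list(config.items()))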
Example 56
Project: keras_extension   Author: k1414st   File: graph.py    MIT License 5 votes vote down vote up
def __init__(self,
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        super(_ParametricLayer, self).__init__(**kwargs)
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint) 
Example 57
Project: keras_extension   Author: k1414st   File: graph.py    MIT License 5 votes vote down vote up
def __init__(self,
                 units,
                 use_node_weight=True,
                 activation='sigmoid',
                 use_bias=False,
                 gate_units=None,
                 gate_mode=None,
                 gat_units=None,
                 gat_n_heads=None,
                 **kwargs):
        super(GraphConv, self).__init__(**kwargs)
        self.units = units
        self.use_node_weight = use_node_weight
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        # gate
        if gate_units is not None:
            if gate_mode is None:
                gate_mode = 'dual_weight'
            if gate_mode in ['single_weight', 'dual_weight']:
                self.use_gate = True
                self.gate_units = gate_units
                self.gate_mode = gate_mode
            else:
                raise ValueError('set gate_mode to "(single|dual)_weight".')
        else:
            self.use_gate = False
        # gat
        if gat_units is not None:
            if gat_n_heads is not None:
                self.use_gat = True
                self.gat_units = gat_units
                self.gat_n_heads = gat_n_heads
            else:
                raise ValueError('set gat_units & gat_n_heads simultaneously.')
        else:
            self.use_gat = False 
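The branching above means gate_units on its own enables gating (gate_mode defaults to 'dual_weight'), while GAT support requires gat_units and gat_n_heads together. Hypothetical constructor calls, assuming the GraphConv defined above is importable:

GraphConv(units=32)                                  # no gating, no GAT
GraphConv(units=32, gate_units=16)                   # use_gate=True, gate_mode='dual_weight'
GraphConv(units=32, gate_units=16,
          gate_mode='single_weight')                 # use_gate=True
GraphConv(units=32, gat_units=8, gat_n_heads=4)      # use_gat=True
GraphConv(units=32, gat_units=8)                     # ValueError: gat_n_heads is missing
GraphConv(units=32, gate_units=16, gate_mode='foo')  # ValueError: invalid gate_mode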
Example 58
Project: keras_extension   Author: k1414st   File: graph.py    MIT License 5 votes vote down vote up
def __init__(self,
                 cell,
                 return_state=False,
                 activation='sigmoid',
                 **kwargs):
        super(GraphRNN, self).__init__(**kwargs)
        self.cell = cell
        self.return_state = return_state
        self.activation = activations.get(activation) 
Example 59
Project: keras_extension   Author: k1414st   File: layer.py    MIT License 5 votes vote down vote up
def __init__(self,
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        super(_ParametricLayer, self).__init__(**kwargs)
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint) 
Example 60
Project: keras_extension   Author: k1414st   File: layer.py    MIT License 5 votes vote down vote up
def __init__(self,
                 cell,
                 n_layers,
                 return_sequences=False,
                 output_sequence_axis=-1,
                 return_state=False,
                 activation='sigmoid',
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        super(GraphRRNN, self).__init__(
            bias_initializer, bias_regularizer, bias_constraint,
            kernel_initializer, kernel_regularizer, kernel_constraint,
            **kwargs)
        self.cell = cell
        self.n_layers = n_layers
        self.return_sequences = return_sequences
        self.output_sequence_axis = output_sequence_axis
        self.return_state = return_state
        self.activation = activations.get(activation)
        self.grnn_layer = \
            GraphRNN(cell=cell,
                     return_state=True,
                     activation=activation,
                     bias_initializer=bias_initializer,
                     bias_regularizer=bias_regularizer,
                     bias_constraint=bias_constraint,
                     kernel_initializer=kernel_initializer,
                     kernel_regularizer=kernel_regularizer,
                     kernel_constraint=kernel_constraint,
                     **kwargs) 
Example 61
Project: keras_extension   Author: k1414st   File: mac.py    MIT License 5 votes vote down vote up
def __init__(self,
                 recurrent_length,
                 attention_activation='softmax',
                 forget_activation='sigmoid',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        self.recurrent_length = recurrent_length
        self.attention_activation = activations.get(attention_activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.mac_cell = \
            MACCell(attention_activation=attention_activation,
                    forget_activation=forget_activation,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer,
                    unit_forget_bias=unit_forget_bias,
                    kernel_regularizer=kernel_regularizer,
                    bias_regularizer=bias_regularizer,
                    kernel_constraint=kernel_constraint,
                    bias_constraint=bias_constraint)
        super(MAC, self).__init__(**kwargs) 
Example 62
Project: keras_extension   Author: k1414st   File: partial_convolutional.py    MIT License 5 votes vote down vote up
def __init__(self, rank,
                 filters,
                 kernel_size,
                 strides=1,
                 data_format=None,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(_Conv, self).__init__(**kwargs)
        self.rank = rank
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding('same')
        self.data_format = K.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                        'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2) 
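Note that this partial-convolution variant hard-codes 'same' padding instead of exposing it as an argument. The conv_utils helpers it leans on simply expand scalars into per-dimension tuples and validate string arguments; roughly:

from keras.utils import conv_utils

print(conv_utils.normalize_tuple(3, 2, 'kernel_size'))       # (3, 3)
print(conv_utils.normalize_tuple((3, 5), 2, 'kernel_size'))  # (3, 5)
print(conv_utils.normalize_padding('same'))                  # 'same' (validated)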
Example 63
Project: keras_extension   Author: k1414st   File: core_sparse_tf.py    MIT License 5 votes vote down vote up
def __init__(self, units,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 sparse=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super().__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        if sparse:
            self.input_spec = InputSpec()
        else:
            self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True 
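The two lines at the top of this constructor are the standard back-compatibility shim for the legacy input_dim argument. In isolation they behave like this:

kwargs = {'input_dim': 128}
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
    kwargs['input_shape'] = (kwargs.pop('input_dim'),)
print(kwargs)  # {'input_shape': (128,)}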
Example 64
Project: keras_extension   Author: k1414st   File: core_sparse_tf_bak.py    MIT License 5 votes vote down vote up
def __init__(self, units,
                 fold_shape=None,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 sparse=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super().__init__(**kwargs)
        self.units = units
        self.fold_shape = fold_shape
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        if sparse:
            self.input_spec = InputSpec()
        else:
            self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True 
Example 65
Project: keras_extension   Author: k1414st   File: layer.py    MIT License 5 votes vote down vote up
def __init__(self,
                 units,
                 use_node_weight=True,
                 activation='sigmoid',
                 use_bias=False,
                 gat_units=None,
                 gat_n_heads=None,
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 **kwargs):
        super(GraphConv, self).__init__(
            bias_initializer, bias_regularizer, bias_constraint,
            kernel_initializer, kernel_regularizer, kernel_constraint,
            **kwargs)
        self.units = units
        self.use_node_weight = use_node_weight
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        if gat_units is not None:
            if gat_n_heads is not None:
                self.use_gat = True
                self.gat_units = gat_units
                self.gat_n_heads = gat_n_heads
            else:
                raise ValueError('set gat_units & gat_n_heads simultaneously.')
        else:
            self.use_gat = False 
Example 66
Project: smach_based_introspection_framework   Author: birlrobotics   File: layer_utils.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='hard_sigmoid',
                 attention_activation='tanh',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 attention_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 attention_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 attention_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 return_attention=False,
                 implementation=1,
                 **kwargs):
        super(AttentionLSTM, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.attention_activation = activations.get(attention_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.attention_initializer = initializers.get(attention_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.attention_regularizer = regularizers.get(attention_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.attention_constraint = constraints.get(attention_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.return_attention = return_attention
        self.state_spec = [InputSpec(shape=(None, self.units)),
                           InputSpec(shape=(None, self.units))]
        self.implementation = implementation 
Example 67
Project: keras-minimal-rnn   Author: titu1994   File: minimal_rnn.py    MIT License 4 votes vote down vote up
def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 **kwargs):
        super(MinimalRNNCell, self).__init__(**kwargs)
        self.input_spec = [InputSpec(ndim=3)]
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self._dropout_mask = None
        self._recurrent_dropout_mask = None
        self.implementation = implementation
        self.state_spec = [InputSpec(shape=(None, self.units)),]
        self.state_size = (self.units,) 
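Because this class defines state_size and state_spec, it is a cell meant to be driven by keras.layers.RNN rather than called directly. A minimal sketch, assuming the MinimalRNNCell above is importable:

from keras.layers import Input, RNN
from keras.models import Model
# from minimal_rnn import MinimalRNNCell  # the cell defined above

inputs = Input(shape=(10, 8))              # (timesteps, features)
outputs = RNN(MinimalRNNCell(32))(inputs)  # last state, shape (None, 32)
model = Model(inputs, outputs)
model.summary()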
Example 68
Project: phoneticSimilarity   Author: ronggong   File: attention.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def __init__(self,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True,
                 return_attention=False,
                 **kwargs):
        """
        Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        Usage: put this layer on top of an RNN layer (GRU/LSTM/SimpleRNN) with
        return_sequences=True; the dimensions are inferred from the output
        shape of the RNN.

        Note: the layer has been tested with Keras 1.x.

        Example:

            # 1
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
            # next add a Dense layer (for classification/regression) or whatever...

            # 2 - Get the attention scores
            hidden = LSTM(64, return_sequences=True)(words)
            sentence, word_scores = Attention(return_attention=True)(hidden)

        """
        self.supports_masking = True
        self.return_attention = return_attention
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(Attention, self).__init__(**kwargs) 
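Following the docstring's own example, a minimal end-to-end model could look like this (assuming the Attention layer defined above is importable):

from keras.models import Sequential
from keras.layers import LSTM, Dense
# from attention import Attention  # the layer defined above

model = Sequential()
model.add(LSTM(64, return_sequences=True, input_shape=(100, 32)))
model.add(Attention())                     # (samples, steps, features) -> (samples, features)
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')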
Example 69
Project: IJCAI_Keras_Defense   Author: gujingxiao   File: denseMoE.py    Apache License 2.0 4 votes vote down vote up
def __init__(self, units,
                 n_experts,
                 expert_activation=None,
                 gating_activation=None,
                 use_expert_bias=True,
                 use_gating_bias=True,
                 expert_kernel_initializer_scale=1.0,
                 gating_kernel_initializer_scale=1.0,
                 expert_bias_initializer='zeros',
                 gating_bias_initializer='zeros',
                 expert_kernel_regularizer=None,
                 gating_kernel_regularizer=None,
                 expert_bias_regularizer=None,
                 gating_bias_regularizer=None,
                 expert_kernel_constraint=None,
                 gating_kernel_constraint=None,
                 expert_bias_constraint=None,
                 gating_bias_constraint=None,
                 activity_regularizer=None,
                 **kwargs):

        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(DenseMoE, self).__init__(**kwargs)
        self.units = units
        self.n_experts = n_experts

        self.expert_activation = activations.get(expert_activation)
        self.gating_activation = activations.get(gating_activation)

        self.use_expert_bias = use_expert_bias
        self.use_gating_bias = use_gating_bias

        self.expert_kernel_initializer_scale = expert_kernel_initializer_scale
        self.gating_kernel_initializer_scale = gating_kernel_initializer_scale

        self.expert_bias_initializer = initializers.get(expert_bias_initializer)
        self.gating_bias_initializer = initializers.get(gating_bias_initializer)

        self.expert_kernel_regularizer = regularizers.get(expert_kernel_regularizer)
        self.gating_kernel_regularizer = regularizers.get(gating_kernel_regularizer)

        self.expert_bias_regularizer = regularizers.get(expert_bias_regularizer)
        self.gating_bias_regularizer = regularizers.get(gating_bias_regularizer)

        self.expert_kernel_constraint = constraints.get(expert_kernel_constraint)
        self.gating_kernel_constraint = constraints.get(gating_kernel_constraint)

        self.expert_bias_constraint = constraints.get(expert_bias_constraint)
        self.gating_bias_constraint = constraints.get(gating_bias_constraint)

        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True 
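What a dense mixture-of-experts layer like this computes is a gate-weighted sum of per-expert dense outputs. A NumPy sketch of that formulation with softmax gating (shapes and names are illustrative, not the project's API):

import numpy as np

x = np.random.rand(8, 10)                                # (batch, input_dim)
input_dim, units, n_experts = 10, 5, 4
expert_kernel = np.random.rand(input_dim, units, n_experts)
gating_kernel = np.random.rand(input_dim, n_experts)

expert_out = np.einsum('bi,iuk->buk', x, expert_kernel)  # every expert's dense output
gates = np.exp(x @ gating_kernel)
gates /= gates.sum(axis=-1, keepdims=True)               # softmax gating weights
y = np.einsum('buk,bk->bu', expert_out, gates)           # gate-weighted combination
print(y.shape)                                           # (8, 5)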
Example 70
Project: IJCAI_Keras_Defense   Author: gujingxiao   File: denseMoE.py    Apache License 2.0 4 votes vote down vote up
def __init__(self, units,
                 n_experts,
                 expert_activation=None,
                 gating_activation=None,
                 use_expert_bias=True,
                 use_gating_bias=True,
                 expert_kernel_initializer_scale=1.0,
                 gating_kernel_initializer_scale=1.0,
                 expert_bias_initializer='zeros',
                 gating_bias_initializer='zeros',
                 expert_kernel_regularizer=None,
                 gating_kernel_regularizer=None,
                 expert_bias_regularizer=None,
                 gating_bias_regularizer=None,
                 expert_kernel_constraint=None,
                 gating_kernel_constraint=None,
                 expert_bias_constraint=None,
                 gating_bias_constraint=None,
                 activity_regularizer=None,
                 **kwargs):

        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(DenseMoE_LeakyReLU, self).__init__(**kwargs)
        self.units = units
        self.n_experts = n_experts

        self.expert_activation = LeakyReLU()
        self.gating_activation = activations.get(gating_activation)

        self.use_expert_bias = use_expert_bias
        self.use_gating_bias = use_gating_bias

        self.expert_kernel_initializer_scale = expert_kernel_initializer_scale
        self.gating_kernel_initializer_scale = gating_kernel_initializer_scale

        self.expert_bias_initializer = initializers.get(expert_bias_initializer)
        self.gating_bias_initializer = initializers.get(gating_bias_initializer)

        self.expert_kernel_regularizer = regularizers.get(expert_kernel_regularizer)
        self.gating_kernel_regularizer = regularizers.get(gating_kernel_regularizer)

        self.expert_bias_regularizer = regularizers.get(expert_bias_regularizer)
        self.gating_bias_regularizer = regularizers.get(gating_bias_regularizer)

        self.expert_kernel_constraint = constraints.get(expert_kernel_constraint)
        self.gating_kernel_constraint = constraints.get(gating_kernel_constraint)

        self.expert_bias_constraint = constraints.get(expert_bias_constraint)
        self.gating_bias_constraint = constraints.get(gating_bias_constraint)

        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True 
Example 71
Project: cbc_networks   Author: saralajew   File: reasoning_layers.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def __init__(self,
                 n_classes,
                 n_replicas=1,
                 kernel_size=None,
                 strides=(1, 1),
                 padding='valid',
                 dilation_rate=(1, 1),
                 reasoning_initializer='zeros',
                 reasoning_regularizer=None,
                 use_component_probabilities=False,
                 component_probabilities_initializer='zeros',
                 component_probabilities_regularizer=None,
                 component_probabilities_constraint=None,
                 use_pixel_probabilities=False,
                 pixel_probabilities_initializer='zeros',
                 pixel_probabilities_regularizer=None,
                 pixel_probabilities_constraint=None,
                 **kwargs):
        super(Reasoning2D, self).__init__(**kwargs)

        self.n_classes = n_classes
        self.n_replicas = n_replicas

        self.rank = 2
        if kernel_size is not None:
            self.kernel_size = conv_utils.normalize_tuple(kernel_size,
                                                          self.rank,
                                                          'kernel_size')
        else:
            self.kernel_size = None
        self.strides = conv_utils.normalize_tuple(strides,
                                                  self.rank,
                                                  'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate,
                                                        self.rank,
                                                        'dilation_rate')

        self.reasoning_initializer = initializers.get(reasoning_initializer)
        self.reasoning_regularizer = regularizers.get(reasoning_regularizer)

        self.use_component_probabilities = use_component_probabilities
        self.component_probabilities_initializer = initializers.get(
            component_probabilities_initializer)
        self.component_probabilities_regularizer = regularizers.get(
            component_probabilities_regularizer)
        self.component_probabilities_constraint = constraints.get(
            component_probabilities_constraint)

        self.use_pixel_probabilities = use_pixel_probabilities
        self.pixel_probabilities_initializer = initializers.get(
            pixel_probabilities_initializer)
        self.pixel_probabilities_regularizer = regularizers.get(
            pixel_probabilities_regularizer)
        self.pixel_probabilities_constraint = constraints.get(
            pixel_probabilities_constraint) 
Example 72
Project: cbc_networks   Author: saralajew   File: reasoning_layers.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def call(self, inputs, **kwargs):
        # decode the reasoning probabilities
        positive_kernel = self.reasoning_probabilities[0]
        negative_kernel = (1 - positive_kernel) * \
                          self.reasoning_probabilities[1]

        if self.use_component_probabilities:
            # squash component probabilities
            components_probabilities = softmax(self.component_probabilities,
                                               axis=2)

            positive_kernel = positive_kernel * components_probabilities
            negative_kernel = negative_kernel * components_probabilities

        # get normalization tensor
        # stabilize the division with a small epsilon
        normalization = K.sum(positive_kernel + negative_kernel,
                              axis=2,
                              keepdims=True) + K.epsilon()

        # get sliding kernel and bias
        if self.use_pixel_probabilities:
            pixel_probabilities = softmax(self.pixel_probabilities,
                                          axis=(0, 1))
            # scale kernel with priors
            kernel = (positive_kernel - negative_kernel) / normalization \
                     * pixel_probabilities
            bias = K.sum(negative_kernel / normalization
                         * pixel_probabilities,
                         axis=(0, 1, 2),
                         keepdims=True)
        else:
            kernel = (positive_kernel - negative_kernel) / normalization
            bias = K.sum(negative_kernel / normalization,
                         axis=(0, 1, 2),
                         keepdims=True)

        # compute probabilities by a sliding operation
        probs = K.conv2d(inputs, kernel,
                         strides=self.strides,
                         padding=self.padding,
                         data_format='channels_last',
                         dilation_rate=self.dilation_rate) + bias

        if not self.use_pixel_probabilities:
            # divide by the number of kernel elements
            probs = probs / np.prod(self.kernel_size)

        # reshape to m x n x #classes x #replicas
        probs = K.reshape(probs,
                          (-1,) + K.int_shape(probs)[1:3]
                          + (self.n_classes, self.n_replicas))

        # squeeze the replica dimension if there is only one replica
        if self.n_replicas == 1:
            probs = K.squeeze(probs, axis=-1)

        return probs 
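The kernel/bias construction above rests on a small identity: with positive reasoning p_pos, negative reasoning p_neg, and normalization Z = sum(p_pos + p_neg), the match score sum(d * p_pos + (1 - d) * p_neg) / Z over detection probabilities d equals d . (p_pos - p_neg) / Z + sum(p_neg) / Z, i.e. a convolution plus a bias. A quick NumPy check (illustrative only):

import numpy as np

d = np.random.rand(6)                    # detection probabilities
p_pos = np.random.rand(6)                # positive reasoning probabilities
p_neg = (1 - p_pos) * np.random.rand(6)  # negative reasoning, decoded as above
Z = (p_pos + p_neg).sum()

direct = (d * p_pos + (1 - d) * p_neg).sum() / Z
via_kernel_and_bias = d @ ((p_pos - p_neg) / Z) + (p_neg / Z).sum()
assert np.isclose(direct, via_kernel_and_bias)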
Example 73
Project: lmtc-eurlex57k   Author: iliaschalkidis   File: attention.py    Apache License 2.0 4 votes vote down vote up
def __init__(self,
                 kernel_regularizer=None, u_regularizer=None, bias_regularizer=None,
                 W_constraint=None, u_constraint=None, b_constraint=None,
                 bias=True,
                 return_attention=False,
                 **kwargs):
        """
        Keras Layer that implements a context-aware Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
        "Hierarchical Attention Networks for Document Classification"
        by using a context vector to assist the attention
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        Usage: put this layer on top of an RNN layer (GRU/LSTM/SimpleRNN) with
        return_sequences=True; the dimensions are inferred from the output
        shape of the RNN.

        Note: the layer has been tested with Keras 1.x.

        Example:
            model.add(LSTM(64, return_sequences=True))
            model.add(Attention())
            # next add a Dense layer (for classification/regression) or whatever...

        """
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')

        self.W_regularizer = regularizers.get(kernel_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.b_regularizer = regularizers.get(bias_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.return_attention = return_attention
        super(ContextualAttention, self).__init__(**kwargs) 
Example 74
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0 4 votes vote down vote up
def __init__(self, units,
                 activation='tanh',
                 recurrent_activation='tanh',
                 features_initializer='glorot_uniform',
                 recurrent_initializer='glorot_uniform',
                 average_initializer='glorot_uniform',
                 initial_attention_initializer='zeros',
                 bias_initializer='zeros',
                 features_regularizer=None,
                 recurrent_regularizer=None,
                 average_regularizer=None,
                 initial_attention_regularizer=None,
                 bias_regularizer=None,
                 features_constraint=None,
                 recurrent_constraint=None,
                 average_constraint=None,
                 initial_attention_constraint=None,
                 bias_constraint=None,
#                  dropout=0.,
#                  recurrent_dropout=0.,
                 **kwargs):
        super(RWA, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.supports_masking = False
        self.unroll = False
        self.return_sequences = False
        self.go_backwards = False
        self.features_initializer = initializers.get(features_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.average_initializer = initializers.get(average_initializer)
        self.initial_attention_initializer = initializers.get(initial_attention_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.features_regularizer = regularizers.get(features_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.average_regularizer = regularizers.get(average_regularizer)
        self.initial_attention_regularizer = regularizers.get(initial_attention_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)


        self.features_constraint = constraints.get(features_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.average_constraint = constraints.get(average_constraint)
        self.initial_attention_constraint = constraints.get(initial_attention_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

#         self.dropout = min(1., max(0., dropout))
#         self.recurrent_dropout = min(1., max(0., recurrent_dropout)) 
Example 75
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License 4 votes vote down vote up
def __init__(self,
                 channels,
                 attn_heads=1,
                 concat_heads=True,
                 dropout_rate=0.5,
                 return_attn_coef=False,
                 activation='relu',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 attn_kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 attn_kernel_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 attn_kernel_constraint=None,
                 **kwargs):
        super().__init__(channels, **kwargs)

        self.channels = channels
        self.attn_heads = attn_heads
        self.concat_heads = concat_heads
        self.dropout_rate = dropout_rate
        self.return_attn_coef = return_attn_coef
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)
        self.supports_masking = False

        # Populated by build()
        self.kernels = []       # Layer kernels for attention heads
        self.biases = []        # Layer biases for attention heads
        self.attn_kernels = []  # Attention kernels for attention heads

        if concat_heads:
            # Output will have shape (..., attention_heads * channels)
            self.output_dim = self.channels * self.attn_heads
        else:
            # Output will have shape (..., channels)
            self.output_dim = self.channels 
Example 76
Project: spektral   Author: danielegrattarola   File: convolutional.py    MIT License 4 votes vote down vote up
def call(self, inputs):
        X = inputs[0]
        A = inputs[1]

        outputs = []
        output_attn = []
        for head in range(self.attn_heads):
            kernel = self.kernels[head]
            attention_kernel = self.attn_kernels[head]  # Attention kernel a in the paper (2F' x 1)

            # Compute inputs to attention network
            features = K.dot(X, kernel)

            # Compute attention coefficients:
            # [[a_1], [a_2]]^T [[Wh_i], [Wh_j]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
            attn_for_self = K.dot(features, attention_kernel[0])    # [a_1]^T [Wh_i]
            attn_for_neighs = K.dot(features, attention_kernel[1])  # [a_2]^T [Wh_j]
            if len(K.int_shape(features)) == 2:
                # Single / mixed mode
                attn_for_neighs_T = K.transpose(attn_for_neighs)
            else:
                # Batch mode
                attn_for_neighs_T = K.permute_dimensions(attn_for_neighs, (0, 2, 1))
            attn_coef = attn_for_self + attn_for_neighs_T
            attn_coef = LeakyReLU(alpha=0.2)(attn_coef)

            # Mask values before activation (Vaswani et al., 2017)
            mask = -10e9 * (1.0 - A)
            attn_coef += mask

            # Apply softmax to get attention coefficients
            attn_coef = K.softmax(attn_coef)
            output_attn.append(attn_coef)

            # Apply dropout to attention coefficients
            attn_coef_drop = Dropout(self.dropout_rate)(attn_coef)

            # Convolution
            features = filter_dot(attn_coef_drop, features)
            if self.use_bias:
                features = K.bias_add(features, self.biases[head])

            # Add output of attention head to final output
            outputs.append(features)

        # Aggregate the heads' output according to the reduction method
        if self.concat_heads:
            output = K.concatenate(outputs)
        else:
            output = K.mean(K.stack(outputs), axis=0)

        output = self.activation(output)

        if self.return_attn_coef:
            return output, output_attn
        else:
            return output
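The per-head arithmetic above, written out in NumPy for a single graph (an illustrative sketch of the same steps, not the project's API):

import numpy as np

def leaky_relu(x, alpha=0.2):
    return np.where(x > 0, x, alpha * x)

N, F, F_out = 4, 3, 2
X = np.random.rand(N, F)                           # node features
A = (np.random.rand(N, N) > 0.5).astype(float)     # adjacency matrix (1 = edge)
W = np.random.rand(F, F_out)
a_1, a_2 = np.random.rand(F_out, 1), np.random.rand(F_out, 1)

features = X @ W                                   # Wh_i for every node
attn = features @ a_1 + (features @ a_2).T         # e_ij = a_1.Wh_i + a_2.Wh_j
attn = leaky_relu(attn)
attn += -10e9 * (1.0 - A)                          # mask non-edges before softmax
attn = np.exp(attn - attn.max(axis=-1, keepdims=True))
attn /= attn.sum(axis=-1, keepdims=True)           # row-wise softmax
out = attn @ features                              # aggregate neighbour features
print(out.shape)                                   # (4, 2)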