Python `keras.constraints` Module Examples

The following code examples show how to use the `keras.constraints` module. They are taken from open-source Python projects. You can vote up the examples you find helpful or vote down those you don't.

Example 1
Project: fancy-cnn   Author: textclf   File: timedistributed.py    MIT License 6 votes vote down vote up
def build(self):
        """Configure the wrapped layer and mirror its attributes onto this wrapper.

        Determines the input rank (preferring the upstream layer's shape),
        tells the inner layer its per-timestep input shape, and copies the
        inner layer's regularizers/constraints/trainable weights onto the
        wrapper so the model sees them.
        """
        # Prefer the connected previous layer's shape; fall back to our own
        # declared input_shape when there is no upstream layer.
        try:
            shape = self.previous.input_shape
        except AttributeError:
            shape = self.input_shape
        self.input_ndim = len(shape)

        # Drop the batch and time axes: the inner layer sees one timestep.
        self.layer.set_input_shape((None, ) + self.input_shape[2:])

        # Mirror optional per-layer collections when the inner layer has them.
        for attr in ('regularizers', 'constraints'):
            if hasattr(self.layer, attr):
                setattr(self, attr, getattr(self.layer, attr))

        if hasattr(self.layer, 'trainable_weights'):
            self.trainable_weights = self.layer.trainable_weights
            if self.initial_weights is not None:
                # Load caller-supplied weights once, then release the reference.
                self.layer.set_weights(self.initial_weights)
                del self.initial_weights
Example 2
Project: deeplearning4nlp-tutorial   Author: UKPLab   File: FixedEmbedding.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, input_dim, output_dim, init='uniform', input_length=None,
                 W_regularizer=None, activity_regularizer=None, W_constraint=None,
                 mask_zero=False, weights=None, **kwargs):
        """Set up a fixed (non-trainable) embedding layer.

        Resolves initializer/regularizer/constraint identifiers to objects
        and records the configuration before delegating to the base layer.
        """
        # Geometry of the embedding table.
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.input_length = input_length
        self.mask_zero = mask_zero

        # Resolve string identifiers (objects pass through unchanged).
        self.init = initializations.get(init)
        self.W_constraint = constraints.get(W_constraint)
        self.constraints = [self.W_constraint]
        self.W_regularizer = regularizers.get(W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        # Weights to install once the layer is built.
        self.initial_weights = weights

        # The base Layer reads the input shape from kwargs.
        kwargs['input_shape'] = (self.input_dim,)
        super(FixedEmbedding, self).__init__(**kwargs)
Example 3
Project: deeplearning4nlp-tutorial   Author: UKPLab   File: ConvolutionalMaxOverTime.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, output_dim, init='glorot_uniform', activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        """Record configuration: resolve identifiers, store hyper-parameters."""
        self.output_dim = output_dim

        # Resolve string identifiers (objects/callables pass through).
        self.init = initializations.get(init)
        self.activation = activations.get(activation)

        # Regularizers for kernel, bias and layer output.
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        # Constraints kept as a [W, b] pair.
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        # Weights to install after build().
        self.initial_weights = weights

        # Only forward input_shape when the caller pinned the input dimension.
        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(ConvolutionalMaxOverTime, self).__init__(**kwargs)
Example 4
Project: deeplearning4nlp-tutorial   Author: UKPLab   File: ConvolutionalMaxOverTime.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, output_dim, init='glorot_uniform', activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        """Store the layer configuration, resolving any string identifiers."""
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.activation = activations.get(activation)

        # Regularizer objects for the kernel, the bias, and the output.
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        # Constraint objects, collected as the [W, b] pair.
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        # Caller-provided weights, applied after the layer is built.
        self.initial_weights = weights
        self.input_dim = input_dim

        # input_shape is forwarded only when input_dim was given (truthy).
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(ConvolutionalMaxOverTime, self).__init__(**kwargs)
Example 5
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def build(self, input_shape):
        """Create the two 1x1 projection layers used by the pooling operator.

        Args:
            input_shape: shape tuple of the layer input; its last entry is the
                feature dimension projected onto ``n_basis`` basis vectors.

        Raises:
            ValueError: if ``n_basis`` exceeds the input feature dimension.
        """
        self.shape = input_shape
        in_dim = input_shape[-1]
        # Fix: raise instead of print() + sys.exit() — library code must not
        # terminate the interpreter; callers can now catch and recover.
        if self.n_basis > in_dim:
            raise ValueError(
                'n_basis ({}) must not be larger than in_dim ({})'.format(
                    self.n_basis, in_dim))

        # kernel_size=1 makes each Conv1D a per-timestep linear projection.
        # (Earlier experiments with NonNeg/unit-norm kernel constraints and an
        # l1 kernel regularizer were removed as dead commented-out code.)
        self.E_layer = keras.layers.Conv1D(self.n_basis,
                                           kernel_size=1,
                                           activation=self.activation,
                                           use_bias=self.use_bias)

        self.F_layer = keras.layers.Conv1D(self.n_basis,
                                           kernel_size=1,
                                           activation=self.activation,
                                           use_bias=self.use_bias)

        super(TensorRelaxationPooling2, self).build(input_shape)
Example 6
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 4 votes vote down vote up
def build(self, input_shape):
        """Create random-projection matrices E/F with trainable radii.

        For each of the ``n_components`` sketches, a frozen orthogonal matrix
        is paired with a non-negative scale ``sigma``; the effective
        projection is ``sqrt(in_dim) / sigma * M``, so training ``sigma``
        rescales the fixed basis.

        Args:
            input_shape: shape tuple of the layer input; its last entry is
                the feature dimension ``in_dim``.

        Raises:
            ValueError: if ``n_basis`` exceeds the input feature dimension.
        """
        self.shape = input_shape
        in_dim = input_shape[-1]

        # Fix: raise instead of print() + sys.exit() — library code must not
        # terminate the interpreter; callers can now catch and recover.
        if self.n_basis > in_dim:
            raise ValueError(
                'n_basis ({}) must not be larger than in_dim ({})'.format(
                    self.n_basis, in_dim))

        # NOTE(review): a sigma of 0 is treated as "unset" by this truthiness
        # test; switch to `is None` if 0 should be a valid explicit value.
        if not self.init_sigma:
            init_sigma = np.sqrt(in_dim)
        else:
            init_sigma = self.init_sigma

        def _scaled_basis(prefix, n):
            # One frozen orthogonal basis plus its trainable radius; factored
            # out because the E and F branches were duplicated verbatim.
            base = self.add_weight(name='{}_{}'.format(prefix, n),
                                   shape=[in_dim, self.n_basis],
                                   initializer=keras.initializers.Orthogonal(),
                                   trainable=False)
            sigma = self.add_weight(name='s{}_{}'.format(prefix, n),
                                    shape=[1],
                                    initializer=keras.initializers.Constant(init_sigma),
                                    constraint=keras.constraints.NonNeg(),
                                    trainable=self.learnable_radius)
            # K.epsilon() guards against division by a zero radius.
            return np.sqrt(in_dim) / (K.epsilon() + sigma) * base

        self.E_list = []
        self.F_list = []
        for n in range(self.n_components):
            self.E_list.append(_scaled_basis('E', n))
            self.F_list.append(_scaled_basis('F', n))

        super(RPGaussianPooling, self).build(input_shape)
Example 7
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 4 votes vote down vote up
def build(self, input_shape):
        """Create random-projection matrices E/F with trainable radii.

        For each of the ``n_components`` sketches, a frozen orthogonal matrix
        is paired with a non-negative scale ``sigma``; the effective
        projection is ``sqrt(in_dim) / sigma * M``, so training ``sigma``
        rescales the fixed basis.

        Args:
            input_shape: shape tuple of the layer input; its last entry is
                the feature dimension ``in_dim``.

        Raises:
            ValueError: if ``n_basis`` exceeds the input feature dimension.
        """
        self.shape = input_shape
        in_dim = input_shape[-1]

        # Fix: raise instead of print() + sys.exit() — library code must not
        # terminate the interpreter; callers can now catch and recover.
        if self.n_basis > in_dim:
            raise ValueError(
                'n_basis ({}) must not be larger than in_dim ({})'.format(
                    self.n_basis, in_dim))

        # NOTE(review): a sigma of 0 is treated as "unset" by this truthiness
        # test; switch to `is None` if 0 should be a valid explicit value.
        if not self.init_sigma:
            init_sigma = np.sqrt(in_dim)
        else:
            init_sigma = self.init_sigma

        def _scaled_basis(prefix, n):
            # One frozen orthogonal basis plus its trainable radius; factored
            # out because the E and F branches were duplicated verbatim.
            base = self.add_weight(name='{}_{}'.format(prefix, n),
                                   shape=[in_dim, self.n_basis],
                                   initializer=keras.initializers.Orthogonal(),
                                   trainable=False)
            sigma = self.add_weight(name='s{}_{}'.format(prefix, n),
                                    shape=[1],
                                    initializer=keras.initializers.Constant(init_sigma),
                                    constraint=keras.constraints.NonNeg(),
                                    trainable=self.learnable_radius)
            # K.epsilon() guards against division by a zero radius.
            return np.sqrt(in_dim) / (K.epsilon() + sigma) * base

        self.E_list = []
        self.F_list = []
        for n in range(self.n_components):
            self.E_list.append(_scaled_basis('E', n))
            self.F_list.append(_scaled_basis('F', n))

        super(RPGaussianPooling2, self).build(input_shape)