Python keras.initializers.RandomUniform() Examples

The following are 24 code examples of keras.initializers.RandomUniform(), drawn from open-source projects; the source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the module keras.initializers.
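
Before diving in, here is a minimal sketch of the initializer's API: RandomUniform draws each weight independently from the interval [minval, maxval], with an optional seed for reproducibility. The layer and values below are arbitrary.

from keras.layers import Dense
from keras.initializers import RandomUniform

# Kernel entries drawn uniformly from [-0.05, 0.05]; seed makes the draw repeatable.
layer = Dense(64, kernel_initializer=RandomUniform(minval=-0.05, maxval=0.05, seed=42))
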
Example #1
Source File: srelu.py    From keras-contrib with MIT License
def __init__(self, t_left_initializer='zeros',
                 a_left_initializer=initializers.RandomUniform(minval=0, maxval=1),
                 t_right_initializer=initializers.RandomUniform(minval=0, maxval=5),
                 a_right_initializer='ones',
                 shared_axes=None,
                 **kwargs):
        super(SReLU, self).__init__(**kwargs)
        self.supports_masking = True
        self.t_left_initializer = initializers.get(t_left_initializer)
        self.a_left_initializer = initializers.get(a_left_initializer)
        self.t_right_initializer = initializers.get(t_right_initializer)
        self.a_right_initializer = initializers.get(a_right_initializer)
        if shared_axes is None:
            self.shared_axes = None
        elif not isinstance(shared_axes, (list, tuple)):
            self.shared_axes = [shared_axes]
        else:
            self.shared_axes = list(shared_axes) 
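
A hypothetical use of the layer above, assuming keras-contrib exposes SReLU at keras_contrib.layers (the shapes are arbitrary):

from keras.models import Sequential
from keras.layers import Dense
from keras_contrib.layers import SReLU

# Per the defaults above: a_left starts uniform in [0, 1], t_right uniform in [0, 5].
model = Sequential([Dense(64, input_shape=(100,)), SReLU()])
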
Example #2
Source File: hadamard.py    From landmark-recognition-challenge with GNU General Public License v3.0
def build(self, input_shape):

        hadamard_size = 2 ** int(math.ceil(math.log(max(input_shape[1], self.output_dim), 2)))
        self.hadamard = K.constant(
            value=hadamard(hadamard_size, dtype=np.int8)[:input_shape[1], :self.output_dim])

        init_scale = 1. / math.sqrt(self.output_dim)

        self.scale = self.add_weight(name='scale', 
                                      shape=(1,),
                                      initializer=Constant(init_scale),
                                      trainable=True)

        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.output_dim,),
                                        initializer=RandomUniform(-init_scale, init_scale),
                                        trainable=True)

        super(HadamardClassifier, self).build(input_shape) 
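
This snippet presumably gets `hadamard` from scipy.linalg and `K` from the Keras backend. A standalone sketch of the size computation, with assumed dimensions:

import math
import numpy as np
from scipy.linalg import hadamard

input_dim, output_dim = 2048, 1000
# Smallest power of two covering both dimensions, since scipy's
# hadamard() only accepts powers of two.
hadamard_size = 2 ** int(math.ceil(math.log(max(input_dim, output_dim), 2)))
proj = hadamard(hadamard_size, dtype=np.int8)[:input_dim, :output_dim]
print(proj.shape)  # (2048, 1000)
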
Example #3
Source File: attention_model.py    From neural-tweet-search with Apache License 2.0
def add_embed_layer(vocab_emb, vocab_size, embed_size, train_embed, dropout_rate):
    emb_layer = Sequential()
    if vocab_emb is not None:
        print("Embedding with initialized weights")
        print(vocab_size, embed_size)
        emb_layer.add(Embedding(input_dim=vocab_size, output_dim=embed_size, weights=[vocab_emb],
                                trainable=train_embed, mask_zero=False))
    else:
        print("Embedding with random weights")
        emb_layer.add(Embedding(input_dim=vocab_size, output_dim=embed_size, trainable=True, mask_zero=False,
                                embeddings_initializer=RandomUniform(-0.05, 0.05)))
    emb_layer.add(SpatialDropout1D(dropout_rate))
    return emb_layer 
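
The explicit RandomUniform(-0.05, 0.05) reproduces the range of Keras's default 'uniform' embedding initializer. A hypothetical call to the random-weights branch (argument values are assumptions):

emb_layer = add_embed_layer(vocab_emb=None, vocab_size=10000, embed_size=300,
                            train_embed=True, dropout_rate=0.2)
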
Example #4
Source File: binary_layers.py    From QuantizedNeuralNetworks-Keras-Tensorflow with BSD 3-Clause "New" or "Revised" License
def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[1]

        if self.H == 'Glorot':
            self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot H: {}'.format(self.H))
        if self.kernel_lr_multiplier == 'Glorot':
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))
            
        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                     initializer=self.kernel_initializer,
                                     name='kernel',
                                     regularizer=self.kernel_regularizer,
                                     constraint=self.kernel_constraint)

        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            self.bias = self.add_weight(shape=(self.units,),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None

        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True 
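
The 'Glorot' branch is worth unpacking: sqrt(1.5 / (fan_in + fan_out)) is exactly half of the sqrt(6 / (fan_in + fan_out)) bound used by Keras's glorot_uniform. A quick numerical check, with arbitrary fan values:

import numpy as np

fan_in, fan_out = 784, 256
H = np.sqrt(1.5 / (fan_in + fan_out))       # bound used above
glorot = np.sqrt(6.0 / (fan_in + fan_out))  # glorot_uniform bound
assert np.isclose(H, glorot / 2)            # H is half the Glorot limit
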
Example #5
Source File: quantized_layers.py    From QuantizedNeuralNetworks-Keras-Tensorflow with BSD 3-Clause "New" or "Revised" License
def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[1]

        if self.H == 'Glorot':
            self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot H: {}'.format(self.H))
        if self.kernel_lr_multiplier == 'Glorot':
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))
            
        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                     initializer=self.kernel_initializer,
                                     name='kernel',
                                     regularizer=self.kernel_regularizer,
                                     constraint=self.kernel_constraint)

        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            self.bias = self.add_weight(shape=(self.units,),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None

        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True 
Example #6
Source File: DenseMoE.py    From mixture-of-experts with GNU General Public License v3.0
def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]
        expert_init_lim = np.sqrt(3.0*self.expert_kernel_initializer_scale / (max(1., float(input_dim + self.units) / 2)))
        gating_init_lim = np.sqrt(3.0*self.gating_kernel_initializer_scale / (max(1., float(input_dim + 1) / 2)))

        self.expert_kernel = self.add_weight(shape=(input_dim, self.units, self.n_experts),
                                      initializer=RandomUniform(minval=-expert_init_lim,maxval=expert_init_lim),
                                      name='expert_kernel',
                                      regularizer=self.expert_kernel_regularizer,
                                      constraint=self.expert_kernel_constraint)

        self.gating_kernel = self.add_weight(shape=(input_dim, self.n_experts),
                                      initializer=RandomUniform(minval=-gating_init_lim,maxval=gating_init_lim),
                                      name='gating_kernel',
                                      regularizer=self.gating_kernel_regularizer,
                                      constraint=self.gating_kernel_constraint)

        if self.use_expert_bias:
            self.expert_bias = self.add_weight(shape=(self.units, self.n_experts),
                                        initializer=self.expert_bias_initializer,
                                        name='expert_bias',
                                        regularizer=self.expert_bias_regularizer,
                                        constraint=self.expert_bias_constraint)
        else:
            self.expert_bias = None

        if self.use_gating_bias:
            self.gating_bias = self.add_weight(shape=(self.n_experts,),
                                        initializer=self.gating_bias_initializer,
                                        name='gating_bias',
                                        regularizer=self.gating_bias_regularizer,
                                        constraint=self.gating_bias_constraint)
        else:
            self.gating_bias = None

        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True 
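
The limits above match the uniform bound of Keras's VarianceScaling initializer in fan_avg mode, limit = sqrt(3 * scale / n) with n = (fan_in + fan_out) / 2. A sketch of the equivalence, with assumed values:

import numpy as np
from keras.initializers import VarianceScaling

scale, fan_in, fan_out = 1.0, 128, 64
lim = np.sqrt(3.0 * scale / max(1.0, (fan_in + fan_out) / 2.0))
# The same bound, expressed through the built-in initializer:
equivalent = VarianceScaling(scale=scale, mode='fan_avg', distribution='uniform')
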
Example #7
Source File: keras_mt_shared_cnn.py    From Benchmarks with MIT License
def init_export_network(num_classes,
                        in_seq_len,
                        vocab_size,
                        wv_space,
                        filter_sizes,
                        num_filters,
                        concat_dropout_prob,
                        emb_l2,
                        w_l2,
                        optimizer):


    # define network layers ----------------------------------------------------
    input_shape = tuple([in_seq_len])
    model_input = Input(shape=input_shape, name= "Input")
    # embedding lookup
    emb_lookup = Embedding(vocab_size,
                           wv_space,
                           input_length=in_seq_len,
                           name="embedding",
                           #embeddings_initializer=RandomUniform,
                           embeddings_regularizer=l2(emb_l2))(model_input)
    # convolutional layer and dropout
    conv_blocks = []
    for ith_filter,sz in enumerate(filter_sizes):
        conv = Convolution1D(filters=num_filters[ ith_filter ],
                             kernel_size=sz,
                             padding="same",
                             activation="relu",
                             strides=1,
                             # kernel_initializer='lecun_uniform',
                             name=str(ith_filter) + "_thfilter")(emb_lookup)
        conv_blocks.append(GlobalMaxPooling1D()(conv))
    concat = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
    concat_drop = Dropout(concat_dropout_prob)(concat)

    # different dense layer per tasks
    FC_models = []
    for i in range(len(num_classes)):
        outlayer = Dense(num_classes[i], name= "Dense"+str(i), activation='softmax')( concat_drop )#, kernel_regularizer=l2(0.01))( concat_drop )
        FC_models.append(outlayer)


    # the multitask model
    model = Model(inputs=model_input, outputs = FC_models)
    model.compile( loss= "sparse_categorical_crossentropy", optimizer= optimizer, metrics=[ "acc" ] )

    return model 
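
A hypothetical call building a two-task model (all argument values are assumptions; note that w_l2 is accepted but unused in the body above):

model = init_export_network(num_classes=[4, 2],  # one softmax head per task
                            in_seq_len=1500,
                            vocab_size=40000,
                            wv_space=300,
                            filter_sizes=[3, 4, 5],
                            num_filters=[100, 100, 100],
                            concat_dropout_prob=0.5,
                            emb_l2=1e-4,
                            w_l2=1e-4,
                            optimizer='adam')
model.summary()
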
Example #8
Source File: models.py    From WeSHClass with Apache License 2.0
def instantiate(self, class_tree, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False,
                    word_embedding_dim=100, hidden_dim=20, act='relu', init=RandomUniform(minval=-0.01, maxval=0.01)):
        num_children = len(class_tree.children)
        if num_children <= 1:
            class_tree.model = None
        else:
            class_tree.model = ConvolutionLayer(self.x, self.input_shape[1], filter_sizes=filter_sizes,
                                                n_classes=num_children,
                                                vocab_sz=self.vocab_sz, embedding_matrix=class_tree.embedding,
                                                hidden_dim=hidden_dim,
                                                word_embedding_dim=word_embedding_dim, num_filters=num_filters,
                                                init=init,
                                                word_trainable=word_trainable, act=act) 
Example #9
Source File: models.py    From WeSHClass with Apache License 2.0
def __init__(self,
                 input_shape,
                 class_tree,
                 max_level,
                 sup_source,
                 init=RandomUniform(minval=-0.01, maxval=0.01),
                 y=None,
                 vocab_sz=None,
                 word_embedding_dim=100,
                 blocking_perc=0,
                 block_thre=1.0,
                 block_level=1,
                 ):

        super(WSTC, self).__init__()

        self.input_shape = input_shape
        self.class_tree = class_tree
        self.y = y
        if isinstance(y, dict):
            self.eval_set = np.array([ele for ele in y])
        else:
            self.eval_set = None
        self.vocab_sz = vocab_sz
        self.block_level = block_level
        self.block_thre = block_thre
        self.block_label = {}
        self.siblings_map = {}
        self.x = Input(shape=(input_shape[1],), name='input')
        self.model = []
        self.sup_dict = {}
        if sup_source == 'docs':
            n_classes = class_tree.get_size() - 1
            leaves = class_tree.find_leaves()
            for leaf in leaves:
                current = np.zeros(n_classes)
                for i in class_tree.name2label(leaf.name):
                    current[i] = 1.0
                for idx in leaf.sup_idx:
                    self.sup_dict[idx] = current 
Example #10
Source File: model.py    From WeSTClass with Apache License 2.0
def __init__(self,
                 input_shape,
                 n_classes=None,
                 init=RandomUniform(minval=-0.01, maxval=0.01),
                 y=None,
                 model='cnn',
                 vocab_sz=None,
                 word_embedding_dim=100,
                 embedding_matrix=None
                 ):

        super(WSTC, self).__init__()

        self.input_shape = input_shape
        self.y = y
        self.n_classes = n_classes
        if model == 'cnn':
            self.classifier = ConvolutionLayer(self.input_shape[1], n_classes=n_classes,
                                                vocab_sz=vocab_sz, embedding_matrix=embedding_matrix, 
                                                word_embedding_dim=word_embedding_dim, init=init)
        elif model == 'rnn':
            self.classifier = HierAttLayer(self.input_shape, n_classes=n_classes,
                                             vocab_sz=vocab_sz, embedding_matrix=embedding_matrix, 
                                             word_embedding_dim=word_embedding_dim)
        
        self.model = self.classifier
        self.sup_list = {} 
Example #11
Source File: ternary_layers.py    From nn_playground with MIT License
def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[1]

        if self.H == 'Glorot':
            self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot H: {}'.format(self.H))
        if self.kernel_lr_multiplier == 'Glorot':
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))
            
        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                     initializer=self.kernel_initializer,
                                     name='kernel',
                                     regularizer=self.kernel_regularizer,
                                     constraint=self.kernel_constraint)

        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            self.bias = self.add_weight(shape=(self.units,),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None

        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True 
Example #12
Source File: rbflayer.py    From rbf_keras with MIT License
def __init__(self, output_dim, initializer=None, betas=1.0, **kwargs):
        self.output_dim = output_dim
        self.init_betas = betas
        if initializer is None:
            self.initializer = RandomUniform(0.0, 1.0)
        else:
            self.initializer = initializer
        super(RBFLayer, self).__init__(**kwargs) 
Example #13
Source File: binary_layers.py    From nn_playground with MIT License
def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[1]

        if self.H == 'Glorot':
            self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot H: {}'.format(self.H))
        if self.kernel_lr_multiplier == 'Glorot':
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))
            
        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                     initializer=self.kernel_initializer,
                                     name='kernel',
                                     regularizer=self.kernel_regularizer,
                                     constraint=self.kernel_constraint)

        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            self.bias = self.add_weight(shape=(self.units,),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None
        self.built = True 
Example #14
Source File: Model.py    From pysster with MIT License
def _add_rnn_layer(self, rnn, return_sequences, x):
        if self.params["rnn_bidirectional"][x] == False:
            self.cnn = rnn(units = self.params["rnn_units"][x],
                           dropout = self.params["rnn_dropout_input"][x],
                           recurrent_dropout = self.params["rnn_dropout_recurrent"][x],
                           kernel_initializer = RandomUniform(),
                           kernel_constraint = max_norm(self.params["kernel_constraint"]),
                           return_sequences = return_sequences)(self.cnn)
        else:
            self.cnn = Bidirectional(rnn(units = self.params["rnn_units"][x],
                                         dropout = self.params["rnn_dropout_input"][x],
                                         recurrent_dropout = self.params["rnn_dropout_recurrent"][x],
                                         kernel_initializer = RandomUniform(),
                                         kernel_constraint = max_norm(self.params["kernel_constraint"]),
                                         return_sequences = return_sequences))(self.cnn) 
Example #15
Source File: binary_layers.py    From nn_playground with MIT License
def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[1]

        if self.H == 'Glorot':
            self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot H: {}'.format(self.H))
        if self.kernel_lr_multiplier == 'Glorot':
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
            #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))
            
        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                     initializer=self.kernel_initializer,
                                     name='kernel',
                                     regularizer=self.kernel_regularizer,
                                     constraint=self.kernel_constraint)

        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            self.bias = self.add_weight(shape=(self.units,),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None

        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True 
Example #16
Source File: anmm.py    From MatchZoo with Apache License 2.0
def build(self):
        """
        Build model structure.

        aNMM model based on bin weighting and query term attentions
        """
        # query is [batch_size, left_text_len]
        # doc is [batch_size, right_text_len, bin_num]
        query, doc = self._make_inputs()
        embedding = self._make_embedding_layer()

        q_embed = embedding(query)
        q_attention = keras.layers.Dense(
            1, kernel_initializer=RandomUniform(), use_bias=False)(q_embed)
        q_text_len = self._params['input_shapes'][0][0]

        q_attention = keras.layers.Lambda(
            lambda x: softmax(x, axis=1),
            output_shape=(q_text_len,)
        )(q_attention)
        d_bin = keras.layers.Dropout(
            rate=self._params['dropout_rate'])(doc)
        for layer_id in range(self._params['num_layers'] - 1):
            d_bin = keras.layers.Dense(
                self._params['hidden_sizes'][layer_id],
                kernel_initializer=RandomUniform())(d_bin)
            d_bin = keras.layers.Activation('tanh')(d_bin)
        d_bin = keras.layers.Dense(
            self._params['hidden_sizes'][self._params['num_layers'] - 1])(
            d_bin)
        d_bin = keras.layers.Reshape((q_text_len,))(d_bin)
        q_attention = keras.layers.Reshape((q_text_len,))(q_attention)
        score = keras.layers.Dot(axes=[1, 1])([d_bin, q_attention])
        x_out = self._make_output_layer()(score)
        self._backend = keras.Model(inputs=[query, doc], outputs=x_out) 
Example #17
Source File: utils.py    From fast-neural-style-keras with MIT License
def build(self, input_shape):
        init = initializers.RandomUniform(minval=-50, maxval=50, seed=None)
        self.kernel = self.add_weight(name='kernel', shape=(self.height, self.width, 3),
                                      initializer=init, trainable=True)

        super(InputReflect, self).build(input_shape) 
Example #18
Source File: models.py    From delft with Apache License 2.0
def __init__(self, config, ntags=None):

        # build input, directly feed with word embedding by the data generator
        word_input = Input(shape=(None, config.word_embedding_size), name='word_input')

        # build character based embedding
        char_input = Input(shape=(None, config.max_char_length), dtype='int32', name='char_input')
        char_embeddings = TimeDistributed(Embedding(input_dim=config.char_vocab_size,
                                    output_dim=config.char_embedding_size,
                                    mask_zero=True,
                                    #embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5),
                                    name='char_embeddings'
                                    ))(char_input)

        chars = TimeDistributed(Bidirectional(LSTM(config.num_char_lstm_units, return_sequences=False)))(char_embeddings)

        # length of sequence not used for the moment (but used for f1 communication)
        length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')

        # combine characters and word embeddings
        x = Concatenate()([word_input, chars])
        x = Dropout(config.dropout)(x)

        x = Bidirectional(GRU(units=config.num_word_lstm_units, 
                               return_sequences=True, 
                               recurrent_dropout=config.recurrent_dropout))(x)
        x = Dropout(config.dropout)(x)
        x = Bidirectional(GRU(units=config.num_word_lstm_units, 
                               return_sequences=True, 
                               recurrent_dropout=config.recurrent_dropout))(x)
        x = Dense(config.num_word_lstm_units, activation='tanh')(x)
        x = Dense(ntags)(x)
        self.crf = ChainCRF()
        pred = self.crf(x)

        self.model = Model(inputs=[word_input, char_input, length_input], outputs=[pred])
        self.config = config 
Example #19
Source File: models.py    From delft with Apache License 2.0
def __init__(self, config, ntags=None):

        # build input, directly feed with word embedding by the data generator
        word_input = Input(shape=(None, config.word_embedding_size), name='word_input')

        # build character based embedding
        char_input = Input(shape=(None, config.max_char_length), dtype='int32', name='char_input')
        char_embeddings = TimeDistributed(Embedding(input_dim=config.char_vocab_size,
                                    output_dim=config.char_embedding_size,
                                    #mask_zero=True,
                                    #embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5),
                                    name='char_embeddings'
                                    ))(char_input)

        chars = TimeDistributed(Bidirectional(LSTM(config.num_char_lstm_units, return_sequences=False)))(char_embeddings)

        # length of sequence not used for the moment (but used for f1 communication)
        length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')

        # combine characters and word embeddings
        x = Concatenate()([word_input, chars])
        x = Dropout(config.dropout)(x)

        x = Bidirectional(LSTM(units=config.num_word_lstm_units, 
                               return_sequences=True, 
                               recurrent_dropout=config.recurrent_dropout))(x)
        x = Dropout(config.dropout)(x)
        x = Dense(config.num_word_lstm_units, activation='tanh')(x)
        x = Dense(ntags)(x)
        self.crf = ChainCRF()
        pred = self.crf(x)

        self.model = Model(inputs=[word_input, char_input, length_input], outputs=[pred])
        self.config = config 
Example #20
Source File: initializers_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_uniform(tensor_shape):
    _runner(initializers.RandomUniform(minval=-1, maxval=1), tensor_shape,
            target_mean=0., target_max=1, target_min=-1) 
Example #21
Source File: Model.py    From pysster with MIT License
def _prepare_model(self):
        np.random.seed(self.params["seed"])
        random.seed(self.params["seed"])

        # input
        self.main_input = Input(shape = self.params["input_shape"])
        self.cnn = Dropout(rate = self.params["dropout_input"])(self.main_input)

        # convolutional/pooling block
        for x in range(self.params["conv_num"]):
            self.cnn = Conv1D(filters = self.params["kernel_num"][x],
                              kernel_size = self.params["kernel_len"][x],
                              padding = "valid",
                              kernel_initializer = RandomUniform(),
                              kernel_constraint = max_norm(self.params["kernel_constraint"]),
                              activation = "relu")(self.cnn)
            self.cnn = MaxPooling1D(pool_size = self.params["pool_size"][x],
                                    strides = self.params["pool_stride"][x])(self.cnn)
            self.cnn = Dropout(rate = self.params["dropout_conv"][x])(self.cnn)

        # recurrent block
        if self.params["rnn_type"] != None:
            if self.params["rnn_type"] == "LSTM":
                rnn = LSTM
            elif self.params["rnn_type"] == "GRU":
                rnn = GRU
            else:
                raise ValueError("rnn_type '{}' not supported.".format(self.params["rnn_type"]))
            for x in range(self.params["rnn_num"]-1):
                self._add_rnn_layer(rnn, True, x)
            self._add_rnn_layer(rnn, False, self.params["rnn_num"]-1)
        else:
            self.cnn = Flatten()(self.cnn)
        
        # dense block
        for x in range(self.params["dense_num"]):
            # add additional input to the first dense layer if available
            if x == 0 and self.params["additional_input_length"] > 0:
                self.additional_input = Input(shape=(self.params["additional_input_length"],))
                self.additional_dropout = Dropout(rate = self.params["dropout_input"])(self.additional_input)
                self.cnn = concatenate([self.cnn, self.additional_dropout])
            self.cnn = Dense(units = self.params["neuron_num"][x],
                             kernel_initializer = RandomUniform(),
                             kernel_constraint = max_norm(self.params["kernel_constraint"]),
                             activation = "relu")(self.cnn)
            self.cnn = Dropout(rate = self.params["dropout_dense"][x])(self.cnn)

        # output
        self.cnn = Dense(units = self.params["class_num"],
                         kernel_initializer = RandomUniform(),
                         activation = self.params['activation'])(self.cnn)
        if self.params["dense_num"] > 0 and self.params["additional_input_length"] > 0:
            self.inputs = [self.main_input, self.additional_input]
        else:
            self.inputs = [self.main_input]
        self.model = KModel(inputs=self.inputs, outputs=[self.cnn])
        self.model.compile(loss = self.params['loss'],
                           optimizer = Adam(lr = self.params["learning_rate"])) 
Example #22
Source File: models.py    From keras-image-captioning with MIT License
def __init__(self,
                 learning_rate=None,
                 vocab_size=None,
                 embedding_size=None,
                 rnn_output_size=None,
                 dropout_rate=None,
                 bidirectional_rnn=None,
                 rnn_type=None,
                 rnn_layers=None,
                 l1_reg=None,
                 l2_reg=None,
                 initializer=None,
                 word_vector_init=None):
        """
        If an arg is None, it will get its value from config.active_config.
        """
        self._learning_rate = learning_rate or active_config().learning_rate
        self._vocab_size = vocab_size or active_config().vocab_size
        self._embedding_size = embedding_size or active_config().embedding_size
        self._rnn_output_size = (rnn_output_size or
                                 active_config().rnn_output_size)
        self._dropout_rate = dropout_rate or active_config().dropout_rate
        self._rnn_type = rnn_type or active_config().rnn_type
        self._rnn_layers = rnn_layers or active_config().rnn_layers
        self._word_vector_init = (word_vector_init or
                                  active_config().word_vector_init)

        self._initializer = initializer or active_config().initializer
        if self._initializer == 'vinyals_uniform':
            self._initializer = RandomUniform(-0.08, 0.08)

        if bidirectional_rnn is None:
            self._bidirectional_rnn = active_config().bidirectional_rnn
        else:
            self._bidirectional_rnn = bidirectional_rnn

        l1_reg = l1_reg or active_config().l1_reg
        l2_reg = l2_reg or active_config().l2_reg
        self._regularizer = l1_l2(l1_reg, l2_reg)

        self._keras_model = None

        if self._vocab_size is None:
            raise ValueError('config.active_config().vocab_size cannot be '
                             'None! You should check your config or you can '
                             'explicitly pass the vocab_size argument.')

        if self._rnn_type not in ('lstm', 'gru'):
            raise ValueError('rnn_type must be either "lstm" or "gru"!')

        if self._rnn_layers < 1:
            raise ValueError('rnn_layers must be >= 1!')

        if self._word_vector_init is not None and self._embedding_size != 300:
            raise ValueError('If word_vector_init is not None, embedding_size '
                             'must be 300') 
Example #23
Source File: binary_layers.py    From QuantizedNeuralNetworks-Keras-Tensorflow with BSD 3-Clause "New" or "Revised" License
def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')

        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)
            
        base = self.kernel_size[0] * self.kernel_size[1]
        if self.H == 'Glorot':
            nb_input = int(input_dim * base)
            nb_output = int(self.filters * base)
            self.H = np.float32(np.sqrt(1.5 / (nb_input + nb_output)))
            #print('Glorot H: {}'.format(self.H))
            
        if self.kernel_lr_multiplier == 'Glorot':
            nb_input = int(input_dim * base)
            nb_output = int(self.filters * base)
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5/ (nb_input + nb_output)))
            #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))

        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        #self.bias_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=kernel_shape,
                                 initializer=self.kernel_initializer,
                                 name='kernel',
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)

        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            self.bias = self.add_weight(shape=(self.filters,),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)

        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None

        # Set input spec.
        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
        self.built = True 
Example #24
Source File: quantized_layers.py    From QuantizedNeuralNetworks-Keras-Tensorflow with BSD 3-Clause "New" or "Revised" License
def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')

        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)
            
        base = self.kernel_size[0] * self.kernel_size[1]
        if self.H == 'Glorot':
            nb_input = int(input_dim * base)
            nb_output = int(self.filters * base)
            self.H = np.float32(np.sqrt(1.5 / (nb_input + nb_output)))
            #print('Glorot H: {}'.format(self.H))
            
        if self.kernel_lr_multiplier == 'Glorot':
            nb_input = int(input_dim * base)
            nb_output = int(self.filters * base)
            self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5/ (nb_input + nb_output)))
            #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))

        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
        #self.bias_initializer = initializers.RandomUniform(-self.H, self.H)
        self.kernel = self.add_weight(shape=kernel_shape,
                                 initializer=self.kernel_initializer,
                                 name='kernel',
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)

        if self.use_bias:
            self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
            self.bias = self.add_weight(shape=(self.filters,),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)

        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None

        # Set input spec.
        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
        self.built = True