Python keras.layers.recurrent.SimpleRNN() Examples

The following are code examples showing how to use keras.layers.recurrent.SimpleRNN(). They are taken from open source Python projects.
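
Before the project examples, here is a minimal, self-contained usage sketch (not taken from any of the projects below). It assumes the standalone Keras 2.x API, where the layer is also importable as keras.layers.SimpleRNN, and uses random data purely for illustration:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.recurrent import SimpleRNN  # also available as keras.layers.SimpleRNN

model = Sequential()
# 16 recurrent units; inputs are sequences of 10 timesteps with 8 features each.
# With return_sequences left at its default (False), only the last output is returned.
model.add(SimpleRNN(units=16, input_shape=(10, 8)))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')

# Random data, just to exercise the model.
x = np.random.random((32, 10, 8))
y = np.random.random((32, 1))
model.fit(x, y, epochs=1, batch_size=16, verbose=0)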

Example 1
Project: dlopt   Author: acamero   File: rnn.py    GNU General Public License v3.0
def _build_model(self, layers, dense_activation):
        self.hidden_layers = len(layers) - 2
        self.layers = layers
        self.input_dim = layers[0]
        self.output_dim = layers[-1]
        model = Sequential()
        for i in range(len(layers) - 2):
            model.add(
                    LSTM(
                    #SimpleRNN(
                    input_dim=layers[i],
                    output_dim=layers[i+1],
                    kernel_initializer='zeros', 
                    recurrent_initializer='zeros',
                    bias_initializer='zeros',
                    # Uncomment to use last batch state to init next training step.
                    # Specify shuffle=False when calling fit() 
                    #batch_size=batch_size, stateful=True,
                    return_sequences=(i < len(layers) - 3))
                    )
        model.add(Dense(layers[-1], activation=dense_activation, kernel_initializer='zeros', bias_initializer='zeros'))        
        return model 
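
The builder above mixes the legacy Keras 1 input_dim/output_dim arguments with Keras 2 initializer names, and keeps the SimpleRNN call commented out in favour of LSTM. A rough, hypothetical Keras 2 equivalent built around SimpleRNN (using units and input_shape; not part of the original project) could look like this:

# Hypothetical Keras 2 rewrite of the builder above, switched to SimpleRNN.
def _build_model_keras2(layers, dense_activation):
    # layers = [input_dim, hidden_1, ..., hidden_n, output_dim]
    model = Sequential()
    n_recurrent = len(layers) - 2
    for i in range(n_recurrent):
        kwargs = dict(
            units=layers[i + 1],
            kernel_initializer='zeros',
            recurrent_initializer='zeros',
            bias_initializer='zeros',
            # every recurrent layer except the last feeds full sequences forward
            return_sequences=(i < n_recurrent - 1),
        )
        if i == 0:
            # only the first layer declares its input shape; the number of
            # timesteps is left unspecified
            kwargs['input_shape'] = (None, layers[0])
        model.add(SimpleRNN(**kwargs))
    model.add(Dense(layers[-1], activation=dense_activation,
                    kernel_initializer='zeros', bias_initializer='zeros'))
    return model
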
Example 2
Project: applications   Author: geomstats   File: recurrent_test.py    MIT License
def test_masking_layer():
    '''This test is based on a previously failing issue here:
    https://github.com/keras-team/keras/issues/1567
    '''
    inputs = np.random.random((6, 3, 4))
    targets = np.abs(np.random.random((6, 3, 5)))
    targets /= targets.sum(axis=-1, keepdims=True)

    model = Sequential()
    model.add(Masking(input_shape=(3, 4)))
    model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=False))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1)

    model = Sequential()
    model.add(Masking(input_shape=(3, 4)))
    model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=True))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1) 
Example 3
Project: CAPTCHA-breaking   Author: lllcho   File: test_recurrent.py    MIT License
def test_simple(self):
        _runner(recurrent.SimpleRNN) 
Example 4
Project: distnet   Author: ssamot   File: neuralnetworks.py    GNU General Public License v2.0
def __init__(self, embed_hidden_size=size, sent_hidden_size=size, query_hidden_size=size,
                 deep_hidden_size=size, RNN=SimpleRNN):
        self.deep_hidden_size = deep_hidden_size

        self.embed_hidden_size = embed_hidden_size
        self.sent_hidden_size = sent_hidden_size
        self.query_hidden_size = query_hidden_size
        self.RNN = RNN 
Example 5
Project: allen-ai-science-qa   Author: arranger1044   File: utils.py    GNU General Public License v3.0
def get_recurrent_layer(model_name, input_size, output_size, return_sequences=False):
    layer = None
    if model_name == 'rnn':
        layer = SimpleRNN(input_dim=input_size, output_dim=output_size, return_sequences=return_sequences)
    elif model_name == 'lstm':
        layer = LSTM(input_dim=input_size, output_dim=output_size, return_sequences=return_sequences)
    elif model_name == 'gru':
        layer = GRU(input_dim=input_size, output_dim=output_size, return_sequences=return_sequences)
    if layer is None:
        raise ValueError('Unknown recurrent layer: %s' % model_name)
    return layer 
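
A hypothetical call to this factory (argument values made up for illustration) would be:

# Hypothetical usage of get_recurrent_layer(); sizes are illustrative only.
layer = get_recurrent_layer('rnn', input_size=300, output_size=128,
                            return_sequences=True)
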
Example 6
Project: conv-match   Author: ssamot   File: neuralnetworks.py    GNU General Public License v2.0
def __init__(self, embed_hidden_size=size, sent_hidden_size=size, query_hidden_size=size,
                 deep_hidden_size=size, RNN=SimpleRNN):
        self.deep_hidden_size = deep_hidden_size

        self.embed_hidden_size = embed_hidden_size
        self.sent_hidden_size = sent_hidden_size
        self.query_hidden_size = query_hidden_size
        self.RNN = RNN 
Example 7
Project: neuralforecast   Author: maxpumperla   File: recurrent.py    MIT License
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        if self.stateful:
            self.reset_states()
        else:
            # initial states: all-zero tensor of shape (output_dim)
            self.states = [None]
        input_dim = input_shape[2]
        self.input_dim = input_dim

        self.W = self.init((input_dim, self.output_dim),
                           name='{}_W'.format(self.name))
        # Only change in build compared to SimpleRNN:
        # U is of shape (inner_input_dim, output_dim) now.
        self.U = self.inner_init((self.inner_input_dim, self.output_dim),
                                 name='{}_U'.format(self.name))
        self.b = K.zeros((self.output_dim,), name='{}_b'.format(self.name))

        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        if self.U_regularizer:
            self.U_regularizer.set_param(self.U)
            self.regularizers.append(self.U_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        self.trainable_weights = [self.W, self.U, self.b]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights 
Example 8
Project: drider   Author: w4nderlust   File: utils.py    MIT License
def get_recurrent_layer(model_name, input_size, output_size, return_sequences=False):
    layer = None
    if model_name == 'rnn':
        layer = SimpleRNN(input_dim=input_size, output_dim=output_size, return_sequences=return_sequences)
    elif model_name == 'lstm':
        layer = LSTM(input_dim=input_size, output_dim=output_size, return_sequences=return_sequences)
    elif model_name == 'gru':
        layer = GRU(input_dim=input_size, output_dim=output_size, return_sequences=return_sequences)
    if layer is None:
        raise ValueError('Unknown recurrent layer: %s' % model_name)
    return layer 
Example 9
Project: applications   Author: geomstats   File: recurrent_test.py    MIT License
def rnn_test(f):
    """
    All the recurrent layers share the same interface,
    so we can run through them with a single function.
    """
    f = keras_test(f)
    return pytest.mark.parametrize('layer_class', [
        recurrent.SimpleRNN,
        recurrent.GRU,
        recurrent.LSTM
    ])(f) 
Example 10
Project: distnet   Author: ssamot   File: neuralnetworks.py    GNU General Public License v2.0
def distancenet(self, vocab_size, output_size,  maxsize = 1, hop_depth = -1, dropout = False, d_perc = 1,  type = "CCE"):
        print(bcolors.UNDERLINE + 'Building nn model...' + bcolors.ENDC)


        sentrnn = Sequential()
        emb = Embedding(vocab_size, self.embed_hidden_size, mask_zero=False,W_constraint=mx(), W_regularizer=reg(), init = init_function)
        sentrnn.add(emb)

        sentrnn.add(MaxPooling1D(pool_length=maxsize))
        #sentrnn.add(UpSample1D(length=4))
        #sentrnn.add(GRU( self.query_hidden_size, return_sequences=True,activation = "elu", init = init_function))
        #sentrnn.add(DownSample1D(length=maxsize))
        #sentrnn.add(Dropout(0.6))
        # sentrnn.add(TimeDistributedDense(self.sent_hidden_size, activation = "elu", init = init_function))
        # sentrnn.add(Dropout(0.2))


        qrnn = Sequential()


        emb = Embedding(vocab_size, self.embed_hidden_size, mask_zero=False,W_constraint=mx(), W_regularizer=reg(), init = init_function)
        qrnn.add(emb)

        qrnn.add(SimpleRNN( self.query_hidden_size, return_sequences=False,activation = "leakyrelu", init = init_function))
        #qrnn.add(BatchNormalization(mode = 1, momentum=0.9))
        #qrnn.add(Dense)
        #qrnn.add(AttentionRecurrent(self.query_hidden_size))

        init_qa = [sentrnn, qrnn]
        past = []
        for i in range(hop_depth):
            hop = Sequential()
            l_size = self.sent_hidden_size
            hop.add(AttentionMerge(init_qa + past, input_shape = (None, None, l_size), mode = "distance"))
            hop.add(Dropout(0.1))
            hop.add(TimeDistributedDense(self.sent_hidden_size, activation = "leakyrelu", init = init_function))
            hop.add(Dropout(0.1))
            hop.add(AttentionRecurrent(self.sent_hidden_size, init = init_function))
            hop.add(Dropout(0.1))
            past.append(hop)


        model = hop
        model.add(bn())

        self._adddepth(model, output_size, dropout, d_perc, softmax = (type == "CCE"))

        if(type == "CCE"):
            model.compile(optimizer=self._getopt(), loss='categorical_crossentropy', class_mode='categorical')
        else:
            model.compile(optimizer=self._getopt(), loss='mse')

        return model