Python keras.layers.SimpleRNN() Examples

The following are 18 code examples of keras.layers.SimpleRNN(), collected from open-source projects. The source file, project, and license are noted above each example.
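Before the examples, here is a minimal, self-contained sketch of the SimpleRNN API. All sizes below are illustrative and are not taken from any project on this page:

import numpy as np
from keras.models import Sequential
from keras.layers import SimpleRNN, Dense

# Toy model: 8 recurrent units over sequences of 5 timesteps x 3 features.
model = Sequential()
model.add(SimpleRNN(8, input_shape=(5, 3)))  # returns only the last hidden state
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')

# Random data, just to confirm the shapes line up.
x = np.random.random((16, 5, 3))
y = np.random.randint(0, 2, size=(16, 1))
model.fit(x, y, epochs=1, verbose=0)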
Example #1
Source File: layers_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_merge_mask_3d():
    rand = lambda *shape: np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    # embeddings
    input_a = layers.Input(shape=(3,), dtype='int32')
    input_b = layers.Input(shape=(3,), dtype='int32')
    embedding = layers.Embedding(3, 4, mask_zero=True)
    embedding_a = embedding(input_a)
    embedding_b = embedding(input_b)

    # rnn
    rnn = layers.SimpleRNN(3, return_sequences=True)
    rnn_a = rnn(embedding_a)
    rnn_b = rnn(embedding_b)

    # concatenation
    merged_concat = legacy_layers.merge([rnn_a, rnn_b], mode='concat', concat_axis=-1)
    model = models.Model([input_a, input_b], [merged_concat])
    model.compile(loss='mse', optimizer='sgd')
    model.fit([rand(2, 3), rand(2, 3)], [rand(2, 3, 6)]) 
Example #2
Source File: policy.py    From rasa_wechat with Apache License 2.0
def _build_model(self, num_features, num_actions, max_history_len):
        """Build a keras model and return a compiled model.
        :param max_history_len: The maximum number of historical turns used to
                                decide on next action"""
        from keras.layers import Activation, Masking, Dense, SimpleRNN
        from keras.models import Sequential

        n_hidden = 8  # size of hidden layer in RNN
        # Build Model
        batch_input_shape = (None, max_history_len, num_features)

        model = Sequential()
        model.add(Masking(-1, batch_input_shape=batch_input_shape))
        model.add(SimpleRNN(n_hidden, batch_input_shape=batch_input_shape))
        model.add(Dense(input_dim=n_hidden, output_dim=num_actions))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])

        logger.debug(model.summary())
        return model 
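The Masking(-1, ...) layer above skips any timestep whose features are all equal to -1, which is how shorter dialogue histories are padded up to max_history_len. A minimal sketch of feeding such padded input (the sizes are illustrative, not rasa's real featurization):

import numpy as np

num_features, max_history_len = 10, 3
padded = np.full((1, max_history_len, num_features), -1.0)  # all-(-1) rows get masked
padded[0, 1:, :] = np.random.random((2, num_features))      # two real turns
# model.predict(padded) would then ignore the first, padded timestep.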
Example #3
Source File: test_views.py    From Fabrik with GNU General Public License v3.0
def test_keras_import(self):
        model = Sequential()
        model.add(LSTM(64, return_sequences=True, input_shape=(10, 64)))
        model.add(SimpleRNN(32, return_sequences=True))
        model.add(GRU(10, kernel_regularizer=regularizers.l2(0.01),
                      bias_regularizer=regularizers.l2(0.01), recurrent_regularizer=regularizers.l2(0.01),
                      activity_regularizer=regularizers.l2(0.01), kernel_constraint='max_norm',
                      bias_constraint='max_norm', recurrent_constraint='max_norm'))
        model.build()
        json_string = Model.to_json(model)
        with open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'w') as out:
            json.dump(json.loads(json_string), out, indent=4)
        sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.json'), 'r')
        response = self.client.post(reverse('keras-import'), {'file': sample_file})
        response = json.loads(response.content)
        layerId = sorted(response['net'].keys())
        self.assertEqual(response['result'], 'success')
        self.assertGreaterEqual(len(response['net'][layerId[1]]['params']), 7)
        self.assertGreaterEqual(len(response['net'][layerId[3]]['params']), 7)
        self.assertGreaterEqual(len(response['net'][layerId[6]]['params']), 7)


Example #4
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_medium_no_sequence_simple_rnn_random(self):
        np.random.seed(1988)
        input_dim = 10
        input_length = 1
        num_channels = 10

        # Define a model
        model = Sequential()
        model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights(
            [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
        )

        # Test the keras model
        self._test_model(model) 
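The weight-setting idiom used throughout these coremltools tests, np.random.rand(*w.shape) * 0.2 - 0.1, draws each weight uniformly from [-0.1, 0.1); keeping the weights small presumably avoids saturating the tanh activations during the numeric comparison between Keras and the converted model.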
Example #5
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_rnn_seq(self):
        np.random.seed(1988)
        input_dim = 11
        input_length = 5

        # Define a model
        model = Sequential()
        model.add(
            SimpleRNN(20, input_shape=(input_length, input_dim), return_sequences=False)
        )

        # Set some random weights
        model.set_weights(
            [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
        )

        # Test the keras model
        self._test_model(model) 
Example #6
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_seq2seq_rnn_random(self):
        np.random.seed(1988)
        input_dim = 2
        input_length = 4
        num_channels = 3

        # Define a model
        model = Sequential()
        model.add(
            SimpleRNN(
                num_channels,
                input_shape=(input_length, input_dim),
                return_sequences=True,
            )
        )

        # Set some random weights
        model.set_weights(
            [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
        )

        # Test the keras model
        self._test_model(model) 
Example #7
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_sequence_simple_rnn_random(self):
        np.random.seed(1988)
        input_dim = 2
        input_length = 4
        num_channels = 3

        # Define a model
        model = Sequential()
        model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights(
            [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
        )

        # Test the keras model
        self._test_model(model) 
Example #8
Source File: wrappers_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_Bidirectional_trainable():
    # test layers that need learning_phase to be set
    x = Input(shape=(3, 2))
    layer = wrappers.Bidirectional(layers.SimpleRNN(3))
    _ = layer(x)
    assert len(layer.trainable_weights) == 6
    layer.trainable = False
    assert len(layer.trainable_weights) == 0
    layer.trainable = True
    assert len(layer.trainable_weights) == 6 
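The expected count of 6 follows from the layer's structure: a SimpleRNN holds three weight arrays (kernel, recurrent kernel, and bias), and Bidirectional wraps a forward and a backward copy, giving 2 x 3 = 6 trainable arrays.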
Example #9
Source File: test_temporal_data_tasks.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_temporal_classification_functional():
    '''
    Classify temporal sequences of float numbers
    of length 3 into 2 classes using
    a single layer of SimpleRNN units and softmax applied
    to the last activations of the units
    '''
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 4),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    inputs = layers.Input(shape=(x_train.shape[1], x_train.shape[2]))
    x = layers.SimpleRNN(8)(inputs)
    outputs = layers.Dense(y_train.shape[-1], activation='softmax')(x)
    model = keras.models.Model(inputs, outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=4, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert(history.history['acc'][-1] >= 0.8) 
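Note that the history key is 'acc' rather than 'accuracy': older Keras versions (before 2.3) abbreviated the metric name in History.history.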
Example #10
Source File: vae_definition.py    From MIDI-VAE with MIT License
def _build_composer_decoder_at_notes_output(self, composer_notes_input):

        if self.cell_type == 'SimpleRNN': composer_notes_decoder_prediction = SimpleRNN(self.lstm_size, return_sequences=False, activation=self.lstm_activation, name='rnn_composer_decoder_at_notes')(composer_notes_input)
        if self.cell_type == 'LSTM': composer_notes_decoder_prediction = LSTM(self.lstm_size, return_sequences=False, activation=self.lstm_activation, name='lstm_composer_decoder_at_notes')(composer_notes_input)
        if self.cell_type == 'GRU': composer_notes_decoder_prediction = GRU(self.lstm_size, return_sequences=False, activation=self.lstm_activation, name='gru_composer_decoder_at_notes')(composer_notes_input)
        composer_notes_decoder_prediction = Dense(self.num_composers, activation=self.composer_decoder_at_notes_activation)(composer_notes_decoder_prediction)
        return composer_notes_decoder_prediction 
Example #11
Source File: vae_definition.py    From MIDI-VAE with MIT License
def _build_composer_decoder_at_instrument_output(self, composer_instrument_input):

        if self.cell_type == 'SimpleRNN': composer_instrument_decoder_prediction = SimpleRNN(self.lstm_size, return_sequences=False, activation=self.lstm_activation, name='rnn_composer_decoder_at_instrument')(composer_instrument_input)
        if self.cell_type == 'LSTM': composer_instrument_decoder_prediction = LSTM(self.lstm_size, return_sequences=False, activation=self.lstm_activation, name='lstm_composer_decoder_at_instrument')(composer_instrument_input)
        if self.cell_type == 'GRU': composer_instrument_decoder_prediction = GRU(self.lstm_size, return_sequences=False, activation=self.lstm_activation, name='gru_composer_decoder_at_instrument')(composer_instrument_input)
        composer_instrument_decoder_prediction = Dense(self.num_composers, activation=self.composer_decoder_at_instrument_activation)(composer_instrument_decoder_prediction)
        return composer_instrument_decoder_prediction



# prepares encoder input for a song that is already split
# X: input pitches of shape (num_samples, input_length, different_pitches)
# I: instruments for each voice, of shape (max_voices, different_instruments)
# V: velocity information of shape (num_samples, output_length==input_length); values lie between 0 and 1 when a note is sounding, and 1 denotes MAX_VELOCITY
# D: duration information of shape (num_samples, output_length==input_length); values are 1 while a note is held
Example #12
Source File: layers.py    From asr-study with MIT License
def recurrent(output_dim, model='keras_lstm', activation='tanh',
              regularizer=None, dropout=0., **kwargs):
    if model == 'rnn':
        return keras_layers.SimpleRNN(output_dim, activation=activation,
                                      W_regularizer=regularizer,
                                      U_regularizer=regularizer,
                                      dropout_W=dropout, dropout_U=dropout, consume_less='gpu',
                                      **kwargs)
    if model == 'gru':
        return keras_layers.GRU(output_dim, activation=activation,
                                W_regularizer=regularizer,
                                U_regularizer=regularizer, dropout_W=dropout,
                                dropout_U=dropout,
                                consume_less='gpu', **kwargs)
    if model == 'keras_lstm':
        return keras_layers.LSTM(output_dim, activation=activation,
                                 W_regularizer=regularizer,
                                 U_regularizer=regularizer,
                                 dropout_W=dropout, dropout_U=dropout,
                                 consume_less='gpu', **kwargs)
    if model == 'rhn':
        return RHN(output_dim, depth=1,
                   bias_init=highway_bias_initializer,
                   activation=activation, layer_norm=False, ln_gain_init='one',
                   ln_bias_init='zero', mi=False,
                   W_regularizer=regularizer, U_regularizer=regularizer,
                   dropout_W=dropout, dropout_U=dropout, consume_less='gpu',
                   **kwargs)

    if model == 'lstm':
        return LSTM(output_dim, activation=activation,
                    W_regularizer=regularizer, U_regularizer=regularizer,
                    dropout_W=dropout, dropout_U=dropout,
                    consume_less='gpu', **kwargs)
    raise ValueError('model %s was not recognized' % model) 
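A hypothetical call to this helper, assuming the Keras 1.x argument names it relies on (W_regularizer, dropout_W, consume_less, and so on):

from keras.regularizers import l2

# 256-unit GRU with l2-regularized input and recurrent weights
# and 10% dropout on both connections.
layer = recurrent(256, model='gru', regularizer=l2(0.01), dropout=0.1)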
Example #13
Source File: test_views.py    From Fabrik with GNU General Public License v3.0
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input2'], 'l1': net['RNN']}
        net['l0']['connection']['output'].append('l1')
        # # net = get_shapes(net)
        inp = data(net['l0'], '', 'l0')['l0']
        net = recurrent(net['l1'], [inp], 'l1')
        model = Model(inp, net['l1'])
        self.assertEqual(model.layers[1].__class__.__name__, 'SimpleRNN') 
Example #14
Source File: learning.py    From Automated-Equity-Asset-Selection-and-Allocation with MIT License
def RNN(self):
        featureNum = len(self.X[0]) / 6
        X = np.empty((len(self.X), 6, featureNum))
        X_test = np.empty((len(self.X_test), 6, featureNum))
        self.X = self.X.reshape(len(self.X), featureNum, 6)
        self.X_test = self.X_test.reshape(len(self.X_test), featureNum, 6)
        for i in range(len(self.X)):
            X[i] = self.X[i].transpose()
        for i in range(len(self.X_test)):
            X_test[i] = self.X_test[i].transpose()

        np.random.seed(0)
        model = Sequential()
        model.add(SimpleRNN(20, batch_input_shape=(None, 6, 28)))
        model.add(Dropout(0.1))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        model.fit(X, self.y, verbose=2)
        predicted = model.predict_classes(X_test, verbose=0)
        # Final evaluation of the model
        scores = model.evaluate(X_test, self.expected, verbose=0)
        print("Accuracy: %.2f%%" % (scores[1]*100))
        self.ptLocal(self.fout, "Classification report for classifier:\n%s", \
            (metrics.classification_report(self.expected, predicted)))
        self.ptLocal(self.fout, "Confusion matrix:\n%s", \
            metrics.confusion_matrix(self.expected, predicted))
        self.ptLocal(self.fout, "Random pick successful rate: %.3f\n",\
            round(float(sum(self.expected)) / len(self.expected), 3)) 
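The reshape-and-transpose loop at the top converts each flat feature vector into a (6 timesteps x featureNum features) matrix so that SimpleRNN sees time as the second axis; note that batch_input_shape=(None, 6, 28) then hard-codes featureNum = 28.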
Example #15
Source File: test_keras.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_simple_rnn(self):
        """
        Test the conversion of a simple RNN layer.
        """
        from keras.layers import SimpleRNN

        # Create a simple Keras model
        model = Sequential()
        model.add(SimpleRNN(32, input_dim=32, input_length=10))

        input_names = ["input"]
        output_names = ["output"]
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField("neuralNetwork"))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names) + 1)
        self.assertEquals(input_names[0], spec.description.input[0].name)

        self.assertEquals(32, spec.description.input[1].type.multiArrayType.shape[0])

        self.assertEquals(len(spec.description.output), len(output_names) + 1)
        self.assertEquals(output_names[0], spec.description.output[0].name)
        self.assertEquals(32, spec.description.output[0].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.output[1].type.multiArrayType.shape[0])

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.simpleRecurrent)
        self.assertEquals(len(layer_0.input), 2)
        self.assertEquals(len(layer_0.output), 2) 
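The extra input and output being asserted here (lengths of len(input_names) + 1 and len(output_names) + 1, and two inputs/outputs on the layer itself) are the recurrent layer's hidden state: Core ML exposes the state of a simpleRecurrent layer as an additional model input and output.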
Example #16
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_no_sequence_simple_rnn_random(self):
        np.random.seed(1988)
        input_dim = 10
        input_length = 1
        num_channels = 1

        # Define a model
        model = Sequential()
        model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model) 
Example #17
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_lstm_td(self):
        np.random.seed(1988)
        input_dim = 2
        input_length = 4
        num_channels = 3

        # Define a model
        model = Sequential()
        model.add(
            SimpleRNN(
                num_channels,
                return_sequences=True,
                input_shape=(input_length, input_dim),
            )
        )
        model.add(TimeDistributed(Dense(5)))

        # Set some random weights
        model.set_weights(
            [np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
        )

        # Test the keras model
        self._test_model(model)

Example #18
Source File: test_keras2.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_simple_rnn(self):
        """
        Test the conversion of a simple RNN layer.
        """
        from keras.layers import SimpleRNN

        # Create a simple Keras model
        model = Sequential()
        model.add(SimpleRNN(32, input_shape=(10, 32)))

        input_names = ["input"]
        output_names = ["output"]
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField("neuralNetwork"))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names) + 1)
        self.assertEquals(input_names[0], spec.description.input[0].name)

        self.assertEquals(32, spec.description.input[1].type.multiArrayType.shape[0])

        self.assertEquals(len(spec.description.output), len(output_names) + 1)
        self.assertEquals(output_names[0], spec.description.output[0].name)
        self.assertEquals(32, spec.description.output[0].type.multiArrayType.shape[0])
        self.assertEquals(32, spec.description.output[1].type.multiArrayType.shape[0])

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.simpleRecurrent)
        self.assertEquals(len(layer_0.input), 2)
        self.assertEquals(len(layer_0.output), 2)