Python keras.Input() Examples
The following are 16 code examples showing how to use keras.Input(). These examples are extracted from open source projects; you can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module keras.
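Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: keras.Input() creates a symbolic placeholder tensor (the batch dimension is left implicit when you pass shape), layers are called on that tensor to build a graph, and keras.models.Model ties inputs to outputs. The layer sizes and data below are arbitrary illustration values, not taken from any of the projects.

import numpy as np
import keras

# Placeholder for batches of 16-dimensional float vectors; the batch
# dimension is left unspecified (None).
x = keras.Input(shape=(16,), dtype='float32', name='features')

# Calling layers on the symbolic tensor builds the computation graph.
h = keras.layers.Dense(32, activation='relu')(x)
y = keras.layers.Dense(1)(h)

# Model ties the input placeholder(s) to the output tensor(s).
model = keras.models.Model(inputs=x, outputs=y)
model.compile(optimizer='adam', loss='mse')

# Fit on random data just to show that the shapes line up.
model.fit(np.random.random((8, 16)), np.random.random((8, 1)), epochs=1)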
Example 1
Project: social_lstm_keras_tf  Author: t2kasa  File: my_social_model.py  License: GNU General Public License v3.0

def __init__(self, config: ModelConfig) -> None:
    # pxy_dim and out_dim are constants defined elsewhere in the original module.
    self.x_input = Input((config.obs_len, config.max_n_peds, pxy_dim))
    # y_input = Input((config.obs_len, config.max_n_peds, pxy_dim))
    self.grid_input = Input((config.obs_len, config.max_n_peds,
                             config.max_n_peds, config.grid_side_squared))
    self.zeros_input = Input((config.obs_len, config.max_n_peds,
                              config.lstm_state_dim))

    # Social LSTM layers
    self.lstm_layer = LSTM(config.lstm_state_dim, return_state=True)
    self.W_e_relu = Dense(config.emb_dim, activation="relu")
    self.W_a_relu = Dense(config.emb_dim, activation="relu")
    self.W_p = Dense(out_dim)

    self._build_model(config)
Example 2
Project: bi-lstm-crf  Author: GlassyWing  File: core.py  License: Apache License 2.0

def __build_model(self, emb_matrix=None):
    word_input = Input(shape=(None,), dtype='int32', name="word_input")
    word_emb = Embedding(self.vocab_size + 1, self.embed_dim,
                         weights=[emb_matrix] if emb_matrix is not None else None,
                         trainable=True if emb_matrix is None else False,
                         name='word_emb')(word_input)

    bilstm_output = Bidirectional(LSTM(self.bi_lstm_units // 2,
                                       return_sequences=True))(word_emb)
    bilstm_output = Dropout(self.dropout_rate)(bilstm_output)

    output = Dense(self.chunk_size + 1, kernel_initializer="he_normal")(bilstm_output)
    output = CRF(self.chunk_size + 1, sparse_target=self.sparse_target)(output)

    model = Model([word_input], [output])

    parallel_model = model
    if self.num_gpu > 1:
        parallel_model = multi_gpu_model(model, gpus=self.num_gpu)

    parallel_model.compile(optimizer=self.optimizer, loss=crf_loss,
                           metrics=[crf_accuracy])
    return model, parallel_model
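The CRF layer, crf_loss, and crf_accuracy used above are not part of core Keras; they come from the keras-contrib add-on package. A sketch of the imports this snippet presumably relies on, inferred from the symbol names, so treat it as an assumption rather than the project's actual import block:

# Presumed imports for the snippet above; keras-contrib supplies the CRF pieces.
from keras.layers import Input, Embedding, Bidirectional, LSTM, Dropout, Dense
from keras.models import Model
from keras.utils import multi_gpu_model
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_accuracy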
Example 3
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: cudnn_recurrent_test.py  License: MIT License

def test_specify_initial_state_keras_tensor():
    input_size = 10
    timesteps = 6
    units = 2
    num_samples = 32
    for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
        num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1

        inputs = keras.Input((timesteps, input_size))
        initial_state = [keras.Input((units,)) for _ in range(num_states)]
        layer = layer_class(units)
        if len(initial_state) == 1:
            output = layer(inputs, initial_state=initial_state[0])
        else:
            output = layer(inputs, initial_state=initial_state)
        assert initial_state[0] in layer._inbound_nodes[0].input_tensors

        model = keras.models.Model([inputs] + initial_state, output)
        model.compile(loss='categorical_crossentropy', optimizer='adam')

        inputs = np.random.random((num_samples, timesteps, input_size))
        initial_state = [np.random.random((num_samples, units))
                         for _ in range(num_states)]
        targets = np.random.random((num_samples, units))
        model.fit([inputs] + initial_state, targets)
Example 4
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: recurrent_test.py  License: MIT License

def test_specify_initial_state_keras_tensor(layer_class):
    # timesteps, embedding_dim, units, num_samples are module-level test
    # constants in the original file.
    num_states = 2 if layer_class is recurrent.LSTM else 1

    # Test with Keras tensor
    inputs = Input((timesteps, embedding_dim))
    initial_state = [Input((units,)) for _ in range(num_states)]
    layer = layer_class(units)
    if len(initial_state) == 1:
        output = layer(inputs, initial_state=initial_state[0])
    else:
        output = layer(inputs, initial_state=initial_state)
    assert initial_state[0] in layer._inbound_nodes[0].input_tensors

    model = Model([inputs] + initial_state, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [np.random.random((num_samples, units))
                     for _ in range(num_states)]
    targets = np.random.random((num_samples, units))
    model.fit([inputs] + initial_state, targets)
Example 5
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: recurrent_test.py  License: MIT License

def test_specify_initial_state_non_keras_tensor(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1

    # Test with non-Keras tensor
    inputs = Input((timesteps, embedding_dim))
    initial_state = [K.random_normal_variable((num_samples, units), 0, 1)
                     for _ in range(num_states)]
    layer = layer_class(units)
    output = layer(inputs, initial_state=initial_state)

    model = Model(inputs, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    targets = np.random.random((num_samples, units))
    model.fit(inputs, targets)
Example 6
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: recurrent_test.py  License: MIT License

def test_initial_states_as_other_inputs(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1

    # Test with Keras tensor
    main_inputs = Input((timesteps, embedding_dim))
    initial_state = [Input((units,)) for _ in range(num_states)]
    inputs = [main_inputs] + initial_state

    layer = layer_class(units)
    output = layer(inputs)
    assert initial_state[0] in layer._inbound_nodes[0].input_tensors

    model = Model(inputs, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [np.random.random((num_samples, units))
                     for _ in range(num_states)]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([main_inputs] + initial_state, targets)
Example 7
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: recurrent_test.py  License: MIT License

def test_stacked_rnn_attributes():
    cells = [recurrent.LSTMCell(3),
             recurrent.LSTMCell(3, kernel_regularizer='l2')]
    layer = recurrent.RNN(cells)
    layer.build((None, None, 5))

    # Test regularization losses
    assert len(layer.losses) == 1

    # Test weights
    assert len(layer.trainable_weights) == 6
    cells[0].trainable = False
    assert len(layer.trainable_weights) == 3
    assert len(layer.non_trainable_weights) == 3

    # Test `get_losses_for`
    x = keras.Input((None, 5))
    y = K.sum(x)
    cells[0].add_loss(y, inputs=x)
    assert layer.get_losses_for(x) == [y]
Example 8
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: cudnn_recurrent_test.py  License: MIT License

def test_return_state():
    input_size = 10
    timesteps = 6
    units = 2
    num_samples = 32
    for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:
        num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1

        inputs = keras.Input(batch_shape=(num_samples, timesteps, input_size))
        layer = layer_class(units, return_state=True, stateful=True)
        outputs = layer(inputs)
        output, state = outputs[0], outputs[1:]
        assert len(state) == num_states
        model = keras.models.Model(inputs, state[0])

        inputs = np.random.random((num_samples, timesteps, input_size))
        state = model.predict(inputs)
        np.testing.assert_allclose(
            keras.backend.eval(layer.states[0]), state, atol=1e-4)
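As Example 8 shows, a stateful RNN layer needs a fixed batch size, so its input is declared with batch_shape instead of shape. A minimal sketch of the difference (the dimensions are arbitrary illustration values):

import keras

# shape leaves the batch dimension open: tensor shape (None, 6, 10).
a = keras.Input(shape=(6, 10))

# batch_shape pins the batch size to 32, as stateful layers require:
# tensor shape (32, 6, 10).
b = keras.Input(batch_shape=(32, 6, 10))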
Example 9
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: recurrent_test.py  License: MIT License

def test_specify_state_with_masking(layer_class):
    '''This test is based on a previously failing issue here:
    https://github.com/keras-team/keras/issues/1567
    '''
    num_states = 2 if layer_class is recurrent.LSTM else 1

    inputs = Input((timesteps, embedding_dim))
    _ = Masking()(inputs)
    initial_state = [Input((units,)) for _ in range(num_states)]
    output = layer_class(units)(inputs, initial_state=initial_state)

    model = Model([inputs] + initial_state, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [np.random.random((num_samples, units))
                     for _ in range(num_states)]
    targets = np.random.random((num_samples, units))
    model.fit([inputs] + initial_state, targets)
Example 10
Project: UnDeepVO  Author: drmaj  File: autoencoder_train.py  License: MIT License

def main(args):
    train_gen, test_gen, train_samples, test_samples = get_stereo_image_generators(
        args.data_path + '/train',
        args.data_path + '/test',
        img_rows=args.input_height,
        img_cols=args.input_width,
        batch_size=args.batch_size,
        shuffle=False)

    image_generator = get_stereo_image_generators('data/train', 'data/test',
                                                  batch_size=1, shuffle=False)
    input_image = image_generator[0].__next__()[0][0]

    input_shape = (args.input_height, args.input_width, 3)
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    ae = AutoEncoderModel(left_input, right_input, args.learning_rate)
    ae.model.summary()
    plot_model(ae.model, show_shapes=True, to_file='scratch/ae.png')

    ae.model.fit_generator(train_gen,
                           steps_per_epoch=train_samples // args.batch_size,
                           # validation_data=test_gen,
                           # validation_steps=test_samples // args.batch_size,
                           epochs=args.num_epochs,
                           verbose=1,
                           callbacks=[VisualizeOutput(input_image),
                                      TensorBoard(log_dir=args.log_directory,
                                                  batch_size=args.batch_size,
                                                  write_graph=False),
                                      ModelCheckpoint(os.path.join(args.models_dir,
                                                                   args.model_name + '.h5'),
                                                      monitor='loss',
                                                      verbose=1,
                                                      save_best_only=True)])
Example 11
Project: keras-pandas  Author: bjherger  File: Numerical.py  License: MIT License

def input_nub_generator(variable, transformed_observations):
    """
    Generate an input layer and input 'nub' for a Keras network.

     - input_layer: The input layer accepts data from the outside world.
     - input_nub: The input nub will always include the input_layer as its
       first layer. It may also include other layers for handling the data
       type in specific ways.

    :param variable: Name of the variable
    :type variable: str
    :param transformed_observations: A dataframe, containing either the
        specified variable, or derived variables
    :type transformed_observations: pandas.DataFrame
    :return: A tuple containing the input layer, and the last layer of the nub
    """
    # Get transformed data for shaping
    transformed = transformed_observations[variable].as_matrix()

    # Set up dimensions for input_layer layer
    if len(transformed.shape) >= 2:
        input_sequence_length = int(transformed.shape[1])
    else:
        input_sequence_length = 1

    # Create input_layer layer
    input_layer = keras.Input(shape=(input_sequence_length,), dtype='float32',
                              name=lib.namespace_conversion('input_{}'.format(variable)))
    input_nub = input_layer

    # Return, in format of input_layer, last variable-specific layer
    return input_layer, input_nub
Example 12
Project: keras-pandas  Author: bjherger  File: Boolean.py  License: MIT License

def input_nub_generator(self, variable, transformed_observations):
    """
    Generate an input layer and input 'nub' for a Keras network.

     - input_layer: The input layer accepts data from the outside world.
     - input_nub: The input nub will always include the input_layer as its
       first layer. It may also include other layers for handling the data
       type in specific ways.

    :param variable: Name of the variable
    :type variable: str
    :param transformed_observations: A dataframe, containing either the
        specified variable, or derived variables
    :type transformed_observations: pandas.DataFrame
    :return: A tuple containing the input layer, and the last layer of the nub
    """
    transformed = transformed_observations[variable].as_matrix()

    # Set up dimensions for input_layer layer
    if len(transformed.shape) >= 2:
        input_sequence_length = int(transformed.shape[1])
    else:
        input_sequence_length = 1

    # Create input_layer layer
    input_layer = keras.Input(shape=(input_sequence_length,),
                              name=lib.namespace_conversion('input_{}'.format(variable)))
    input_nub = input_layer

    # Return, in format of input_layer, last variable-specific layer
    return input_layer, input_nub
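Note that both keras-pandas examples read the column with .as_matrix(), a pandas method that was deprecated in pandas 0.23 and removed in 1.0. On a modern pandas the equivalent call would be:

# Replacement for the deprecated .as_matrix() on pandas >= 0.24.
transformed = transformed_observations[variable].to_numpy()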
Example 13
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: test_sequential_model.py  License: MIT License

def test_clone_sequential_model():
    val_a = np.random.random((10, 4))
    val_out = np.random.random((10, 4))

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(4, input_shape=(4,)))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dropout(0.5))
    model.add(keras.layers.Dense(4))

    if K.backend() == 'tensorflow':
        # Everything should work in a new session.
        K.clear_session()

    # With placeholder creation
    new_model = keras.models.clone_model(model)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(val_a, val_out)

    # On top of new tensor
    input_a = keras.Input(shape=(4,))
    new_model = keras.models.clone_model(
        model, input_tensors=input_a)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(val_a, val_out)

    # On top of new, non-Keras tensor
    input_a = keras.backend.variable(val_a)
    new_model = keras.models.clone_model(
        model, input_tensors=input_a)
    new_model.compile('rmsprop', 'mse')
    new_model.train_on_batch(None, val_out)
Example 14
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: optimizers_test.py  License: MIT License

def _test_no_grad(optimizer):
    inp = Input([3])
    x = Dense(10)(inp)
    # argmax has no gradient, so fitting through this Lambda must fail.
    x = Lambda(lambda l: 1.0 * K.reshape(K.cast(K.argmax(l), 'float32'), [-1, 1]))(x)
    mod = Model(inp, x)
    mod.compile(optimizer, 'mse')
    with pytest.raises(ValueError):
        mod.fit(np.zeros([10, 3]), np.zeros([10, 1], np.float32),
                batch_size=10, epochs=10)
Example 15
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: multi_gpu_test.py  License: MIT License

def multi_gpu_test_multi_io_model():
    print('####### test multi-io model')
    num_samples = 1000
    input_dim_a = 10
    input_dim_b = 5
    output_dim_a = 1
    output_dim_b = 2
    hidden_dim = 10
    gpus = 8
    target_gpu_id = [0, 2, 4]
    epochs = 2

    input_a = keras.Input((input_dim_a,))
    input_b = keras.Input((input_dim_b,))

    a = keras.layers.Dense(hidden_dim)(input_a)
    b = keras.layers.Dense(hidden_dim)(input_b)

    c = keras.layers.concatenate([a, b])
    output_a = keras.layers.Dense(output_dim_a)(c)
    output_b = keras.layers.Dense(output_dim_b)(c)

    model = keras.models.Model([input_a, input_b], [output_a, output_b])

    a_x = np.random.random((num_samples, input_dim_a))
    b_x = np.random.random((num_samples, input_dim_b))
    a_y = np.random.random((num_samples, output_dim_a))
    b_y = np.random.random((num_samples, output_dim_b))

    parallel_model = multi_gpu_model(model, gpus=gpus)
    parallel_model.compile(loss='mse', optimizer='rmsprop')
    parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs)

    parallel_model = multi_gpu_model(model, gpus=target_gpu_id)
    parallel_model.compile(loss='mse', optimizer='rmsprop')
    parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs)
Example 16
Project: DeepLearning_Wavelet-LSTM  Author: hello-sea  File: cudnn_recurrent_test.py  License: MIT License

def test_cudnn_rnn_timing(rnn_type):
    input_size = 1000
    timesteps = 60
    units = 256
    num_samples = 10000

    times = []
    for use_cudnn in [True, False]:
        start_time = time.time()
        inputs = keras.layers.Input(shape=(None, input_size))
        if use_cudnn:
            if rnn_type == 'lstm':
                layer = keras.layers.CuDNNLSTM(units)
            else:
                layer = keras.layers.CuDNNGRU(units)
        else:
            if rnn_type == 'lstm':
                layer = keras.layers.LSTM(units)
            else:
                layer = keras.layers.GRU(units)
        outputs = layer(inputs)

        model = keras.models.Model(inputs, outputs)
        model.compile('sgd', 'mse')

        x = np.random.random((num_samples, timesteps, input_size))
        y = np.random.random((num_samples, units))
        model.fit(x, y, epochs=4, batch_size=32)

        times.append(time.time() - start_time)

    speedup = times[1] / times[0]
    print(rnn_type, 'speedup', speedup)
    assert speedup > 3
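A closing note on the timing test above: the CuDNNLSTM and CuDNNGRU layers exist only on the TensorFlow backend and require a GPU, so the asserted speedup is measured against the generic LSTM/GRU implementations. In the later tf.keras 2.x API the separate CuDNN classes were removed; keras.layers.LSTM and keras.layers.GRU dispatch to the cuDNN kernel automatically when a GPU is available and the layer arguments are compatible with it.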