Python keras.optimizers.RMSprop() Examples
The following code examples show how to use keras.optimizers.RMSprop(). They are extracted from open source projects; the originating project, author, file, and license are listed above each example.
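Before the examples themselves, here is a minimal, self-contained sketch (hypothetical, not drawn from any of the projects below) of the pattern they all share: construct an RMSprop instance, optionally setting the learning rate, rho, epsilon, and gradient clipping, and pass it to model.compile(). Note that these examples use the legacy lr argument of standalone Keras; newer releases also accept learning_rate.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import RMSprop

# Tiny MNIST-style classifier, just to give RMSprop something to optimize.
model = Sequential()
model.add(Dense(10, input_dim=784, activation='softmax'))

# rho is the decay rate of the squared-gradient moving average, epsilon the
# numerical-stability term; clipnorm caps the gradient norm, as several of
# the reinforcement-learning examples below also do.
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(lr=0.001, rho=0.9, epsilon=1e-07, clipnorm=1.0),
              metrics=['accuracy'])

# Smoke test on random data.
x = np.random.random((32, 784)).astype('float32')
y = np.eye(10)[np.random.randint(0, 10, size=32)].astype('float32')
model.train_on_batch(x, y)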
Example 1
Project: reinforcement-learning Author: rlcode File: breakout_dqn.py License: MIT License
def optimizer(self):
    a = K.placeholder(shape=(None,), dtype='int32')
    y = K.placeholder(shape=(None,), dtype='float32')
    py_x = self.model.output

    a_one_hot = K.one_hot(a, self.action_size)
    q_value = K.sum(py_x * a_one_hot, axis=1)
    error = K.abs(y - q_value)

    quadratic_part = K.clip(error, 0.0, 1.0)
    linear_part = error - quadratic_part
    loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

    optimizer = RMSprop(lr=0.00025, epsilon=0.01)
    updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
    train = K.function([self.model.input, a, y], [loss], updates=updates)

    return train

# approximate Q function using Convolution Neural Network
# state is input and Q Value of each action is output of network
Example 2
Project: reinforcement-learning Author: rlcode File: breakout_a3c.py License: MIT License
def actor_optimizer(self):
    action = K.placeholder(shape=[None, self.action_size])
    advantages = K.placeholder(shape=[None, ])

    policy = self.actor.output

    good_prob = K.sum(action * policy, axis=1)
    eligibility = K.log(good_prob + 1e-10) * advantages
    actor_loss = -K.sum(eligibility)

    entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
    entropy = K.sum(entropy)

    loss = actor_loss + 0.01 * entropy
    optimizer = RMSprop(lr=self.actor_lr, rho=0.99, epsilon=0.01)
    updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)
    train = K.function([self.actor.input, action, advantages], [loss], updates=updates)
    return train

# make loss function for Value approximation
Example 3
Project: reinforcement-learning Author: rlcode File: breakout_dueling_ddqn.py License: MIT License
def optimizer(self):
    a = K.placeholder(shape=(None,), dtype='int32')
    y = K.placeholder(shape=(None,), dtype='float32')
    py_x = self.model.output

    a_one_hot = K.one_hot(a, self.action_size)
    q_value = K.sum(py_x * a_one_hot, axis=1)
    error = K.abs(y - q_value)

    quadratic_part = K.clip(error, 0.0, 1.0)
    linear_part = error - quadratic_part
    loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

    optimizer = RMSprop(lr=0.00025, epsilon=0.01)
    updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
    train = K.function([self.model.input, a, y], [loss], updates=updates)

    return train

# approximate Q function using Convolution Neural Network
# state is input and Q Value of each action is output of network
# dueling network's Q Value is sum of advantages and state value
Example 4
Project: DogEmbeddings Author: ericzhao28 File: siamese.py License: MIT License
def SiameseNetwork(input_shape=(5880,)):
    base_network = create_base_network(input_shape)

    input_a = Input(shape=input_shape)
    input_b = Input(shape=input_shape)

    processed_a = base_network(input_a)
    processed_b = base_network(input_b)

    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([processed_a, processed_b])

    model = Model([input_a, input_b], distance)

    rms = RMSprop()
    model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])

    return model, base_network
Example 5
Project: A3C_Keras_FlappyBird Author: shalabhsingh File: train_network.py License: MIT License
def buildmodel():
    print("Model building begins")

    model = Sequential()
    keras.initializers.RandomUniform(minval=-0.1, maxval=0.1, seed=None)

    S = Input(shape=(IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS,), name='Input')
    h0 = Convolution2D(16, kernel_size=(8, 8), strides=(4, 4), activation='relu',
                       kernel_initializer='random_uniform',
                       bias_initializer='random_uniform')(S)
    h1 = Convolution2D(32, kernel_size=(4, 4), strides=(2, 2), activation='relu',
                       kernel_initializer='random_uniform',
                       bias_initializer='random_uniform')(h0)
    h2 = Flatten()(h1)
    h3 = Dense(256, activation='relu', kernel_initializer='random_uniform',
               bias_initializer='random_uniform')(h2)
    P = Dense(1, name='o_P', activation='sigmoid',
              kernel_initializer='random_uniform',
              bias_initializer='random_uniform')(h3)
    V = Dense(1, name='o_V', kernel_initializer='random_uniform',
              bias_initializer='random_uniform')(h3)

    model = Model(inputs=S, outputs=[P, V])
    rms = RMSprop(lr=LEARNING_RATE, rho=0.99, epsilon=0.1)
    model.compile(loss={'o_P': logloss, 'o_V': sumofsquares},
                  loss_weights={'o_P': 1., 'o_V': 0.5}, optimizer=rms)
    return model

# function to preprocess an image before giving as input to the neural network
Example 6
Project: wtte-rnn Author: ragulpr File: test_keras.py License: MIT License
def model_masking(discrete_time, init_alpha, max_beta):
    model = Sequential()
    model.add(Masking(mask_value=mask_value,
                      input_shape=(n_timesteps, n_features)))
    model.add(TimeDistributed(Dense(2)))
    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete', reduce_loss=False).loss_function
    else:
        loss = wtte.loss(kind='continuous', reduce_loss=False).loss_function

    model.compile(loss=loss, optimizer=RMSprop(lr=lr),
                  sample_weight_mode='temporal')
    return model
Example 7
Project: docker-python Author: Kaggle File: test_keras.py License: Apache License 2.0
def test_train(self):
    train = pd.read_csv("/input/tests/data/train.csv")
    x_train = train.iloc[:, 1:].values.astype('float32')
    y_train = to_categorical(train.iloc[:, 0].astype('int32'))

    model = Sequential()
    model.add(Dense(units=10, input_dim=784, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=RMSprop(lr=0.001),
        metrics=['accuracy'])
    model.fit(x_train, y_train, epochs=1, batch_size=32)

# Uses convnet which depends on libcudnn when running on GPU
Example 8
Project: async-rl Author: Grzego File: train.py License: MIT License
def __init__(self, action_space, batch_size=32, screen=(84, 84), swap_freq=200):
    from keras.optimizers import RMSprop
    # -----
    self.screen = screen
    self.input_depth = 1
    self.past_range = 3
    self.observation_shape = (self.input_depth * self.past_range,) + self.screen
    self.batch_size = batch_size

    self.action_value = build_network(self.observation_shape, action_space.n)
    self.action_value.compile(optimizer=RMSprop(clipnorm=1.), loss='mse')

    self.losses = deque(maxlen=25)
    self.q_values = deque(maxlen=25)
    self.swap_freq = swap_freq
    self.swap_counter = self.swap_freq
    self.unroll = np.arange(self.batch_size)
    self.frames = 0
Example 9
Project: async-rl Author: Grzego File: train.py License: MIT License
def __init__(self, action_space, screen=(84, 84), n_step=8, discount=0.99):
    from keras.optimizers import RMSprop
    # -----
    self.screen = screen
    self.input_depth = 1
    self.past_range = 3
    self.observation_shape = (self.input_depth * self.past_range,) + self.screen

    self.action_value = build_network(self.observation_shape, action_space.n)
    self.action_value.compile(optimizer=RMSprop(clipnorm=1.), loss='mse')  # clipnorm=1.

    self.action_space = action_space
    self.observations = np.zeros(self.observation_shape)
    self.last_observations = np.zeros_like(self.observations)
    # -----
    self.n_step_observations = deque(maxlen=n_step)
    self.n_step_actions = deque(maxlen=n_step)
    self.n_step_rewards = deque(maxlen=n_step)
    self.n_step = n_step
    self.discount = discount
    self.counter = 0
Example 10
Project: async-rl Author: Grzego File: train.py License: MIT License
def __init__(self, action_space, batch_size=32, screen=(84, 84), swap_freq=200):
    from keras.optimizers import RMSprop
    # -----
    self.screen = screen
    self.input_depth = 1
    self.past_range = 3
    self.observation_shape = (self.input_depth * self.past_range,) + self.screen
    self.batch_size = batch_size

    _, _, self.train_net, adventage = build_network(self.observation_shape,
                                                    action_space.n)

    self.train_net.compile(optimizer=RMSprop(epsilon=0.1, rho=0.99),
                           loss=[value_loss(), policy_loss(adventage, args.beta)])

    self.pol_loss = deque(maxlen=25)
    self.val_loss = deque(maxlen=25)
    self.values = deque(maxlen=25)
    self.entropy = deque(maxlen=25)
    self.swap_freq = swap_freq
    self.swap_counter = self.swap_freq
    self.unroll = np.arange(self.batch_size)
    self.targets = np.zeros((self.batch_size, action_space.n))
    self.counter = 0
Example 11
Project: Attention-Based-Aspect-Extraction Author: madrugado File: optimizers.py License: Apache License 2.0
def get_optimizer(args):
    clipvalue = 0
    clipnorm = 10

    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06,
                                clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False,
                            clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06,
                                clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06,
                                 clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                             clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                               clipnorm=clipnorm, clipvalue=clipvalue)

    return optimizer
Example 12
Project: image-segmentation Author: nearthlab File: trainer.py License: MIT License
def get_optimizer(config):
    if config.OPTIMIZER == 'SGD':
        return SGD(lr=config.LEARNING_RATE,
                   momentum=config.LEARNING_MOMENTUM,
                   clipnorm=config.GRADIENT_CLIP_NORM,
                   nesterov=config.NESTEROV)
    elif config.OPTIMIZER == 'RMSprop':
        return RMSprop(lr=config.LEARNING_RATE,
                       clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adagrad':
        return Adagrad(lr=config.LEARNING_RATE,
                       clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adadelta':
        return Adadelta(lr=config.LEARNING_RATE,
                        clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adam':
        return Adam(lr=config.LEARNING_RATE,
                    clipnorm=config.GRADIENT_CLIP_NORM,
                    amsgrad=config.AMSGRAD)
    elif config.OPTIMIZER == 'Adamax':
        return Adamax(lr=config.LEARNING_RATE,
                      clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Nadam':
        return Nadam(lr=config.LEARNING_RATE,
                     clipnorm=config.GRADIENT_CLIP_NORM)
    else:
        raise Exception('Unrecognized optimizer: {}'.format(config.OPTIMIZER))
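For context, a factory like the one above is driven by a config object. The following hypothetical call is a sketch of the intended usage; the SimpleNamespace stands in for the project's actual config class, which is not shown here.

from types import SimpleNamespace

# Hypothetical config; the real project defines a richer Config class.
config = SimpleNamespace(OPTIMIZER='RMSprop',
                         LEARNING_RATE=1e-3,
                         GRADIENT_CLIP_NORM=5.0)
optimizer = get_optimizer(config)  # equivalent to RMSprop(lr=1e-3, clipnorm=5.0)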
Example 13
Project: plastering Author: plastering File: ir2tagsets_seq.py License: MIT License
def fit_new(self, x, y=None):
    timesteps = x.shape[1]
    input_dim = x.shape[2]
    self.ae = Sequential()
    self.ae.add(Dense(self.latent_dim,
                      input_shape=(timesteps, input_dim,),
                      activation='relu',
                      name='enc'))
    self.ae.add(Dropout(0.2))
    self.ae.add(Dense(input_dim, activation='softmax', name='dec'))
    self.encoder = Model(inputs=self.ae.input,
                         outputs=self.ae.get_layer('enc').output)
    # rmsprop = RMSprop(lr=0.05)
    self.ae.compile(loss='categorical_crossentropy',
                    optimizer='adam',
                    metrics=['categorical_accuracy'])
    self.ae.fit(x, x, epochs=1)
Example 14
Project: plastering Author: plastering File: ir2tagsets_seq.py License: MIT License
def fit_dep(self, x, y=None):
    timesteps = x.shape[1]
    input_dim = x.shape[2]
    inputs = Input(shape=(timesteps, input_dim))
    encoded = LSTM(self.latent_dim)(inputs)
    decoded = RepeatVector(timesteps)(encoded)
    decoded = LSTM(input_dim, return_sequences=True)(decoded)
    encoded_input = Input(shape=(self.latent_dim,))

    self.sequence_autoencoder = Model(inputs, decoded)
    self.encoder = Model(inputs, encoded)
    self.sequence_autoencoder.compile(
        # loss='binary_crossentropy',
        loss='categorical_crossentropy',
        optimizer='RMSprop',
        metrics=['binary_accuracy'])
    self.sequence_autoencoder.fit(x, x)
Example 15
Project: DeepCCA Author: VahidooX File: models.py License: MIT License
def create_model(layer_sizes1, layer_sizes2, input_size1, input_size2,
                 learning_rate, reg_par, outdim_size, use_all_singular_values):
    """
    builds the whole model
    the structure of each sub-network is defined in build_mlp_net,
    and it can easily get substituted with a more efficient and powerful network like CNN
    """
    view1_model = build_mlp_net(layer_sizes1, input_size1, reg_par)
    view2_model = build_mlp_net(layer_sizes2, input_size2, reg_par)

    model = Sequential()
    model.add(Merge([view1_model, view2_model], mode='concat'))

    model_optimizer = RMSprop(lr=learning_rate)
    model.compile(loss=cca_loss(outdim_size, use_all_singular_values),
                  optimizer=model_optimizer)

    return model
Example 16
Project: deep_learning_ex Author: zatonovo File: cnn_mnist.py License: MIT License
def init_model():
    start_time = time.time()
    print('Compiling model...')
    model = Sequential()

    model.add(Convolution2D(64, 3, 3, border_mode='valid', input_shape=INPUT_SHAPE))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(.25))

    model.add(Flatten())
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
                  metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))

    model.summary()
    return model
Example 17
Project: deep_learning_ex Author: zatonovo File: ff_mnist.py License: MIT License
def init_model():
    start_time = time.time()
    print('Compiling Model ... ')
    model = Sequential()
    model.add(Dense(500, input_dim=784))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(300))
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
                  metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))
    return model
Example 18
Project: DeepLearning_Wavelet-LSTM Author: hello-sea File: test_model_saving.py License: MIT License
def test_saving_lambda_custom_objects():
    inputs = Input(shape=(3,))
    x = Lambda(lambda x: square_fn(x), output_shape=(3,))(inputs)
    outputs = Dense(3)(x)

    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname, custom_objects={'square_fn': square_fn})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 19
Project: DeepLearning_Wavelet-LSTM Author: hello-sea File: test_model_saving.py License: MIT License
def test_saving_custom_activation_function():
    x = Input(shape=(3,))
    output = Dense(3, activation=K.cos)(x)

    model = Model(x, output)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname, custom_objects={'cos': K.cos})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)