Python keras.backend.get_value() Examples

The following are code examples showing how to use keras.backend.get_value(). They are extracted from open source projects; each example's header lists the project, author, source file, and license it comes from.

You may also want to check out all available functions and classes of the module keras.backend.
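
Before the project examples, here is a minimal sketch of the pattern most of them rely on, assuming only the standard backend import (from keras import backend as K) and a working backend: keras.backend.get_value() evaluates a backend variable and returns its current value as a NumPy value, while keras.backend.set_value() writes a new value back into the same variable.

from keras import backend as K

# A backend variable; model.optimizer.lr and layer weights are variables like this one.
lr = K.variable(0.01)

# get_value() pulls the variable's current value out of the backend as a NumPy value.
current = float(K.get_value(lr))

# set_value() pushes a new value into the same variable in place.
K.set_value(lr, current * 0.1)
print(float(K.get_value(lr)))  # roughly 0.001

Most of the callbacks below apply exactly this get/modify/set cycle to self.model.optimizer.lr, for example to log or adjust the learning rate between batches or epochs.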

Example 1
Project: keras_lr_finder   Author: surmenok   File: lr_finder.py    License: MIT License
def on_batch_end(self, batch, logs):
        # Log the learning rate
        lr = K.get_value(self.model.optimizer.lr)
        self.lrs.append(lr)

        # Log the loss
        loss = logs['loss']
        self.losses.append(loss)

        # Check whether the loss got too large or NaN
        if batch > 5 and (math.isnan(loss) or loss > self.best_loss * 4):
            self.model.stop_training = True
            return

        if loss < self.best_loss:
            self.best_loss = loss

        # Increase the learning rate for the next batch
        lr *= self.lr_mult
        K.set_value(self.model.optimizer.lr, lr) 
Example 2
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    License: MIT License
def get_config(self):
        config = {
            'lr': float(K.get_value(self.lr)),
            'beta_1': float(K.get_value(self.beta_1)),
            'beta_2': float(K.get_value(self.beta_2)),
            'decay': float(K.get_value(self.decay)),
            'batch_size': int(self.batch_size),
            'total_iterations': int(self.total_iterations),
            'weight_decays': self.weight_decays,
            'lr_multipliers': self.lr_multipliers,
            'use_cosine_annealing': self.use_cosine_annealing,
            't_cur': int(K.get_value(self.t_cur)),
            'eta_t': float(K.eval(self.eta_t)),
            'eta_min': float(K.get_value(self.eta_min)),
            'eta_max': float(K.get_value(self.eta_max)),
            'init_verbose': self.init_verbose,
            'epsilon': self.epsilon,
            'amsgrad': self.amsgrad
        }
        base_config = super(AdamW, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example 3
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    License: MIT License
def get_config(self):
        config = {
            'lr': float(K.get_value(self.lr)),
            'beta_1': float(K.get_value(self.beta_1)),
            'beta_2': float(K.get_value(self.beta_2)),
            'epsilon': self.epsilon,
            'schedule_decay': self.schedule_decay,
            'batch_size': int(self.batch_size),
            'total_iterations': int(self.total_iterations),
            'weight_decays': self.weight_decays,
            'lr_multipliers': self.lr_multipliers,
            'use_cosine_annealing': self.use_cosine_annealing,
            't_cur': int(K.get_value(self.t_cur)),
            'eta_t': float(K.eval(self.eta_t)),
            'eta_min': float(K.get_value(self.eta_min)),
            'eta_max': float(K.get_value(self.eta_max)),
            'init_verbose': self.init_verbose
        }
        base_config = super(NadamW, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example 4
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    License: MIT License
def get_config(self):
        config = {
            'lr': float(K.get_value(self.lr)),
            'momentum': float(K.get_value(self.momentum)),
            'decay': float(K.get_value(self.decay)),
            'nesterov': self.nesterov,
            'batch_size': int(self.batch_size),
            'total_iterations': int(self.total_iterations),
            'weight_decays': self.weight_decays,
            'lr_multipliers': self.lr_multipliers,
            'use_cosine_annealing': self.use_cosine_annealing,
            't_cur': int(K.get_value(self.t_cur)),
            'eta_t': float(K.eval(self.eta_t)),
            'eta_min': float(K.get_value(self.eta_min)),
            'eta_max': float(K.get_value(self.eta_max)),
            'init_verbose': self.init_verbose
        }
        base_config = super(SGDW, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example 5
Project: keras_bn_library   Author: bnsnapper   File: recurrent.py    License: MIT License
def build(self, input_shape):
		self.input_spec = [InputSpec(shape=input_shape)]
		self.input_dim = input_shape[2]

		self.W = self.init((self.output_dim, 4 * self.input_dim),
		                   name='{}_W'.format(self.name))
		self.U = self.inner_init((self.input_dim, 4 * self.input_dim),
		                         name='{}_U'.format(self.name))
		self.b = K.variable(np.hstack((np.zeros(self.input_dim),
		                               K.get_value(self.forget_bias_init((self.input_dim,))),
		                               np.zeros(self.input_dim),
		                               np.zeros(self.input_dim))),
		                    name='{}_b'.format(self.name))

		self.A = self.init((self.input_dim, self.output_dim),
		                    name='{}_A'.format(self.name))
		self.ba = K.zeros((self.output_dim,), name='{}_ba'.format(self.name))


		self.trainable_weights = [self.W, self.U, self.b, self.A, self.ba]

		if self.initial_weights is not None:
			self.set_weights(self.initial_weights)
			del self.initial_weights 
Example 6
Project: hacktoberfest2018   Author: ambujraj   File: DenseNet_CIFAR10.py    License: GNU General Public License v3.0
def on_epoch_end(self, epoch, logs={}):
        current = logs.get(self.monitor)
        lr = self.model.optimizer.lr
        # If you want to apply decay.
        if k.get_value(self.model.optimizer.iterations) == 100:
            k.set_value(self.model.optimizer.lr, 0.01)
            print("Updating Learning rate", 0.01)
        print("Current learning rate", k.get_value(self.model.optimizer.lr))
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            return
        #if k.get_value(self.model.optimizer.iterations)%5 == 0:
        #save_to_drive(k.get_value(self.model.optimizer.iterations))        
        if current >= self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            self.model.stop_training = True

# Load CIFAR10 Data 
Example 7
Project: keras-one-cycle   Author: titu1994   File: clr.py    License: MIT License
def on_batch_end(self, epoch, logs=None):
        logs = logs or {}

        self.clr_iterations += 1
        new_lr = self.compute_lr()

        self.history.setdefault('lr', []).append(
            K.get_value(self.model.optimizer.lr))
        K.set_value(self.model.optimizer.lr, new_lr)

        if self._update_momentum:
            if not hasattr(self.model.optimizer, 'momentum'):
                raise ValueError("Momentum can be updated only on SGD optimizer !")

            new_momentum = self.compute_momentum()

            self.history.setdefault('momentum', []).append(
                K.get_value(self.model.optimizer.momentum))
            K.set_value(self.model.optimizer.momentum, new_momentum)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v) 
Example 8
Project: keras-contrib   Author: keras-team   File: initializers_test.py    License: MIT License
def _runner(init, shape, target_mean=None, target_std=None,
            target_max=None, target_min=None, upper_bound=None, lower_bound=None):
    variable = init(shape)
    if not isinstance(variable, np.ndarray):
        output = K.get_value(variable)
    else:
        output = variable

    lim = 1e-2
    if target_std is not None:
        assert abs(output.std() - target_std) < lim
    if target_mean is not None:
        assert abs(output.mean() - target_mean) < lim
    if target_max is not None:
        assert abs(output.max() - target_max) < lim
    if target_min is not None:
        assert abs(output.min() - target_min) < lim
    if upper_bound is not None:
        assert output.max() < upper_bound
    if lower_bound is not None:
        assert output.min() > lower_bound 
Example 9
Project: maskrcnn   Author: shtamura   File: test_bbox.py    License: MIT License
def test_get_iou(self):
        gtbox = K.variable([[1, 1, 3, 3], [2, 2, 4, 4]])
        anchor = K.variable([
            [1, 1, 3, 3],  # Exactly matches gtbox[0], so IoU = 1.
            # Overlaps gtbox[1] by 1/4, so IoU = 1/7.
            [1, 0, 3, 2],  # Overlaps half of gtbox[0], so IoU = 1/3.
            [2, 2, 4, 4],  # Overlaps gtbox[0] by 1/4 (IoU = 1/7); exactly matches gtbox[1].
            [0, 3, 2, 5],  # Adjacent to gtbox[0] (no overlap).
            [4, 3, 6, 5],  # No contact with gtbox[0].
        ])
        expected = np.array([
            [1, 1 / 7],
            [1 / 3, 0],
            [1 / 7, 1],
            [0, 0],
            [0, 0],
        ])
        iou = K.get_value(bbox.get_iou(anchor, gtbox))
        np.testing.assert_almost_equal(iou, expected, decimal=5) 
Example 10
Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_callbacks.py    License: MIT License
def test_LearningRateScheduler():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon() 
Example 11
Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: test_callbacks.py    License: MIT License
def test_ReduceLROnPlateau_patience():
    class DummyOptimizer(object):
        def __init__(self):
            self.lr = K.variable(1.0)

    class DummyModel(object):
        def __init__(self):
            self.optimizer = DummyOptimizer()

    reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                    patience=2)
    reduce_on_plateau.model = DummyModel()

    losses = [0.0860, 0.1096, 0.1040]
    lrs = []

    for epoch in range(len(losses)):
        reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
        lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr))

    # The learning rates should be 1.0 except the last one
    assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0 
Example 12
Project: steppy-toolkit   Author: minerva-ml   File: callbacks.py    License: MIT License
def on_epoch_end(self, epoch, logs={}):
        # Multiply the current learning rate by gamma at the end of every epoch (exponential decay).
        if self.gamma is not None:
            K.set_value(self.model.optimizer.lr, self.gamma * K.get_value(self.model.optimizer.lr))
Example 13
Project: keras_lr_finder   Author: surmenok   File: lr_finder.py    License: MIT License
def find(self, x_train, y_train, start_lr, end_lr, batch_size=64, epochs=1, **kw_fit):
        # If x_train contains data for multiple inputs, use length of the first input.
        # Assumption: the first element in the list is single input; NOT a list of inputs.
        N = x_train[0].shape[0] if isinstance(x_train, list) else x_train.shape[0]

        # Compute number of batches and LR multiplier
        num_batches = epochs * N / batch_size
        self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(num_batches))
        # Remember the initial weights so they can be restored after the search
        initial_weights = self.model.get_weights()

        # Remember the original learning rate
        original_lr = K.get_value(self.model.optimizer.lr)

        # Set the initial learning rate
        K.set_value(self.model.optimizer.lr, start_lr)

        callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

        self.model.fit(x_train, y_train,
                       batch_size=batch_size, epochs=epochs,
                       callbacks=[callback],
                       **kw_fit)

        # Restore the weights to the state before model fitting
        self.model.set_weights(initial_weights)

        # Restore the original learning rate
        K.set_value(self.model.optimizer.lr, original_lr) 
Example 14
Project: keras_lr_finder   Author: surmenok   File: lr_finder.py    License: MIT License
def find_generator(self, generator, start_lr, end_lr, epochs=1, steps_per_epoch=None, **kw_fit):
        if steps_per_epoch is None:
            try:
                steps_per_epoch = len(generator)
            except (ValueError, NotImplementedError):
                raise ValueError('`steps_per_epoch=None` is only valid for a'
                        ' generator based on the '
                        '`keras.utils.Sequence`'
                        ' class. Please specify `steps_per_epoch` '
                        'or use the `keras.utils.Sequence` class.')
        self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(epochs * steps_per_epoch))

        # Remember the initial weights so they can be restored after the search
        initial_weights = self.model.get_weights()

        # Remember the original learning rate
        original_lr = K.get_value(self.model.optimizer.lr)

        # Set the initial learning rate
        K.set_value(self.model.optimizer.lr, start_lr)

        callback = LambdaCallback(on_batch_end=lambda batch,
                                                      logs: self.on_batch_end(batch, logs))

        self.model.fit_generator(generator=generator,
                                 epochs=epochs,
                                 steps_per_epoch=steps_per_epoch,
                                 callbacks=[callback],
                                 **kw_fit)

        # Restore the weights to the state before model fitting
        self.model.set_weights(initial_weights)

        # Restore the original learning rate
        K.set_value(self.model.optimizer.lr, original_lr) 
Example 15
Project: aetros-cli   Author: aetros   File: KerasCallback.py    License: MIT License
def get_learning_rate(self):

        if hasattr(self.model, 'optimizer'):
            config = self.model.optimizer.get_config()

            from keras.optimizers import Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD

            if isinstance(self.model.optimizer, (Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD)):
                # Reconstruct the effective learning rate after the optimizer's built-in decay.
                return config['lr'] * (1. / (1. + config['decay'] * float(K.get_value(self.model.optimizer.iterations))))

            elif 'lr' in config:
                return config['lr'] 
Example 16
Project: PointNet-Keras   Author: TianzhongSong   File: callbacks.py    License: MIT License
def change_lr(self, new_lr):
        old_lr = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, new_lr)
        if self.verbose == 1:
            print('Learning rate is %g' %new_lr) 
Example 17
Project: DOVE   Author: kiharalab   File: Keras_Loss.py    License: GNU General Public License v3.0
def on_epoch_end(self, batch, logs={}):
        self.losses['epoch'].append(logs.get('loss'))
        self.accuracy['epoch'].append(logs.get('acc'))
        self.val_loss['epoch'].append(logs.get('val_loss'))
        self.val_acc['epoch'].append(logs.get('val_acc'))
        self.loss_plot('batch')
        self.loss_plot('epoch')
        tmp_path = os.path.join(self.model_path, str(len(self.accuracy['epoch'])) + '_epoch.h5')
        self.model.save_weights(tmp_path)
        # Save the current learning rate to a text file alongside the epoch's weights.
        lr = K.get_value(self.model.optimizer.lr)
        tmp_path = os.path.join(self.model_path, str(len(self.accuracy['epoch'])) + '_lr.txt')
        lr_result = np.zeros(1)
        lr_result[0] = lr
        np.savetxt(tmp_path, lr_result)
Example 18
Project: keras_bn_library   Author: bnsnapper   File: recurrent.py    License: MIT License
def build(self, input_shape):
		self.input_spec = [InputSpec(shape=input_shape)]
		input_dim = input_shape[2]
		self.input_dim = input_dim
		
		if self.stateful:
			self.reset_states()
		else:
			self.states = [None, None]
			self.states_dim = [self.input_dim, self.output_dim]


		self.weight_size = self.output_dim * 4
		self.W = self.add_weight((input_dim, self.weight_size),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer)
		self.U = self.add_weight((input_dim, self.weight_size),
                                 initializer=self.inner_init,
                                 name='{}_U'.format(self.name),
                                 regularizer=self.U_regularizer)

		def b_reg(shape, name=None):
			return K.variable(np.hstack((np.zeros(self.output_dim),
										K.get_value(self.forget_bias_init((self.output_dim,))),
										np.zeros(self.output_dim),
										np.zeros(self.output_dim))),
										name='{}_b'.format(self.name))
		self.b = self.add_weight((self.weight_size,),
                                     initializer=b_reg,
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer)


		if self.initial_weights is not None:
			self.set_weights(self.initial_weights)
			del self.initial_weights

		self.built = True 
Example 19
Project: PSPNet-Keras-tensorflow   Author: Vladkryvoruchko   File: callbacks.py    License: MIT License
def on_epoch_end(self, epoch, logs={}):
    lr_now = K.get_value(self.model.optimizer.lr)
    new_lr = max(0.00001, min(self.base_lr * (1 - epoch / float(self.max_epoch))**self.power, lr_now))
    K.set_value(self.model.optimizer.lr, new_lr)
    if self.verbose:
        print(" - learning rate: %10f" % (new_lr)) 
Example 20
Project: Coloring-greyscale-images   Author: emilwallner   File: AdamAccumulate.py    License: MIT License
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon,
                  'amsgrad': self.amsgrad}
        base_config = super(AdamAccumulate, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example 21
Project: text-detection-ocr   Author: GlassyWing   File: callbacks.py    License: Apache License 2.0
def on_batch_end(self, batch, logs=None):
        '''Record previous batch statistics and update the learning rate.'''
        logs = logs or {}
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

        self.batch_since_restart += 1
        K.set_value(self.model.optimizer.lr, self.clr()) 
Example 22
Project: text-detection-ocr   Author: GlassyWing   File: callbacks.py    License: Apache License 2.0
def on_epoch_begin(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr) 
Example 23
Project: text-detection-ocr   Author: GlassyWing   File: callbacks.py    License: Apache License 2.0
def on_epoch_end(self, epoch, logs=None):
        lr = float(K.get_value(self.model.optimizer.lr))
        watch_value = logs.get(self.watch)
        if watch_value is None:
            raise ValueError("Watched value '" + self.watch + "' don't exist")

        self.history_cache.put(watch_value)

        if watch_value > self.history_cache.mean():
            lr = self.schedule(epoch, lr)
            print("Update learning rate: ", lr)
            K.set_value(self.model.optimizer.lr, lr) 
Example 24
Project: RecurrentGaze   Author: crisie   File: adamaccum.py    License: MIT License
def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [(self.iterations, self.iterations + 1)]

        t = self.iterations + 1
        lr_t = self.lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

        ms = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        vs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        gs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        self.weights = ms + vs

        for p, g, m, v, gg in zip(params, grads, ms, vs, gs):

            flag = K.equal(self.iterations % self.accum_iters, 0)
            flag = K.cast(flag, dtype='float32')

            gg_t = (1 - flag) * (gg + g)
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * (gg + flag * g) / self.accum_iters
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square((gg + flag * g) / self.accum_iters)
            p_t = p - flag * lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append((m, flag * m_t + (1 - flag) * m))
            self.updates.append((v, flag * v_t + (1 - flag) * v))
            self.updates.append((gg, gg_t))

            new_p = p_t
            # apply constraints
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)
            self.updates.append((p, new_p))
        return self.updates 
Example 25
Project: RecurrentGaze   Author: crisie   File: adamaccum.py    License: MIT License
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'epsilon': self.epsilon}
        base_config = super(Adam_accumulate, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example 26
Project: keras-snapshot_ensembles   Author: arthurdouillard   File: snapshot.py    License: MIT License
def set_model(self, model):
        self.model = model
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')

        # Get initial learning rate
        self.base_lr = float(K.get_value(self.model.optimizer.lr)) 
Example 27
Project: FSA-Net   Author: shamangary   File: TYY_callbacks.py    License: Apache License 2.0
def on_epoch_begin(self, epoch, logs={}):
		
		if epoch in self.startEpoch:
			if epoch == 0:
				ratio = 1
			else:
				ratio = 0.1
			LR = K.get_value(self.model.optimizer.lr)
			K.set_value(self.model.optimizer.lr,LR*ratio)
		
		return