Python keras.backend.set_value() Examples

The following code examples show how to use keras.backend.set_value(). They are taken from open-source Python projects.
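
Before the project examples, here is a minimal, self-contained sketch of the API itself (a sketch only, assuming the standalone keras package with a TensorFlow backend; the variable name v is a placeholder). K.set_value() writes a numpy-compatible value into an existing backend variable in place, and K.get_value() reads it back; the examples below use this pair mainly to adjust optimizer state, such as the learning rate, without rebuilding the model.

import keras.backend as K

# Create a backend variable holding a scalar (placeholder name).
v = K.variable(0.1)

# Overwrite the variable's value in place; the graph node itself is unchanged.
K.set_value(v, 0.01)

# Read the current value back as a numpy object.
print(K.get_value(v))  # -> 0.01

# The same pattern applies to optimizer hyperparameters, e.g.:
# K.set_value(model.optimizer.lr, 1e-4)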

Example 1
Project: keras_lr_finder   Author: surmenok   File: lr_finder.py    MIT License
def on_batch_end(self, batch, logs):
        # Log the learning rate
        lr = K.get_value(self.model.optimizer.learning_rate)
        self.lrs.append(lr)

        # Log the loss
        loss = logs['loss']
        self.losses.append(loss)

        # Check whether the loss got too large or NaN
        if batch > 5 and (math.isnan(loss) or loss > self.best_loss * 4):
            self.model.stop_training = True
            return

        if loss < self.best_loss:
            self.best_loss = loss

        # Increase the learning rate for the next batch
        lr *= self.lr_mult
        K.set_value(self.model.optimizer.learning_rate, lr) 
Example 2
Project: keras_lr_finder   Author: surmenok   File: lr_finder.py    MIT License
def find(self, x_train, y_train, start_lr, end_lr, batch_size=64, epochs=1):
        # If x_train contains data for multiple inputs, use the length of the first input.
        # Assumption: the first element of the list is a single input array, not a nested list of inputs.
        N = x_train[0].shape[0] if isinstance(x_train, list) else x_train.shape[0]

        # Compute number of batches and LR multiplier
        num_batches = epochs * N / batch_size
        self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(num_batches))
        # Save weights into a file
        self.model.save_weights('tmp.h5')

        # Remember the original learning rate
        original_lr = K.get_value(self.model.optimizer.learning_rate)

        # Set the initial learning rate
        K.set_value(self.model.optimizer.learning_rate, start_lr)

        callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

        self.model.fit(x_train, y_train,
                       batch_size=batch_size, epochs=epochs,
                       callbacks=[callback])

        # Restore the weights to the state before model fitting
        self.model.load_weights('tmp.h5')

        # Restore the original learning rate
        K.set_value(self.model.optimizer.learning_rate, original_lr) 
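
For context, a hypothetical driver for the two LRFinder methods above — a sketch only: the toy model and data are invented here, the import path follows the keras_lr_finder project, and optimizer.learning_rate requires a Keras version that exposes that attribute (2.3+).

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras_lr_finder import LRFinder  # assumed import path

# Toy model and data, just to drive the finder.
model = Sequential([Dense(1, input_dim=8)])
model.compile(optimizer='adam', loss='mse')
x_train = np.random.rand(256, 8)
y_train = np.random.rand(256, 1)

# Sweep the learning rate from 1e-6 to 1 over one epoch; find() saves the
# weights, runs fit() with the per-batch callback above, then restores both
# the weights and the original learning rate.
lr_finder = LRFinder(model)
lr_finder.find(x_train, y_train, start_lr=1e-6, end_lr=1.0, batch_size=64, epochs=1)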
Example 3
Project: keras_bn_library   Author: bnsnapper   File: rnnrbm.py    MIT License
def reset_states(self):
		assert self.stateful, 'Layer must be stateful.'
		input_shape = self.input_spec[0].shape

		if not input_shape[0]:
			raise Exception('If a RNN is stateful, a complete ' +
							'input_shape must be provided (including batch size).')

		if hasattr(self, 'states'):
			K.set_value(self.states[0],
			            np.zeros((input_shape[0], self.hidden_recurrent_dim)))
			K.set_value(self.states[1],
			            np.zeros((input_shape[0], self.input_dim)))
			K.set_value(self.states[2],
			            np.zeros((input_shape[0], self.hidden_dim)))
		else:
			self.states = [K.zeros((input_shape[0], self.hidden_recurrent_dim)),
							K.zeros((input_shape[0], self.input_dim)),
							K.zeros((input_shape[0], self.hidden_dim))] 
Example 4
Project: keras_bn_library   Author: bnsnapper   File: recurrent.py    MIT License
def reset_states(self):
		assert self.stateful, 'Layer must be stateful.'
		input_shape = self.input_spec[0].shape
		if not input_shape[0]:
			raise ValueError('If a RNN is stateful, it needs to know '
			                 'its batch size. Specify the batch size '
			                 'of your input tensors: \n'
			                 '- If using a Sequential model, '
			                 'specify the batch size by passing '
			                 'a `batch_input_shape` '
			                 'argument to your first layer.\n'
			                 '- If using the functional API, specify '
			                 'the time dimension by passing a '
			                 '`batch_shape` argument to your Input layer.')
		if hasattr(self, 'states'):
			K.set_value(self.states[0],
			            np.zeros((input_shape[0], self.input_dim)))
			K.set_value(self.states[1],
			            np.zeros((input_shape[0], self.output_dim)))
		else:
			self.states = [K.zeros((input_shape[0], self.input_dim)),
							K.zeros((input_shape[0], self.output_dim))] 
Example 5
Project: yoctol-keras-layer-zoo   Author: Yoctol   File: rnn_cell.py    GNU General Public License v3.0
def reset_states(self, states=None):
        if states is None:
            self.recurrent_layer.reset_states(states)
        else:
            self.recurrent_layer.reset_states(states[:-1])

        batch_size = self.recurrent_layer.input_spec[0].shape[0]
        if self.dense_state is None:
            self.dense_state = K.zeros((
                batch_size,
                self.dense_layer.units
            ))
        elif states is None:
            K.set_value(
                self.dense_state,
                np.zeros((batch_size, self.dense_layer.units))
            )
        else:
            K.set_value(
                self.dense_state,
                states[-1]
            ) 
Example 6
Project: keras-snapshot_ensembles   Author: arthurdouillard   File: snapshot.py    MIT License
def on_epoch_end(self, epoch, logs=None):
        if epoch == 0 or (epoch + 1) % self.period != 0: return
        # Only save at the end of a cycle, not at the beginning

        if not os.path.exists(self.folder_path):
            os.makedirs(self.folder_path)

        cycle = int(epoch / self.period)
        cycle_str = str(cycle).rjust(self.nb_digits, '0')
        self.model.save_weights(self.path_format.format(cycle_str), overwrite=True)

        # Resetting the learning rate
        K.set_value(self.model.optimizer.lr, self.base_lr)

        if self.verbose > 0:
            print('\nEpoch %05d: Reached %d-th cycle, saving model.' % (epoch, cycle)) 
Example 7
Project: training_results_v0.6   Author: mlperf   File: callbacks.py    Apache License 2.0
def _average_metrics_in_place(self, logs):
        logs = logs or {}
        reduced_logs = {}
        # Reduce every metric among workers. Sort metrics by name
        # to ensure consistent order.
        for metric, value in sorted(logs.items()):
            if metric not in self.variables:
                self.variables[metric], self.allreduce_ops[metric] = \
                    self._make_variable(metric, value)
            else:
                K.set_value(self.variables[metric], value)
            reduced_logs[metric] = \
                K.get_session().run(self.allreduce_ops[metric])
        # Override the reduced values back into logs dictionary
        # for other callbacks to use.
        for metric, value in reduced_logs.items():
            logs[metric] = value 
Example 8
Project: dynamicgem   Author: Sujit-O   File: dynamic_triad.py    MIT License
def restore_model(self, model, begin=None, end=None, copy=True):
        super(Model, self).restore_model(model, begin, end, copy=copy)
        if begin is None:
            begin = self.cur_train_begin
        if end is None:
            end = self.cur_train_end
        if self.cur_train_begin < self.init_train_begin + self.pretrain_size < self.cur_train_end:
            raise RuntimeError("current training process crosses the boarder of pretraining???")

        # store to keras resources
        if self.is_training:
            if end <= self.init_train_begin + self.pretrain_size:
                K.set_value(self.pretrain['vars'][0], self._sequence[self.init_train_begin:self.init_train_begin + self.pretrain_size])
                K.set_value(self.pretrain['vars'][1], self._tagged['theta'])
            else:
                K.set_value(self.online['vars'][0], self._sequence[begin]) 
Example 9
Project: hacktoberfest2018   Author: ambujraj   File: DenseNet_CIFAR10.py    GNU General Public License v3.0
def on_epoch_end(self, epoch, logs={}):
        current = logs.get(self.monitor)
        # If you want to apply decay.
        if k.get_value(self.model.optimizer.iterations) == 100:
            k.set_value(self.model.optimizer.lr, 0.01)
            print("Updating learning rate", 0.01)
        print("Current learning rate", k.get_value(self.model.optimizer.lr))
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            return
        #if k.get_value(self.model.optimizer.iterations)%5 == 0:
        #save_to_drive(k.get_value(self.model.optimizer.iterations))
        if current >= self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            self.model.stop_training = True

# Load CIFAR10 Data 
Example 10
Project: FractalAI   Author: Guillemdb   File: dnn_train.py    GNU Affero General Public License v3.0
def train_on_batch(self, action, n_repeat_action: int = 1, *args, **kwargs):
        losses, metrics = [], []
        # for i in range(n_repeat_action):
        data = self.get_next_batch(train=True)
        if not self.is_int(n_repeat_action):
            K.set_value(self.model.optimizer.lr, n_repeat_action)
        rate = n_repeat_action if self.is_int(n_repeat_action) else 10
        for i in range(rate):
            while len(data) < self.batch_size:
                data = self.get_next_batch(train=True)
            X, y = list(zip(*data))
            self.X, self.y = np.array(X), np.array(y)
            # Dynamic learning rate

            loss, metric = self.model.train_on_batch(self.X, self.y, *args, **kwargs)
            losses.append(loss)
            metrics.append(metric)

            old_weights = self.model.get_weights()
            new_weights = self.update_weights(old_weights, action, n_repeat_action)
            self.model.set_weights(new_weights)
        self.loss, self.metric = np.mean(losses), np.mean(metrics)
        return self.metric  # / self.loss 
Example 11
Project: intelligent-annotation   Author: shibing624   File: allconv.py    Apache License 2.0
def fit(self, X_train, y_train, sample_weight=None):
        y_mat = self.create_y_mat(y_train)

        if self.model is None:
            self.build_model(X_train)

        # We don't want incremental fit so reset learning rate and weights
        K.set_value(self.model.optimizer.lr, self.learning_rate)
        self.model.set_weights(self.initial_weights)
        self.model.fit(
            X_train,
            y_mat,
            batch_size=self.batch_size,
            epochs=self.epochs,
            shuffle=True,
            sample_weight=sample_weight,
            verbose=0) 
Example 12
Project: intelligent-annotation   Author: shibing624   File: small_cnn.py    Apache License 2.0
def fit(self, X_train, y_train, sample_weight=None):
        y_mat = self.create_y_mat(y_train)

        if self.model is None:
            self.build_model(X_train)

        # We don't want incremental fit so reset learning rate and weights
        K.set_value(self.model.optimizer.lr, self.learning_rate)
        self.model.set_weights(self.initial_weights)
        self.model.fit(
            X_train,
            y_mat,
            batch_size=self.batch_size,
            epochs=self.epochs,
            shuffle=True,
            sample_weight=sample_weight,
            verbose=0) 
Example 13
Project: deep-models   Author: LaurentMazare   File: rhn.py    Apache License 2.0
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
      raise Exception('If a RNN is stateful, a complete ' +
                      'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
      K.set_value(self.states[0],
                  np.zeros((input_shape[0], self.output_dim)))
    else:
      self.states = [K.zeros((input_shape[0], self.output_dim))] 
Example 14
Project: gandlf   Author: codekansas   File: callbacks.py    MIT License
def on_batch_end(self, epoch, logs={}):
        if not isinstance(self.model, GandlfModel):
            raise ValueError('The AdaptiveLearningRate callback only works '
                             'for Gandlf models.')

        if (not hasattr(self.model.gen_optimizer, 'lr') or
                not hasattr(self.model.dis_optimizer, 'lr')):
            raise ValueError('To use the Adaptive Learning Rate callback, '
                             'both the generator and discriminator optimizers '
                             'must have an "lr" attribute.')

        gen_loss, dis_loss = 0., 0.
        for key, val in logs.items():
            if key.endswith('gen_loss'):
                if val < 0:
                    raise ValueError('The adaptive learning rate callback '
                                     'doesn\'t work for negative losses.')
                gen_loss += val
            elif key.endswith('real_loss') or key.endswith('fake_loss'):
                if val < 0:
                    raise ValueError('The adaptive learning rate callback '
                                     'doesn\'t work for negative losses.')
                dis_loss += val

        dis_loss /= 2  # Double-counting real and fake data.
        total_loss = gen_loss + dis_loss + 1e-12
        gen_pct, dis_pct = gen_loss / total_loss, dis_loss / total_loss

        # Calculates the percentage to weight each one.
        generator_lr = self.generator_lr * gen_pct
        discriminator_lr = self.discriminator_lr * dis_pct

        # Updates the learning rates on both.
        K.set_value(self.model.gen_optimizer.lr, generator_lr)
        K.set_value(self.model.dis_optimizer.lr, discriminator_lr) 
Example 15
Project: C3D-Action-Recognition   Author: lianggyu   File: callbacks.py    MIT License
def change_lr(self, new_lr):
        old_lr = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, new_lr)
        if self.verbose == 1:
            print('Learning rate is %g' %new_lr) 
Example 16
Project: C3D-Action-Recognition   Author: lianggyu   File: callbacks.py    MIT License
def on_batch_begin(self, batch, logs={}):
        self.itr += 1
        cycle = 1 + self.itr/int(2*self.hc)
        x = self.itr - (2.*cycle - 1)*self.hc
        x /= self.hc
        new_lr = self.lr[0] + (self.lr[1] - self.lr[0])*(1 - abs(x))/cycle

        K.set_value(self.model.optimizer.lr, new_lr) 
Example 17
Project: Kutils   Author: ishank26   File: helper.py    MIT License
def on_epoch_begin(self, epoch, logs={}):
        old_lr = k.get_value(self.model.optimizer.lr)
        if epoch > 1 and epoch % self.n_epoch == 0:
            new_lr = self.decay * old_lr
            k.set_value(self.model.optimizer.lr, new_lr)
        else:
            k.set_value(self.model.optimizer.lr, old_lr)


# keras integrated 
Example 18
Project: Kutils   Author: ishank26   File: helper.py    MIT License
def on_epoch_end(self, epoch, logs={}):
        loss = logs.get('loss')  # get loss
        print("loss: ", loss)
        old_lr = k.get_value(self.model.optimizer.lr)  # get old lr
        new_lr = old_lr * np.exp(loss)  # lr*exp(loss)
        k.set_value(self.model.optimizer.lr, new_lr)


# decaylr=LearningRateScheduler(decay_sch)


# checkpoint=ModelCheckpoint("weights/adam_noep{0}_batch{1}_seq_{2}.hdf5".format(\
# no_epochs,batch, seq_length), monitor='loss', verbose=0,
# save_best_only=True, save_weights_only=False, mode='min') 
Example 19
Project: keras_lr_finder   Author: surmenok   File: lr_finder.py    MIT License
def find_generator(self, generator, start_lr, end_lr, epochs=1, steps_per_epoch=None, **kw_fit):
        if steps_per_epoch is None:
            try:
                steps_per_epoch = len(generator)
            except (ValueError, NotImplementedError):
                raise ValueError('`steps_per_epoch=None` is only valid for a'
                                 ' generator based on the '
                                 '`keras.utils.Sequence`'
                                 ' class. Please specify `steps_per_epoch` '
                                 'or use the `keras.utils.Sequence` class.')
        self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(epochs * steps_per_epoch))

        # Save weights into a file
        self.model.save_weights('tmp.h5')

        # Remember the original learning rate
        original_lr = K.get_value(self.model.optimizer.learning_rate)

        # Set the initial learning rate
        K.set_value(self.model.optimizer.learning_rate, start_lr)

        callback = LambdaCallback(on_batch_end=lambda batch,
                                                      logs: self.on_batch_end(batch, logs))

        self.model.fit_generator(generator=generator,
                                 epochs=epochs,
                                 steps_per_epoch=steps_per_epoch,
                                 callbacks=[callback],
                                 **kw_fit)

        # Restore the weights to the state before model fitting
        self.model.load_weights('tmp.h5')

        # Restore the original learning rate
        K.set_value(self.model.optimizer.learning_rate, original_lr) 
Example 20
Project: ODENet   Author: uqyge   File: utils.py    MIT License
def on_train_begin(self, logs={}):
        '''Initialize the learning rate to the maximum value at the start of training.'''
        logs = logs or {}
        K.set_value(self.model.optimizer.lr, self.max_lr) 
Example 21
Project: ODENet   Author: uqyge   File: utils.py    MIT License
def on_batch_end(self, batch, logs={}):
        '''Record previous batch statistics and update the learning rate.'''
        logs = logs or {}
        self.history.setdefault('lr', []).append(
            K.get_value(self.model.optimizer.lr))
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

        self.batch_since_restart += 1
        K.set_value(self.model.optimizer.lr, self.clr()) 
Example 22
Project: ICASSP2019_TCN   Author: DSIP-UPatras   File: utils.py    MIT License
def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        lr = self.schedule(epoch)
        if not isinstance(lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             'should be float.')
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('\nEpoch %05d: LearningRateScheduler reducing learning '
                  'rate to %s.' % (epoch + 1, lr)) 
Example 23
Project: dialectal_arabic_segmenter   Author: qcri   File: callbacks.py    GNU Lesser General Public License v3.0
def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        lr = self.schedule(epoch)
        if not isinstance(lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             'should be float.')
        K.set_value(self.model.optimizer.lr, lr) 
Example 24
Project: dialectal_arabic_segmenter   Author: qcri   File: callbacks.py    GNU Lesser General Public License v3.0
def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
                          self.monitor, RuntimeWarning)
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0

            if self.monitor_op(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                if self.wait >= self.patience:
                    old_lr = float(K.get_value(self.model.optimizer.lr))
                    if old_lr > self.min_lr + self.lr_epsilon:
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        K.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            print('\nEpoch %05d: reducing learning rate to %s.' % (epoch, new_lr))
                        self.cooldown_counter = self.cooldown
                        self.wait = 0
                self.wait += 1 
Example 25
Project: backdoor   Author: bolunwang   File: visualizer.py    MIT License
def reset_opt(self):

        K.set_value(self.opt.iterations, 0)
        for w in self.opt.weights:
            K.set_value(w, np.zeros(K.int_shape(w)))

        pass 
Example 26
Project: backdoor   Author: bolunwang   File: visualizer.py    MIT License
def reset_state(self, pattern_init, mask_init):

        print('resetting state')

        # setting cost
        if self.reset_cost_to_zero:
            self.cost = 0
        else:
            self.cost = self.init_cost
        K.set_value(self.cost_tensor, self.cost)

        # setting mask and pattern
        mask = np.array(mask_init)
        pattern = np.array(pattern_init)
        mask = np.clip(mask, self.mask_min, self.mask_max)
        pattern = np.clip(pattern, self.color_min, self.color_max)
        mask = np.expand_dims(mask, axis=2)

        # convert to tanh space
        mask_tanh = np.arctanh((mask - 0.5) * (2 - self.epsilon))
        pattern_tanh = np.arctanh((pattern / 255.0 - 0.5) * (2 - self.epsilon))
        print('mask_tanh', np.min(mask_tanh), np.max(mask_tanh))
        print('pattern_tanh', np.min(pattern_tanh), np.max(pattern_tanh))

        K.set_value(self.mask_tanh_tensor, mask_tanh)
        K.set_value(self.pattern_tanh_tensor, pattern_tanh)

        # resetting optimizer states
        self.reset_opt()

        pass 
Example 27
Project: MS-CMR2019   Author: Suiiyu   File: train.py    MIT License
def lr_poly_decay(model, base_lr, curr_iter, max_iter, power=0.5):
    lrate = base_lr * (1.0 - (curr_iter / float(max_iter))) ** power
    K.set_value(model.optimizer.lr, lrate)

    return K.eval(model.optimizer.lr) 
Example 28
Project: MS-CMR2019   Author: Suiiyu   File: train.py    MIT License
def lr_ep_decay(model, base_lr, curr_ep, step=0.1):
    
    lrate = base_lr * step**(curr_ep/40)
    K.set_value(model.optimizer.lr, lrate)
    return K.eval(model.optimizer.lr) 
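
A sketch of how these two decay helpers might be called from a manual training loop (the loop, model, and data below are invented for illustration; lr_poly_decay from Example 27 is assumed to be in scope, with keras.backend imported as K as in the project's train.py):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_dim=4)])
model.compile(optimizer='sgd', loss='mse')
x = np.random.rand(32, 4)
y = np.random.rand(32, 1)

# Shrink the learning rate polynomially toward zero over max_iter steps.
max_iter = 100
for curr_iter in range(max_iter):
    lr_now = lr_poly_decay(model, base_lr=1e-3, curr_iter=curr_iter,
                           max_iter=max_iter, power=0.5)
    model.train_on_batch(x, y)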
Example 29
Project: Flipkart-Grid   Author: ad71   File: sgdr.py    MIT License
def on_train_begin(self, logs={}):
		''' Initialize the learning rate to the maximum value at the start of training '''
		logs = logs or {}
		K.set_value(self.model.optimizer.lr, self.maximum) 
Example 30
Project: Flipkart-Grid   Author: ad71   File: sgdr.py    MIT License
def on_batch_end(self, batch, logs={}):
		''' Record the previous batch statistics and update the learning rate '''
		logs = logs or {}
		self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
		for k, v in logs.items():
			self.history.setdefault(k, []).append(v)

		self.prev_restart += 1
		K.set_value(self.model.optimizer.lr, self.clr()) 
Example 31
Project: Flipkart-Grid   Author: ad71   File: lr_finder.py    MIT License
def on_train_begin(self, logs=None):
		if logs is None:
			logs = {}
		K.set_value(self.model.optimizer.lr, self.minimum)

	# mandatory function for callbacks 
Example 32
Project: Flipkart-Grid   Author: ad71   File: lr_finder.py    MIT License
def on_batch_end(self, epoch, logs=None):
		if logs is None:
			logs = {}

		self.i += 1

		self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
		self.history.setdefault('iterations', []).append(self.i)

		for key, value in logs.items():
			self.history.setdefault(key, []).append(value)

		K.set_value(self.model.optimizer.lr, self.calculate_learning_rate())

	# plot learning rate 
Example 33
Project: humpback-whale-4th-place   Author: daustingm1   File: ensemble.py    Apache License 2.0
def set_lr(model, lr):
    K.set_value(model.optimizer.lr, float(lr)) 
Example 34
Project: humpback-whale-4th-place   Author: daustingm1   File: siamese_train.py    Apache License 2.0
def set_lr(model, lr):
    K.set_value(model.optimizer.lr, float(lr)) 
Example 35
Project: humpback-whale-4th-place   Author: daustingm1   File: siamese_inference.py    Apache License 2.0
def set_lr(model, lr):
    K.set_value(model.optimizer.lr, float(lr)) 
Example 36
Project: quickcnn   Author: CG1507   File: hist_callback.py    MIT License
def on_epoch_begin(self, epoch, logs=None):
		if not hasattr(self.model.optimizer, 'lr'):
			raise ValueError('Optimizer must have a "lr" attribute.')
		lr = float(K.get_value(self.model.optimizer.lr))
		try:  # new API
			lr = self.schedule(epoch, lr)
		except TypeError:  # old API for backward compatibility
			lr = self.schedule(epoch)
		if not isinstance(lr, (float, np.float32, np.float64)):
			raise ValueError('The output of the "schedule" function '
							 'should be float.')
		K.set_value(self.model.optimizer.lr, lr)
		if self.verbose > 0:
			print('\nEpoch %05d: LearningRateScheduler setting learning '
				  'rate to %s.' % (epoch + 1, lr)) 
Example 37
Project: quickcnn   Author: CG1507   File: hist_callback.py    MIT License
def on_epoch_end(self, epoch, logs=None):
		logs = logs or {}
		logs['lr'] = K.get_value(self.model.optimizer.lr)
		current = logs.get(self.monitor)
		if current is None:
			warnings.warn(
				'Reduce LR on plateau conditioned on metric `%s` '
				'which is not available. Available metrics are: %s' %
				(self.monitor, ','.join(list(logs.keys()))), RuntimeWarning
			)

		else:
			if self.in_cooldown():
				self.cooldown_counter -= 1
				self.wait = 0

			if self.monitor_op(current, self.best):
				self.best = current
				self.wait = 0
			elif not self.in_cooldown():
				self.wait += 1
				if self.wait >= self.patience:
					old_lr = float(K.get_value(self.model.optimizer.lr))
					if old_lr > self.min_lr:
						new_lr = old_lr * self.factor
						new_lr = max(new_lr, self.min_lr)
						K.set_value(self.model.optimizer.lr, new_lr)
						if self.verbose > 0:
							print('\nEpoch %05d: ReduceLROnPlateau reducing '
								  'learning rate to %s.' % (epoch + 1, new_lr))
						self.cooldown_counter = self.cooldown
						self.wait = 0 
Example 38
Project: PointNet-Keras   Author: TianzhongSong   File: callbacks.py    MIT License
def change_lr(self, new_lr):
        old_lr = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, new_lr)
        if self.verbose == 1:
            print('Learning rate is %g' %new_lr) 
Example 39
Project: PointNet-Keras   Author: TianzhongSong   File: callbacks.py    MIT License
def on_batch_begin(self, batch, logs={}):
        self.itr += 1
        cycle = 1 + self.itr/int(2*self.hc)
        x = self.itr - (2.*cycle - 1)*self.hc
        x /= self.hc
        new_lr = self.lr[0] + (self.lr[1] - self.lr[0])*(1 - abs(x))/cycle

        K.set_value(self.model.optimizer.lr, new_lr) 
Example 40
Project: Trident-Segmentation-CNN   Author: YalongLiu   File: self_utils.py    MIT License
def on_batch_begin(self, epoch, logs=None):
        if epoch < self.warmup_epochs:
            # Learning rate ramps from 0 to self.config_lr over the first self.warmup_epochs epochs
            K.set_value(self.model.optimizer.lr, self.config_lr * (epoch + 1) / self.warmup_epochs) 
Example 41
Project: keras-swa   Author: simon-larsson   File: keras.py    MIT License
def _update_lr(self, epoch):  
        
        if self.is_batch_norm_epoch:
            K.set_value(self.model.optimizer.lr, 0)
        elif self.lr_schedule == 'constant':
            lr = self._constant_schedule(epoch)
            K.set_value(self.model.optimizer.lr, lr)
        elif self.lr_schedule == 'cyclic':
            lr = self._cyclic_schedule(epoch)
            K.set_value(self.model.optimizer.lr, lr) 
Example 42
Project: poker   Author: surgebiswas   File: callbacks.py    MIT License
def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        lr = self.schedule(epoch)
        if not isinstance(lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             'should be float.')
        K.set_value(self.model.optimizer.lr, lr) 
Example 43
Project: poker   Author: surgebiswas   File: callbacks.py    MIT License
def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
                          self.monitor, RuntimeWarning)
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0

            if self.monitor_op(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                if self.wait >= self.patience:
                    old_lr = float(K.get_value(self.model.optimizer.lr))
                    if old_lr > self.min_lr + self.lr_epsilon:
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        K.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            print('\nEpoch %05d: reducing learning rate to %s.' % (epoch, new_lr))
                        self.cooldown_counter = self.cooldown
                        self.wait = 0
                self.wait += 1 
Example 44
Project: PSPNet-Keras-tensorflow   Author: Vladkryvoruchko   File: callbacks.py    MIT License
def on_epoch_end(self, epoch, logs={}):
    lr_now = K.get_value(self.model.optimizer.lr)
    new_lr = max(0.00001, min(self.base_lr * (1 - epoch / float(self.max_epoch))**self.power, lr_now))
    K.set_value(self.model.optimizer.lr, new_lr)
    if self.verbose:
        print(" - learning rate: %10f" % (new_lr)) 
Example 45
Project: diktya   Author: BioroboticsLab   File: callbacks.py    Apache License 2.0
def on_epoch_end(self, epoch, logs={}):
        if epoch in self.schedule:
            new_value = self.schedule[epoch]
            print()
            print("Setting learning rate to: {}".format(new_value))
            K.set_value(self.optimizer.lr, new_value) 
Example 46
Project: diktya   Author: BioroboticsLab   File: callbacks.py    Apache License 2.0
def on_epoch_end(self, epoch, logs={}):
        mean_loss = np.array(self.epoch_log).mean()
        if mean_loss + self.min_improvement <= self.current_best:
            self.current_best = mean_loss
            self.current_best_epoch = epoch

        if epoch - self.current_best_epoch > self.epoch_patience:
            lr = K.get_value(self.optimizer.lr)
            new_lr = lr*self.factor
            self.min_improvement *= self.factor
            K.set_value(self.optimizer.lr, new_lr)
            print()
            print("Reduce learning rate to: {:08f}".format(new_lr))
            self.current_best_epoch = epoch 
Example 47
Project: WaveNILM   Author: picagrad   File: waveNILM.py    MIT License
def reset_weights(model,lr):
	# Resetting weights of model for next iteration of cross validation
	session = K.get_session()
	for layer in model.layers: 
		if hasattr(layer, 'kernel_initializer'):
			layer.kernel.initializer.run(session=session)
	K.set_value(model.optimizer.lr, lr) #resetting learning rate (which is updated during training by optimizer) 
Example 48
Project: keras-snapshot_ensembles   Author: arthurdouillard   File: snapshot.py    MIT License
def on_epoch_begin(self, epoch, logs=None):
        if epoch <= 0: return

        lr = self.schedule(epoch)
        K.set_value(self.model.optimizer.lr, lr)

        if self.verbose > 0:
            print('\nEpoch %05d: Snapshot modifying learning '
                  'rate to %s.' % (epoch + 1, lr))
Example 49
Project: NTM-Keras   Author: SigmaQuan   File: lstm2ntm.py    MIT License
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim)),
                           K.zeros((input_shape[0], self.output_dim))] 
Example 50
Project: StarGAN-Keras   Author: hoangthang1607   File: StarGAN.py    MIT License
def train(self):
        data_iter = get_loader(self.Image_data_class.train_dataset, self.Image_data_class.train_dataset_label, self.Image_data_class.train_dataset_fix_label, 
                               image_size=self.image_size, batch_size=self.batch_size, mode=self.mode)

        # Training
        valid = -np.ones((self.batch_size, 2, 2, 1))
        fake =  np.ones((self.batch_size, 2, 2, 1))
        dummy = np.zeros((self.batch_size, 2, 2, 1)) # Dummy gt for gradient penalty
        for epoch in range(self.num_iters):
            imgs, orig_labels, target_labels, fix_labels, _ = next(data_iter)
    
            # Setting learning rate (linear decay)
            if epoch > (self.num_iters - self.num_iters_decay):
                K.set_value(self.train_D.optimizer.lr, self.d_lr*(self.num_iters - epoch)/(self.num_iters - self.num_iters_decay))
                K.set_value(self.train_G.optimizer.lr, self.g_lr*(self.num_iters - epoch)/(self.num_iters - self.num_iters_decay))
    
            # Training Discriminators        
            D_loss = self.train_D.train_on_batch(x = [imgs, target_labels], y = [valid, orig_labels, fake, dummy])
        
            # Training Generators
            if (epoch + 1) % self.n_critic == 0:
                G_loss = self.train_G.train_on_batch(x = [imgs, orig_labels, target_labels], y = [valid, target_labels, imgs])
        
            if (epoch + 1) % self.log_step == 0:
                print(f"Iteration: [{epoch + 1}/{self.num_iters}]")
                print(f"\tD/loss_real = [{D_loss[1]:.4f}], D/loss_fake = [{D_loss[3]:.4f}], D/loss_cls =  [{D_loss[2]:.4f}], D/loss_gp = [{D_loss[4]:.4f}]")
                print(f"\tG/loss_fake = [{G_loss[1]:.4f}], G/loss_rec = [{G_loss[3]:.4f}], G/loss_cls = [{G_loss[2]:.4f}]") 

            if (epoch + 1) % self.model_save_step == 0:  
                self.G.save_weights(os.path.join(self.model_save_dir, 'G_weights.hdf5'))
                self.D.save_weights(os.path.join(self.model_save_dir, 'D_weights.hdf5'))
                self.train_D.save_weights(os.path.join(self.model_save_dir, 'train_D_weights.hdf5'))
                self.train_G.save_weights(os.path.join(self.model_save_dir, 'train_G_weights.hdf5')) 
Example 51
Project: FSA-Net   Author: shamangary   File: TYY_callbacks.py    Apache License 2.0
def on_epoch_begin(self, epoch, logs={}):
		
		if epoch in self.startEpoch:
			if epoch == 0:
				ratio = 1
			else:
				ratio = 0.1
			LR = K.get_value(self.model.optimizer.lr)
			K.set_value(self.model.optimizer.lr,LR*ratio)
		
		return 
Example 52
Project: sesemi   Author: vuptran   File: utils.py    MIT License
def on_batch_begin(self, batch, logs={}):
        lr = self.base_lr * (1.0 - (self.batches / self.max_iter)) ** self.power
        K.set_value(self.model.optimizer.lr, lr)
        self.batches += 1 
Example 53
Project: training_results_v0.6   Author: mlperf   File: callbacks.py    Apache License 2.0
def _adjust_learning_rate(self, epoch):
        old_lr = K.get_value(self.model.optimizer.lr)
        new_lr = self.initial_lr * self.multiplier(epoch)
        K.set_value(self.model.optimizer.lr, new_lr)

        if hasattr(self.model.optimizer, 'momentum') and self.momentum_correction:
            # See the paper cited above for more information about momentum correction.
            self.restore_momentum = K.get_value(self.model.optimizer.momentum)
            K.set_value(self.model.optimizer.momentum,
                        self.restore_momentum * new_lr / old_lr) 
Example 54
Project: training_results_v0.6   Author: mlperf   File: callbacks.py    Apache License 2.0
def _restore_momentum_if_needed(self):
        if self.restore_momentum:
            K.set_value(self.model.optimizer.momentum, self.restore_momentum)
            self.restore_momentum = None 
Example 55
Project: dynamicgem   Author: Sujit-O   File: dynamic_triad.py    MIT License
def online_begin(self, begin, end):
        TrainFlow.start_training(self, begin, end)

        Sampler.online_begin(self, begin, end)
        initv = np.random.uniform(0, 1, (self.dataset.nsize, self.flowargs['embdim'])).astype('float32')
        K.set_value(self.online['vars'][0], initv)
        self._sequence.append(None)

    # ends the current online training
    # store online training results
    # we need to reset online training variables 
Example 56
Project: MLT   Author: Maddosaurus   File: helper_keras.py    Apache License 2.0
def keras_train_model_adaptive(model, epochs, batch_size, training_data, training_labels, test_data, test_labels, logdir, model_savename):
    """Train the given model with data and predict the run.

    This training reduces the learning rate on a fixed schedule: every 30 epochs it drops to 10% of its current value."""

    # see https://github.com/keras-team/keras/issues/888#issuecomment-150849433
    def adaptive_lr_scheduler(epoch):
        if (epoch > 0) and (epoch % 30 == 0):
            old_lr = K.get_value(model.optimizer.lr)
            new_lr = old_lr * 0.1
            print("Set LR to {:6.5f}".format(new_lr))
            K.set_value(model.optimizer.lr, new_lr)
        print("Current LR: {:6.5f}".format(K.get_value(model.optimizer.lr)))
        return K.get_value(model.optimizer.lr)


    starttime = datetime.now()
    history = model.fit(
        training_data, training_labels,
        epochs=epochs,
        batch_size=batch_size,
        verbose=1,
        validation_data=(test_data, test_labels),
        callbacks=[
            TensorBoard(log_dir=logdir),
            LearningRateScheduler(adaptive_lr_scheduler)
        ]
    )
    finishtime = datetime.now()
    runtime = finishtime - starttime

    test_predictions_probabilities = model.predict(test_data)

    test_predictions = test_predictions_probabilities.argmax(axis=-1)
    test_predictions_probabilities = test_predictions_probabilities[:, 1]
    # proba[:, 1] keeps just one of the two columns; since the two columns sum to 1, one is enough.

    keras_persist_model(model, model_savename)

    return pe.PredictionEntry(test_labels, test_predictions, test_predictions_probabilities, runtime) 
Example 57
Project: DSP_EMGDL_Chapter   Author: DSIP-UPatras   File: utils.py    MIT License
def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        lr = self.schedule(epoch)
        if not isinstance(lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             'should be float.')
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('\nEpoch %05d: LearningRateScheduler reducing learning '
                  'rate to %s.' % (epoch + 1, lr)) 
Example 58
Project: stochastic_depth_keras   Author: dblN   File: train.py    MIT License
def open_all_gates():
    for t in add_tables:
        K.set_value(t["gate"], 1)


# setup death rate 
Example 59
Project: stochastic_depth_keras   Author: dblN   File: train.py    MIT License
def on_batch_begin(self, batch, logs={}):
        open_all_gates()

        rands = np.random.uniform(size=len(add_tables))
        for t, rand in zip(add_tables, rands):
            if rand < K.get_value(t["death_rate"]):
                K.set_value(t["gate"], 0) 
Example 60
Project: async-rl   Author: Grzego   File: train.py    MIT License
def learn(self, last_observations, actions, rewards, learning_rate=0.001):
        import keras.backend as K
        K.set_value(self.train_net.optimizer.lr, learning_rate)
        frames = len(last_observations)
        self.counter += frames
        # -----
        values, policy = self.train_net.predict([last_observations, self.unroll])
        # -----
        self.targets.fill(0.)
        advantage = rewards - values.flatten()
        self.targets[self.unroll, actions] = 1.
        # -----
        loss = self.train_net.train_on_batch([last_observations, advantage], [rewards, self.targets])
        entropy = np.mean(-policy * np.log(policy + 0.00000001))
        self.pol_loss.append(loss[2])
        self.val_loss.append(loss[1])
        self.entropy.append(entropy)
        self.values.append(np.mean(values))
        min_val, max_val, avg_val = min(self.values), max(self.values), np.mean(self.values)
        print('\rFrames: %8d; Policy-Loss: %10.6f; Avg: %10.6f '
              '--- Value-Loss: %10.6f; Avg: %10.6f '
              '--- Entropy: %7.6f; Avg: %7.6f '
              '--- V-value; Min: %6.3f; Max: %6.3f; Avg: %6.3f' % (
                  self.counter,
                  loss[2], np.mean(self.pol_loss),
                  loss[1], np.mean(self.val_loss),
                  entropy, np.mean(self.entropy),
                  min_val, max_val, avg_val), end='')
        # -----
        self.swap_counter -= frames
        if self.swap_counter < 0:
            self.swap_counter += self.swap_freq
            return True
        return False 
Example 61
Project: spec-img-finesse   Author: kilinco   File: final.py    MIT License
def set_mask(self, mask):
        K.set_value(self.mask, mask) 
Example 62
Project: CycleGAN-Keras   Author: simontomaskarlsson   File: model.py    GNU General Public License v3.0
def update_lr(self, model, decay):
        new_lr = K.get_value(model.optimizer.lr) - decay
        if new_lr < 0:
            new_lr = 0
        # print(K.get_value(model.optimizer.lr))
        K.set_value(model.optimizer.lr, new_lr) 
Example 63
Project: deep_pt_srl   Author: dfalci   File: lstm_model.py    BSD 2-Clause "Simplified" License
def load(self, modelFile, weightFile, learningRate=None):
        nn = self.modelPersistence.load(modelFile, weightFile)
        nn.compile(optimizer=self.config.optimizer, loss=self.config.lossFunction, metrics=['accuracy'])
        if learningRate != None:
            K.set_value(nn.optimizer.lr, learningRate)
        return nn 
Example 64
Project: deep_pt_srl   Author: dfalci   File: lr_reducer.py    BSD 2-Clause "Simplified" License
def setLearningRate(self, new_lr):
        print('NEW LEARNING RATE : {}'.format(new_lr))
        K.set_value(self.nn.optimizer.lr, new_lr) 
Example 65
Project: deep_pt_srl   Author: dfalci   File: lr_reducer.py    BSD 2-Clause "Simplified" License
def on_train_begin(self, logs={}):
        logs = logs or {}

        if self.clr_iterations == 0:
            print('learning rate : {}'.format(self.base_lr))
            K.set_value(self.model.optimizer.lr, self.base_lr)
        else:
            K.set_value(self.model.optimizer.lr, self.clr()) 
Example 66
Project: deep_pt_srl   Author: dfalci   File: lr_reducer.py    BSD 2-Clause "Simplified" License
def on_batch_end(self, epoch, logs=None):

        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1
        K.set_value(self.model.optimizer.lr, self.clr())

        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v) 
Example 67
Project: keras-contrib   Author: keras-team   File: cyclical_learning_rate.py    MIT License
def on_train_begin(self, logs={}):
        logs = logs or {}

        if self.clr_iterations == 0:
            K.set_value(self.model.optimizer.lr, self.base_lr)
        else:
            K.set_value(self.model.optimizer.lr, self.clr()) 
Example 68
Project: keras-contrib   Author: keras-team   File: cyclical_learning_rate.py    MIT License
def on_batch_end(self, epoch, logs=None):

        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1
        K.set_value(self.model.optimizer.lr, self.clr())

        self.history.setdefault(
            'lr', []).append(
            K.get_value(
                self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v) 
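
The two methods in Examples 67 and 68 come from keras-contrib's CyclicLR callback; a usage sketch follows (the toy model and data are invented, and the import path is assumed from the keras-contrib package layout).

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras_contrib.callbacks import CyclicLR  # assumed import path

model = Sequential([Dense(1, input_dim=4)])
model.compile(optimizer='sgd', loss='mse')
x = np.random.rand(512, 4)
y = np.random.rand(512, 1)

# Triangular schedule oscillating between base_lr and max_lr; on_train_begin
# and on_batch_end above push each new rate into the optimizer via K.set_value.
clr = CyclicLR(base_lr=1e-4, max_lr=1e-2, step_size=200., mode='triangular')
model.fit(x, y, epochs=2, batch_size=32, callbacks=[clr])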
Example 69
Project: keras-contrib   Author: keras-team   File: save_load_utils_test.py    MIT License
def test_save_and_load_all_weights():
    '''
    Test save_all_weights and load_all_weights.
    Save and load optimizer and model weights but not configuration.
    '''

    def make_model():
        _x = Input((10,))
        _y = Dense(10)(_x)
        _m = Model(_x, _y)
        _m.compile('adam', 'mean_squared_error')
        _m._make_train_function()
        return _m

    # make a model
    m1 = make_model()
    # set weights
    w1 = m1.layers[1].kernel  # dense layer
    w1value = K.get_value(w1)
    w1value[0, 0:4] = [1, 3, 3, 7]
    K.set_value(w1, w1value)
    # set optimizer weights
    ow1 = m1.optimizer.weights[3]  # momentum weights
    ow1value = K.get_value(ow1)
    ow1value[0, 0:3] = [4, 2, 0]
    K.set_value(ow1, ow1value)
    # save all weights
    save_all_weights(m1, 'model.h5')
    # new model
    m2 = make_model()
    # load all weights
    load_all_weights(m2, 'model.h5')
    # check weights
    assert_allclose(K.get_value(m2.layers[1].kernel)[0, 0:4], [1, 3, 3, 7])
    # check optimizer weights
    assert_allclose(K.get_value(m2.optimizer.weights[3])[0, 0:3], [4, 2, 0])
    os.remove('model.h5') 
Example 70
Project: recurrentshop   Author: farizrahman4u   File: recurrent_highway_networks.py    MIT License
def on_epoch_begin(self, epoch, logs=None):
        if epoch > 5:
            lr = self.lr / 1.04
            K.set_value(self.model.optimizer.lr, lr)

###########################################
# Build Model
########################################### 
Example 71
Project: EUNN-theano   Author: iguanaus   File: custom_layers.py    MIT License
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim))] 
Example 72
Project: deepyeast   Author: tanelp   File: learning_rate_finder.py    MIT License
def find(self, x_train, y_train, batch_size=64, iters=100, start_lr=1e-5, end_lr=10.0):
        self.lr_mult = (end_lr / start_lr) ** (1.0 / iters)
        K.set_value(self.model.optimizer.lr, start_lr)

        iters_per_epoch = x_train.shape[0] // batch_size
        for i in range(iters):
            # get batch
            j = i % (iters_per_epoch - 1)
            ix_start = j * batch_size
            ix_end = (j + 1) * batch_size
            x = x_train[ix_start:ix_end]
            y = y_train[ix_start:ix_end]

            # do 1 step of training
            loss = self.model.train_on_batch(x, y)

            # log metrics
            self.losses.append(loss)
            lr = K.get_value(self.model.optimizer.lr)
            self.lrs.append(lr)

            # stop training if loss too large
            if np.isnan(loss) or np.isinf(loss) or 5 * np.min(self.losses) < loss:
                print("Invalid loss, terminating training")
                break

            # increase lr
            lr *= self.lr_mult
            K.set_value(self.model.optimizer.lr, lr) 
Example 73
Project: research   Author: commaai   File: layers.py    BSD 3-Clause "New" or "Revised" License
def reset_states(self):
      assert self.stateful, 'Layer must be stateful.'
      input_shape = self.input_spec[0].shape
      if not input_shape[0]:
          raise Exception('If a RNN is stateful, a complete ' +
                          'input_shape must be provided (including batch size).')
      if hasattr(self, 'states'):
          K.set_value(self.states[0],
                      np.zeros((input_shape[0], self.output_dim)))
      else:
          self.states = [K.zeros((input_shape[0], self.output_dim))] 
Example 74
Project: research   Author: commaai   File: layers.py    BSD 3-Clause "New" or "Revised" License
def reset_states(self):
      assert self.stateful, 'Layer must be stateful.'
      input_shape = self.input_spec[0].shape
      if not input_shape[0]:
          raise Exception('If a RNN is stateful, a complete ' +
                          'input_shape must be provided (including batch size).')
      if hasattr(self, 'states'):
          K.set_value(self.states[0],
                      np.zeros((input_shape[0], self.output_dim)))
      else:
          self.states = [K.zeros((input_shape[0], self.output_dim))] 
Example 75
Project: telegrad   Author: eyalzk   File: telegram_bot_callback.py    GNU General Public License v3.0
def on_epoch_begin(self, epoch, logs=None):
        # Check if learning rate should be changed
        if self.kbot.modify_lr != 1:
            if not hasattr(self.model.optimizer, 'lr'):
                raise ValueError('Optimizer must have a "lr" attribute.')
            lr = float(K.get_value(self.model.optimizer.lr))  # get current lr
            # new LR
            lr = lr*self.kbot.modify_lr
            K.set_value(self.model.optimizer.lr, lr)
            self.kbot.modify_lr = 1  # Set multiplier back to 1

            message = '\nEpoch %05d: setting learning rate to %s.' % (epoch + 1, lr)
            print(message)
            self.kbot.send_message(message) 
Example 76
Project: convnet-study   Author: robertomest   File: callbacks.py    MIT License
def change_lr(self, new_lr):
        old_lr = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, new_lr)
        if self.verbose == 1:
            print('Learning rate is %g' %new_lr) 
Example 77
Project: kernel-activation-functions   Author: ispamm   File: kafnets.py    MIT License
def build(self, input_shape):

        # Initialize the fixed dictionary
        d = np.linspace(-self.boundary, self.boundary, self.D).astype(np.float32).reshape(-1, 1)
        
        if self.conv:
            self.dict = self.add_weight(name='dict', 
                                      shape=(1, 1, 1, 1, self.D),
                                      initializer='uniform',
                                      trainable=False)
            K.set_value(self.dict, d.reshape(1, 1, 1, 1, -1))
        else:
            self.dict = self.add_weight(name='dict', 
                                      shape=(1, 1, self.D),
                                      initializer='uniform',
                                      trainable=False)
            K.set_value(self.dict, d.reshape(1, 1, -1))
        
        if self.kernel == 'gaussian':
            self.kernel_fcn = self.gaussian_kernel
            # Rule of thumb for gamma
            interval = (d[1] - d[0])
            sigma = 2 * interval  # empirically chosen
            self.gamma = 0.5 / np.square(sigma)
        elif self.kernel == 'softplus':
            self.kernel_fcn = self.softplus_kernel
        else:
            self.kernel_fcn = self.relu_kernel
            
        
        # Mixing coefficients
        if self.conv:
            self.alpha = self.add_weight(name='alpha', 
                                         shape=(1, 1, 1, self.num_parameters, self.D),
                                         initializer='normal',
                                         trainable=True)
        else:
            self.alpha = self.add_weight(name='alpha', 
                                         shape=(1, self.num_parameters, self.D),
                                         initializer='normal',
                                         trainable=True)

        # Optional initialization with kernel ridge regression
        if self.init_fcn is not None:
            if self.kernel == 'gaussian':
              kernel_matrix = np.exp(- self.gamma*(d - d.T) ** 2)
            elif self.kernel == 'softplus':
              kernel_matrix = np.log(np.exp(d - d.T) + 1.0)
            else:
              raise ValueError('Cannot perform kernel ridge regression with ReLU kernel (singular matrix)')
            
            alpha_init = np.linalg.solve(kernel_matrix + 1e-5*np.eye(self.D), self.init_fcn(d)).reshape(-1)
            if self.conv:
                K.set_value(self.alpha, np.repeat(alpha_init.reshape(1, 1, 1, 1, -1), self.num_parameters, axis=3))
            else:
                K.set_value(self.alpha, np.repeat(alpha_init.reshape(1, 1, -1), self.num_parameters, axis=1))
        
        super(KAF, self).build(input_shape) 
Example 78
Project: recipe-summarization   Author: rtlee9   File: model.py    MIT License
def create_model(vocab_size, embedding_size, LR, rnn_layers, rnn_size, embedding=None):
    """Construct and compile LSTM model."""
    # create a standard stacked LSTM
    if embedding is not None:
        embedding = [embedding]
    model = Sequential()
    model.add(Embedding(vocab_size, embedding_size,
                        input_length=maxlen,
                        W_regularizer=regularizer, dropout=p_emb, weights=embedding, mask_zero=True,
                        name='embedding_1'))
    for i in range(rnn_layers):
        lstm = LSTM(rnn_size, return_sequences=True,
                    W_regularizer=regularizer, U_regularizer=regularizer,
                    b_regularizer=regularizer, dropout_W=p_W, dropout_U=p_U,
                    name='lstm_{}'.format(i + 1))
        model.add(lstm)
        model.add(Dropout(p_dense, name='dropout_{}'.format(i + 1)))

    def simple_context(X, mask, n=activation_rnn_size):
        """Reduce the input just to its headline part (second half).

        For each word in this part it concatenate the output of the previous layer (RNN)
        with a weighted average of the outputs of the description part.
        In this only the last `rnn_size - activation_rnn_size` are used from each output.
        The first `activation_rnn_size` output is used to computer the weights for the averaging.
        """
        desc, head = X[:, :maxlend, :], X[:, maxlend:, :]
        head_activations, head_words = head[:, :, :n], head[:, :, n:]
        desc_activations, desc_words = desc[:, :, :n], desc[:, :, n:]

        # See http://deeplearning.net/software/theano/library/tensor/basic.html#theano.tensor.batched_tensordot
        # activation for every head word and every desc word
        activation_energies = K.batch_dot(head_activations, desc_activations, axes=(2, 2))
        # make sure we don't use description words that are masked out
        activation_energies = activation_energies + -1e20 * K.expand_dims(
            1. - K.cast(mask[:, :maxlend], 'float32'), 1)

        # for every head word compute weights for every desc word
        activation_energies = K.reshape(activation_energies, (-1, maxlend))
        activation_weights = K.softmax(activation_energies)
        activation_weights = K.reshape(activation_weights, (-1, maxlenh, maxlend))

        # for every head word compute weighted average of desc words
        desc_avg_word = K.batch_dot(activation_weights, desc_words, axes=(2, 1))
        return K.concatenate((desc_avg_word, head_words))

    if activation_rnn_size:
        model.add(SimpleContext(simple_context, rnn_size, name='simplecontext_1'))

    model.add(TimeDistributed(Dense(
        vocab_size,
        W_regularizer=regularizer,
        b_regularizer=regularizer,
        name='timedistributed_1')))
    model.add(Activation('softmax', name='activation_1'))

    # opt = Adam(lr=LR)  # keep calm and reduce learning rate
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)

    K.set_value(model.optimizer.lr, np.float32(LR))
    return model 
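
Note how the learning rate is injected only after compile(), by writing directly into the optimizer's `lr` variable with K.set_value. A minimal standalone sketch of that pattern follows; the toy model and the 'rmsprop' optimizer are chosen purely for illustration.

import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(1, input_dim=4))
model.compile(loss='mse', optimizer='rmsprop')

# Override the compiled optimizer's learning rate in place, as create_model() does.
K.set_value(model.optimizer.lr, np.float32(1e-4))
print(K.get_value(model.optimizer.lr))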
Example 79
Project: NTM-Keras   Author: SigmaQuan   File: ntm.py    MIT License 4 votes vote down vote up
def reset_states(self):
        print("begin reset_states(self)")
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        self.depth = 0
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')
        if hasattr(self, 'states'):
            # previous controller output (h_tm1)
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.controller_output_dim)))
            # previous controller cell (c_tm1)
            K.set_value(self.states[1],
                        np.zeros((input_shape[0], self.controller_output_dim)))
            # previous memory
            K.set_value(self.states[2],
                        np.zeros((input_shape[0], self.memory_dim * self.memory_size)))
            # previous write addresses
            K.set_value(self.states[3],
                        np.zeros((input_shape[0], self.num_write_head * self.memory_size)))
            # previous read addresses
            K.set_value(self.states[4],
                        np.zeros((input_shape[0], self.num_read_head * self.memory_size)))
            # previous read content
            K.set_value(self.states[5],
                        np.zeros((input_shape[0], self.num_read_head * self.memory_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.controller_output_dim)),             # h_tm1
                           K.zeros((input_shape[0], self.controller_output_dim)),             # c_tm1
                           K.zeros((input_shape[0], self.memory_dim * self.memory_size)),     # memory
                           K.zeros((input_shape[0], self.num_write_head * self.memory_size)), # write addresses
                           K.zeros((input_shape[0], self.num_read_head * self.memory_size)),  # read addresses
                           K.zeros((input_shape[0], self.num_read_head * self.memory_dim))]   # read content
        print("end reset_states(self)\n") 
Example 80
Project: embedding-as-service   Author: amansrivastava17   File: qrnn.py    MIT License 4 votes vote down vote up
def reset_states(self, states=None):
        if not self.stateful:
            raise AttributeError('Layer must be stateful.')
        if not self.input_spec:
            raise RuntimeError('Layer has never been called '
                               'and thus has no states.')

        batch_size = self.input_spec.shape[0]
        if not batch_size:
            raise ValueError('If a QRNN is stateful, it needs to know '
                             'its batch size. Specify the batch size '
                             'of your input tensors: \n'
                             '- If using a Sequential model, '
                             'specify the batch size by passing '
                             'a `batch_input_shape` '
                             'argument to your first layer.\n'
                             '- If using the functional API, specify '
                             'the batch size by passing a '
                             '`batch_shape` argument to your Input layer.')

        if self.states[0] is None:
            self.states = [K.zeros((batch_size, self.units))
                           for _ in self.states]
        elif states is None:
            for state in self.states:
                K.set_value(state, np.zeros((batch_size, self.units)))
        else:
            if not isinstance(states, (list, tuple)):
                states = [states]
            if len(states) != len(self.states):
                raise ValueError('Layer ' + self.name + ' expects ' +
                                 str(len(self.states)) + ' states, '
                                 'but it received ' + str(len(states)) +
                                 ' state values. Input received: ' +
                                 str(states))
            for index, (value, state) in enumerate(zip(states, self.states)):
                if value.shape != (batch_size, self.units):
                    raise ValueError('State ' + str(index) +
                                     ' is incompatible with layer ' +
                                     self.name + ': expected shape=' +
                                     str((batch_size, self.units)) +
                                     ', found shape=' + str(value.shape))
                K.set_value(state, value)
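
The final branch above accepts caller-supplied state values, validates their shapes, and writes them with K.set_value. A standalone mirror of that loop is sketched below; the shapes and restored values are made up for illustration.

import numpy as np
from keras import backend as K

batch_size, units = 4, 8
states = [K.zeros((batch_size, units)) for _ in range(2)]

# Hypothetical values to restore, e.g. saved from an earlier run.
values = [np.full((batch_size, units), 0.5, dtype='float32') for _ in states]

for index, (value, state) in enumerate(zip(values, states)):
    # Shape check mirrors the layer's validation before the in-place write.
    if value.shape != (batch_size, units):
        raise ValueError('State %d has shape %s, expected %s'
                         % (index, value.shape, (batch_size, units)))
    K.set_value(state, value)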