Python tensorflow.keras.backend.set_value() Examples

The following are 16 code examples of tensorflow.keras.backend.set_value(), drawn from open-source projects; the project and source file are noted above each example. set_value() writes a new value (typically a NumPy array or Python scalar) into an existing backend variable in place, which makes it the standard tool for resetting stateful variables and for adjusting optimizer hyperparameters such as the learning rate during training. You may also want to check out the other functions and classes of the tensorflow.keras.backend module.
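At its simplest, set_value() overwrites the contents of a backend variable, and get_value() reads them back as a NumPy array. A minimal, self-contained sketch (all names here are illustrative):

import numpy as np
import tensorflow.keras.backend as K

# Create a backend variable and overwrite its contents in place.
v = K.variable(np.zeros((2, 2)), dtype='float32')
K.set_value(v, np.ones((2, 2)))  # assign a new value of the same shape
print(K.get_value(v))            # read it back as a NumPy array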
Example #1
Source File: ttfs.py    From snn_toolbox with MIT License
def reset_spikevars(self, sample_idx):
        """
        Reset variables present in spiking layers. Can be turned off, for
        instance, when a video sequence is tested.
        """

        mod = self.config.getint('simulation', 'reset_between_nth_sample')
        mod = mod if mod else sample_idx + 1
        do_reset = sample_idx % mod == 0
        if do_reset:
            k.set_value(self.mem, self.init_membrane_potential())
        k.set_value(self.time, np.float32(self.dt))
        zeros_output_shape = np.zeros(self.output_shape, k.floatx())
        if self.tau_refrac > 0:
            k.set_value(self.refrac_until, zeros_output_shape)
        if self.spiketrain is not None:
            k.set_value(self.spiketrain, zeros_output_shape)
        k.set_value(self.last_spiketimes, zeros_output_shape - 1) 
Example #2
Source File: ttfs_dyn_thresh.py    From snn_toolbox with MIT License
def reset_spikevars(self, sample_idx):
        """
        Reset variables present in spiking layers. Can be turned off, for
        instance, when a video sequence is tested.
        """

        mod = self.config.getint('simulation', 'reset_between_nth_sample')
        mod = mod if mod else sample_idx + 1
        do_reset = sample_idx % mod == 0
        if do_reset:
            k.set_value(self.mem, self.init_membrane_potential())
        k.set_value(self.time, np.float32(self.dt))
        zeros_output_shape = np.zeros(self.output_shape, k.floatx())
        if self.tau_refrac > 0:
            k.set_value(self.refrac_until, zeros_output_shape)
        if self.spiketrain is not None:
            k.set_value(self.spiketrain, zeros_output_shape)
        k.set_value(self.last_spiketimes, zeros_output_shape - 1)
        k.set_value(self.v_thresh, zeros_output_shape + self._v_thresh)
        k.set_value(self.prospective_spikes, zeros_output_shape)
        k.set_value(self.missing_impulse, zeros_output_shape) 
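Examples #1 and #2 follow one pattern: build a single zero array matching the layer's output shape, then write it (or an offset of it) into each state variable. A standalone sketch of that pattern, with mem, refrac_until, and last_spiketimes standing in for the snn_toolbox attributes:

import numpy as np
import tensorflow.keras.backend as K

output_shape = (1, 10)  # illustrative layer output shape
mem = K.variable(np.random.rand(*output_shape))
refrac_until = K.variable(np.random.rand(*output_shape))
last_spiketimes = K.variable(np.random.rand(*output_shape))

zeros = np.zeros(output_shape, K.floatx())  # one buffer reused for every reset
K.set_value(mem, zeros)
K.set_value(refrac_until, zeros)
K.set_value(last_spiketimes, zeros - 1)  # -1 marks "no spike recorded yet"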
Example #3
Source File: keras_model.py    From DeepPavlov with Apache License 2.0
def _update_graph_variables(self, learning_rate: float = None, momentum: float = None):
        """
        Update graph variables, setting the given `learning_rate` and `momentum`

        Args:
            learning_rate: learning rate value to be set in graph (set if not None)
            momentum: momentum value to be set in graph (set if not None)

        Returns:
            None
        """
        if learning_rate is not None:
            K.set_value(self.get_learning_rate_variable(), learning_rate)
            # log.info(f"Learning rate = {learning_rate}")
        if momentum is not None:
            K.set_value(self.get_momentum_variable(), momentum)
            # log.info(f"Momentum      = {momentum}") 
Example #4
Source File: keras_words_subtoken_metrics.py    From code2vec with MIT License
def reset_states(self):
        for v in self.variables:
            K.set_value(v, 0) 
Example #5
Source File: utils.py    From bcnn with MIT License
def on_epoch_end(self, epoch, logs=None):
        if epoch >= self.kl_start_epoch - 2:
            new_kl_alpha = min(K.get_value(self.kl_alpha) + self.kl_alpha_increase_per_epoch, 1.)
            K.set_value(self.kl_alpha, new_kl_alpha)
        print("Current KL Weight is " + str(K.get_value(self.kl_alpha)))
Example #6
Source File: cohens_kappa.py    From addons with Apache License 2.0
def reset_states(self):
        """Resets all of the metric state variables."""

        for v in self.variables:
            K.set_value(
                v,
                np.zeros((self.num_classes, self.num_classes), v.dtype.as_numpy_dtype),
            ) 
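The same reset idiom works for any stateful Metric subclass whose state is a square matrix; this self-contained toy metric (not from the addons code) accumulates a confusion matrix and zeroes it in reset_states():

import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K

class ConfusionCount(tf.keras.metrics.Metric):
    """Toy metric keeping a num_classes x num_classes count matrix."""

    def __init__(self, num_classes, name='confusion_count', **kwargs):
        super().__init__(name=name, **kwargs)
        self.num_classes = num_classes
        self.counts = self.add_weight(
            'counts', shape=(num_classes, num_classes), initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        cm = tf.math.confusion_matrix(
            y_true, y_pred, num_classes=self.num_classes,
            dtype=self.counts.dtype)
        self.counts.assign_add(cm)

    def result(self):
        # fraction of correctly classified samples seen so far
        return (tf.reduce_sum(tf.linalg.diag_part(self.counts))
                / tf.maximum(tf.reduce_sum(self.counts), 1.0))

    def reset_states(self):
        for v in self.variables:
            K.set_value(v, np.zeros((self.num_classes, self.num_classes),
                                    v.dtype.as_numpy_dtype))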
Example #7
Source File: shapelets.py    From tslearn with BSD 2-Clause "Simplified" License
def _set_model_layers(self, X, ts_sz, d, n_classes):
        super()._set_model_layers(X=X,
                                  ts_sz=ts_sz,
                                  d=d,
                                  n_classes=n_classes)
        K.set_value(self.model_.optimizer.lr, self.learning_rate) 
Example #8
Source File: ttfs.py    From snn_toolbox with MIT License
def set_time(self, time):
        """Set simulation time variable.

        Parameters
        ----------

        time: float
            Current simulation time.
        """

        k.set_value(self.time, time) 
Example #9
Source File: ttfs_dyn_thresh.py    From snn_toolbox with MIT License
def set_time(self, time):
        """Set simulation time variable.

        Parameters
        ----------

        time: float
            Current simulation time.
        """

        k.set_value(self.time, time) 
Example #10
Source File: ttfs_corrective.py    From snn_toolbox with MIT License
def set_time(self, time):
        """Set simulation time variable.

        Parameters
        ----------

        time: float
            Current simulation time.
        """

        k.set_value(self.time, time) 
Example #11
Source File: callbacks.py    From EfficientDet with Apache License 2.0
def on_batch_end(self, batch, logs):
        if self.iteration_id > self.start_iteration:
            # cosine_decay anneals from 1 to 0 over the current cycle
            cosine_decay = 0.5 * (1 + np.cos(np.pi * (self.cycle_iteration_id / self.cycle_iterations)))
            decayed_lr = (self.max_lr - self.min_lr) * cosine_decay + self.min_lr
            K.set_value(self.model.optimizer.lr, decayed_lr)
            if self.cycle_iteration_id == self.cycle_iterations:
                self.cycle_iteration_id = 0
                self.cycle_iterations = int(self.cycle_iterations * self.t_mu)
            else:
                self.cycle_iteration_id += 1
            self.lrs.append(decayed_lr)
        elif self.iteration_id == self.start_iteration:
            self.max_lr = K.get_value(self.model.optimizer.lr)
        self.iteration_id += 1 
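The cosine term can be sanity-checked outside the callback; min_lr, max_lr, and the cycle length below are illustrative values:

import numpy as np

min_lr, max_lr = 1e-5, 1e-3
cycle_iterations = 1000

def cosine_decayed_lr(cycle_iteration_id):
    # cosine_decay runs from 1 (start of cycle) to 0 (end of cycle)
    cosine_decay = 0.5 * (1 + np.cos(np.pi * cycle_iteration_id / cycle_iterations))
    return (max_lr - min_lr) * cosine_decay + min_lr

print(cosine_decayed_lr(0))                 # max_lr
print(cosine_decayed_lr(cycle_iterations))  # min_lr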
Example #12
Source File: callbacks.py    From EfficientDet with Apache License 2.0
def on_batch_end(self, batch, logs):
        lr = K.get_value(self.model.optimizer.lr)
        self.lrs.append(lr)
        self.losses.append(logs["loss"])
        K.set_value(self.model.optimizer.lr, lr * self.factor) 
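Example #12 is the per-batch step of a learning-rate range test: the rate is multiplied by a fixed factor while the loss is recorded. To sweep geometrically from min_lr to max_lr in a known number of batches, the factor would be chosen as follows (names and values are assumptions):

min_lr, max_lr, num_steps = 1e-6, 1.0, 100
# geometric step: after num_steps multiplications, min_lr reaches max_lr
factor = (max_lr / min_lr) ** (1.0 / num_steps)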
Example #13
Source File: callbacks.py    From EfficientDet with Apache License 2.0
def on_train_begin(self, logs=None):
        K.set_value(self.model.optimizer.lr, self.min_lr) 
Example #14
Source File: callbacks.py    From EfficientDet with Apache License 2.0
def on_batch_begin(self, batch, logs):
        if self.iteration_id < self.iterations:
            lr = (self.max_lr - self.min_lr) / self.iterations * (self.iteration_id + 1) + self.min_lr
            K.set_value(self.model.optimizer.lr, lr)
        self.iteration_id += 1
        self.lrs.append(K.get_value(self.model.optimizer.lr)) 
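This is a linear warm-up: over `iterations` batches the learning rate climbs in equal steps from min_lr to max_lr. The endpoints can be checked directly (values assumed):

min_lr, max_lr, iterations = 0.0, 0.1, 10
first_lr = (max_lr - min_lr) / iterations * (0 + 1) + min_lr      # iteration_id == 0
last_lr = (max_lr - min_lr) / iterations * iterations + min_lr    # final batch: exactly max_lr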
Example #15
Source File: callbacks.py    From EfficientDet with Apache License 2.0
def on_train_begin(self, logs=None):
        self.max_lr = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, self.min_lr)
        self.lrs.append(K.get_value(self.model.optimizer.lr)) 
Example #16
Source File: model.py    From CVPR2019-DeepTreeLearningForZeroShotFaceAntispoofing with MIT License
def train_one_step(self, data_batch, step, training):
        dtn = self.dtn
        dtn_op = self.dtn_op
        image, dmap, labels = data_batch
        with tf.GradientTape() as tape:
            dmap_pred, cls_pred, route_value, leaf_node_mask, tru_loss, mu_update, eigenvalue, trace =\
                dtn(image, labels, True)

            # supervised feature loss
            depth_map_loss = leaf_l1_loss(dmap_pred, tf.image.resize(dmap, [32, 32]), leaf_node_mask)
            class_loss = leaf_l1_loss(cls_pred, labels, leaf_node_mask)
            supervised_loss = depth_map_loss + 0.001*class_loss

            # unsupervised tree loss
            route_loss = tf.reduce_mean(tf.stack(tru_loss[0], axis=0) * [1., 0.5, 0.5, 0.25, 0.25, 0.25, 0.25])
            uniq_loss  = tf.reduce_mean(tf.stack(tru_loss[1], axis=0) * [1., 0.5, 0.5, 0.25, 0.25, 0.25, 0.25])
            eigenvalue = np.mean(np.stack(eigenvalue, axis=0) * [1., 0.5, 0.5, 0.25, 0.25, 0.25, 0.25])
            trace = np.mean(np.stack(trace, axis=0) * [1., 0.5, 0.5, 0.25, 0.25, 0.25, 0.25])
            unsupervised_loss = 2*route_loss + 0.001*uniq_loss

            # total loss
            if step > 10000:
                loss = supervised_loss + unsupervised_loss
            else:
                loss = supervised_loss

        if training:
            # back-propagate
            gradients = tape.gradient(loss, dtn.variables)
            dtn_op.apply_gradients(zip(gradients, dtn.variables))

            # Update mean values for each tree node
            mu_update_rate = self.config.TRU_PARAMETERS["mu_update_rate"]
            mus = [dtn.tru0.project.mu, dtn.tru1.project.mu, dtn.tru2.project.mu, dtn.tru3.project.mu,
                   dtn.tru4.project.mu, dtn.tru5.project.mu, dtn.tru6.project.mu]
            for mu, mu_of_visit in zip(mus, mu_update):
                if step == 0:
                    update_mu = mu_of_visit
                else:
                    update_mu = mu_of_visit * mu_update_rate + mu * (1 - mu_update_rate)
                K.set_value(mu, update_mu)

        # leaf counts
        spoof_counts = []
        for leaf in leaf_node_mask:
            spoof_count = tf.reduce_sum(leaf[:, 0]).numpy()
            spoof_counts.append(int(spoof_count))

        _to_plot = [image, dmap, dmap_pred[0]]

        return depth_map_loss, class_loss, route_loss, uniq_loss, spoof_counts, eigenvalue, trace, _to_plot
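The tree-node update at the end of Example #16 is an exponential moving average written back into the variable with K.set_value(); in isolation (all names and shapes illustrative):

import numpy as np
import tensorflow.keras.backend as K

mu = K.variable(np.zeros(8))     # running mean for one tree node
mu_update_rate = 0.1             # blend weight for the newest batch statistic

mu_of_visit = np.random.rand(8)  # statistic gathered during the forward pass
new_mu = mu_of_visit * mu_update_rate + K.get_value(mu) * (1 - mu_update_rate)
K.set_value(mu, new_mu)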