Python keras.backend.greater() Examples
The following are 17 code examples showing how to use keras.backend.greater(), extracted from open source projects. You may also want to check out the other available functions and classes of the keras.backend module.
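As a quick orientation before the examples: keras.backend.greater(x, y) performs an element-wise x > y comparison and returns a boolean tensor of the same shape, which is typically cast back to floats to serve as a 0/1 mask. A minimal sketch (the tensor values here are illustrative):

import keras.backend as K

x = K.variable([[1., -2.], [3., 0.]])
mask = K.greater(x, 0)             # element-wise x > 0, dtype bool
mask_f = K.cast(mask, K.floatx())  # cast to 0./1. to use as a mask
print(K.eval(mask_f))              # [[1. 0.] [1. 0.]]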
Example 1
Project: snn_toolbox Author: NeuromorphicProcessorProject File: temporal_mean_rate_theano.py License: MIT License
def set_reset_mem(self, mem, spikes):
    """
    Reset membrane potential ``mem`` array where ``spikes`` array is nonzero.
    """

    spike_idxs = k.T.nonzero(spikes)
    if (hasattr(self, 'activation_str') and
            self.activation_str == 'softmax'):
        new = mem.copy()  # k.T.set_subtensor(mem[spike_idxs], 0.)
    elif self.config.get('cell', 'reset') == 'Reset by subtraction':
        if self.payloads:  # Experimental.
            new = k.T.set_subtensor(mem[spike_idxs], 0.)
        else:
            pos_spike_idxs = k.T.nonzero(k.greater(spikes, 0))
            neg_spike_idxs = k.T.nonzero(k.less(spikes, 0))
            new = k.T.inc_subtensor(mem[pos_spike_idxs], -self.v_thresh)
            new = k.T.inc_subtensor(new[neg_spike_idxs], self.v_thresh)
    elif self.config.get('cell', 'reset') == 'Reset by modulo':
        new = k.T.set_subtensor(mem[spike_idxs],
                                mem[spike_idxs] % self.v_thresh)
    else:  # self.config.get('cell', 'reset') == 'Reset to zero':
        new = k.T.set_subtensor(mem[spike_idxs], 0.)
    self.add_update([(self.mem, new)])
Example 2
Project: cs-ranking Author: kiudee File: metrics.py License: Apache License 2.0
def zero_one_rank_loss(y_true, y_pred):
    y_true, y_pred = tensorify(y_true), tensorify(y_pred)
    mask = K.greater(y_true[:, None] - y_true[:, :, None], 0)
    # Count the number of mistakes (here: position difference less than 0)
    mask2 = K.less(y_pred[:, None] - y_pred[:, :, None], 0)
    mask3 = K.equal(y_pred[:, None] - y_pred[:, :, None], 0)

    # Calculate transpositions
    transpositions = tf.logical_and(mask, mask2)
    transpositions = K.sum(K.cast(transpositions, dtype="float32"), axis=[1, 2])

    n_objects = K.max(y_true) + 1
    transpositions += (
        K.sum(K.cast(mask3, dtype="float32"), axis=[1, 2]) - n_objects
    ) / 4.0
    denominator = K.cast((n_objects * (n_objects - 1.0)) / 2.0, dtype="float32")
    result = transpositions / denominator
    return K.mean(result)
Example 3
Project: cs-ranking Author: kiudee File: metrics.py License: Apache License 2.0
def zero_one_rank_loss_for_scores_ties(y_true, s_pred):
    y_true, s_pred = tensorify(y_true), tensorify(s_pred)
    n_objects = K.cast(K.max(y_true) + 1, dtype="float32")
    mask = K.greater(y_true[:, None] - y_true[:, :, None], 0)
    mask2 = K.greater(s_pred[:, None] - s_pred[:, :, None], 0)
    mask3 = K.equal(s_pred[:, None] - s_pred[:, :, None], 0)

    # Calculate transpositions
    transpositions = tf.logical_and(mask, mask2)
    transpositions = K.sum(K.cast(transpositions, dtype="float32"), axis=[1, 2])
    transpositions += (
        K.sum(K.cast(mask3, dtype="float32"), axis=[1, 2]) - n_objects
    ) / 4.0
    denominator = n_objects * (n_objects - 1.0) / 2.0
    result = transpositions / denominator
    return K.mean(result)
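Examples 2 and 3 compute a pairwise zero-one rank loss: count the object pairs whose relative order disagrees between the true and predicted rankings (with ties counted as half a mistake) and divide by the number of pairs, n(n-1)/2. As a rough sanity check, here is the same count for a single instance in plain NumPy (the toy rankings are illustrative, and ties are ignored for brevity):

import numpy as np

y_true = np.array([0, 1, 2, 3])  # true ranks of 4 objects
y_pred = np.array([0, 2, 1, 3])  # predicted ranks: objects 1 and 2 swapped

# A transposition is a pair ordered one way in y_true and the other in y_pred.
diff_true = y_true[:, None] - y_true[None, :]
diff_pred = y_pred[:, None] - y_pred[None, :]
transpositions = np.logical_and(diff_true > 0, diff_pred < 0).sum()

n = y_true.max() + 1
print(transpositions / (n * (n - 1) / 2.0))  # 1 swapped pair out of 6 -> 0.1667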
Example 4
Project: icassp19 Author: edufonseca File: losses.py License: MIT License
def crossentropy_max_wrap(_m):
    def crossentropy_max_core(y_true, y_pred):
        """
        This function is based on the one proposed in
        Il-Young Jeong and Hyungui Lim, "AUDIO TAGGING SYSTEM FOR DCASE 2018:
        FOCUSING ON LABEL NOISE, DATA AUGMENTATION AND ITS EFFICIENT LEARNING",
        Tech Report, DCASE 2018
        https://github.com/finejuly/dcase2018_task2_cochlearai

        :param y_true:
        :param y_pred:
        :return:
        """

        # hyper param
        print(_m)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)

        # threshold
        t_m = K.max(_loss) * _m
        _mask_m = 1 - (K.cast(K.greater(_loss, t_m), 'float32'))
        _loss = _loss * _mask_m

        return _loss
    return crossentropy_max_core
Example 5
Project: icassp19 Author: edufonseca File: losses.py License: MIT License
def crossentropy_outlier_wrap(_l):
    def crossentropy_outlier_core(y_true, y_pred):

        # hyper param
        print(_l)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)

        def _get_real_median(_v):
            """
            Given a tensor with shape (batch_size,), compute and return the
            median.

            :param _v:
            :return:
            """
            # Note: the hard-coded 33 implies a fixed batch size of 64, so that
            # _val[-2] and _val[-1] are the two middle-order statistics.
            _val = tf.nn.top_k(_v, 33).values
            return 0.5 * (_val[-1] + _val[-2])

        _mean_loss, _var_loss = tf.nn.moments(_loss, axes=[0])
        _median_loss = _get_real_median(_loss)
        _std_loss = tf.sqrt(_var_loss)

        # threshold
        t_l = _median_loss + _l * _std_loss
        _mask_l = 1 - (K.cast(K.greater(_loss, t_l), 'float32'))
        _loss = _loss * _mask_l

        return _loss
    return crossentropy_outlier_core


#########################################################################
# From here on we distinguish data points in the batch based on their origin.
# We only apply robustness measures to the data points coming from the noisy
# subset; therefore, the next functions are used only when training with the
# entire train set.
#########################################################################
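The origin-aware losses below all start from the same convention: a patch from the noisy subset carries a label vector scaled by 100 (a one-hot of 100), so summing y_true along the class axis and comparing against 90 with K.greater() recovers a boolean origin flag. A minimal sketch of that decoding step, with illustrative labels:

import keras.backend as K

# Row 0: clean patch (standard one-hot); row 1: noisy patch (one-hot * 100).
y_true = K.variable([[0., 1., 0.],
                     [0., 0., 100.]])
flag = K.greater(K.sum(y_true, axis=-1), 90)  # [False, True]

# Rescale flagged rows by 0.01 so every row is a valid one-hot again.
mask = K.cast(flag, 'float32') * 0.01 + K.cast(K.equal(flag, False), 'float32')
y_valid = y_true * K.reshape(mask, (-1, 1))
print(K.eval(y_valid))  # both rows now sum to 1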
Example 6
Project: icassp19 Author: edufonseca File: losses.py License: MIT License
def crossentropy_max_origin_wrap(_m):
    def crossentropy_max_origin_core(y_true, y_pred):

        # hyper param
        print(_m)

        # 1) determine the origin of the patch, as a boolean vector y_true_flag
        # (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)

        # 2) convert the input y_true (with flags inside) into a valid y_true
        # one-hot-vector format
        # attenuating factor for data points that need it (those that came
        # with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01
        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')
        # combine 2 masks
        _mask = _mask_reduce + _mask_keep
        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))
        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask

        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)

        # threshold m
        t_m = K.max(_loss) * _m
        _mask_m = 1 - (K.cast(K.greater(_loss, t_m), 'float32') *
                       K.cast(_y_true_flag, 'float32'))
        _loss = _loss * _mask_m

        return _loss
    return crossentropy_max_origin_core
Example 7
Project: snn_toolbox Author: NeuromorphicProcessorProject File: temporal_mean_rate_theano.py License: MIT License
def binary_sigmoid_activation(self, mem):
    """Binary sigmoid activation."""

    return k.T.mul(k.greater(mem, 0), self.v_thresh)
Example 8
Project: snn_toolbox Author: NeuromorphicProcessorProject File: temporal_mean_rate_theano.py License: MIT License
def binary_tanh_activation(self, mem):
    """Binary tanh activation."""

    output_spikes = k.T.mul(k.greater(mem, 0), self.v_thresh)
    output_spikes += k.T.mul(k.less(mem, 0), -self.v_thresh)
    return output_spikes
Example 9
Project: snn_toolbox Author: NeuromorphicProcessorProject File: temporal_mean_rate_theano.py License: MIT License
def get_new_mem(self):
    """Add input to membrane potential."""

    # Destroy impulse if in refractory period
    masked_impulse = self.impulse if self.tau_refrac == 0 else \
        k.T.set_subtensor(
            self.impulse[k.T.nonzero(self.refrac_until > self.time)], 0.)

    # Add impulse
    if clamp_var:
        # Experimental: Clamp the membrane potential to zero until the
        # presynaptic neurons fire at their steady-state rates. This helps
        # avoid a transient response.
        new_mem = theano.ifelse.ifelse(
            k.less(k.mean(self.var), 1e-4) +
            k.greater(self.time, self.duration / 2),
            self.mem + masked_impulse, self.mem)
    elif hasattr(self, 'clamp_idx'):
        # Set clamp-duration by a specific delay from layer to layer.
        new_mem = theano.ifelse.ifelse(k.less(self.time, self.clamp_idx),
                                       self.mem,
                                       self.mem + masked_impulse)
    elif v_clip:
        # Clip membrane potential to prevent too strong accumulation.
        new_mem = k.clip(self.mem + masked_impulse, -3, 3)
    else:
        new_mem = self.mem + masked_impulse

    if self.config.getboolean('cell', 'leak'):
        # Todo: Implement more flexible version of leak!
        new_mem = k.T.inc_subtensor(
            new_mem[k.T.nonzero(k.T.gt(new_mem, 0))], -0.1 * self.dt)

    return new_mem
Example 10
Project: snn_toolbox Author: NeuromorphicProcessorProject File: temporal_mean_rate_theano.py License: MIT License
def get_new_thresh(self):
    """Get new threshold."""

    thr_min = self._v_thresh / 100
    thr_max = self._v_thresh
    r_lim = 1 / self.dt
    return thr_min + (thr_max - thr_min) * self.max_spikerate / r_lim

    # return theano.ifelse.ifelse(
    #     k.equal(self.time / self.dt % settings['timestep_fraction'], 0) *
    #     k.greater(self.max_spikerate, settings['diff_to_min_rate'] / 1000) *
    #     k.greater(1 / self.dt - self.max_spikerate,
    #               settings['diff_to_max_rate'] / 1000),
    #     self.max_spikerate, self.v_thresh)
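As a quick numeric check of the interpolation above (values are illustrative): with _v_thresh = 1.0 and dt = 0.1, we have thr_min = 0.01, thr_max = 1.0 and r_lim = 10, so a max_spikerate of 5 (half the rate limit) yields a new threshold of 0.01 + (1.0 - 0.01) * 5 / 10 = 0.505.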
Example 11
Project: cyclegan_keras Author: alecGraves File: losses.py License: The Unlicense
def discriminator_loss(y_true, y_pred):
    loss = mean_squared_error(y_true, y_pred)
    is_large = k.greater(loss, k.constant(_disc_train_thresh))  # threshold
    is_large = k.cast(is_large, k.floatx())
    # binary-threshold the loss to prevent overtraining the discriminator
    return loss * is_large
Example 12
Project: onto-lstm Author: pdasigi File: preposition_predictors.py License: Apache License 2.0
def get_split_averages(input_tensor, input_mask, indices):
    # Splits input tensor into three parts based on the indices and
    # returns average of values prior to index, values at the index and
    # average of values after the index.
    # input_tensor: (batch_size, input_length, input_dim)
    # input_mask: (batch_size, input_length)
    # indices: (batch_size, 1)

    # (1, input_length)
    length_range = K.expand_dims(K.arange(K.shape(input_tensor)[1]), dim=0)
    # (batch_size, input_length)
    batched_range = K.repeat_elements(length_range, K.shape(input_tensor)[0], 0)
    # (batch_size, input_length)
    tiled_indices = K.repeat_elements(indices, K.shape(input_tensor)[1], 1)
    greater_mask = K.greater(batched_range, tiled_indices)  # (batch_size, input_length)
    lesser_mask = K.lesser(batched_range, tiled_indices)  # (batch_size, input_length)
    equal_mask = K.equal(batched_range, tiled_indices)  # (batch_size, input_length)

    # We also need to mask these masks using the input mask.
    # (batch_size, input_length)
    if input_mask is not None:
        greater_mask = switch(input_mask, greater_mask, K.zeros_like(greater_mask))
        lesser_mask = switch(input_mask, lesser_mask, K.zeros_like(lesser_mask))

    post_sum = K.sum(switch(K.expand_dims(greater_mask), input_tensor,
                            K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
    pre_sum = K.sum(switch(K.expand_dims(lesser_mask), input_tensor,
                           K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
    values_at_indices = K.sum(switch(K.expand_dims(equal_mask), input_tensor,
                                     K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)

    post_normalizer = K.expand_dims(K.sum(greater_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)
    pre_normalizer = K.expand_dims(K.sum(lesser_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)

    return (K.cast(pre_sum / pre_normalizer, 'float32'), values_at_indices,
            K.cast(post_sum / post_normalizer, 'float32'))
Example 13
Project: Benchmarks Author: ECP-CANDLE File: uq_keras_utils.py License: MIT License
def acc_class1(y_true, y_pred):
    """Function to estimate accuracy over the class 1 prediction.
    This estimation is global (i.e., abstaining samples are not removed).

    Parameters
    ----------
    y_true : keras tensor
        True values to predict
    y_pred : keras tensor
        Prediction made by the model. It is assumed that this keras tensor
        includes extra columns to store the abstaining classes.
    """

    # Find samples in ground truth belonging to class 1
    ytrueint = K.argmax(y_true, axis=-1)
    # Compute total number of ground truth samples in class 1
    total_true1 = K.sum(ytrueint)
    # Find samples in prediction belonging to class 1
    ypredint = K.argmax(y_pred[:, :2], axis=-1)
    # Find correctly predicted class 1 samples
    true1_pred = K.sum(ytrueint * ypredint)
    # Compute accuracy in class 1
    acc = true1_pred / total_true1
    # Since there are so few samples in class 1, it is possible that the
    # ground truth does not have any sample in class 1, leading to a divide
    # by zero and an invalid accuracy.
    # Therefore, for the accuracy to be valid, total_true1 should be greater
    # than zero; otherwise, return 0.
    condition = K.greater(total_true1, 0)
    return K.switch(condition, acc, K.zeros_like(acc, dtype=acc.dtype))
Example 14
Project: Benchmarks Author: ECP-CANDLE File: uq_keras_utils.py License: MIT License
def abs_acc_class1(y_true, y_pred):
    """Function to estimate accuracy over the class 1 prediction after
    removing the samples where the model is abstaining.

    Parameters
    ----------
    y_true : keras tensor
        True values to predict
    y_pred : keras tensor
        Prediction made by the model. It is assumed that this keras tensor
        includes extra columns to store the abstaining classes.
    """

    # Find locations of true class 1 predictions
    ytrueint = K.argmax(y_true, axis=-1)
    # Find locations that are predicted (not abstained)
    mask_pred = K.cast(K.not_equal(K.argmax(y_pred, axis=-1), nb_classes), 'int64')
    # Compute total number of ground truth samples in class 1, filtering out
    # abstaining predictions
    total_true1 = K.sum(ytrueint * mask_pred)
    # Matches in the original class 1 after removing abstention
    true1_pred = K.sum(mask_pred * ytrueint *
                       K.cast(K.equal(K.argmax(y_true, axis=-1),
                                      K.argmax(y_pred, axis=-1)), 'int64'))
    # Compute accuracy in class 1
    acc = true1_pred / total_true1
    # Since there are so few samples in class 1, it is possible that the
    # ground truth does not have any sample in class 1, leading to a divide
    # by zero and an invalid accuracy.
    # Therefore, for the accuracy to be valid, total_true1 should be greater
    # than zero; otherwise, return 0.
    condition = K.greater(total_true1, 0)
    return K.switch(condition, acc, K.zeros_like(acc, dtype=acc.dtype))
Example 15
Project: icassp19 Author: edufonseca File: losses.py License: MIT License
def crossentropy_reed_origin_wrap(_beta):
    def crossentropy_reed_origin_core(y_true, y_pred):

        # hyper param
        print(_beta)

        # 1) determine the origin of the patch, as a boolean vector in
        # y_true_flag (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)

        # 2) convert the input y_true (with flags inside) into a valid y_true
        # one-hot-vector format
        # attenuating factor for data points that need it (those that came
        # with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01
        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')
        # combine 2 masks
        _mask = _mask_reduce + _mask_keep
        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))
        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask

        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # (1) dynamically update the targets based on the current state of the
        # model: bootstrapped target tensor
        # use predicted class proba directly to generate regression targets
        y_true_bootstrapped = _beta * y_true + (1 - _beta) * y_pred

        # at this point we have 2 versions of y_true
        # decide which target label to use for each datapoint
        _mask_noisy = K.cast(_y_true_flag, 'float32')  # only allows patches from noisy set
        _mask_clean = K.cast(K.equal(_y_true_flag, False), 'float32')  # only allows patches from clean set
        _mask_noisy = K.reshape(_mask_noisy, (_y_true_shape[0], 1))
        _mask_clean = K.reshape(_mask_clean, (_y_true_shape[0], 1))

        # points coming from clean set use the standard true one-hot vector;
        # dim is (batch_size, 1)
        # points coming from noisy set use the Reed bootstrapped target tensor
        y_true_final = y_true * _mask_clean + y_true_bootstrapped * _mask_noisy

        # (2) compute loss as always
        _loss = -K.sum(y_true_final * K.log(y_pred), axis=-1)

        return _loss
    return crossentropy_reed_origin_core
Example 16
Project: icassp19 Author: edufonseca File: losses.py License: MIT License
def lq_loss_origin_wrap(_q):
    def lq_loss_origin_core(y_true, y_pred):

        # hyper param
        print(_q)

        # 1) determine the origin of the patch, as a boolean vector in
        # y_true_flag (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)

        # 2) convert the input y_true (with flags inside) into a valid y_true
        # one-hot-vector format
        # attenuating factor for data points that need it (those that came
        # with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01
        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')
        # combine 2 masks
        _mask = _mask_reduce + _mask_keep
        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))
        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask

        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute two types of losses, for all the data points

        # (1) compute CCE loss for every data point
        _loss_CCE = -K.sum(y_true * K.log(y_pred), axis=-1)

        # (2) compute lq_loss for every data point
        _tmp = y_pred * y_true
        _loss_tmp = K.max(_tmp, axis=-1)
        # compute the Lq loss between the one-hot encoded label and the
        # predictions
        _loss_q = (1 - (_loss_tmp + 10 ** (-8)) ** _q) / _q

        # decide which loss to take for each datapoint
        _mask_noisy = K.cast(_y_true_flag, 'float32')  # only allows patches from noisy set
        _mask_clean = K.cast(K.equal(_y_true_flag, False), 'float32')  # only allows patches from clean set

        # points coming from clean set contribute with CCE loss
        # points coming from noisy set contribute with lq_loss
        _loss_final = _loss_CCE * _mask_clean + _loss_q * _mask_noisy

        return _loss_final
    return lq_loss_origin_core
Example 17
Project: DeepLearningImplementations Author: tdeboissiere File: Eve.py License: MIT License
def get_updates(self, params, loss):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.inital_decay > 0:
        lr *= (1. / (1. + self.decay * self.iterations))

    t = self.iterations + 1
    lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

    shapes = [K.get_variable_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]
    f = K.variable(0)
    d = K.variable(1)
    self.weights = [self.iterations] + ms + vs + [f, d]

    cond = K.greater(t, K.variable(1))
    small_delta_t = K.switch(K.greater(loss, f), self.small_k + 1,
                             1. / (self.big_K + 1))
    big_delta_t = K.switch(K.greater(loss, f), self.big_K + 1,
                           1. / (self.small_k + 1))

    c_t = K.minimum(K.maximum(small_delta_t, loss / (f + self.epsilon)),
                    big_delta_t)
    f_t = c_t * f
    r_t = K.abs(f_t - f) / (K.minimum(f_t, f))
    d_t = self.beta_3 * d + (1 - self.beta_3) * r_t

    f_t = K.switch(cond, f_t, loss)
    d_t = K.switch(cond, d_t, K.variable(1.))

    self.updates.append(K.update(f, f_t))
    self.updates.append(K.update(d, d_t))

    for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
        p_t = p - lr_t * m_t / (d_t * K.sqrt(v_t) + self.epsilon)

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))

        new_p = p_t
        self.updates.append(K.update(p, new_p))
    return self.updates