Python keras.backend.max() Examples
The following are 30 code examples showing how to use keras.backend.max(). They are extracted from open source projects, with the source project and file noted above each example. You may also want to check out all available functions and classes of the keras.backend module.
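As a quick orientation before the project examples: keras.backend.max(x, axis=None, keepdims=False) reduces a tensor to its maximum values along the given axis. A minimal sketch of the axis and keepdims arguments, assuming a TensorFlow-backed Keras install (the variable names are illustrative):

import numpy as np
from keras import backend as K

x = K.constant(np.array([[1., 5., 3.],
                         [4., 2., 6.]]))  # shape (2, 3)

global_max = K.max(x)        # maximum over the whole tensor: scalar 6.0
row_max = K.max(x, axis=-1)  # per-row maximum: [5., 6.], shape (2,)
# keepdims=True keeps the reduced axis, giving shape (2, 1), which
# broadcasts cleanly against x -- the basis of the softmax "max trick"
# used in several examples below.
row_max_kept = K.max(x, axis=-1, keepdims=True)

print(K.eval(global_max), K.eval(row_max), K.eval(row_max_kept))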
Example 1
Project: object-detection | Author: kaka-lin | File: test_tiny_yolo.py | License: MIT License

def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    # Compute box scores
    box_scores = box_confidence * box_class_probs

    # Find the box_classes using the max box_scores, keeping track of the corresponding score
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1, keepdims=False)

    # Create a filtering mask based on "box_class_scores" by using "threshold". The mask has
    # the same dimension as box_class_scores, and is True for the boxes to keep
    # (those with probability >= threshold)
    filtering_mask = box_class_scores >= threshold

    # Apply the mask to scores, boxes and classes
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)

    return scores, boxes, classes
Example 2
Project: steppy-toolkit | Author: minerva-ml | File: contrib.py | License: MIT License

def call(self, x, mask=None):
    # computes a probability distribution over the timesteps
    # uses 'max trick' for numerical stability
    # reshape is done to avoid issue with Tensorflow
    # and 1-dimensional weights
    logits = K.dot(x, self.W)
    x_shape = K.shape(x)
    logits = K.reshape(logits, (x_shape[0], x_shape[1]))
    ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

    # masked timesteps have zero weight
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        ai = ai * mask
    att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
    weighted_input = x * K.expand_dims(att_weights)
    result = K.sum(weighted_input, axis=1)
    if self.return_attention:
        return [result, att_weights]
    return result
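A side note on the 'max trick' used above (and in several later examples): subtracting K.max(logits, axis=-1, keepdims=True) before exponentiating leaves the resulting distribution unchanged but keeps exp() from overflowing on large scores. A minimal NumPy sketch with illustrative values:

import numpy as np

logits = np.array([1000., 1001., 1002.])  # large scores: exp() overflows
naive = np.exp(logits)                    # -> [inf, inf, inf]
stable = np.exp(logits - np.max(logits))  # -> [0.135..., 0.367..., 1.0]
weights = stable / stable.sum()           # a valid distribution, identical to
print(weights)                            # what the naive version would give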
Example 3
Project: visual_turing_test-tutorial | Author: mateuszmalinowski | File: keras_extensions.py | License: MIT License

def time_distributed_nonzero_max_pooling(x):
    """
    Computes maximum along the first (time) dimension.
    It ignores the mask m.

    In: x - input; a 3D tensor
        mask_value - value to mask out; by default 0.0
    """
    import numpy
    import theano.tensor as T
    mask_value = 0.0
    x = T.switch(T.eq(x, mask_value), -numpy.inf, x)
    masked_max_x = x.max(axis=1)
    # replace infinities with mask_value
    masked_max_x = T.switch(T.eq(masked_max_x, -numpy.inf), 0, masked_max_x)
    return masked_max_x
Example 4
Project: visual_turing_test-tutorial | Author: mateuszmalinowski | File: keras_extensions.py | License: MIT License

def time_distributed_masked_max(x, m):
    """
    Computes max along the first (time) dimension.

    In: x - input; a 3D tensor
        m - mask
        m_value - value for masking
    """
    import numpy
    # place infinities where mask is off
    m_value = 0.0
    tmp = K.switch(K.equal(m, 0.0), -numpy.inf, 0.0)
    x_with_inf = x + K.expand_dims(tmp)
    x_max = K.max(x_with_inf, axis=1)
    r = K.switch(K.equal(x_max, -numpy.inf), m_value, x_max)
    return r


## classes ##

# Transforms existing layers to masked layers
Example 5
Project: blackbox-attacks | Author: sunblaze-ucb | File: attack_utils.py | License: MIT License

def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """
    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    elif loss == 'logloss':
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    else:
        raise ValueError("Unknown loss: {}".format(loss))

    if mean:
        out = K.mean(out)
    # else:
    #     out = K.sum(out)
    return out
Example 6
Project: blackbox-attacks | Author: sunblaze-ucb | File: attack_utils.py | License: MIT License

def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """
    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        # note: K.categorical_crossentropy takes (output, target) in Keras 1
        # but (target, output) in Keras 2; check the order for your version
        out = K.categorical_crossentropy(logits, y, from_logits=True)
    elif loss == 'logloss':
        # out = K.categorical_crossentropy(logits, y, from_logits=True)
        out = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
        out = tf.reduce_mean(out)
    else:
        raise ValueError("Unknown loss: {}".format(loss))

    if mean:
        out = tf.reduce_mean(out)
    # else:
    #     out = K.sum(out)
    return out
Example 7
Project: face_classification | Author: oarriaga | File: grad_cam.py | License: MIT License

def calculate_gradient_weighted_CAM(gradient_function, image):
    output, evaluated_gradients = gradient_function([image, False])
    output, evaluated_gradients = output[0, :], evaluated_gradients[0, :, :, :]
    weights = np.mean(evaluated_gradients, axis=(0, 1))
    CAM = np.ones(output.shape[0:2], dtype=np.float32)
    for weight_arg, weight in enumerate(weights):
        CAM = CAM + (weight * output[:, :, weight_arg])
    CAM = cv2.resize(CAM, (64, 64))
    CAM = np.maximum(CAM, 0)
    heatmap = CAM / np.max(CAM)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image = image - np.min(image)
    image = np.minimum(image, 255)

    CAM = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    CAM = np.float32(CAM) + np.float32(image)
    CAM = 255 * CAM / np.max(CAM)
    return np.uint8(CAM), heatmap
Example 8
Project: object-detection | Author: kaka-lin | File: test_tiny_yolo.py | License: MIT License

def yolo_eval(yolo_outputs, image_shape=(720., 1280.), max_boxes=10,
              score_threshold=.6, iou_threshold=.5):
    # Retrieve outputs of the YOLO model
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs

    # Convert boxes to be ready for the filtering functions
    boxes = yolo_boxes_to_corners(box_xy, box_wh)

    # Perform score filtering with a threshold of score_threshold
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, score_threshold)

    # Scale boxes back to the original image shape
    boxes = scale_boxes(boxes, image_shape)  # boxes: [y1, x1, y2, x2]

    # Perform non-max suppression with a threshold of iou_threshold
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes, iou_threshold)

    return scores, boxes, classes
Example 9
Project: Emotion | Author: petercunha | File: grad_cam.py | License: MIT License

def calculate_gradient_weighted_CAM(gradient_function, image):
    output, evaluated_gradients = gradient_function([image, False])
    output, evaluated_gradients = output[0, :], evaluated_gradients[0, :, :, :]
    weights = np.mean(evaluated_gradients, axis=(0, 1))
    CAM = np.ones(output.shape[0:2], dtype=np.float32)
    for weight_arg, weight in enumerate(weights):
        CAM = CAM + (weight * output[:, :, weight_arg])
    CAM = cv2.resize(CAM, (64, 64))
    CAM = np.maximum(CAM, 0)
    heatmap = CAM / np.max(CAM)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image = image - np.min(image)
    image = np.minimum(image, 255)

    CAM = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    CAM = np.float32(CAM) + np.float32(image)
    CAM = 255 * CAM / np.max(CAM)
    return np.uint8(CAM), heatmap
Example 10
Project: Face-and-Emotion-Recognition | Author: vjgpt | File: grad_cam.py | License: MIT License

def calculate_gradient_weighted_CAM(gradient_function, image):
    output, evaluated_gradients = gradient_function([image, False])
    output, evaluated_gradients = output[0, :], evaluated_gradients[0, :, :, :]
    weights = np.mean(evaluated_gradients, axis=(0, 1))
    CAM = np.ones(output.shape[0:2], dtype=np.float32)
    for weight_arg, weight in enumerate(weights):
        CAM = CAM + (weight * output[:, :, weight_arg])
    CAM = cv2.resize(CAM, (64, 64))
    CAM = np.maximum(CAM, 0)
    heatmap = CAM / np.max(CAM)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image = image - np.min(image)
    image = np.minimum(image, 255)

    CAM = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    CAM = np.float32(CAM) + np.float32(image)
    CAM = 255 * CAM / np.max(CAM)
    return np.uint8(CAM), heatmap
Example 11
Project: icassp19 | Author: edufonseca | File: losses.py | License: MIT License

def lq_loss_wrap(_q):
    def lq_loss_core(y_true, y_pred):
        """
        This loss function is proposed in:
        Zhilu Zhang and Mert R. Sabuncu, "Generalized Cross Entropy Loss for
        Training Deep Neural Networks with Noisy Labels", 2018
        https://arxiv.org/pdf/1805.07836.pdf

        :param y_true:
        :param y_pred:
        :return:
        """
        # hyper param
        print(_q)
        _tmp = y_pred * y_true
        _loss = K.max(_tmp, axis=-1)

        # compute the Lq loss between the one-hot encoded label and the prediction
        _loss = (1 - (_loss + 10 ** (-8)) ** _q) / _q

        return _loss

    return lq_loss_core
Example 12
Project: voxelmorph | Author: voxelmorph | File: models.py | License: GNU General Public License v3.0

def _softmax(x, axis=-1, alpha=1):
    """
    Building on the Keras implementation, allow an alpha parameter.

    Softmax activation function.

    # Arguments
        x: Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: a value by which to scale all of x

    # Returns
        Tensor, output of softmax transformation.

    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
Example 13
Project: DeepMoji | Author: bfelbo | File: attlayer.py | License: MIT License

def call(self, x, mask=None):
    # computes a probability distribution over the timesteps
    # uses 'max trick' for numerical stability
    # reshape is done to avoid issue with Tensorflow
    # and 1-dimensional weights
    logits = K.dot(x, self.W)
    x_shape = K.shape(x)
    logits = K.reshape(logits, (x_shape[0], x_shape[1]))
    ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

    # masked timesteps have zero weight
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        ai = ai * mask
    att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
    weighted_input = x * K.expand_dims(att_weights)
    result = K.sum(weighted_input, axis=1)
    if self.return_attention:
        return [result, att_weights]
    return result
Example 14
Project: Keras-TextClassification | Author: yongzhuo | File: scale_dot_product_attention.py | License: MIT License

def call(self, inputs, mask=None, **kwargs):
    if isinstance(inputs, list):
        query, key, value = inputs
    else:
        query = key = value = inputs
    if isinstance(mask, list):
        mask = mask[1]
    feature_dim = K.shape(query)[-1]
    e = K.batch_dot(query, key, axes=2) / K.sqrt(K.cast(feature_dim, dtype=K.floatx()))
    e = K.exp(e - K.max(e, axis=-1, keepdims=True))
    if self.history_only:
        query_len, key_len = K.shape(query)[1], K.shape(key)[1]
        indices = K.tile(K.expand_dims(K.arange(key_len), axis=0), [query_len, 1])
        upper = K.expand_dims(K.arange(key_len), axis=-1)
        e *= K.expand_dims(K.cast(indices <= upper, K.floatx()), axis=0)
    if mask is not None:
        e *= K.cast(K.expand_dims(mask, axis=-2), K.floatx())
    a = e / (K.sum(e, axis=-1, keepdims=True) + K.epsilon())
    v = K.batch_dot(a, value)
    if self.return_attention:
        return [v, a]
    return v
Example 15
Project: Keras-TextClassification | Author: yongzhuo | File: graph.py | License: MIT License

def call(self, x, mask=None):
    # computes a probability distribution over the timesteps
    # uses 'max trick' for numerical stability
    # reshape is done to avoid issue with Tensorflow
    # and 1-dimensional weights
    logits = K.dot(x, self.W)
    x_shape = K.shape(x)
    logits = K.reshape(logits, (x_shape[0], x_shape[1]))
    ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

    # masked timesteps have zero weight
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        ai = ai * mask
    att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
    weighted_input = x * K.expand_dims(att_weights)
    result = K.sum(weighted_input, axis=1)
    if self.return_attention:
        return [result, att_weights]
    return result
Example 16
Project: stock-price-predict | Author: kaka-lin | File: seq2seq_attention_2.py | License: MIT License

def softmax(x, axis=1):
    """Softmax activation function.

    # Arguments
        x: Tensor.
        axis: Integer, axis along which the softmax normalization is applied.

    # Returns
        Tensor, output of softmax transformation.

    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
Example 17
Project: stock-price-predict | Author: kaka-lin | File: seq2seq_attention.py | License: MIT License

def softmax(x, axis=1):
    """Softmax activation function.

    # Arguments
        x: Tensor.
        axis: Integer, axis along which the softmax normalization is applied.

    # Returns
        Tensor, output of softmax transformation.

    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
Example 18
Project: qlearning4k | Author: farizrahman4u | File: memory.py | License: MIT License

def get_batch(self, model, batch_size, gamma=0.9):
    if self.fast:
        return self.get_batch_fast(model, batch_size, gamma)
    if len(self.memory) < batch_size:
        batch_size = len(self.memory)
    nb_actions = model.get_output_shape_at(0)[-1]
    samples = np.array(sample(self.memory, batch_size))
    input_dim = np.prod(self.input_shape)
    S = samples[:, 0:input_dim]
    a = samples[:, input_dim]
    r = samples[:, input_dim + 1]
    S_prime = samples[:, input_dim + 2:2 * input_dim + 2]
    game_over = samples[:, 2 * input_dim + 2]
    r = r.repeat(nb_actions).reshape((batch_size, nb_actions))
    game_over = game_over.repeat(nb_actions).reshape((batch_size, nb_actions))
    S = S.reshape((batch_size,) + self.input_shape)
    S_prime = S_prime.reshape((batch_size,) + self.input_shape)
    X = np.concatenate([S, S_prime], axis=0)
    Y = model.predict(X)
    Qsa = np.max(Y[batch_size:], axis=1).repeat(nb_actions).reshape((batch_size, nb_actions))
    delta = np.zeros((batch_size, nb_actions))
    a = np.cast['int'](a)
    delta[np.arange(batch_size), a] = 1
    targets = (1 - delta) * Y[:batch_size] + delta * (r + gamma * (1 - game_over) * Qsa)
    return S, targets
Example 19
Project: qlearning4k | Author: farizrahman4u | File: memory.py | License: MIT License

def set_batch_function(self, model, input_shape, batch_size, nb_actions, gamma):
    input_dim = np.prod(input_shape)
    samples = K.placeholder(shape=(batch_size, input_dim * 2 + 3))
    S = samples[:, 0:input_dim]
    a = samples[:, input_dim]
    r = samples[:, input_dim + 1]
    S_prime = samples[:, input_dim + 2:2 * input_dim + 2]
    game_over = samples[:, 2 * input_dim + 2:2 * input_dim + 3]
    r = K.reshape(r, (batch_size, 1))
    r = K.repeat(r, nb_actions)
    r = K.reshape(r, (batch_size, nb_actions))
    game_over = K.repeat(game_over, nb_actions)
    game_over = K.reshape(game_over, (batch_size, nb_actions))
    S = K.reshape(S, (batch_size,) + input_shape)
    S_prime = K.reshape(S_prime, (batch_size,) + input_shape)
    X = K.concatenate([S, S_prime], axis=0)
    Y = model(X)
    Qsa = K.max(Y[batch_size:], axis=1)
    Qsa = K.reshape(Qsa, (batch_size, 1))
    Qsa = K.repeat(Qsa, nb_actions)
    Qsa = K.reshape(Qsa, (batch_size, nb_actions))
    delta = K.reshape(self.one_hot(a, nb_actions), (batch_size, nb_actions))
    targets = (1 - delta) * Y[:batch_size] + delta * (r + gamma * (1 - game_over) * Qsa)
    self.batch_function = K.function(inputs=[samples], outputs=[S, targets])
Example 20
Project: sam | Author: marcellacornia | File: models.py | License: MIT License

def kl_divergence(y_true, y_pred):
    max_y_pred = K.repeat_elements(
        K.expand_dims(K.repeat_elements(
            K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)),
            shape_r_out, axis=-1)),
        shape_c_out, axis=-1)
    y_pred /= max_y_pred

    sum_y_true = K.repeat_elements(
        K.expand_dims(K.repeat_elements(
            K.expand_dims(K.sum(K.sum(y_true, axis=2), axis=2)),
            shape_r_out, axis=-1)),
        shape_c_out, axis=-1)
    sum_y_pred = K.repeat_elements(
        K.expand_dims(K.repeat_elements(
            K.expand_dims(K.sum(K.sum(y_pred, axis=2), axis=2)),
            shape_r_out, axis=-1)),
        shape_c_out, axis=-1)
    y_true /= (sum_y_true + K.epsilon())
    y_pred /= (sum_y_pred + K.epsilon())

    return 10 * K.sum(K.sum(y_true * K.log((y_true / (y_pred + K.epsilon())) + K.epsilon()), axis=-1), axis=-1)


# Correlation Coefficient Loss
Example 21
Project: sam | Author: marcellacornia | File: models.py | License: MIT License

def nss(y_true, y_pred):
    max_y_pred = K.repeat_elements(
        K.expand_dims(K.repeat_elements(
            K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)),
            shape_r_out, axis=-1)),
        shape_c_out, axis=-1)
    y_pred /= max_y_pred
    y_pred_flatten = K.batch_flatten(y_pred)

    y_mean = K.mean(y_pred_flatten, axis=-1)
    y_mean = K.repeat_elements(
        K.expand_dims(K.repeat_elements(
            K.expand_dims(K.expand_dims(y_mean)),
            shape_r_out, axis=-1)),
        shape_c_out, axis=-1)

    y_std = K.std(y_pred_flatten, axis=-1)
    y_std = K.repeat_elements(
        K.expand_dims(K.repeat_elements(
            K.expand_dims(K.expand_dims(y_std)),
            shape_r_out, axis=-1)),
        shape_c_out, axis=-1)

    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    return -(K.sum(K.sum(y_true * y_pred, axis=2), axis=2) / K.sum(K.sum(y_true, axis=2), axis=2))


# Gaussian priors initialization
Example 22
Project: CapsNet | Author: l11x0m7 | File: capsule.py | License: MIT License

def softmax(x, axis=-1):
    """
    Self-defined softmax function
    """
    x = K.exp(x - K.max(x, axis=axis, keepdims=True))
    x /= K.sum(x, axis=axis, keepdims=True)
    return x
Example 23
Project: blackbox-attacks | Author: sunblaze-ucb | File: attack_utils.py | License: MIT License

def linf_loss(X1, X2):
    return np.max(np.abs(X1 - X2), axis=(1, 2, 3))
Example 24
Project: blackbox-attacks | Author: sunblaze-ucb | File: attack_utils.py | License: MIT License

def linf_loss(X1, X2):
    return np.max(np.abs(X1 - X2), axis=(1, 2, 3))
Example 25
Project: PiCamNN | Author: PiSimo | File: keras_yolo.py | License: MIT License

def yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold=.6):
    """Filter YOLO boxes based on object and class confidence."""
    box_scores = box_confidence * box_class_probs
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)
    prediction_mask = box_class_scores >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    boxes = tf.boolean_mask(boxes, prediction_mask)
    scores = tf.boolean_mask(box_class_scores, prediction_mask)
    classes = tf.boolean_mask(box_classes, prediction_mask)
    return boxes, scores, classes
Example 26
Project: DeepLearn | Author: GauravBh1010tt | File: p3_cnn.py | License: MIT License

def max_1d(X):
    return K.max(X, axis=1)
Example 27
Project: DeepLearn | Author: GauravBh1010tt | File: p3_lstm.py | License: MIT License

def max_1d(X):
    return K.max(X, axis=1)
Example 28
Project: DeepLearn | Author: GauravBh1010tt | File: fnc_libs.py | License: MIT License

def max_1d(X):
    return K.max(X, axis=1)
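A helper like max_1d in Examples 26-28 is typically wrapped in a Lambda layer to perform max-over-time pooling on a sequence output. A hedged sketch of that pattern (layer sizes and names are illustrative, not taken from the DeepLearn sources):

from keras import backend as K
from keras.layers import Input, LSTM, Lambda
from keras.models import Model

def max_1d(X):
    return K.max(X, axis=1)

# Illustrative shapes: 10 timesteps, 8 input features.
inp = Input(shape=(10, 8))
seq = LSTM(16, return_sequences=True)(inp)        # (batch, 10, 16)
pooled = Lambda(max_1d, output_shape=(16,))(seq)  # max over time -> (batch, 16)
model = Model(inp, pooled)
model.summary()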
Example 29
Project: face_classification | Author: oarriaga | File: grad_cam.py | License: MIT License

def compile_saliency_function(model, activation_layer='conv2d_7'):
    input_image = model.input
    layer_output = model.get_layer(activation_layer).output
    max_output = K.max(layer_output, axis=3)
    saliency = K.gradients(K.sum(max_output), input_image)[0]
    return K.function([input_image, K.learning_phase()], [saliency])
Example 30
Project: autopool | Author: marl | File: autopool.py | License: MIT License

def call(self, x, mask=None):
    scaled = self.kernel * x
    max_val = K.max(scaled, axis=self.axis, keepdims=True)
    softmax = K.exp(scaled - max_val)
    weights = softmax / K.sum(softmax, axis=self.axis, keepdims=True)
    return K.sum(x * weights, axis=self.axis, keepdims=False)