Python keras.backend.max() Examples

The following code examples show how to use keras.backend.max(). They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: 360_aware_saliency   Author: MikhailStartsev   File: models.py    GNU General Public License v3.0 7 votes vote down vote up
def nss(y_true, y_pred):
    """Normalized Scanpath Saliency loss (negated so that lower is better).

    y_true: ground-truth fixation map; y_pred: predicted saliency map.
    Assumes 4D tensors whose last two axes are the spatial grid of size
    (shape_r_out, shape_c_out) -- TODO confirm against the model output.
    """
    # Per-sample spatial max, broadcast back over the full map so the
    # prediction can be rescaled into [0, 1].
    max_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    y_pred /= max_y_pred
    y_pred_flatten = K.batch_flatten(y_pred)

    # Per-sample mean of the flattened map, broadcast to the map's shape.
    y_mean = K.mean(y_pred_flatten, axis=-1)
    y_mean = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_mean)), 
                                                               shape_r_out, axis=-1)), shape_c_out, axis=-1)

    # Per-sample standard deviation, broadcast the same way.
    y_std = K.std(y_pred_flatten, axis=-1)
    y_std = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_std)), 
                                                              shape_r_out, axis=-1)), shape_c_out, axis=-1)

    # Standardize the prediction to zero mean / unit variance per sample.
    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    # Mean standardized saliency at fixated locations, negated for minimization.
    return -(K.sum(K.sum(y_true * y_pred, axis=2), axis=2) / K.sum(K.sum(y_true, axis=2), axis=2))


# Gaussian priors initialization 
Example 2
Project: visual_turing_test-tutorial   Author: mateuszmalinowski   File: keras_extensions.py    MIT License 6 votes vote down vote up
def time_distributed_nonzero_max_pooling(x, mask_value=0.0):
    """
    Computes the maximum along the first (time) dimension, ignoring
    entries equal to ``mask_value``.

    In:
        x - input; a 3D tensor
        mask_value - value to mask out; 0.0 by default

    ``mask_value`` used to be hard-coded inside the body even though the
    docstring described it as configurable; it is now a keyword argument
    with the same default, so existing callers are unaffected.
    """

    import theano.tensor as T

    # Replace masked entries with -inf so they can never win the max.
    x = T.switch(T.eq(x, mask_value), -numpy.inf, x)
    masked_max_x = x.max(axis=1)
    # Timesteps that were entirely masked produced -inf; map them back to
    # the mask value (identical to the old behavior at the default 0.0).
    masked_max_x = T.switch(T.eq(masked_max_x, -numpy.inf), mask_value, masked_max_x)
    return masked_max_x
Example 3
Project: visual_turing_test-tutorial   Author: mateuszmalinowski   File: keras_extensions.py    MIT License 6 votes vote down vote up
def time_distributed_masked_max(x, m):
    """
    Max along the first (time) dimension, restricted to the positions
    where the mask ``m`` is on.

    In:
        x - input; a 3D tensor
        m - mask
        m_value - value for masking
    """
    fill_value = 0.0
    # Additive mask: 0 where the mask is on, -inf where it is off.
    neg_inf_mask = K.switch(K.equal(m, 0.0), -numpy.inf, 0.0)
    masked_input = x + K.expand_dims(neg_inf_mask)
    time_max = K.max(masked_input, axis=1)
    # Rows that were fully masked came out as -inf; replace them.
    return K.switch(K.equal(time_max, -numpy.inf), fill_value, time_max)


## classes  ##

# Transforms existing layers to masked layers 
Example 4
Project: blackbox-attacks   Author: sunblaze-ucb   File: attack_utils.py    MIT License 6 votes vote down vote up
def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """
    if loss == 'logloss':
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    elif loss == 'training':
        # Derive soft labels from the model's own argmax prediction to
        # avoid label leaking at training time.
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    else:
        raise ValueError("Unknown loss: {}".format(loss))

    return K.mean(out) if mean else out
Example 5
Project: blackbox-attacks   Author: sunblaze-ucb   File: attack_utils.py    MIT License 6 votes vote down vote up
def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.

    # Arguments
        logits: pre-softmax model outputs.
        y: one-hot target labels (ignored for loss='training').
        loss: 'training' or 'logloss'.
        mean: if True, additionally reduce the loss to its mean.

    # Raises
        ValueError: if ``loss`` names an unknown mode.
    """
    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        # NOTE(review): the (logits, y) argument order here differs from
        # the sibling gen_adv_loss, where (target, output) is passed --
        # confirm against the pinned Keras version.
        out = K.categorical_crossentropy(logits, y, from_logits=True)
    elif loss == 'logloss':
        out = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
        out = tf.reduce_mean(out)
    else:
        raise ValueError("Unknown loss: {}".format(loss))

    if mean:
        # BUG FIX: ``tf.mean`` does not exist (AttributeError at runtime);
        # the intended reduction is ``tf.reduce_mean``.
        out = tf.reduce_mean(out)
    return out
Example 6
Project: cbc_networks   Author: saralajew   File: reasoning_layers.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def softmax(tensors, axis=-1):
    """Numerically stable softmax with support for one or several axes.

    # Arguments
        tensors: Input tensor.
        axis: An integer or tuple/list of integers, specifying the
            axis for the normalization

    # Input shape
        tensor with arbitrary shape

    # Output shape
        tensor with the same shape as the input tensor
    """
    with K.name_scope('softmax'):
        # Subtract the max before exponentiating so exp cannot overflow.
        shifted = tensors - K.max(tensors, axis=axis, keepdims=True)
        numerator = K.exp(shifted)
        denominator = K.sum(numerator, axis=axis, keepdims=True)
        return numerator / denominator
Example 7
Project: 360_aware_saliency   Author: MikhailStartsev   File: models.py    GNU General Public License v3.0 6 votes vote down vote up
def kl_divergence(y_true, y_pred):
    """KL-divergence loss between saliency distributions (scaled by 10).

    Both maps are max-normalized / sum-normalized into distributions over
    the (shape_r_out, shape_c_out) grid -- assumed 4D, TODO confirm.
    """
    # Per-sample spatial max of the prediction, broadcast over the grid.
    max_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    y_pred /= max_y_pred

    # Per-sample spatial sums, broadcast, to turn each map into a distribution.
    sum_y_true = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.sum(K.sum(y_true, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    sum_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.sum(K.sum(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    y_true /= (sum_y_true + K.epsilon())
    y_pred /= (sum_y_pred + K.epsilon())

    # Pointwise KL term, summed over the two spatial axes; epsilons guard
    # both the ratio and the log argument against zeros.
    return 10 * K.sum(K.sum(y_true * K.log((y_true / (y_pred + K.epsilon())) + K.epsilon()), axis=-1), axis=-1)


# Correlation Coefficient Loss 
Example 8
Project: GlyphsScripts   Author: simoncozens   File: Autokern.py    MIT License 6 votes vote down vote up
def w_categorical_crossentropy(self, y_true, y_pred):
    """Categorical crossentropy scaled by a per-(true, predicted)-class
    weight mask taken from ``self.weights`` (shape nb_cl x nb_cl)."""
    nb_cl = len(self.weights)
    final_mask = K.zeros_like(y_pred[..., 0])
    y_pred_max = K.max(y_pred, axis=-1)
    y_pred_max = K.expand_dims(y_pred_max, axis=-1)
    # Boolean map marking, per sample, the argmax class of the prediction.
    y_pred_max_mat = K.equal(y_pred, y_pred_max)
    for c_p, c_t in itertools.product(range(nb_cl), range(nb_cl)):
        # Weight for (true class c_t, predicted class c_p), applied only
        # where the sample's prediction is c_p and its label is c_t.
        w = K.cast(self.weights[c_t, c_p], K.floatx())
        y_p = K.cast(y_pred_max_mat[..., c_p], K.floatx())
        y_t = K.cast(y_true[..., c_t], K.floatx())
        final_mask += w * y_p * y_t
    # NOTE(review): Keras 2's categorical_crossentropy takes
    # (target, output); the (y_pred, y_true) order here looks swapped --
    # confirm against the Keras version this project pins.
    return K.categorical_crossentropy(y_pred, y_true) * final_mask
Example 9
Project: GlyphsScripts   Author: simoncozens   File: Autokern.py    MIT License 6 votes vote down vote up
def go(self):
    """Download ``url`` to ``filename`` while driving the window's progress
    bar; on success hand off to ``self.next``, on failure report and remove
    the partial file."""
    self.w.open()
    success = True
    with open(filename, 'wb') as f:
      response = requests.get(url, stream=True)
      total = response.headers.get('content-length')

      if total is None:
          # No Content-Length header: nothing to report progress against.
          f.write(response.content)
      else:
          downloaded = 0
          total = int(total)
          for data in response.iter_content(chunk_size=max(int(total/1000), 1024*1024)):
              downloaded += len(data)
              f.write(data)
              self.w.progressBar.set(100 * downloaded / total)
          # BUG FIX: the original compared the *percentage* ``done``
          # against the byte count ``total`` (and crashed with a
          # NameError when no Content-Length was sent, since ``done``
          # was only assigned inside this branch). Success simply means
          # every advertised byte arrived.
          success = downloaded >= total
    if not success:
      self.w.text_anchorL.set("Download failed :(")
      os.remove(filename)
    else:
      self.w.close()
      self.next.go()
Example 10
Project: keras_nade   Author: jgrnt   File: utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def mog_layer(input_layer, previous_layer, n_mixtures, regularize_activations=False):
    """Append a mixture-of-Gaussians output head to ``previous_layer``.

    Predicts, per output dimension, ``n_mixtures`` means, positive scales
    and normalized weights, and merges them with ``input_layer`` via the
    module-level ``_merge_mog`` helper.

    # Arguments
        input_layer: tensor whose density is being modelled.
        previous_layer: features the mixture parameters are predicted from.
        n_mixtures: number of mixture components per output dimension.
        regularize_activations: clamp pre-activations with ExpConstraint.
    """
    if regularize_activations:
        activity_regularizer = ExpConstraint(-2, 10)
    else:
        activity_regularizer = None
    n_outputs = K.int_shape(input_layer)[1]
    # Component means, reshaped to an (n_outputs, n_mixtures) grid.
    mu = Reshape((n_outputs, n_mixtures))(
        Dense(n_outputs * n_mixtures, name="mog_mu")(
            previous_layer))
    # Component scales: exponentiated to stay positive; ones-initialized bias.
    sigma = Reshape((n_outputs, n_mixtures))(
        Lambda(lambda x: K.exp(x), output_shape=(n_outputs * n_mixtures,))(
            Dense(n_outputs * n_mixtures, name="mog_sigma",
                  activity_regularizer=activity_regularizer,
                  bias_initializer=keras.initializers.Ones())(
                previous_layer)))
    # Implement softmax here as it has to work on n_mixtures
    temp_alpha = Lambda(lambda x: K.exp(x - K.max(x, axis=2, keepdims=True)), output_shape=(n_outputs, n_mixtures))(
        Reshape((n_outputs, n_mixtures))(Dense(n_outputs * n_mixtures, name="mog_alpha",
                                               activity_regularizer=activity_regularizer,
                                               bias_initializer=keras.initializers.Zeros())(
            previous_layer)))
    # Normalize the shifted exponentials into proper mixture weights.
    alpha = Lambda(lambda x: x / K.expand_dims(K.sum(x, axis=2), 2), output_shape=(n_outputs, n_mixtures))(temp_alpha)
    output = Lambda(_merge_mog, output_shape=(n_outputs,))([input_layer, mu, sigma, alpha])
    return output
Example 11
Project: face_classification   Author: oarriaga   File: grad_cam.py    MIT License 6 votes vote down vote up
def calculate_gradient_weighted_CAM(gradient_function, image):
    """Grad-CAM: weight the conv feature maps by their spatially averaged
    gradients and overlay the resulting heatmap on the input image."""
    conv_out, grads = gradient_function([image, False])
    conv_out = conv_out[0, :]
    grads = grads[0, :, :, :]
    # One scalar weight per feature map: gradient averaged over space.
    channel_weights = np.mean(grads, axis=(0, 1))
    cam = np.ones(conv_out.shape[0: 2], dtype=np.float32)
    for channel, weight in enumerate(channel_weights):
        cam = cam + (weight * conv_out[:, :, channel])
    cam = cv2.resize(cam, (64, 64))
    cam = np.maximum(cam, 0)
    heatmap = cam / np.max(cam)

    # Return to BGR [0..255] from the preprocessed image
    img = image[0, :]
    img = img - np.min(img)
    img = np.minimum(img, 255)

    overlay = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    overlay = np.float32(overlay) + np.float32(img)
    overlay = 255 * overlay / np.max(overlay)
    return np.uint8(overlay), heatmap
Example 12
Project: object-detection   Author: kaka-lin   File: test_tiny_yolo.py    MIT License 6 votes vote down vote up
def yolo_eval(yolo_outputs, image_shape=(720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """Turn raw YOLO outputs into final scored, scaled, NMS-filtered boxes."""
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs

    # Corner-format boxes are what the filtering helpers expect.
    corner_boxes = yolo_boxes_to_corners(box_xy, box_wh)

    # Drop detections whose best class score falls below the threshold.
    scores, kept_boxes, classes = yolo_filter_boxes(
        box_confidence, corner_boxes, box_class_probs, score_threshold)

    # Rescale [y1, x1, y2, x2] boxes from model space to the original image.
    kept_boxes = scale_boxes(kept_boxes, image_shape)

    # Finally remove overlapping duplicates via non-max suppression.
    return yolo_non_max_suppression(scores, kept_boxes, classes, max_boxes, iou_threshold)
Example 13
Project: object-detection   Author: kaka-lin   File: test_tiny_yolo.py    MIT License 6 votes vote down vote up
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
    """Keep only boxes whose best per-class score reaches ``threshold``."""
    # Score of every (box, class) pair.
    all_scores = box_confidence * box_class_probs

    # Best class and its score for each box.
    best_class = K.argmax(all_scores, axis=-1)
    best_score = K.max(all_scores, axis=-1, keepdims=False)

    # Boolean mask selecting boxes confident enough to survive.
    keep = best_score >= threshold

    filtered_scores = tf.boolean_mask(best_score, keep)
    filtered_boxes = tf.boolean_mask(boxes, keep)
    filtered_classes = tf.boolean_mask(best_class, keep)

    return filtered_scores, filtered_boxes, filtered_classes
Example 14
Project: deepGroupv2   Author: albu5   File: networks.py    MIT License 6 votes vote down vote up
def min_max_group(y_true, y_pred):
    """Group-affinity ranking loss over pairwise interaction matrices.

    ``y_true``/``y_pred`` are batched square pairwise matrices -- inferred
    from the axis usage below, TODO confirm. The loss pushes the strongest
    inter-group response below the strongest intra-group response,
    normalized by the number of in-frame entries.
    """
    diag = tf.eye(num_rows=tf.shape(y_true)[2], batch_shape=kb.expand_dims(tf.shape(y_true)[0], axis=0))

    # Row/column indicators of which entries are "in frame" (non-empty).
    in_frame_row = kb.max(y_true, axis=1, keepdims=True)

    in_frame_col = kb.max(y_true, axis=2, keepdims=True)

    mask = kb.batch_dot(in_frame_col, in_frame_row, axes=(2, 1))

    # Strongest intra-group response; the constant offsets push
    # out-of-group / diagonal entries out of contention for the max.
    intra_max = kb.max(y_pred + y_true + mask - diag - 2, axis=2)

    # Strongest inter-group response.
    inter_max = kb.max(y_pred - y_true + mask - 1, axis=2)

    # NOTE: the original also computed an ``intra_min`` tensor that was
    # never used anywhere; it has been removed as dead code.
    return (kb.sum(inter_max - intra_max, axis=-1) + kb.epsilon()) / (kb.sum(in_frame_row, axis=-1) + kb.epsilon())
Example 15
Project: Emotion   Author: petercunha   File: grad_cam.py    MIT License 6 votes vote down vote up
def calculate_gradient_weighted_CAM(gradient_function, image):
    """Grad-CAM heatmap: weight conv activations by their mean gradients,
    then color-map and blend the result with the input image.

    Returns (uint8 BGR overlay, float heatmap in [0, 1]).
    """
    # Evaluate activations and gradients in test mode (learning phase False).
    output, evaluated_gradients = gradient_function([image, False])
    output, evaluated_gradients = output[0, :], evaluated_gradients[0, :, :, :]
    # One scalar weight per channel: gradient averaged over space.
    weights = np.mean(evaluated_gradients, axis = (0, 1))
    CAM = np.ones(output.shape[0 : 2], dtype=np.float32)
    for weight_arg, weight in enumerate(weights):
        CAM = CAM + (weight * output[:, :, weight_arg])
    # Upscale, keep only positive evidence, normalize to [0, 1].
    CAM = cv2.resize(CAM, (64, 64))
    CAM = np.maximum(CAM, 0)
    heatmap = CAM / np.max(CAM)

    #Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image = image - np.min(image)
    image = np.minimum(image, 255)

    # Color-map the heatmap and blend it additively with the image.
    CAM = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    CAM = np.float32(CAM) + np.float32(image)
    CAM = 255 * CAM / np.max(CAM)
    return np.uint8(CAM), heatmap
Example 16
Project: isic2018-skin-lesion-classifier-tensorflow   Author: abhishekrana   File: w_categorical_crossentropy.py    Apache License 2.0 6 votes vote down vote up
def w_categorical_crossentropy(y_true, y_pred, weights):
    """Categorical crossentropy scaled by a class-pair weight mask.

    # Arguments
        y_true: one-hot labels, shape (batch, nb_cl).
        y_pred: predicted class probabilities, shape (batch, nb_cl).
        weights: (nb_cl, nb_cl) array indexed as [true_class, pred_class].

    A leftover ``print(y_true)`` debug statement has been removed.
    """
    nb_cl = len(weights)
    final_mask = K.zeros_like(y_pred[:, 0])
    y_pred_max = K.max(y_pred, axis=1)
    y_pred_max = K.reshape(y_pred_max, (K.shape(y_pred)[0], 1))
    # Boolean map of each sample's predicted (argmax) class.
    y_pred_max_mat = K.equal(y_pred, y_pred_max)
    for c_p, c_t in product(range(nb_cl), range(nb_cl)):
        final_mask += (weights[c_t, c_p] * y_pred_max_mat[:, c_p] * y_true[:, c_t])
    # NOTE(review): Keras 2 expects categorical_crossentropy(target, output);
    # the (y_pred, y_true) order here looks swapped -- confirm.
    return K.categorical_crossentropy(y_pred, y_true) * final_mask
Example 17
Project: Face-and-Emotion-Recognition   Author: vjgpt   File: grad_cam.py    MIT License 6 votes vote down vote up
def calculate_gradient_weighted_CAM(gradient_function, image):
    """Compute a Grad-CAM visualization for ``image``.

    ``gradient_function`` maps (image, learning_phase) to the conv layer's
    activations and their gradients w.r.t. the target score. Returns
    (uint8 BGR overlay, float heatmap in [0, 1]).
    """
    output, evaluated_gradients = gradient_function([image, False])
    # Strip the batch dimension from both outputs.
    output, evaluated_gradients = output[0, :], evaluated_gradients[0, :, :, :]
    # Channel weights: gradients averaged over the two spatial axes.
    weights = np.mean(evaluated_gradients, axis = (0, 1))
    CAM = np.ones(output.shape[0 : 2], dtype=np.float32)
    for weight_arg, weight in enumerate(weights):
        CAM = CAM + (weight * output[:, :, weight_arg])
    CAM = cv2.resize(CAM, (64, 64))
    # ReLU: only positively contributing regions are kept.
    CAM = np.maximum(CAM, 0)
    heatmap = CAM / np.max(CAM)

    #Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image = image - np.min(image)
    image = np.minimum(image, 255)

    # Colorize and additively blend the heatmap onto the image.
    CAM = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    CAM = np.float32(CAM) + np.float32(image)
    CAM = 255 * CAM / np.max(CAM)
    return np.uint8(CAM), heatmap
Example 18
Project: VisualNN   Author: angelhunt   File: cifar10_cnn_capsule.py    GNU General Public License v3.0 5 votes vote down vote up
def softmax(x, axis=-1):
    """Softmax along ``axis``, with max-subtraction for numerical stability."""
    shifted = x - K.max(x, axis=axis, keepdims=True)
    exp_shifted = K.exp(shifted)
    return exp_shifted / K.sum(exp_shifted, axis=axis, keepdims=True)


# define the margin loss like hinge loss 
Example 19
Project: AI_Competition   Author: Decalogue   File: attention.py    MIT License 5 votes vote down vote up
def call(self, inputs, mask=None):
        """Additive attention of ``inputs[1]`` conditioned on ``inputs[0]``;
        returns the attention-weighted sum over the second input's timesteps.
        Presumably en/de are encoder/decoder states -- confirm with caller.
        """
        en = inputs[0]
        de = inputs[1]
        de_shape = K.int_shape(de)
        step_dim = de_shape[1]

        # Project both inputs into a shared hidden space.
        hid_en = K.dot(en, self.W_en1)
        hid_de = K.dot(de, self.W_en2)
        if self.bias:
            hid_en += self.b_en1
            hid_de += self.b_en2
        hid = K.tanh(K.expand_dims(hid_en, axis=1) + hid_de)
        # Collapse the hidden dimension to one attention score per timestep.
        eij = K.reshape(K.dot(hid, K.reshape(self.W_de, (self.hid_size, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b_de[:step_dim]

        # Stabilized softmax numerator (subtract the row max before exp).
        a = K.exp(eij - K.max(eij, axis=-1, keepdims=True))

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask[1], K.floatx())

        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        # Broadcast the weights over the feature dimension and reduce time.
        a = K.expand_dims(a)
        weighted_input = de * a
        return K.sum(weighted_input, axis=1)
Example 20
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License 5 votes vote down vote up
def softmax(x, axis=-1):
    """
    Self-defined softmax function
    """
    # Shift by the max for numerical stability, then normalize.
    shifted = x - K.max(x, axis=axis, keepdims=True)
    exp_x = K.exp(shifted)
    return exp_x / K.sum(exp_x, axis=axis, keepdims=True)
Example 21
Project: blackbox-attacks   Author: sunblaze-ucb   File: attack_utils.py    MIT License 5 votes vote down vote up
def linf_loss(X1, X2):
    """Per-sample L-infinity distance: the max absolute difference over all
    non-batch axes (expects 4D batched arrays)."""
    diff = np.abs(X1 - X2)
    return diff.max(axis=(1, 2, 3))
Example 22
Project: blackbox-attacks   Author: sunblaze-ucb   File: attack_utils.py    MIT License 5 votes vote down vote up
def linf_loss(X1, X2):
    """L-infinity norm of (X1 - X2), computed per example of a 4D batch."""
    return np.abs(np.subtract(X1, X2)).max(axis=(1, 2, 3))
Example 23
Project: PiCamNN   Author: PiSimo   File: keras_yolo.py    MIT License 5 votes vote down vote up
def yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold=.6):
    """Filter YOLO boxes based on object and class confidence."""
    # Joint (box, class) scores, then the winning class per box.
    scores_per_class = box_confidence * box_class_probs
    predicted_class = K.argmax(scores_per_class, axis=-1)
    predicted_score = K.max(scores_per_class, axis=-1)
    confident = predicted_score >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    selected_boxes = tf.boolean_mask(boxes, confident)
    selected_scores = tf.boolean_mask(predicted_score, confident)
    selected_classes = tf.boolean_mask(predicted_class, confident)
    return selected_boxes, selected_scores, selected_classes
Example 24
Project: trVAE   Author: theislab   File: _losses.py    MIT License 5 votes vote down vote up
def kl_recon(mu, log_var, alpha=0.1, eta=1.0):
    """Build a combined KL + reconstruction loss closure (VAE-style).

    # Arguments
        mu, log_var: latent mean and log-variance tensors.
        alpha: weight of the KL term.
        eta: weight of the reconstruction term.

    # Returns
        A Keras-compatible ``loss(y_true, y_pred)`` function.
    """
    def kl_recon_loss(y_true, y_pred):
        # KL divergence of N(mu, exp(log_var)) from the standard normal.
        kl_loss = 0.5 * K.mean(K.exp(log_var) + K.square(mu) - 1. - log_var, 1)
        y_true_min, y_true_max = K.min(y_true), K.max(y_true)
        # If y_true is constant (min == max) the reconstruction term is
        # zeroed out -- presumably a sentinel for "no target"; confirm.
        recon_loss = K.switch(K.equal(y_true_min, y_true_max),
                              then_expression=lambda: 0.5 * K.sum(K.zeros_like(y_true), axis=1),
                              else_expression=lambda: 0.5 * K.sum(K.square((y_true - y_pred)), axis=1)
                              )
        # _nan2inf is defined elsewhere in this module.
        return _nan2inf(eta * recon_loss + alpha * kl_loss)

    return kl_recon_loss
Example 25
Project: cbc_networks   Author: saralajew   File: losses.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __call__(self, y_true, y_pred):
        """Hinge-style margin loss on the gap between the true-class
        response and the strongest off-target response."""
        target_response = K.sum(y_true * y_pred, axis=-1)
        off_target_response = K.max(y_pred - y_true, axis=-1)
        return K.relu(off_target_response - target_response + self.margin)
Example 26
Project: cbc_networks   Author: saralajew   File: losses.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def elu_loss(y_true, y_pred):
    """ELU loss.

    This loss is the probability gap activated by the ELU activation.

    # Arguments
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.

    # Returns
        Tensor with one scalar loss entry per sample.
    """
    # Response on the correct class vs. the strongest competitor.
    correct = K.sum(y_true * y_pred, axis=-1)
    competitor = K.max(y_pred - y_true, axis=-1)
    return K.elu(competitor - correct)
Example 27
Project: Keras_MedicalImgAI   Author: taoyilee   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='block5_conv3'):
    """Build a function mapping (image, learning_phase) to the saliency of
    the channel-wise max of ``activation_layer`` w.r.t. the input."""
    input_img = model.input
    layers_by_name = {layer.name: layer for layer in model.layers[1:]}
    activation_output = layers_by_name[activation_layer].output
    # Collapse channels; the gradient of the summed max highlights the
    # input pixels driving the strongest activations.
    channel_max = K.max(activation_output, axis=3)
    saliency = K.gradients(K.sum(channel_max), input_img)[0]
    return K.function([input_img, K.learning_phase()], [saliency])
Example 28
Project: Keras_MedicalImgAI   Author: taoyilee   File: grad_cam.py    MIT License 5 votes vote down vote up
def grad_cam(input_model, model_x, orig_x, category_index, layer_name, class_names):
    """Grad-CAM heatmap for ``category_index``, blended over ``orig_x``.

    Builds a one-off model whose output isolates the target category,
    backprops its summed score to ``layer_name``'s activations, and weights
    those activations by their spatially averaged gradients.
    """
    output = input_model.output

    # target_category_loss (defined elsewhere) zeroes all but the target class.
    final_layer = Lambda(lambda x: target_category_loss(x, category_index, len(class_names)))
    output = final_layer(output)
    model = Model(inputs=input_model.input, outputs=output)
    loss = K.sum(model.layers[-1].output)
    conv_output = model.get_layer(layer_name).output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input, K.learning_phase()], [conv_output, grads])

    # Evaluate in test mode (learning phase 0), strip the batch dimension.
    output, grads_val = gradient_function([model_x, 0])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    # One weight per channel: gradient averaged over the spatial axes.
    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.zeros(output.shape[0: 2], dtype=np.float32)

    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    # Keep only positive evidence, colorize, resize to the original image,
    # and blend 40/60 with it.
    cam = np.maximum(cam, np.zeros(output.shape[0: 2], dtype=np.float32))
    cam = cam.squeeze()
    cam = cv2.applyColorMap(np.uint8(255 * cam / np.max(cam)), cv2.COLORMAP_JET)
    cam = cv2.resize(cam, (np.shape(orig_x)[0], np.shape(orig_x)[1]))
    cam = 0.4 * cam + 0.6 * orig_x
    return np.uint8(cam)
Example 29
Project: 360_aware_saliency   Author: MikhailStartsev   File: gaussian_prior.py    GNU General Public License v3.0 5 votes vote down vote up
def call(self, x, mask=None):
        """Render ``nb_gaussian`` learned 2D Gaussian prior maps.

        ``self.W`` packs, in order: mu_x, mu_y, sigma_x, sigma_y
        (nb_gaussian values each). Input ``x`` appears to be NCHW
        (batch, channels, height, width) -- inferred from the shape
        indexing below, TODO confirm.
        """
        mu_x = self.W[:self.nb_gaussian]
        mu_y = self.W[self.nb_gaussian:self.nb_gaussian*2]
        sigma_x = self.W[self.nb_gaussian*2:self.nb_gaussian*3]
        sigma_y = self.W[self.nb_gaussian*3:]

        self.b_s = x.shape[0]
        self.height = x.shape[2]
        self.width = x.shape[3]

        # The vertical coordinate range is scaled by the aspect ratio so
        # the Gaussians keep their shape in image coordinates.
        e = self.height / self.width
        e1 = (1 - e) / 2
        e2 = e1 + e

        # Keep means and spreads inside sensible central ranges.
        mu_x = K.clip(mu_x, 0.25, 0.75)
        mu_y = K.clip(mu_y, 0.35, 0.65)

        sigma_x = K.clip(sigma_x, 0.1, 0.9)
        sigma_y = K.clip(sigma_y, 0.2, 0.8)

        # Coordinate grids: x in [0, 1], y in [e1, e2].
        x_t = T.dot(T.ones((self.height, 1)), self._linspace(0, 1.0, self.width).dimshuffle('x', 0))
        y_t = T.dot(self._linspace(e1, e2, self.height).dimshuffle(0, 'x'), T.ones((1, self.width)))

        # One copy of each grid per Gaussian.
        x_t = K.repeat_elements(K.expand_dims(x_t, dim=-1), self.nb_gaussian, axis=-1)
        y_t = K.repeat_elements(K.expand_dims(y_t, dim=-1), self.nb_gaussian, axis=-1)

        # 2D Gaussian density per map; epsilons guard the divisions.
        gaussian = 1 / (2 * np.pi * sigma_x * sigma_y + K.epsilon()) * \
                   T.exp(-((x_t - mu_x) ** 2 / (2 * sigma_x ** 2 + K.epsilon()) +
                           (y_t - mu_y) ** 2 / (2 * sigma_y ** 2 + K.epsilon())))

        gaussian = K.permute_dimensions(gaussian, (2, 0, 1))
        # Normalize each map by its own spatial max, broadcast over the grid.
        max_gauss = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(gaussian, axis=1), axis=1)), self.height, axis=-1)), self.width, axis=-1)
        gaussian = gaussian / max_gauss

        # Tile the maps across the batch dimension.
        output = K.repeat_elements(K.expand_dims(gaussian, dim=0), self.b_s, axis=0)

        return output
Example 30
Project: 360_aware_saliency   Author: MikhailStartsev   File: models.py    GNU General Public License v3.0 5 votes vote down vote up
def correlation_coefficient(y_true, y_pred):
    """Negative scaled Pearson correlation between normalized saliency maps.

    Both maps are turned into distributions over the
    (shape_r_out, shape_c_out) grid; returns -2 * (num / den) so that
    minimizing the loss maximizes correlation.
    """
    # Rescale the prediction by its per-sample spatial max.
    max_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    y_pred /= max_y_pred

    # Per-sample spatial sums, broadcast, for sum-normalization.
    sum_y_true = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.sum(K.sum(y_true, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    sum_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.sum(K.sum(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)

    y_true /= (sum_y_true + K.epsilon())
    y_pred /= (sum_y_pred + K.epsilon())

    # Standard Pearson-r ingredients, accumulated over the spatial axes.
    N = shape_r_out * shape_c_out
    sum_prod = K.sum(K.sum(y_true * y_pred, axis=2), axis=2)
    sum_x = K.sum(K.sum(y_true, axis=2), axis=2)
    sum_y = K.sum(K.sum(y_pred, axis=2), axis=2)
    sum_x_square = K.sum(K.sum(K.square(y_true), axis=2), axis=2)
    sum_y_square = K.sum(K.sum(K.square(y_pred), axis=2), axis=2)

    num = sum_prod - ((sum_x * sum_y) / N)
    den = K.sqrt((sum_x_square - K.square(sum_x) / N) * (sum_y_square - K.square(sum_y) / N))

    # NOTE(review): ``den`` can be zero for a constant map (division by
    # zero) -- unlike the normalizations above, no epsilon is added here;
    # confirm whether that is intentional.
    return -2 * num / den


# Normalized Scanpath Saliency Loss 
Example 31
Project: DisplaceNet   Author: GKalliatakis   File: generic_utils.py    MIT License 5 votes vote down vote up
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Annotate every cell, flipping the text color on the darker half.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


# Print iterations progress
# reference https://gist.github.com/aubricus/f91fb55dc6ba5557fbab06119420dd6a 
Example 32
Project: DisplaceNet   Author: GKalliatakis   File: generic_utils.py    MIT License 5 votes vote down vote up
def weighted_euclidean_distance(self, y_true, y_pred):
        """Per-sample Euclidean distance scaled by a class-pair weight mask
        built from ``self.weights`` (indexed [true_class, pred_class])."""
        nb_cl = len(self.weights)
        final_mask = K.zeros_like(y_pred[..., 0])
        y_pred_max = K.max(y_pred, axis=-1)
        y_pred_max = K.expand_dims(y_pred_max, axis=-1)
        # Boolean map marking each sample's predicted (argmax) class.
        y_pred_max_mat = K.equal(y_pred, y_pred_max)
        for c_p, c_t in itertools.product(range(nb_cl), range(nb_cl)):
            w = K.cast(self.weights[c_t, c_p], K.floatx())
            y_p = K.cast(y_pred_max_mat[..., c_p], K.floatx())
            # BUG FIX: the true-class indicator must come from y_true, not
            # from the prediction mask (as in the w_categorical_crossentropy
            # implementation elsewhere in this file); otherwise the weight
            # mask ignores the labels entirely.
            y_t = K.cast(y_true[..., c_t], K.floatx())
            final_mask += w * y_p * y_t
        return K.sqrt(K.sum(K.square(y_pred - y_true), axis=-1)) * final_mask
Example 33
Project: DisplaceNet   Author: GKalliatakis   File: generic_utils.py    MIT License 5 votes vote down vote up
def weighted_binary_crossentropy(self, y_true, y_pred):
        """Mean binary crossentropy scaled by a class-pair weight mask
        built from ``self.weights`` (indexed [true_class, pred_class])."""
        nb_cl = len(self.weights)
        final_mask = K.zeros_like(y_pred[..., 0])
        y_pred_max = K.max(y_pred, axis=-1)
        y_pred_max = K.expand_dims(y_pred_max, axis=-1)
        # Boolean map marking each sample's predicted (argmax) class.
        y_pred_max_mat = K.equal(y_pred, y_pred_max)
        for c_p, c_t in itertools.product(range(nb_cl), range(nb_cl)):
            w = K.cast(self.weights[c_t, c_p], K.floatx())
            y_p = K.cast(y_pred_max_mat[..., c_p], K.floatx())
            # BUG FIX: the true-class indicator must come from y_true, not
            # from the prediction mask (matching the sibling
            # w_categorical_crossentropy); the old code made the mask
            # completely independent of the labels.
            y_t = K.cast(y_true[..., c_t], K.floatx())
            final_mask += w * y_p * y_t
        return K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1) * final_mask
Example 34
Project: dialectal_arabic_segmenter   Author: qcri   File: ChainCRF.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def logsumexp(x, axis=None):
        '''Returns `log(sum(exp(x), axis=axis))` with improved numerical stability.
        '''
        # Shift by the max so the exponentials cannot overflow; the
        # keepdims copy broadcasts inside exp, the reduced copy is added back.
        peak_keepdims = K.max(x, axis=axis, keepdims=True)
        peak = K.max(x, axis=axis)
        return peak + K.log(K.sum(K.exp(x - peak_keepdims), axis=axis))
Example 35
Project: dialectal_arabic_segmenter   Author: qcri   File: ChainCRF.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def viterbi_decode(x, U, b_start=None, b_end=None, mask=None):
    '''Computes the best tag sequence y for a given input x, i.e. the one that
    maximizes the value of path_energy.'''
    # Fold the start/end boundary energies into the unary scores
    # (add_boundary_energy is defined elsewhere in this module).
    x = add_boundary_energy(x, b_start, b_end, mask)

    # alpha holds the best score so far; gamma collects per-step
    # argmax backpointers, initialized to zeros.
    alpha_0 = x[:, 0, :]
    gamma_0 = K.zeros_like(alpha_0)
    initial_states = [gamma_0, alpha_0]
    # _forward runs the Viterbi recurrence; at each step keep both the best
    # predecessor tag (argmax, cast to float) and its score (max).
    _, gamma = _forward(x,
                        lambda B: [K.cast(K.argmax(B, axis=1), K.floatx()), K.max(B, axis=1)],
                        initial_states,
                        U,
                        mask)
    # Trace the backpointers to recover the optimal tag sequence.
    y = _backward(gamma, mask)
    return y
Example 36
Project: CapsNet-Fashion-MNIST   Author: subarnop   File: capsulelayers.py    GNU General Public License v3.0 5 votes vote down vote up
def call(self, inputs, **kwargs):
        """Mask capsule outputs so only one capsule per sample survives.

        If ``inputs`` is a [capsules, one_hot_label] pair the label selects
        the capsule; otherwise the capsule with the largest value along
        axis 1 is selected via the clipping trick below.
        """
        # use true label to select target capsule, shape=[batch_size, num_capsule]
        if type(inputs) is list:  # true label is provided with shape = [batch_size, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of vectors of capsules
            x = inputs
            # Enlarge the range of values in x to make max(new_x)=1 and others < 0
            x = (x - K.max(x, 1, True)) / K.epsilon() + 1
            mask = K.clip(x, 0, 1)  # the max value in x clipped to 1 and other to 0

        # masked inputs, shape = [batch_size, dim_vector]
        inputs_masked = K.batch_dot(inputs, mask, [1, 1])
        return inputs_masked
Example 37
Project: knowledgeflow   Author: 3rduncle   File: lcd.py    MIT License 5 votes vote down vote up
def linkFeature(self, input_name, conv_name, activation='tanh'):
        """Apply each convolution registered under ``conv_name`` to
        ``input_name``'s tensor, activate, max-pool over time, and
        concatenate the per-filter-size features.

        A leftover ``print('Am I called')`` debug statement has been
        removed (the other copy of this method in the file omits it).
        """
        filters = self.params.get('filters')
        nb_filter = self.params.get('nb_filter')
        convs = self.layers.get(conv_name)
        assert filters
        assert convs
        features = []
        for fsz, conv in zip(filters, convs):
            conv_output = conv(self.tensors[input_name])
            # ``activation`` may be a layer-name string or an
            # advanced-activation layer class.
            if type(activation) == type(''):
                act = Activation(
                    activation, name='%s-act-%d' % (input_name, fsz)
                )(conv_output)
            else:
                act = activation(
                    name='%s-advanced-act-%d' % (input_name, fsz)
                )(conv_output)
            # Max over axis 2 of channel 0's feature map.
            maxpool = Lambda(
                lambda x: K.max(x[:,:,:,0], axis=2),
                output_shape=(nb_filter,),
                name='%s-maxpool-%d' % (input_name, fsz)
            )(act)
            features.append(maxpool)
        if len(features) > 1:
            return Merge(mode='concat', name='%s-feature' % input_name)(features)
        else:
            return features[0]
Example 38
Project: knowledgeflow   Author: 3rduncle   File: lcd.py    MIT License 5 votes vote down vote up
def doubleFeature(self, pos, neg, conv_name, activation='tanh'):
        """Shared-conv feature for a (positive, negative) tensor pair: each
        conv is applied to both tensors, the branches are summed, passed
        through the activation, then max-pooled over time; per-filter-size
        features are concatenated when there is more than one.

        BUG FIX: the string-activation branch referenced an undefined name
        `input_names` (NameError whenever `activation` is a str); it now uses
        `name` like the advanced-activation branch. Also renamed the local
        `sum` (shadowed the builtin) and dropped the debug print.
        """
        name = '%s+%s' % (pos, neg)
        filters = self.params['filters']
        nb_filter = self.params['nb_filter']
        convs = self.layers[conv_name]
        features = []
        pos = self.tensors[pos]
        neg = self.tensors[neg]
        for fsz, conv in zip(filters, convs):
            # Sum the two convolved branches before the non-linearity.
            merged = Merge(
                mode='sum',
            )([conv(pos), conv(neg)])
            if type(activation) == type(''):
                act = Activation(
                    activation, name='%s-act-%d' % (name, fsz)
                )(merged)
            else:
                act = activation(
                    name='%s-advanced-act-%d' % (name, fsz)
                )(merged)
            # Max over the time axis (axis 1).
            maxpool = Lambda(
                lambda x: K.max(x, axis=1),
                output_shape=(nb_filter,),
                name='%s-maxpool-%d' % (name, fsz)
            )(act)
            features.append(maxpool)
        if len(features) > 1:
            return Merge(
                mode='concat', 
                name='%s-feature' % name,
            )(features)
        else:
            return features[0]
Example 39
Project: knowledgeflow   Author: 3rduncle   File: lcd.py    MIT License 5 votes vote down vote up
def linkFeature(self, input_name, conv_name, activation='tanh'):
        """Apply each convolution registered under `conv_name` to the tensor
        named `input_name`, follow with an activation and a max-over-time
        pool, and concatenate the pooled features when there is more than one
        filter size.
        """
        filters = self.params.get('filters')
        nb_filter = self.params.get('nb_filter')
        convs = self.layers.get(conv_name)
        assert filters
        assert convs
        source = self.tensors[input_name]
        pooled = []
        for fsz, conv in zip(filters, convs):
            conv_output = conv(source)
            # `activation` may be a name (str) or an advanced-activation class.
            if type(activation) == type(''):
                act = Activation(
                    activation, name='%s-act-%d' % (input_name, fsz))(conv_output)
            else:
                act = activation(
                    name='%s-advanced-act-%d' % (input_name, fsz))(conv_output)
            pooled.append(Lambda(
                lambda x: K.max(x, axis=1),
                output_shape=(nb_filter,),
                name='%s-maxpool-%d' % (input_name, fsz))(act))
        if len(pooled) == 1:
            return pooled[0]
        return Merge(mode='concat', name='%s-feature' % input_name)(pooled)
Example 40
Project: DeepLearn   Author: GauravBh1010tt   File: p3_cnn.py    MIT License 5 votes vote down vote up
def max_1d(X):
    """Global max pooling over axis 1 (the time/sequence axis) of X."""
    return K.max(X, axis=1)
Example 41
Project: DeepLearn   Author: GauravBh1010tt   File: p3_lstm.py    MIT License 5 votes vote down vote up
def max_1d(X):
    """Global max pooling over axis 1 (the time/sequence axis) of X."""
    return K.max(X, axis=1)
Example 42
Project: DeepLearn   Author: GauravBh1010tt   File: fnc_libs.py    MIT License 5 votes vote down vote up
def max_1d(X):
    """Global max pooling over axis 1 (the time/sequence axis) of X."""
    return K.max(X, axis=1)
Example 43
Project: keras_nade   Author: jgrnt   File: utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def logsumexp(x, axis=None):
    """Numerically stable log(sum(exp(x))) reduced along `axis`.

    BUG FIX: the original re-inserted the reduced axis with
    `K.expand_dims(max_x, axis=axis)`, which fails when axis is None
    (full reduction, max_x is a scalar). Subtracting a keepdims max
    broadcasts correctly for both an int axis and axis=None, and the
    return shape is unchanged for int axes.
    """
    max_keep = K.max(x, axis=axis, keepdims=True)
    return K.max(x, axis=axis) + K.log(K.sum(K.exp(x - max_keep), axis=axis))
Example 44
Project: keras_nade   Author: jgrnt   File: utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __init__(self, min, max):
        """Store the allowed [min, max] range, cast to the backend float type.

        Note: the parameter names shadow the builtins `min`/`max`; kept as-is
        because get_config/serialization relies on these names.
        """
        self.min = K.cast_to_floatx(min)
        self.max = K.cast_to_floatx(max)
Example 45
Project: keras_nade   Author: jgrnt   File: utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __call__(self, x):
        """Penalty that is zero iff every element of x lies in [min, max].

        exp(relu(min - x) + relu(x - max)) equals 1 for in-range elements and
        grows exponentially with the out-of-range distance; subtracting the
        element count (prod of shape) makes a fully in-range tensor score 0.
        """
        return K.sum(K.exp(1 * (K.relu(self.min - x) + K.relu(x - self.max)))) - K.prod(K.cast(K.shape(x), K.floatx()))
Example 46
Project: keras_nade   Author: jgrnt   File: utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_config(self):
        """Serialize the stored [min, max] range as plain Python floats."""
        return {'min': float(self.min), 'max': float(self.max)}
Example 47
Project: face_classification   Author: oarriaga   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='conv2d_7'):
    """Build a K.function mapping (input image, learning phase) to the
    gradient of the summed channel-max of `activation_layer`'s output with
    respect to the input — a saliency map."""
    input_image = model.input
    layer_output = model.get_layer(activation_layer).output
    # Max over axis 3 — assumes NHWC layout, i.e. channels last; TODO confirm.
    max_output = K.max(layer_output, axis=3)
    saliency = K.gradients(K.sum(max_output), input_image)[0]
    return K.function([input_image, K.learning_phase()], [saliency])
Example 48
Project: autopool   Author: marl   File: autopool.py    MIT License 5 votes vote down vote up
def call(self, x, mask=None):
        """Auto-pool: softmax-weighted average of x along self.axis, where the
        softmax weights come from x scaled by the learned kernel."""
        scaled = self.kernel * x
        # Subtract the max before exponentiating for numerical stability.
        max_val = K.max(scaled, axis=self.axis, keepdims=True)
        softmax = K.exp(scaled - max_val)
        weights = softmax / K.sum(softmax, axis=self.axis, keepdims=True)
        return K.sum(x * weights, axis=self.axis, keepdims=False)
Example 49
Project: autopool   Author: marl   File: autopool.py    MIT License 5 votes vote down vote up
def call(self, x, mask=None):
        """Softmax-weighted average pooling of x along self.axis (softmax of
        x itself, no learned scaling)."""
        # Subtract the max before exponentiating for numerical stability.
        max_val = K.max(x, axis=self.axis, keepdims=True)
        softmax = K.exp((x - max_val))
        weights = softmax / K.sum(softmax, axis=self.axis, keepdims=True)
        return K.sum(x * weights, axis=self.axis, keepdims=False)
Example 50
Project: object-detection   Author: kaka-lin   File: model.py    MIT License 5 votes vote down vote up
def yolo_filter_boxes(boxes, box_scores, box_class_probs, threshold = .6):
    """Keep only the boxes whose best per-class score reaches `threshold`.

    Returns (scores, boxes, classes) with the filtered entries only.

    NOTE(review): `box_class_probs` is accepted but never used in this body —
    scores are taken directly from `box_scores`; confirm against callers.
    """
    # Find the box_classes thanks to the max box_scores, keep track of the corresponding score
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1, keepdims=False)

    # Create a filtering mask based on "box_class_scores" by using "threshold". The mask should have the
    # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)
    filtering_mask = box_class_scores >= threshold # (3549, 3)

    # Apply the mask to scores, boxes and classes
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)
    
    return scores, boxes, classes
Example 51
Project: object-detection   Author: kaka-lin   File: model.py    MIT License 5 votes vote down vote up
def yolo_eval(
        yolo_outputs, 
        anchors, 
        num_classes,
        image_shape=(720., 1280.), 
        max_boxes=10, 
        score_threshold=.6, 
        iou_threshold=.5):
    """Decode the three YOLO output scales, score-filter the boxes, rescale
    them to `image_shape`, and apply non-max suppression.

    Returns (scores, boxes, classes) for at most `max_boxes` detections.
    """
    #  Get three scales outputs of the YOLO model
    for i in range(0,3):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[i], anchors[6-3*i:9-3*i], num_classes, i)
        if i==0:
            boxes, box_scores= _boxes, _box_scores
        else:
            boxes = K.concatenate([boxes,_boxes], axis=0)
            box_scores = K.concatenate([box_scores,_box_scores], axis=0)
    
    # BUG FIX: score_threshold was previously passed positionally into the
    # (unused) box_class_probs parameter of yolo_filter_boxes, so filtering
    # silently ran with the default threshold of .6 regardless of the value
    # given here. Pass it by keyword instead.
    scores, boxes, classes = yolo_filter_boxes(boxes, box_scores, None, threshold=score_threshold)

    # Scale boxes back to original image shape.
    boxes = scale_boxes(boxes, image_shape)

    # Non-max suppression with a threshold of iou_threshold.
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes, iou_threshold)

    return scores, boxes, classes
Example 52
Project: object-detection   Author: kaka-lin   File: keras_yolo.py    MIT License 5 votes vote down vote up
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    """Filter YOLO boxes based on object and class confidence."""

    # Per-box, per-class score = objectness * class probability.
    box_scores = box_confidence * box_class_probs
    # Best class and its score for every box.
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)
    keep = box_class_scores >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    kept_boxes = tf.boolean_mask(boxes, keep)
    kept_scores = tf.boolean_mask(box_class_scores, keep)
    kept_classes = tf.boolean_mask(box_classes, keep)

    return kept_boxes, kept_scores, kept_classes
Example 53
Project: Emotion   Author: petercunha   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='conv2d_7'):
    """Build a K.function mapping (input image, learning phase) to the
    gradient of the summed channel-max of `activation_layer`'s output with
    respect to the input — a saliency map."""
    input_image = model.input
    layer_output = model.get_layer(activation_layer).output
    # Max over axis 3 — assumes NHWC layout, i.e. channels last; TODO confirm.
    max_output = K.max(layer_output, axis=3)
    saliency = K.gradients(K.sum(max_output), input_image)[0]
    return K.function([input_image, K.learning_phase()], [saliency])
Example 54
Project: Dropout_BBalpha   Author: YingzhenLi   File: BBalpha_dropout.py    MIT License 5 votes vote down vote up
def logsumexp(x, axis=None):
    """Numerically stable log-sum-exp along `axis`, keeping reduced dims."""
    peak = K.max(x, axis=axis, keepdims=True)
    summed = K.sum(K.exp(x - peak), axis=axis, keepdims=True)
    return K.log(summed) + peak
Example 55
Project: Dropout_BBalpha   Author: YingzhenLi   File: BBalpha_dropout.py    MIT License 5 votes vote down vote up
def bbalpha_softmax_cross_entropy_with_mc_logits(alpha):
    """Return a Keras loss closure implementing the BB-alpha energy over
    MC-sampled logits; `alpha` is the divergence parameter.

    Assumes mc_logits has shape (N, K_mc, n_classes) — the commented-out
    ndim assert below suggests this; TODO confirm against callers.
    """
    alpha = K.cast_to_floatx(alpha)
    def loss(y_true, mc_logits):
        # log(p_ij), p_ij = softmax(logit_ij)
        #assert mc_logits.ndim == 3
        # Numerically stable log-softmax over the class axis (axis=2).
        mc_log_softmax = mc_logits - K.max(mc_logits, axis=2, keepdims=True)
        mc_log_softmax = mc_log_softmax - K.log(K.sum(K.exp(mc_log_softmax), axis=2, keepdims=True))
        mc_ll = K.sum(y_true * mc_log_softmax, -1)  # N x K
        K_mc = mc_ll.get_shape().as_list()[1]	# only for tensorflow
        # -1/alpha * log E_k[exp(alpha * ll_k)]: logsumexp over the MC axis
        # plus log(1/K_mc) turns the sum into an average.
        return - 1. / alpha * (logsumexp(alpha * mc_ll, 1) + K.log(1.0 / K_mc))
    return loss


###################################################################
# the model 
Example 56
Project: Dropout_BBalpha   Author: YingzhenLi   File: BBalpha_dropout.py    MIT License 5 votes vote down vote up
def get_logit_cnn_layers(nb_units, p, wd, nb_classes, layers = [], dropout = False):
    # number of convolutional filters to use
    nb_filters = 32
    # size of pooling area for max pooling
    pool_size = (2, 2)
    # convolution kernel size
    kernel_size = (3, 3)

    if dropout == 'MC':
        D = Dropout_mc
    if dropout == 'pW':
        D = pW
    if dropout == 'none':
        D = Identity

    layers.append(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                border_mode='valid', W_regularizer=l2(wd)))
    layers.append(Activation('relu'))
    layers.append(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                W_regularizer=l2(wd)))
    layers.append(Activation('relu'))
    layers.append(MaxPooling2D(pool_size=pool_size))

    layers.append(Flatten())
    layers.append(D(p))
    layers.append(Dense(nb_units, W_regularizer=l2(wd)))
    layers.append(Activation('relu'))
    layers.append(D(p))
    layers.append(Dense(nb_classes, W_regularizer=l2(wd)))
    return layers 
Example 57
Project: iMIMIC-RCVs   Author: medgift   File: regularizers.py    MIT License 5 votes vote down vote up
def build_loss(self):
        """L-p norm of self.img (normalized by `normalize`), used as a
        regularization loss.

        NOTE(review): the p=inf branch uses K.max(self.img) rather than
        K.max(K.abs(self.img)); for images with negative values this is not
        the true infinity norm — confirm intent.
        """
        # Infinity norm
        if np.isinf(self.p):
            value = K.max(self.img)
        else:
            value = K.pow(K.sum(K.pow(K.abs(self.img), self.p)), 1. / self.p)

        return normalize(self.img, value)
Example 58
Project: auckland-ai-meetup-x-triage   Author: a-i-joe   File: vis.py    MIT License 5 votes vote down vote up
def get_saliency(image, model):
    """Returns a saliency map with same shape as image. """
    # Force inference mode while the graph is evaluated.
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image, 0)  # add batch dimension
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    # Gradient magnitude of the squared-output loss w.r.t. the input image.
    grads = K.abs(K.gradients(loss, model.input)[0])
    # Max over axis 3 — assumes NHWC (channels-last) input; TODO confirm.
    saliency = K.max(grads, axis=3)
    fetch_saliency = K.function([model.input], [loss, saliency])
    outputs, saliency = fetch_saliency([image])
    # NOTE(review): restores learning phase to True (training) rather than the
    # previous value — confirm this is intended.
    K.set_learning_phase(True)
    return saliency
Example 59
Project: auckland-ai-meetup-x-triage   Author: a-i-joe   File: visualizations.py    MIT License 5 votes vote down vote up
def get_saliency(image,model):
    """Returns a saliency map with same shape as image. """
    # Force inference mode while the graph is evaluated.
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image,0)  # add batch dimension
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    # Gradient magnitude of the squared-output loss w.r.t. the input image.
    grads = K.abs(K.gradients(loss,model.input)[0])
    # Max over axis 3 — assumes NHWC (channels-last) input; TODO confirm.
    saliency = K.max(grads,axis=3)
    fetch_saliency = K.function([model.input,K.learning_phase()],[loss,saliency])
    outputs, saliency = fetch_saliency([image,0])
    # NOTE(review): restores learning phase to True (training) rather than the
    # previous value — confirm this is intended.
    K.set_learning_phase(True)
    return saliency
Example 60
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    MIT License 5 votes vote down vote up
def call(self, x,mask=None):
        """Numerically stable softmax of x along self.axis."""
        # Shift by the max so the largest exponent is exp(0) = 1.
        shifted = x - K.max(x, axis=self.axis, keepdims=True)
        exps = K.exp(shifted)
        return exps / K.sum(exps, axis=self.axis, keepdims=True)
Example 61
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    MIT License 5 votes vote down vote up
def call(self, x,mask=None):
        """Take the response at column self.axis of x and return a two-class
        output [1 - p, p] concatenated along self.axis."""
        response = K.reshape(x[:,self.axis], (-1,1))
        return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Example 62
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    MIT License 5 votes vote down vote up
def call(self, x,mask=None):
        """Max response over the last axis of x, returned as a two-class
        output [1 - p, p] concatenated along self.axis (MIL-style max
        aggregation)."""
        response = K.max(x, axis=-1, keepdims=True) #K.reshape(x, (-1,1))
        return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Example 63
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    MIT License 5 votes vote down vote up
def call(self, x,mask=None):
        """Sort x along its last axis (Theano backend) and return the sorted
        tensor unchanged otherwise."""
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Example 64
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    MIT License 5 votes vote down vote up
def call(self, x,mask=None):
        """Sort x along the last axis, then keep the self.softmink smallest
        and self.softmaxk largest values, concatenated along the last axis."""
        newx = K.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Example 65
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    MIT License 5 votes vote down vote up
def call(self, x,mask=None):
        """Take the response at column self.axis of x and return a two-class
        output [1 - p, p] concatenated along self.axis."""
        response = K.reshape(x[:,self.axis], (-1,1))
        return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Example 66
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    MIT License 5 votes vote down vote up
def call(self, x,mask=None):
        """Max response over the last axis of x, returned as a two-class
        output [1 - p, p] concatenated along self.axis (MIL-style max
        aggregation)."""
        response = K.max(x, axis=-1, keepdims=True) #K.reshape(x, (-1,1))
        return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Example 67
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    MIT License 5 votes vote down vote up
def call(self, x,mask=None):
        """Sort x along its last axis (Theano backend) and return the sorted
        tensor unchanged otherwise."""
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Example 68
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    MIT License 5 votes vote down vote up
def call(self, x,mask=None):
        """Sort x along the last axis, then keep the self.softmink smallest
        and self.softmaxk largest values, concatenated along the last axis."""
        newx = K.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Example 69
Project: GewitterGefahr   Author: thunderhoser   File: gradcam.py    MIT License 5 votes vote down vote up
def _make_saliency_function(model_object, target_layer_name,
                            input_layer_indices):
    """Creates saliency function.

    This function computes the gradient of activations in the target layer with
    respect to each input value in the specified layers.

    :param model_object: Instance of `keras.models.Model` or
        `keras.models.Sequential`.
    :param target_layer_name: Target layer (numerator in gradient will be based
        on activations in this layer).
    :param input_layer_indices: 1-D numpy array of indices.  If the array
        contains j, the gradient will be computed with respect to every value in
        the [j]th input layer.
    :return: saliency_function: Instance of `keras.backend.function`.
    """

    target_output = model_object.get_layer(name=target_layer_name).output
    # Collapse the filter (last) dimension so the gradient target is scalar
    # per spatial position.
    maxxed_over_filters = K.max(target_output, axis=-1)

    # Normalize model input to a list of tensors.
    model_inputs = model_object.input
    if not isinstance(model_inputs, list):
        model_inputs = [model_inputs]

    gradient_tensors = K.gradients(
        K.sum(maxxed_over_filters),
        [model_inputs[i] for i in input_layer_indices]
    )

    return K.function(
        model_inputs + [K.learning_phase()],
        gradient_tensors
    )
Example 70
Project: elmo-bilstm-cnn-crf   Author: UKPLab   File: ChainCRF.py    Apache License 2.0 5 votes vote down vote up
def viterbi_decode(x, U, b_start=None, b_end=None, mask=None):
    '''Computes the best tag sequence y for a given input x, i.e. the one that
    maximizes the value of path_energy.'''
    x = add_boundary_energy(x, b_start, b_end, mask)

    # Initial state: zero backpointers (gamma) and first-step energies (alpha).
    alpha_0 = x[:, 0, :]
    gamma_0 = K.zeros_like(alpha_0)
    initial_states = [gamma_0, alpha_0]
    # Forward pass: at each step record the argmax backpointer (cast to float
    # for the RNN scan) and propagate the max path score.
    _, gamma = _forward(x,
                        lambda B: [K.cast(K.argmax(B, axis=1), K.floatx()), K.max(B, axis=1)],
                        initial_states,
                        U,
                        mask)
    # Backward pass follows the stored backpointers to recover the best path.
    y = _backward(gamma, mask)
    return y
Example 71
Project: Face-and-Emotion-Recognition   Author: vjgpt   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='conv2d_7'):
    """Build a K.function mapping (input image, learning phase) to the
    gradient of the summed channel-max of `activation_layer`'s output with
    respect to the input — a saliency map."""
    input_image = model.input
    layer_output = model.get_layer(activation_layer).output
    # Max over axis 3 — assumes NHWC layout, i.e. channels last; TODO confirm.
    max_output = K.max(layer_output, axis=3)
    saliency = K.gradients(K.sum(max_output), input_image)[0]
    return K.function([input_image, K.learning_phase()], [saliency])
Example 72
Project: CarCrash_forecasting_and_detection   Author: ankitshah009   File: layers.py    GNU General Public License v3.0 5 votes vote down vote up
def call(self, x, mask=None):
        """Multi-context ROI pooling: for each ROI, crop the image at several
        context scales (the ROI grown by multiples of self.step_stride on each
        side), resize each crop to pool_size, and max over the contexts
        (maxout); all ROI results are concatenated.

        BUG FIX: the original clamping used `if (x < 0) is not None:` etc.
        A tensor comparison is never None, so those conditions were ALWAYS
        true — every ROI was clobbered to x=0, y=0, w=s[0], h=s[1]. The
        coordinates are now clamped with K.maximum / K.minimum instead.
        Also renamed the coordinate locals so the `x` parameter (the input
        list) is no longer shadowed.
        """
        assert (len(x) == 2)

        img = x[0]
        rois = x[1]

        if self.dim_ordering == 'th':
            raise NotImplementedError("We don't use Theano backend.")

        outputs = []
        s = K.shape(img)

        for roi_idx in range(self.num_rois):
            contexts = []
            for i in range(self.n_contexts):
                stride = i * self.step_stride

                # Grow the ROI by `stride` pixels on every side.
                rx = rois[0, roi_idx, 0] - stride
                ry = rois[0, roi_idx, 1] - stride
                rw = rois[0, roi_idx, 2] + 2 * stride
                rh = rois[0, roi_idx, 3] + 2 * stride

                # Clamp to the image bounds with tensor ops.
                rx = K.maximum(K.cast(rx, 'int32'), 0)
                ry = K.maximum(K.cast(ry, 'int32'), 0)
                # NOTE(review): the original bounded w by s[0] and h by s[1];
                # with an NHWC image s[0] is the batch dim, so the intended
                # bounds are probably s[2] (width) and s[1] (height) — kept
                # as in the original, confirm before changing.
                rw = K.minimum(K.cast(rw, 'int32'), s[0])
                rh = K.minimum(K.cast(rh, 'int32'), s[1])

                rs = tf.image.resize_images(img[:, ry:ry + rh, rx:rx + rw, :], (self.pool_size, self.pool_size))
                contexts.append(rs)
            # Apply maxout over the context scales.
            C = K.concatenate(contexts, axis=0)
            T = K.max(C, axis=0)
            outputs.append(T)

        final_output = K.concatenate(outputs, axis=0)
        final_output = K.reshape(final_output, (1, self.num_rois, self.pool_size, self.pool_size, self.nb_channels))

        return final_output
Example 73
Project: TCFPN-ISBA   Author: Zephyr-D   File: tf_models.py    MIT License 5 votes vote down vote up
def max_filter(x):
    # Max over the best filter score (like ICRA paper): keep only the
    # entries that equal the max along axis 2, zero everything else.
    peak = K.max(x, 2, keepdims=True)
    is_peak = tf.cast(tf.greater_equal(x, peak), tf.float32)
    return x * is_peak
Example 74
Project: TCFPN-ISBA   Author: Zephyr-D   File: tf_models.py    MIT License 5 votes vote down vote up
def channel_normalization(x):
    # Normalize by the highest activation magnitude along axis 2; the small
    # epsilon guards against division by zero.
    peak = K.max(K.abs(x), 2, keepdims=True) + 1e-5
    return x / peak
Example 75
Project: TCFPN-ISBA   Author: Zephyr-D   File: weak_model.py    MIT License 5 votes vote down vote up
def sigmoid_cross_entropy(y_true, y_pred):
    """Class-weighted, numerically stable sigmoid cross-entropy over the
    flattened logits y_pred and binary targets z = y_true (positive class
    weighted by q=10, total divided by 500).

    BUG FIX: the stable formulation needs the ELEMENTWISE maximum
    max(-x, 0); the original `K.max(-x, 0)` is a REDUCTION along axis 0
    yielding a single scalar that was broadcast to every element. Replaced
    with K.maximum(-x, 0.).
    """
    z = K.flatten(y_true)
    x = K.flatten(y_pred)
    q = 10  # positive-class weight
    l = (1 + (q - 1) * z)
    loss = (K.sum((1 - z) * x) + K.sum(l * (K.log(1 + K.exp(- K.abs(x))) + K.maximum(-x, 0.)))) / 500
    return loss
Example 76
Project: VoxelNet-Keras   Author: baudm   File: model.py    MIT License 5 votes vote down vote up
def call(self, inputs, **kwargs):
        """Elementwise max over the T_AXIS dimension (module-level constant);
        self.keepdims controls whether the reduced axis is kept."""
        return K.max(inputs, axis=T_AXIS, keepdims=self.keepdims)
Example 77
Project: VoxelNet-Keras   Author: baudm   File: model.py    MIT License 5 votes vote down vote up
def vfe_block(x, cout, name='vfe'):
    """Voxel-feature-encoding block: pointwise FCN to cout/2 features, an
    elementwise max-pool over T_AXIS repeated back to every position, and
    concatenation of the pooled feature with the per-point features."""
    assert cout % 2 == 0
    x = fcn_block(x, cout // 2, name)
    # `pooled` replaces the original local `max`, which shadowed the builtin.
    pooled = ElementwiseMaxPool(keepdims=True, name=name + '_maxpool')(x)
    pooled = RepeatElements(x.shape[T_AXIS].value, axis=T_AXIS, name=name + '_repeat')(pooled)
    return Concatenate(name=name + '_concat')([pooled, x])
Example 78
Project: multi-object-tracking   Author: jguoaj   File: model.py    GNU General Public License v3.0 4 votes vote down vote up
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5):
    '''Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(T, 2), wh
    num_classes: integer
    ignore_thresh: float, the iou threshold whether to ignore object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)

    '''
    yolo_outputs = args[:3]
    y_true = args[3:]
    # Anchor indices used by each of the three output scales.
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]
    # Network input size in pixels: grid of the first scale times stride 32.
    input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(3)]
    loss = 0
    m = K.shape(yolo_outputs[0])[0]  # batch size

    for l in range(3):
        object_mask = y_true[l][..., 4:5]  # 1 where a ground-truth box exists
        true_class_probs = y_true[l][..., 5:]

        pred_xy, pred_wh, pred_confidence, pred_class_probs = yolo_head(yolo_outputs[l],
             anchors[anchor_mask[l]], num_classes, input_shape)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet box loss.
        xy_delta = (y_true[l][..., :2]-pred_xy)*grid_shapes[l][::-1]
        wh_delta = K.log(y_true[l][..., 2:4]) - K.log(pred_wh)
        # Avoid log(0)=-inf.
        wh_delta = K.switch(object_mask, wh_delta, K.zeros_like(wh_delta))
        box_delta = K.concatenate([xy_delta, wh_delta], axis=-1)
        # 2 - w*h weights small boxes more (w, h presumably normalized to
        # [0, 1] — confirm against preprocess_true_boxes).
        box_delta_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')
        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])
            iou = box_iou(pred_box[b], true_box)
            # Predictions overlapping any ground truth above ignore_thresh
            # are excluded from the no-object confidence loss.
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))
            return b+1, ignore_mask
        _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        # Squared-error (Darknet-style) losses rather than cross-entropy.
        box_loss = object_mask * K.square(box_delta*box_delta_scale)
        confidence_loss = object_mask * K.square(1-pred_confidence) + \
            (1-object_mask) * K.square(0-pred_confidence) * ignore_mask
        class_loss = object_mask * K.square(true_class_probs-pred_class_probs)
        loss += K.sum(box_loss) + K.sum(confidence_loss) + K.sum(class_loss)
    return loss / K.cast(m, K.dtype(loss))
Example 79
Project: knowledgeflow   Author: 3rduncle   File: lcd.py    MIT License 4 votes vote down vote up
def match_matrix(vectors, match, axis=0, w=3):
    """Soft-align `vectors` using the `match` similarity matrix (Theano).

    For each target position, select the best-matching source position plus a
    window of +/- `w` neighbours (clamped to valid indices) and return their
    similarity-weighted average; result shape (batch_size, target_length, wdim).
    """
    # if axis = 0 
    # source_length = amax
    # target_length = qmax 
    # results shape=(batch_size, qmax, wdim)
    # vectors shape=(batch_size, amax, wdim)
    # match   shape=(batch_size, qmax, amax)
    batch_size, qmax, amax = match.shape
    _, _, wdim = vectors.shape
    if axis == 0:
        source_length = amax
        target_length = qmax
        dims = [0,1,2]
    elif axis == 1:
        source_length = qmax
        target_length = amax
        dims = [0,2,1]
    match = K.permute_dimensions(match, dims)
    # NOTE(review): source/target lengths are recomputed here, overriding the
    # if/elif assignments above; the two formulations agree for axis in {0, 1}.
    source_length = (qmax, amax)[1 - axis]
    target_length = (qmax, amax)[axis]
    m = source_length - 1  # last valid source index, used for clamping below
    batched_length = batch_size * target_length
    # reshaped match shape=(batch_size * qmax, amax)
    batched_match = match.reshape((batched_length, source_length))
    # shape=(batch_size * qmax,), range in [0,1]
    value = batched_match.max(axis=1)
    # shape=(batch_size * qmax, ), range in [0, amax) 
    index = batched_match.argmax(axis=1)
    params = []
    params.append((value, index))
    # Gather the +/- w window around the argmax, clamping indices to [0, m].
    for j in range(1, w + 1):
        ib = index - j
        ibs = T.set_subtensor(ib[(ib < 0).nonzero()], 0)
        iu = index + j
        ius = T.set_subtensor(iu[(iu > m).nonzero()], m)
        params.append((batched_match[T.arange(batched_length), ibs], ibs))
        params.append((batched_match[T.arange(batched_length), ius], ius))
    i0 = T.repeat(T.arange(batch_size), target_length).flatten()
    indexed = 0
    weights = 0
    # Similarity-weighted average of the selected source vectors.
    for value, index in params:
        # shape=(batch_size * qmax,) => shape=(batch_size * qmax, 1) 
        value = K.expand_dims(value, 1)
        # shape=(batch_size * qmax, wdim)
        indexed += vectors[i0, index, :] * value
        weights += value
    results = (indexed / weights).reshape((batch_size, target_length, wdim))
    return results
Example 80
Project: YOLO-3D-Box   Author: scutan90   File: model.py    MIT License 4 votes vote down vote up
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5):
    '''Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(T, 2), wh
    num_classes: integer
    ignore_thresh: float, the iou threshold whether to ignore object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)

    '''
    yolo_outputs = args[:3]
    y_true = args[3:]
    # Anchor indices used by each of the three output scales.
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]
    # Network input size in pixels: grid of the first scale times stride 32.
    input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(3)]
    loss = 0
    m = K.shape(yolo_outputs[0])[0]  # batch size

    for l in range(3):
        object_mask = y_true[l][..., 4:5]  # 1 where a ground-truth box exists
        true_class_probs = y_true[l][..., 5:]

        pred_xy, pred_wh, pred_confidence, pred_class_probs = yolo_head(yolo_outputs[l],
             anchors[anchor_mask[l]], num_classes, input_shape)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet box loss.
        xy_delta = (y_true[l][..., :2]-pred_xy)*grid_shapes[l][::-1]
        wh_delta = K.log(y_true[l][..., 2:4]) - K.log(pred_wh)
        # Avoid log(0)=-inf.
        wh_delta = K.switch(object_mask, wh_delta, K.zeros_like(wh_delta))
        box_delta = K.concatenate([xy_delta, wh_delta], axis=-1)
        # 2 - w*h weights small boxes more (w, h presumably normalized to
        # [0, 1] — confirm against preprocess_true_boxes).
        box_delta_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')
        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])
            iou = box_iou(pred_box[b], true_box)
            # Predictions overlapping any ground truth above ignore_thresh
            # are excluded from the no-object confidence loss.
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))
            return b+1, ignore_mask
        _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        # Squared-error (Darknet-style) losses rather than cross-entropy.
        box_loss = object_mask * K.square(box_delta*box_delta_scale)
        confidence_loss = object_mask * K.square(1-pred_confidence) + \
            (1-object_mask) * K.square(0-pred_confidence) * ignore_mask
        class_loss = object_mask * K.square(true_class_probs-pred_class_probs)
        loss += K.sum(box_loss) + K.sum(confidence_loss) + K.sum(class_loss)
    return loss / K.cast(m, K.dtype(loss))