Python keras.backend.argmax() Examples

The following are code examples for showing how to use keras.backend.argmax(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: Scene-Understanding   Author: foamliu   File: utils.py    MIT License 7 votes vote down vote up
def categorical_crossentropy_with_class_rebal(y_true, y_pred):
    """Categorical cross-entropy with per-class rebalancing weights.

    Flattens both tensors to (pixels, num_classes), looks up a per-pixel
    weight from the true class via the module-level `factor` table, and
    scales the targets by it before computing cross-entropy.
    Relies on module globals `num_classes` and `factor`.
    """
    y_true = K.reshape(y_true, (-1, num_classes))
    y_pred = K.reshape(y_pred, (-1, num_classes))

    # The weight for each pixel comes from its true class.
    idx_max = K.argmax(y_true, axis=1)
    weights = K.gather(factor, idx_max)
    weights = K.reshape(weights, (-1, 1))

    # Rebalance the targets by class weight.
    y_true = y_true * weights

    # BUG FIX: Keras 2's signature is categorical_crossentropy(target, output);
    # the original passed (y_pred, y_true), silently swapping them.
    cross_ent = K.categorical_crossentropy(y_true, y_pred)
    cross_ent = K.mean(cross_ent, axis=-1)

    return cross_ent


# getting the number of GPUs 
Example 2
Project: cloudFCN   Author: aliFrancis   File: weighted_loss.py    Apache License 2.0 7 votes vote down vote up
def w_categorical_crossentropy(y_true, y_pred, weights):
    """
    Keras-style categorical crossentropy loss function, with weighting for each class.

    Parameters
    ----------
    y_true : Tensor
        Truth labels.
    y_pred : Tensor
        Predicted values.
    weights: Tensor
        Multiplicative factor for loss per class.

    Returns
    -------
    loss : Tensor
        Weighted crossentropy loss between labels and predictions.

    """
    # Pick the weight of each sample's true class.
    y_true_max = K.argmax(y_true, axis=-1)
    weighted_true = K.gather(weights, y_true_max)
    # BUG FIX: Keras 2 expects categorical_crossentropy(target, output);
    # the original passed (y_pred, y_true), swapping target and prediction.
    loss = K.categorical_crossentropy(y_true, y_pred) * weighted_true
    return loss
Example 3
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulelayers.py    MIT License 6 votes vote down vote up
def call(self, inputs, **kwargs):
    """Mask capsule outputs, keeping only one capsule per sample.

    inputs: either [capsules, one_hot_mask] (training: true label given)
    or just the capsule tensor of shape [None, num_capsule, dim_capsule]
    (inference: the mask is built from the longest capsule).
    Returns a tensor of shape [None, num_capsule * dim_capsule].
    """
    # `isinstance` instead of `type(...) is list`: idiomatic, and accepts
    # list subclasses; behavior is otherwise unchanged.
    if isinstance(inputs, list):  # true label provided as a one-hot mask
        assert len(inputs) == 2
        inputs, mask = inputs
    else:
        # No label: capsule lengths (Euclidean norm over dim_capsule)...
        lengths = K.sqrt(K.sum(K.square(inputs), -1))
        # ...and a one-hot mask over the index of the longest capsule.
        mask = K.one_hot(indices=K.argmax(lengths, 1),
                         num_classes=lengths.get_shape().as_list()[1])

    # Zero out every capsule except the selected one, then flatten.
    masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
    return masked
Example 4
Project: CapsAttnNet   Author: rstager   File: train.py    MIT License 6 votes vote down vote up
def margin_loss(y_true, y_pred):
    """
    Margin loss for Eq.(4). Should also work when y_true[i, :] contains
    more than one `1` (not tested).
    :param y_true: [None, n_classes, n_instance]
    :param y_pred: [None, n_classes, n_instance]
    :return: a scalar loss value.
    """
    # Present classes are pushed above 0.9, absent ones below 0.1;
    # the down-weighting factor 0.5 follows the CapsNet paper.
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))

    # Sum over classes, then average over the batch.
    # (Removed: an unused `acc` computation and a block of commented-out
    # tf.Print debugging calls that contributed nothing to the loss.)
    return K.mean(K.sum(L, 1))
Example 5
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 6 votes vote down vote up
def sparse_accuracy(y_true, y_pred):
    """Pixel accuracy for sparse integer labels, ignoring label 0.

    y_true holds integer class ids (0 = void/ignore); y_pred is a softmax
    over `classes` outputs. Label i in y_true corresponds to channel i-1
    of y_pred, hence the one-hot over classes + 1 below.
    """
    classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, classes))

    # One-hot over classes + 1 so that channel 0 marks the "ignore" label.
    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    # Positions whose true label is 0 are excluded from the accuracy.
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    # Drop the ignore channel; remaining channels align with y_pred.
    y_true = tf.stack(unpacked[1:], axis=-1)

    # correct & legal predictions / legal positions
    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))


# Define different models


# 3D-FCN model 
Example 6
Project: Colorful-Image-Colorization   Author: foamliu   File: utils.py    MIT License 6 votes vote down vote up
def categorical_crossentropy_color(y_true, y_pred):
    """Class-rebalanced cross-entropy over the 313 quantized ab color bins.

    Flattens both tensors to (pixels, 313), weights each pixel's targets by
    the rarity of its true bin (module-level `prior_factor` table), then
    computes cross-entropy.
    """
    q = 313  # number of quantized ab color bins
    y_true = K.reshape(y_true, (-1, q))
    y_pred = K.reshape(y_pred, (-1, q))

    # Per-pixel rebalancing weight, looked up from the true bin.
    idx_max = K.argmax(y_true, axis=1)
    weights = K.gather(prior_factor, idx_max)
    weights = K.reshape(weights, (-1, 1))

    # Rebalance the targets by bin rarity.
    y_true = y_true * weights

    # BUG FIX: Keras 2's signature is categorical_crossentropy(target, output);
    # the original passed (y_pred, y_true), silently swapping them.
    cross_ent = K.categorical_crossentropy(y_true, y_pred)
    cross_ent = K.mean(cross_ent, axis=-1)

    return cross_ent


# getting the number of GPUs 
Example 7
Project: object-detection   Author: kaka-lin   File: test_tiny_yolo.py    MIT License 6 votes vote down vote up
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
    """Keep only boxes whose best class score reaches `threshold`.

    Returns (scores, boxes, classes) restricted to the surviving boxes.
    """
    # Per-class score for every box: objectness times class probability.
    all_scores = box_confidence * box_class_probs

    # Best class (index) and its score, per box.
    best_class = K.argmax(all_scores, axis=-1)
    best_score = K.max(all_scores, axis=-1, keepdims=False)

    # Boolean mask selecting the boxes worth keeping (score >= threshold).
    keep = best_score >= threshold

    # Apply the mask to scores, boxes and classes alike.
    return (tf.boolean_mask(best_score, keep),
            tf.boolean_mask(boxes, keep),
            tf.boolean_mask(best_class, keep))
Example 8
Project: CDAE4InfoExtraction   Author: grassknoted   File: capsulelayers.py    MIT License 6 votes vote down vote up
def call(self, inputs, **kwargs):
    """Mask capsule outputs, keeping only one capsule per sample.

    inputs: either [capsules, one_hot_mask] (training: true label given)
    or just the capsule tensor of shape [None, num_capsule, dim_capsule]
    (inference: the mask is built from the longest capsule).
    Returns a tensor of shape [None, num_capsule * dim_capsule].
    """
    # `isinstance` instead of `type(...) is list`: idiomatic, and accepts
    # list subclasses; behavior is otherwise unchanged.
    if isinstance(inputs, list):  # true label provided as a one-hot mask
        assert len(inputs) == 2
        inputs, mask = inputs
    else:
        # No label: capsule lengths (Euclidean norm over dim_capsule)...
        lengths = K.sqrt(K.sum(K.square(inputs), -1))
        # ...and a one-hot mask over the index of the longest capsule.
        mask = K.one_hot(indices=K.argmax(lengths, 1),
                         num_classes=lengths.get_shape().as_list()[1])

    # Zero out every capsule except the selected one, then flatten.
    masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
    return masked
Example 9
Project: keras-fcn   Author: JihongJu   File: score.py    MIT License 6 votes vote down vote up
def compute_error_matrix(y_true, y_pred):
    """Compute Confusion matrix (a.k.a. error matrix).

    a       predicted
    c       0   1   2
    t  0 [[ 5,  3,  0],
    u  1  [ 2,  3,  1],
    a  2  [ 0,  2, 11]]
    l

    Note true positives are in the diagonal.
    """
    # Find channel axis given backend
    if K.image_data_format() == 'channels_last':
        ax_chn = 3
    else:
        ax_chn = 1
    classes = y_true.shape[ax_chn]
    # NOTE(review): K.argmax returns a backend tensor, but `.flatten()` is a
    # NumPy-style call — this appears to assume the result behaves like an
    # ndarray; confirm against the backend in use.
    # `get_confusion` is defined elsewhere in this project.
    confusion = get_confusion(K.argmax(y_true, axis=ax_chn).flatten(),
                              K.argmax(y_pred, axis=ax_chn).flatten(),
                              classes)
    return confusion
Example 10
Project: Multi-level-DCNet   Author: ssrp   File: capsulelayers.py    GNU General Public License v3.0 6 votes vote down vote up
def call(self, inputs, **kwargs):
    """Mask capsule outputs, keeping only one capsule per sample.

    inputs: either [capsules, one_hot_mask] (training: true label given)
    or just the capsule tensor of shape [None, num_capsule, dim_capsule]
    (inference: the mask is built from the longest capsule).
    Returns a tensor of shape [None, num_capsule * dim_capsule].
    """
    # `isinstance` instead of `type(...) is list`: idiomatic, and accepts
    # list subclasses; behavior is otherwise unchanged.
    if isinstance(inputs, list):  # true label provided as a one-hot mask
        assert len(inputs) == 2
        inputs, mask = inputs
    else:
        # No label: capsule lengths (Euclidean norm over dim_capsule)...
        lengths = K.sqrt(K.sum(K.square(inputs), -1))
        # ...and a one-hot mask over the index of the longest capsule.
        mask = K.one_hot(indices=K.argmax(lengths, 1),
                         num_classes=lengths.get_shape().as_list()[1])

    # Zero out every capsule except the selected one, then flatten.
    masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
    return masked
Example 11
Project: semantic-tagging   Author: bjerva   File: semtagger.py    GNU General Public License v3.0 6 votes vote down vote up
def make_weight_matrix(X_train):
    '''
    Create sample weights

    Builds a float32 matrix shaped like X_train, assigning a weight to every
    word position based on the argmax class of the module-level `y_train`.
    (All three branches currently assign 1; the commented values show the
    intended alternatives.)
    '''
    weight_matrix = np.zeros_like(X_train, dtype=np.float32)
    for sent_idx, sentence in enumerate(X_train):
        for word_idx in range(len(sentence)):
            label = np.argmax(y_train[sent_idx, word_idx])
            if label == 0:
                weight_matrix[sent_idx, word_idx] = 1  # e-8
            elif label <= 1:
                weight_matrix[sent_idx, word_idx] = 1  # e-4
            else:
                weight_matrix[sent_idx, word_idx] = 1  # 0

    return weight_matrix
Example 12
Project: CapsNet-Fashion-MNIST   Author: XifengGuo   File: capsulelayers.py    MIT License 6 votes vote down vote up
def call(self, inputs, **kwargs):
    """Mask capsule outputs, keeping only one capsule per sample.

    inputs: either [capsules, one_hot_mask] (training: true label given)
    or just the capsule tensor of shape [None, num_capsule, dim_capsule]
    (inference: the mask is built from the longest capsule).
    Returns a tensor of shape [None, num_capsule * dim_capsule].
    """
    # `isinstance` instead of `type(...) is list`: idiomatic, and accepts
    # list subclasses; behavior is otherwise unchanged.
    if isinstance(inputs, list):  # true label provided as a one-hot mask
        assert len(inputs) == 2
        inputs, mask = inputs
    else:
        # No label: capsule lengths (Euclidean norm over dim_capsule)...
        lengths = K.sqrt(K.sum(K.square(inputs), -1))
        # ...and a one-hot mask over the index of the longest capsule.
        mask = K.one_hot(indices=K.argmax(lengths, 1),
                         num_classes=lengths.get_shape().as_list()[1])

    # Zero out every capsule except the selected one, then flatten.
    masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
    return masked
Example 13
Project: Keras-TextClassification   Author: yongzhuo   File: capsule.py    MIT License 6 votes vote down vote up
def call(self, inputs, **kwargs):
    """Mask capsule outputs, keeping only one capsule per sample.

    inputs: either [capsules, one_hot_mask] (training: true label given)
    or just the capsule tensor of shape [None, num_capsule, dim_capsule]
    (inference: the mask is built from the longest capsule).
    Returns a tensor of shape [None, num_capsule * dim_capsule].
    """
    # `isinstance` instead of `type(...) is list`: idiomatic, and accepts
    # list subclasses; behavior is otherwise unchanged.
    if isinstance(inputs, list):  # true label provided as a one-hot mask
        assert len(inputs) == 2
        inputs, mask = inputs
    else:
        # No label: capsule lengths (Euclidean norm over dim_capsule)...
        lengths = K.sqrt(K.sum(K.square(inputs), -1))
        # ...and a one-hot mask over the index of the longest capsule.
        mask = K.one_hot(indices=K.argmax(lengths, 1),
                         num_classes=lengths.get_shape().as_list()[1])

    # Zero out every capsule except the selected one, then flatten.
    masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
    return masked
Example 14
Project: unet-pspnet-unet2d-segment-implement   Author: qianmingduowan   File: metrics.py    GNU General Public License v3.0 6 votes vote down vote up
def iou(y_true, y_pred, label: int):
    """
    Return the Intersection over Union (IoU) for a given label.
    Args:
        y_true: the expected y values as a one-hot
        y_pred: the predicted y values as a one-hot or softmax output
        label: the label to return the IoU for
    Returns:
        the IoU for the given label
    """
    # Binary indicator maps: 1 where the argmax class equals `label`.
    truth_is_label = K.cast(K.equal(K.argmax(y_true), label), K.floatx())
    pred_is_label = K.cast(K.equal(K.argmax(y_pred), label), K.floatx())

    # |A AND B| and |A OR B| via sums of the indicator maps.
    intersection = K.sum(truth_is_label * pred_is_label)
    union = K.sum(truth_is_label) + K.sum(pred_is_label) - intersection

    # Define IoU as 1 when the union is empty, avoiding division by zero.
    return K.switch(K.equal(union, 0), 1.0, intersection / union)
Example 15
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics_theano.py    Apache License 2.0 6 votes vote down vote up
def f1_score_keras(y_true, y_pred):
    """Macro-averaged F1 score (Theano backend; K.T is theano.tensor)."""
    # Convert probabilities to a 0/1 prediction matrix: place a 1 in the
    # argmax column of each row (symbolic tensors require set_subtensor).
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    # where y_true=1 and y_pred=1 -> true positive, counted per class
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    # recall for each class (0 where the class has no gold members)
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred/gold_cnt)

    # f1 for each class (0 where precision + recall is 0)
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))

    # return average f1 score over all classes
    return K.mean(f1_class)
Example 16
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics_theano.py    Apache License 2.0 6 votes vote down vote up
def f1_score_taskB(y_true, y_pred):
    """Per-class F1 scores (Theano backend variant).

    Returns the vector of per-class F1 values (not averaged).
    """
    # BUG FIX: symbolic tensors do not support NumPy-style item assignment
    # (`y_pred_ones[:, ...] = 1`); build the 0/1 prediction matrix with
    # set_subtensor, exactly as the sibling metrics in this file do.
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    # where y_true=1 and y_pred=1 -> true positive, counted per class
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    # recall for each class (0 where the class has no gold members)
    recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred/gold_cnt)

    # f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))

    # return the per-class f1 vector
    return f1_class
Example 17
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics_theano.py    Apache License 2.0 6 votes vote down vote up
def f1_score_semeval(y_true, y_pred):
    """SemEval-style F1: the mean of class 0 and class 2 F1 scores
    (the neutral middle class is deliberately excluded).
    Theano backend; K.T is theano.tensor."""
    # Convert probabilities to a 0/1 prediction matrix: place a 1 in the
    # argmax column of each row (symbolic tensors require set_subtensor).
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    # where y_true=1 and y_pred=1 -> true positive, counted per class
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # recall for each class (0 where the class has no gold members)
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred / gold_cnt)

    # f1 for each class
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))

    # average the F1 of the first and last class only (SemEval convention)
    return (f1_class[0] + f1_class[2])/2.0
Example 18
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics_theano.py    Apache License 2.0 6 votes vote down vote up
def precision_keras(y_true, y_pred):
    """Mean per-class precision (Theano backend variant)."""
    # BUG FIX: symbolic tensors do not support NumPy-style item assignment
    # (`y_pred_ones[:, ...] = 1`); build the 0/1 prediction matrix with
    # set_subtensor, exactly as the sibling metrics in this file do.
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    # where y_true=1 and y_pred=1 -> true positive, counted per class
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    # return average precision over all classes
    return K.mean(precision)
Example 19
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics_theano.py    Apache License 2.0 6 votes vote down vote up
def f1_score_task3(y_true, y_pred):
    """F1 score of class 1 only (Theano backend; K.T is theano.tensor)."""
    # Convert probabilities to a 0/1 prediction matrix: place a 1 in the
    # argmax column of each row (symbolic tensors require set_subtensor).
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    # where y_true=1 and y_pred=1 -> true positive, counted per class
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    # recall for each class (0 where the class has no gold members)
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred/gold_cnt)

    # f1 for each class
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))

    # return only class 1's F1 (the task-specific target class)
    return f1_class[1]
Example 20
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics_tf.py    Apache License 2.0 6 votes vote down vote up
def f1_score_taskB(y_true, y_pred):
    """Per-class F1 scores (TensorFlow backend variant).

    NOTE(review): `y_pred_ones[:, ...] = 1` below is NumPy-style item
    assignment, which symbolic TF tensors do not support — this likely
    fails at graph-build time. The Theano variants in this project use
    set_subtensor instead; confirm how this version is actually invoked.
    """
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # where y_true=1 and y_pred=1 -> true positive, counted per class
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # recall for each class (0 where the class has no gold members)
    recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred / gold_cnt)

    # f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))

    # return the per-class f1 vector (not averaged)
    return f1_class
Example 21
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics_tf.py    Apache License 2.0 6 votes vote down vote up
def precision_keras(y_true, y_pred):
    """Mean per-class precision (TensorFlow backend variant).

    NOTE(review): `y_pred_ones[:, ...] = 1` below is NumPy-style item
    assignment, which symbolic TF tensors do not support — this likely
    fails at graph-build time; the Theano variants use set_subtensor.
    Confirm how this version is actually invoked.
    """
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # where y_true=1 and y_pred=1 -> true positive, counted per class
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # return average precision over all classes
    return K.mean(precision)
Example 22
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics.py    Apache License 2.0 6 votes vote down vote up
def precision_keras(y_true, y_pred):
    """Mean per-class precision.

    NOTE(review): `y_pred_ones[:, ...] = 1` below is NumPy-style item
    assignment, which symbolic backend tensors do not support — this
    likely fails at graph-build time; the Theano variants in this project
    use set_subtensor. Confirm how this version is actually invoked.
    """
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # where y_true=1 and y_pred=1 -> true positive, counted per class
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    # return average precision over all classes
    return K.mean(precision)
Example 23
Project: DeepLearning-OCR   Author: xingjian-f   File: util.py    Apache License 2.0 6 votes vote down vote up
def get_sample_weight(label, whole_set):
    """Per-character sample weights for OCR sequence labels.

    Parameters
    ----------
    label : ndarray
        One-hot labels of shape (n_samples, seq_len, n_chars). If it has
        fewer than 3 dims (output_size == 1) no weighting is needed and
        None is returned.
    whole_set : sequence
        Maps class index -> character; the token 'empty' marks padding.

    Returns
    -------
    ndarray or None
        Weight 1 for every real character and for the FIRST 'empty' in
        each sequence; 0 for the remaining padding positions.
    """
    if label.ndim < 3:  # in case output_size==1
        return None
    weights = []
    for sample in label:
        row = []
        seen_empty = False
        for step in sample:
            char = whole_set[np.argmax(step)]
            if char == 'empty':
                # Only the first padding position keeps a non-zero weight.
                weight = 0 if seen_empty else 1  # TODO: tune this weight
                seen_empty = True
            else:
                weight = 1
            row.append(weight)
        weights.append(row)
    return np.asarray(weights)
Example 24
Project: Crack-Detection-Capsule-Network   Author: neil7   File: capsulelayers.py    The Unlicense 6 votes vote down vote up
def call(self, inputs, **kwargs):
    """Mask capsule outputs, keeping only one capsule per sample.

    inputs: either [capsules, one_hot_mask] (training: true label given)
    or just the capsule tensor of shape [None, num_capsule, dim_capsule]
    (inference: the mask is built from the longest capsule).
    Returns a tensor of shape [None, num_capsule * dim_capsule].
    """
    # `isinstance` instead of `type(...) is list`: idiomatic, and accepts
    # list subclasses; behavior is otherwise unchanged.
    if isinstance(inputs, list):  # true label provided as a one-hot mask
        assert len(inputs) == 2
        inputs, mask = inputs
    else:
        # No label: capsule lengths (Euclidean norm over dim_capsule)...
        lengths = K.sqrt(K.sum(K.square(inputs), -1))
        # ...and a one-hot mask over the index of the longest capsule.
        mask = K.one_hot(indices=K.argmax(lengths, 1),
                         num_classes=lengths.get_shape().as_list()[1])

    # Zero out every capsule except the selected one, then flatten.
    masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
    return masked
Example 25
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License 5 votes vote down vote up
def sparse_accuracy(y_true, y_pred):
    """Pixel accuracy for sparse integer labels, ignoring label 0.

    y_true holds integer class ids (0 = void/ignore); y_pred is a softmax
    over `classes` outputs. Label i in y_true corresponds to channel i-1
    of y_pred, hence the one-hot over classes + 1 below.
    """
    classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, classes))

    # One-hot over classes + 1 so that channel 0 marks the "ignore" label.
    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    # Positions whose true label is 0 are excluded from the accuracy.
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    # Drop the ignore channel; remaining channels align with y_pred.
    y_true = tf.stack(unpacked[1:], axis=-1)

    # correct & legal predictions / legal positions
    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))
Example 26
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License 5 votes vote down vote up
def call(self, inputs, **kwargs):
    """Mask capsules with the true label (if given) or the predicted one.

    inputs: either (X, y) — mask by the one-hot label y — or just X, in
    which case the mask is built from the longest capsule.
    Returns the flattened masked tensor.
    """
    # BUG FIX: the original tested `type(inputs) is list or tuple`, which
    # parses as `(type(inputs) is list) or tuple` and is ALWAYS truthy, so
    # the prediction branch below was unreachable.
    if isinstance(inputs, (list, tuple)):
        inputs, mask = inputs
    else:
        # Capsule lengths (epsilon keeps sqrt's gradient finite at 0).
        pred = K.sqrt(K.sum(K.square(inputs), axis=-1) + K.epsilon())
        mask = K.one_hot(indices=K.argmax(pred, 1), num_classes=pred.get_shape().as_list()[1])
    return K.batch_flatten(inputs * K.expand_dims(mask, axis=-1))
Example 27
Project: PiCamNN   Author: PiSimo   File: keras_yolo.py    MIT License 5 votes vote down vote up
def yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold=.6):
    """Filter YOLO boxes based on object and class confidence."""
    # Per-class score: objectness times class probability.
    scores_per_class = box_confidence * box_class_probs
    # Best class index and its score for every box.
    top_class = K.argmax(scores_per_class, axis=-1)
    top_score = K.max(scores_per_class, axis=-1)
    # Keep only boxes whose best score clears the threshold.
    keep = top_score >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    kept_boxes = tf.boolean_mask(boxes, keep)
    kept_scores = tf.boolean_mask(top_score, keep)
    kept_classes = tf.boolean_mask(top_class, keep)
    return kept_boxes, kept_scores, kept_classes
Example 28
Project: dialectal_arabic_segmenter   Author: qcri   File: ChainCRF.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
    '''Variant of sparse_chain_crf_loss but with one-hot encoded tags y.'''
    # Collapse the one-hot tags to integer indices, then delegate.
    tag_indices = K.cast(K.argmax(y, -1), 'int32')
    return sparse_chain_crf_loss(tag_indices, x, U, b_start, b_end, mask)
Example 29
Project: dialectal_arabic_segmenter   Author: qcri   File: ChainCRF.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def viterbi_decode(x, U, b_start=None, b_end=None, mask=None):
    '''Computes the best tag sequence y for a given input x, i.e. the one that
    maximizes the value of path_energy.'''
    # Fold the optional start/end boundary energies into the inputs.
    x = add_boundary_energy(x, b_start, b_end, mask)

    # Initial state: scores of the first step; gamma accumulates backpointers.
    alpha_0 = x[:, 0, :]
    gamma_0 = K.zeros_like(alpha_0)
    initial_states = [gamma_0, alpha_0]
    # Viterbi recursion: per step and tag, keep the best predecessor
    # (argmax, stored as float in gamma) and its score (max).
    _, gamma = _forward(x,
                        lambda B: [K.cast(K.argmax(B, axis=1), K.floatx()), K.max(B, axis=1)],
                        initial_states,
                        U,
                        mask)
    # Follow the stored backpointers backwards to recover the best path.
    y = _backward(gamma, mask)
    return y
Example 30
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0 5 votes vote down vote up
def REINFORCE(y_true, y_pred):
    """REINFORCE-style loss: -(reward - baseline) * log pi(action).

    Reward is 1 when the greedy prediction matches the label, 0 otherwise;
    the batch mean of the reward serves as the baseline.
    """
    correct = K.argmax(y_true, axis=1)
    guess = K.argmax(y_pred, axis=1)  # gradients don't flow through this
    # BUG FIX: K.equal yields a boolean tensor; cast it so the arithmetic
    # below (baseline subtraction, multiplication) is valid on all backends.
    adv = K.cast(K.equal(correct, guess), K.floatx())  # reward
    baseline = K.mean(adv)  # baseline
    adv = adv - baseline  # advantage
    logit = K.log(K.max(y_pred, axis=1))  # log probability of action taken, this makes our model and advantage actor critic
    # Keras does cost minimization, but we want to maximize reward probability, thus this minus sign
    return -adv*logit  # gradient will be -(r-b)*grad(log(pi))



# the data, shuffled and split between train and test sets 
Example 31
Project: object-detection   Author: kaka-lin   File: model.py    MIT License 5 votes vote down vote up
def yolo_filter_boxes(boxes, box_scores, box_class_probs, threshold = .6):
    """Keep boxes whose best class score reaches `threshold`.

    Returns (scores, boxes, classes) for the surviving boxes.
    (`box_class_probs` is accepted for API compatibility but unused here.)
    """
    # Best class index and its score per box.
    best_class = K.argmax(box_scores, axis=-1)
    best_score = K.max(box_scores, axis=-1, keepdims=False)

    # Boolean mask, True for boxes to keep (score >= threshold).  # (3549, 3)
    keep = best_score >= threshold

    # Apply the mask to scores, boxes and classes alike.
    scores = tf.boolean_mask(best_score, keep)
    kept_boxes = tf.boolean_mask(boxes, keep)
    classes = tf.boolean_mask(best_class, keep)

    return scores, kept_boxes, classes
Example 32
Project: object-detection   Author: kaka-lin   File: keras_yolo.py    MIT License 5 votes vote down vote up
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    """Filter YOLO boxes based on object and class confidence."""
    # Per-class score: objectness times class probability.
    scores_per_class = box_confidence * box_class_probs
    # Best class index and its score for every box.
    top_class = K.argmax(scores_per_class, axis=-1)
    top_score = K.max(scores_per_class, axis=-1)
    # Keep only boxes whose best score clears the threshold.
    keep = top_score >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    kept_boxes = tf.boolean_mask(boxes, keep)
    kept_scores = tf.boolean_mask(top_score, keep)
    kept_classes = tf.boolean_mask(top_class, keep)

    return kept_boxes, kept_scores, kept_classes
Example 33
Project: deepGroupv2   Author: albu5   File: networks.py    MIT License 5 votes vote down vote up
def my_categorical_accuracy(y_true, y_pred):
    """Mean accuracy over the class axis (axis=2) of one-hot tensors."""
    true_idx = kb.argmax(y_true, axis=2)
    pred_idx = kb.argmax(y_pred, axis=2)
    # BUG FIX: `true_idx == pred_idx` compares tensor object identity in
    # TF1-style graphs (always False); use tf.equal for elementwise compare.
    return tf.reduce_mean(tf.cast(tf.equal(true_idx, pred_idx), kb.floatx()), axis=None)
Example 34
Project: deepGroupv2   Author: albu5   File: networks.py    MIT License 5 votes vote down vote up
def my_categorical_crossentropy_mask(target, output, from_logits=False):
    """Categorical crossentropy between an output tensor and a target tensor.

    # Arguments
        target: A tensor of the same shape as `output`.
        output: A tensor resulting from a softmax
            (unless `from_logits` is True, in which
            case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.

    # Returns
        Output tensor. In the non-logits branch this is the SUM of the
        negative log-likelihoods over positions whose true class (argmax
        on axis 3) is non-zero; class 0 acts as a mask/ignore label.
    """
    # Note: tf.nn.softmax_cross_entropy_with_logits
    # expects logits, Keras expects probabilities.
    if not from_logits:
        # scale preds so that the class probas of each sample sum to 1
        output /= tf.reduce_sum(output,
                                len(output.get_shape()) - 1,
                                True)
        # manual computation of crossentropy; clip away 0/1 to keep the
        # log finite
        _epsilon = tf.convert_to_tensor(kb.epsilon(), output.dtype.base_dtype)
        output = tf.clip_by_value(output, _epsilon, 1. - _epsilon)
        valid_logs = tf.reduce_sum(target * tf.log(output), axis=3)
        # Zero out positions whose true class is 0 (the mask/padding class).
        valid_logs *= tf.cast(tf.argmax(target, axis=3) > 0, dtype=kb.floatx())
        return -tf.reduce_sum(valid_logs)
    else:
        # NOTE(review): the logits branch applies no class-0 masking —
        # confirm whether that asymmetry is intentional.
        return tf.nn.softmax_cross_entropy_with_logits(labels=target,
                                                       logits=output)
Example 35
Project: deepGroupv2   Author: albu5   File: networks.py    MIT License 5 votes vote down vote up
def my_categorical_accuracy_mask(y_true, y_pred):
    """Accuracy over axis-3 one-hot tensors, counting only positions whose
    true class is non-zero (class 0 acts as a mask/padding label)."""
    true_idx = tf.argmax(y_true, axis=3)
    pred_idx = tf.argmax(y_pred, axis=3)
    # BUG FIX: `true_idx == pred_idx` is an identity test on TF1-style
    # tensors (always False); use tf.equal for elementwise comparison.
    correct = tf.cast(tf.equal(true_idx, pred_idx), dtype=kb.floatx())
    # Only count positions with a valid (non-zero) true class.
    correct *= tf.cast(true_idx > 0, dtype=kb.floatx())
    return tf.reduce_sum(correct) / tf.reduce_sum(tf.cast(true_idx > 0, dtype=kb.floatx()))
Example 36
Project: zlyang-seq2seq-gec   Author: young-zonglin   File: basic_model.py    MIT License 5 votes vote down vote up
def _masked_categorical_accuracy(self, target, preds):
        """Per-position accuracy with padded positions zeroed out."""
        # 1.0 where target's and preds' argmax classes agree, else 0.0.
        hits = K.equal(K.argmax(target, axis=-1),
                       K.argmax(preds, axis=-1))
        hits = K.cast(hits, K.floatx())
        assert K.ndim(hits) == 2
        # Multiply by the padding mask so padded steps contribute nothing.
        y_mask = GetPadMask(self.batch_size)(target)
        return hits * y_mask

    # TODO 优化算法
    # 动态学习率 => done,在回调中更改学习率
    # Get done => Masked loss function. 
Example 37
Project: elmo-bilstm-cnn-crf   Author: UKPLab   File: ChainCRF.py    Apache License 2.0 5 votes vote down vote up
def chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
    '''Variant of sparse_chain_crf_loss but with one-hot encoded tags y.'''
    # Collapse the one-hot tags to integer indices, then delegate.
    tag_indices = K.cast(K.argmax(y, -1), 'int32')
    return sparse_chain_crf_loss(tag_indices, x, U, b_start, b_end, mask)
Example 38
Project: elmo-bilstm-cnn-crf   Author: UKPLab   File: ChainCRF.py    Apache License 2.0 5 votes vote down vote up
def viterbi_decode(x, U, b_start=None, b_end=None, mask=None):
    '''Computes the best tag sequence y for a given input x, i.e. the one that
    maximizes the value of path_energy.'''
    # Fold the optional start/end boundary energies into the inputs.
    x = add_boundary_energy(x, b_start, b_end, mask)

    # Initial state: scores of the first step; gamma accumulates backpointers.
    alpha_0 = x[:, 0, :]
    gamma_0 = K.zeros_like(alpha_0)
    initial_states = [gamma_0, alpha_0]
    # Viterbi recursion: per step and tag, keep the best predecessor
    # (argmax, stored as float in gamma) and its score (max).
    _, gamma = _forward(x,
                        lambda B: [K.cast(K.argmax(B, axis=1), K.floatx()), K.max(B, axis=1)],
                        initial_states,
                        U,
                        mask)
    # Follow the stored backpointers backwards to recover the best path.
    y = _backward(gamma, mask)
    return y
Example 39
Project: semantic-tagging   Author: bjerva   File: semtagger.py    GNU General Public License v3.0 5 votes vote down vote up
def calculate_accuracy(model, y, classes):
    '''
    Compare gold and predicted tags, skipping dummy labels (argmax <= 1).
    Prints and returns the accuracy plus per-sentence (indices, gold, pred)
    tuples. Uses the module-level `args` flag.
    TODO: Document fully.
    '''
    if args.aux:
        classes = classes[0]
        y = y[0]

    sent_tags = []
    n_correct = 0
    n_wrong = 0
    for i, sentence in enumerate(y):
        sent_tags.append([])
        for j, word in enumerate(sentence):
            gold = np.argmax(word)
            if gold <= 1:  # dummy / padding label: skip
                continue

            pred = np.argmax(classes[i, j])
            if pred == gold:
                n_correct += 1
            else:
                n_wrong += 1

            sent_tags[-1].append(([i, j], gold, pred))

    print('Corr: {0}, Err: {1}'.format(n_correct, n_wrong))
    accuracy = n_correct / float(n_correct + n_wrong)
    print('Accuracy without dummy labels', accuracy)

    return classes, accuracy, sent_tags
Example 40
Project: semantic-tagging   Author: bjerva   File: semtagger.py    GNU General Public License v3.0 5 votes vote down vote up
def actual_accuracy(act, pred):
    '''
    Calculate accuracy each batch.
    Keras' standard calculation factors in our padding classes. We don't.
    FIXME: Not always working
    '''
    # One-hot labels -> integer class indices.
    act_argm  = K.argmax(act, axis=-1)   # Indices of act. classes
    pred_argm = K.argmax(pred, axis=-1)  # Indices of pred. classes

    incorrect = K.cast(K.not_equal(act_argm, pred_argm), dtype='float32')
    correct   = K.cast(K.equal(act_argm, pred_argm), dtype='float32')
    # NOTE(review): K.sum(act) reduces over the WHOLE tensor, so `padding`
    # is a scalar (1 only if every label in the batch is zero), not a
    # per-token mask — possibly the cause of the FIXME above; confirm.
    padding   = K.cast(K.equal(K.sum(act), 0), dtype='float32')
    start     = K.cast(K.equal(act_argm, 0), dtype='float32')  # class 0 assumed to be <s>/pad — confirm
    end       = K.cast(K.equal(act_argm, 1), dtype='float32')  # class 1 assumed to be </s> — confirm

    pad_start     = K.maximum(padding, start)
    pad_start_end = K.maximum(pad_start, end) # 1 where pad, start or end

    # Subtract pad_start_end from correct, then check equality to 1
    # E.g.: act: [pad, pad, pad, <s>, tag, tag, tag, </s>]
    #      pred: [pad, tag, pad, <s>, tag, tag, err, </s>]
    #   correct: [1,     0,   1,   1,   1,   1,   0,    1]
    #     p_s_e: [1,     1,   1,   1,,  0,   0,   0,    1]
    #  corr-pse: [0,    -1,   0,   0,   1,   1,   0,    0] # Subtraction
    # actu_corr: [0,     0,   0,   0,   1,   1,   0,    0] # Check equality to 1
    corr_preds   = K.sum(K.cast(K.equal(correct - pad_start_end, 1), dtype='float32'))
    incorr_preds = K.sum(K.cast(K.equal(incorrect - pad_start_end, 1), dtype='float32'))
    total = corr_preds + incorr_preds
    accuracy = corr_preds / total

    return accuracy
Example 41
Project: textcaps   Author: vinojjayasundara   File: capsulelayers.py    MIT License 5 votes vote down vote up
def call(self, inputs, **kwargs):
        if type(inputs) is list:
            # True labels supplied: [capsule_output, one-hot mask].
            assert len(inputs) == 2
            inputs, mask = inputs
        else:
            # No labels (prediction mode): mask by the longest capsule,
            # whose length is the L2 norm along the last axis.
            lengths = K.sqrt(K.sum(K.square(inputs), -1))
            mask = K.one_hot(indices=K.argmax(lengths, 1),
                             num_classes=lengths.get_shape().as_list()[1])

        # Zero out every capsule except the selected one, then flatten.
        return K.batch_flatten(inputs * K.expand_dims(mask, -1))
Example 42
Project: neural-event-model   Author: pdasigi   File: metrics.py    Apache License 2.0 5 votes vote down vote up
def precision(y_true, y_pred):
    '''
    Custom Keras metric that measures the precision of a binary classifier.

    Assumes one-hot inputs where index 1 is the positive class.  Sums are
    cast to floats before dividing (argmax yields integer tensors, whose
    division is backend-dependent) and K.epsilon() guards the denominator,
    so the metric yields ~0 instead of NaN/Inf when a batch contains no
    positive predictions.
    '''
    # Assuming index 1 is positive: argmax gives 0/1 labels per sample.
    pred_indices = K.argmax(y_pred, axis=-1)
    true_indices = K.argmax(y_true, axis=-1)
    # Product is 1 only where both prediction and truth are positive.
    num_true_positives = K.cast(K.sum(pred_indices * true_indices), K.floatx())
    num_positive_predictions = K.cast(K.sum(pred_indices), K.floatx())
    return num_true_positives / (num_positive_predictions + K.epsilon())
Example 43
Project: neural-event-model   Author: pdasigi   File: metrics.py    Apache License 2.0 5 votes vote down vote up
def recall(y_true, y_pred):
    '''
    Custom Keras metric that measures the recall of a binary classifier.

    Assumes one-hot inputs where index 1 is the positive class.  Sums are
    cast to floats before dividing (argmax yields integer tensors, whose
    division is backend-dependent) and K.epsilon() guards the denominator,
    so the metric yields ~0 instead of NaN/Inf when a batch contains no
    positive ground-truth samples.
    '''
    # Assuming index 1 is positive: argmax gives 0/1 labels per sample.
    pred_indices = K.argmax(y_pred, axis=-1)
    true_indices = K.argmax(y_true, axis=-1)
    # Product is 1 only where both prediction and truth are positive.
    num_true_positives = K.cast(K.sum(pred_indices * true_indices), K.floatx())
    num_positive_truths = K.cast(K.sum(true_indices), K.floatx())
    return num_true_positives / (num_positive_truths + K.epsilon())
Example 44
Project: keras_ordinal_categorical_crossentropy   Author: JHart96   File: ordinal_categorical_crossentropy.py    GNU General Public License v3.0 5 votes vote down vote up
def loss(y_true, y_pred):
    # Ordinal categorical crossentropy: standard CE scaled by the normalized
    # distance between the true and predicted class indices, so far-off
    # predictions are penalized more than near misses.
    n_classes = K.int_shape(y_pred)[1]
    class_distance = K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1))
    weights = K.cast(class_distance / (n_classes - 1), dtype='float32')
    return (1.0 + weights) * losses.categorical_crossentropy(y_true, y_pred)
Example 45
Project: deep_em_classifier   Author: MikhailStartsev   File: blstm_model.py    GNU General Public License v3.0 5 votes vote down vote up
def categorical_f1_score_for_class(y_true, y_pred, class_i, dtype=None):
    """
    Sample-level F1 score for one class @class_i, i.e. for the binary
    problem "is @class_i" vs "is not @class_i".

    :param y_true: one-hot encoded true labels for a set of samples
    :param y_pred: predicted class probabilities for the same samples
    :param class_i: index of the class to score
    :param dtype: optional intermediate data type; pass e.g. 'float64' if the
                  default 'float32' raises a type mismatch
    :return: scalar F1 score for the binary @class_i problem
    """
    cast_dtype = dtype if dtype else 'float32'
    pred_labels = K.argmax(y_pred, axis=-1)
    # 1.0 where the predicted class is class_i, else 0.0.
    is_pred_i = K.cast(K.equal(pred_labels, class_i), cast_dtype)

    tp = K.sum(y_true[:, :, class_i] * is_pred_i)
    all_p_detections = K.sum(is_pred_i)
    all_p_true = K.sum(y_true[:, :, class_i])

    # Epsilon guards against division by zero on empty classes.
    precision = tp / (all_p_detections + K.epsilon())
    recall = tp / (all_p_true + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())


# A set of F1-score functions for the three major eye movement types that we use to monitor model training on both
# the training and the validation set. Almost the same signature as the "master"
# function categorical_f1_score_for_class() above. 
Example 46
Project: FormicID   Author: naturalis   File: logger.py    MIT License 5 votes vote down vote up
def top_k_cat_accuracy(y_true, y_pred, k=3):
    """Metric for showing the top k categorical accuracy, to be used in model
    compilation.

    Args:
        y_true (str): The true label.
        y_pred (str): The predicted label.
        k (int): Defines the number for a top k accuracy. Defaults to 3.

    Returns:
        type: Top k categorical accuracy.

    """
    # A sample counts as correct if the true class is among the k highest
    # scoring predictions.
    true_class = K.argmax(y_true, axis=-1)
    hit = K.in_top_k(y_pred, true_class, k)
    return K.mean(hit, axis=-1)
Example 47
Project: keras-bert-ner   Author: liushaoweihua   File: crf_utils.py    MIT License 5 votes vote down vote up
def _get_accuracy(self, y_true, y_pred, sparse_target=False):
        """Masked token-level accuracy between y_true and y_pred.

        If self.mask_pos is set, tokens whose true label equals the mask
        class are excluded from the average; otherwise a plain mean over
        all tokens is returned.

        Fix: the mask tensor was previously built unconditionally, before
        the `mask_pos is None` check.  With one-hot (non-sparse) targets,
        `K.squeeze(..., axis=-1)` on the class axis fails, so the unused
        mask crashed the graph; it is now built only when needed, from the
        raw (pre-argmax) labels.
        """
        y_pred = K.argmax(y_pred, -1)
        if self.mask_pos is not None:
            # 1 for real tokens, 0 where the true label is the mask class.
            token_ids = K.squeeze(K.cast(y_true, "int32"), axis=-1)
            mask = K.cast(
                1. - K.one_hot(token_ids, num_classes=len(self.tag2id))[:, :, self.mask_pos],
                K.floatx())
        if sparse_target:
            # Sparse labels: class ids live in the trailing singleton axis.
            y_true = K.cast(y_true[:, :, 0], K.dtype(y_pred))
        else:
            y_true = K.argmax(y_true, -1)
        judge = K.cast(K.equal(y_pred, y_true), K.floatx())
        if self.mask_pos is None:
            return K.mean(judge)
        return K.sum(judge * mask) / K.sum(mask)
Example 48
Project: keras-bert-ner   Author: liushaoweihua   File: crf_utils.py    MIT License 5 votes vote down vote up
def crf_marginal_accuracy(self, y_true, y_pred):
        """Use time-wise marginal argmax as prediction.
        `y_pred` must be an output from CRF with `learn_mode="marginal"`"""
        # Recover the CRF layer and its node index from the tensor's Keras
        # history, then recompute marginal probabilities from the raw input.
        crf, idx = y_pred._keras_history[:2]
        crf_input = crf._inbound_nodes[idx].input_tensors[0]
        marginals = crf.get_marginal_prob(crf_input, None)
        return self._get_accuracy(y_true, marginals, crf.sparse_target)
Example 49
Project: keras-bert-ner   Author: liushaoweihua   File: crf_utils-checkpoint.py    MIT License 5 votes vote down vote up
def _get_accuracy(self, y_true, y_pred, sparse_target=False):
        """Masked token-level accuracy between y_true and y_pred.

        If self.mask_pos is set, tokens whose true label equals the mask
        class are excluded from the average; otherwise a plain mean over
        all tokens is returned.

        Fix: the mask tensor was previously built unconditionally, before
        the `mask_pos is None` check.  With one-hot (non-sparse) targets,
        `K.squeeze(..., axis=-1)` on the class axis fails, so the unused
        mask crashed the graph; it is now built only when needed, from the
        raw (pre-argmax) labels.
        """
        y_pred = K.argmax(y_pred, -1)
        if self.mask_pos is not None:
            # 1 for real tokens, 0 where the true label is the mask class.
            token_ids = K.squeeze(K.cast(y_true, "int32"), axis=-1)
            mask = K.cast(
                1. - K.one_hot(token_ids, num_classes=len(self.tag2id))[:, :, self.mask_pos],
                K.floatx())
        if sparse_target:
            # Sparse labels: class ids live in the trailing singleton axis.
            y_true = K.cast(y_true[:, :, 0], K.dtype(y_pred))
        else:
            y_true = K.argmax(y_true, -1)
        judge = K.cast(K.equal(y_pred, y_true), K.floatx())
        if self.mask_pos is None:
            return K.mean(judge)
        return K.sum(judge * mask) / K.sum(mask)
Example 50
Project: keras-bert-ner   Author: liushaoweihua   File: crf_utils-checkpoint.py    MIT License 5 votes vote down vote up
def crf_marginal_accuracy(self, y_true, y_pred):
        """Use time-wise marginal argmax as prediction.
        `y_pred` must be an output from CRF with `learn_mode="marginal"`"""
        # Recover the CRF layer and its node index from the tensor's Keras
        # history, then recompute marginal probabilities from the raw input.
        crf, idx = y_pred._keras_history[:2]
        crf_input = crf._inbound_nodes[idx].input_tensors[0]
        marginals = crf.get_marginal_prob(crf_input, None)
        return self._get_accuracy(y_true, marginals, crf.sparse_target)
Example 51
Project: Disaster_management_robot   Author: JoelRaymann   File: Postprocessing.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def YoloFilterBoxes(boxConfidence, boxes, boxClassProbs, threshold = .6):
    '''Filters YOLO boxes by thresholding on object and class confidence.

    Arguments:
        boxConfidence {tensor} -- shape (19, 19, 5, 1), probability that a
        recognized item is present in the box
        boxes {tensor} -- shape (19, 19, 5, 4), box geometry
        boxClassProbs {tensor} -- shape (19, 19, 5, 80), per-class
        probabilities for each box

    Keyword Arguments:
        threshold {float} -- boxes whose highest class probability score is
        below this value are discarded (default: {0.6})

    Returns:
        scores -- tensor of shape (None,), class probability score per kept box
        boxes -- tensor of shape (None, 4), (b_x, b_y, b_h, b_w) per kept box
        classes -- tensor of shape (None,), detected class index per kept box

    Dev Note: "None" appears because the number of kept boxes depends on the
    threshold; e.g. scores has shape (10,) if 10 boxes survive.
    '''
    # Per-box, per-class score = objectness * class probability.
    boxScores = boxConfidence * boxClassProbs

    # Best class for each box and the corresponding score.
    boxClasses = K.argmax(boxScores, axis = -1)
    boxClassScores = K.max(boxScores, axis = -1)

    # True where the best-class score reaches the threshold.
    keep = boxClassScores >= threshold

    # Drop everything the mask rejects.
    scores = tf.boolean_mask(boxClassScores, keep)
    boxes = tf.boolean_mask(boxes, keep)
    classes = tf.boolean_mask(boxClasses, keep)

    return scores, boxes, classes
Example 52
Project: Disaster_management_robot   Author: JoelRaymann   File: keras_yolo.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    """Filter YOLO boxes based on object and class confidence."""

    # Combined score per class, then best class/score per anchor box.
    box_scores = box_confidence * box_class_probs
    best_classes = K.argmax(box_scores, axis=-1)
    best_scores = K.max(box_scores, axis=-1)
    prediction_mask = best_scores >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    return (tf.boolean_mask(boxes, prediction_mask),
            tf.boolean_mask(best_scores, prediction_mask),
            tf.boolean_mask(best_classes, prediction_mask))
Example 53
Project: onnx-keras   Author: leodestiny   File: backend.py    MIT License 5 votes vote down vote up
def handle_arg_max(cls, node, input_dict):
        # Translate an ONNX ArgMax node into a Keras Lambda layer.
        data = input_dict[node.inputs[0]]
        axis = node.attrs["axis"]
        # ONNX keeps the reduced dimension by default (keepdims=1), but
        # Keras' argmax always drops it — warn about the mismatch.
        if node.attrs.get("keepdims", 1) == 1:
            warnings.warn("Definition of ArgMax with keepdims enabled is "
                          "incompatible between onnx and keras.",
                          UserWarning)

        return [Lambda(lambda x: K.argmax(x, axis=axis))(data)]
Example 54
Project: multilabel-nn   Author: cambridgeltl   File: metrics.py    MIT License 5 votes vote down vote up
def tp_tn_fp_fn(y_true, y_pred):
    # Confusion-matrix counts, with class 0 treated as the negative class.
    true_ids = K.argmax(y_true, axis=-1)
    pred_ids = K.argmax(y_pred, axis=-1)

    hit = K.equal(true_ids, pred_ids)
    miss = K.not_equal(true_ids, pred_ids)
    predicted_pos = K.not_equal(pred_ids, K.zeros_like(pred_ids))
    predicted_neg = K.equal(pred_ids, K.zeros_like(pred_ids))

    tp = K.sum(K.cast(hit & predicted_pos, 'int32'))
    tn = K.sum(K.cast(hit & predicted_neg, 'int32'))
    fp = K.sum(K.cast(miss & predicted_pos, 'int32'))
    fn = K.sum(K.cast(miss & predicted_neg, 'int32'))

    return tp, tn, fp, fn
Example 55
Project: applications   Author: geomstats   File: optimizers_test.py    MIT License 5 votes vote down vote up
def _test_no_grad(optimizer):
    # argmax has no gradient, so fitting through it must raise ValueError.
    inp = Input([3])
    hidden = Dense(10)(inp)
    out = Lambda(lambda t: 1.0 * K.reshape(K.cast(K.argmax(t), 'float32'), [-1, 1]))(hidden)
    mod = Model(inp, out)
    mod.compile(optimizer, 'mse')
    with pytest.raises(ValueError):
        mod.fit(np.zeros([10, 3]), np.zeros([10, 1], np.float32),
                batch_size=10, epochs=10)
Example 56
Project: show-attend-and-tell-keras   Author: zimmerrol   File: train.py    MIT License 5 votes vote down vote up
def inference(image_features, plot_attention):
    """Greedily decode a caption for one image, optionally plotting attention.

    Starts from <START>, repeatedly feeds the growing caption back into the
    inference model, and stops at <STOP> or after MAXIMUM_CAPTION_LENGTH
    steps.  Relies on module-level globals: initial_state_inference_model,
    inference_model, word_index_map, index_word_map, MAXIMUM_CAPTION_LENGTH.

    :param image_features: feature vector for a single image (wrapped into a
                           batch of one below)
    :param plot_attention: if truthy, display the attention map per word
    :return: the decoded sentence followed by its length, as a string
    """
    image_features = np.array([image_features])
    # Initial LSTM state is derived from the image features.
    state_h, state_c = initial_state_inference_model.predict(image_features)

    caption = [word_index_map["<START>"]]
    attentions = []

    current_word = None
    for t in range(MAXIMUM_CAPTION_LENGTH):
        caption_array = np.array(caption).reshape(1, -1)
        # The model consumes the whole caption so far plus the carried state.
        output, state_h, state_c, attention = inference_model.predict([image_features, caption_array, state_h, state_c])
        # Keep the attention of the newest timestep, as a 14x14 spatial map.
        attentions.append(attention[0, -1].reshape((14, 14)))

        # Greedy decoding: pick the most probable next word.
        current_word = np.argmax(output[0, -1])
        caption.append(current_word)

        if current_word == word_index_map["<STOP>"]:
            break
    sentence = [index_word_map[i] for i in caption[1:]]

    if plot_attention:
        print(len(attentions))
        # Arrange the per-word attention maps on a roughly square grid.
        x = int(np.sqrt(len(attentions)))
        y = int(np.ceil(len(attentions) / x))
        _, axes = plt.subplots(y, x, sharex="col", sharey="row")
        axes = axes.flatten()
        for i in range(len(attentions)):
            # Upsample the 14x14 map for display over the image.
            atn = skimage.transform.pyramid_expand(attentions[i], upscale=16, sigma=20)
            axes[i].set_title(sentence[i])
            axes[i].imshow(atn, cmap="gray")

        plt.show()

    return " ".join(sentence) + " ({0})".format(len(caption)-1)
Example 57
Project: show-attend-and-tell-keras   Author: zimmerrol   File: train.py    MIT License 5 votes vote down vote up
def masked_categorical_crossentropy(y_true, y_pred):
    # Crossentropy that ignores timesteps whose true token is <NULL> padding.
    mask_value = le._word_index_map["<NULL>"]
    true_ids = K.argmax(y_true)
    keep = 1.0 - K.cast(K.equal(true_ids, mask_value), K.floatx())
    loss = K.categorical_crossentropy(y_true, y_pred) * keep

    # take average w.r.t. the number of unmasked entries
    return K.sum(loss) / K.sum(keep)
Example 58
Project: naacl18-multitask_argument_mining   Author: UKPLab   File: ChainCRF.py    Apache License 2.0 5 votes vote down vote up
def chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
    '''Variant of sparse_chain_crf_loss but with one-hot encoded tags y.'''
    # Collapse one-hot tags to integer indices, then reuse the sparse loss.
    tag_indices = K.cast(K.argmax(y, -1), 'int32')
    return sparse_chain_crf_loss(tag_indices, x, U, b_start, b_end, mask)
Example 59
Project: naacl18-multitask_argument_mining   Author: UKPLab   File: ChainCRF.py    Apache License 2.0 5 votes vote down vote up
def viterbi_decode(x, U, b_start=None, b_end=None, mask=None):
    '''Computes the best tag sequence y for a given input x, i.e. the one that
    maximizes the value of path_energy.'''
    x = add_boundary_energy(x, b_start, b_end, mask)

    # Forward max-product pass; gamma stores per-timestep backpointers.
    alpha_0 = x[:, 0, :]
    states = [K.zeros_like(alpha_0), alpha_0]

    def best_step(B):
        # Per tag: backpointer (argmax over previous tags) and best score.
        return [K.cast(K.argmax(B, axis=1), K.floatx()), K.max(B, axis=1)]

    _, gamma = _forward(x, best_step, states, U, mask)
    # Trace the backpointers backwards to recover the optimal sequence.
    return _backward(gamma, mask)
Example 60
Project: DLToy   Author: Spground   File: YOLO_detect.py    GNU General Public License v3.0 5 votes vote down vote up
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
    """Filters YOLO boxes by thresholding on object and class confidence.

    Arguments:
    box_confidence -- tensor of shape (19, 19, 5, 1)
    boxes -- tensor of shape (19, 19, 5, 4)
    box_class_probs -- tensor of shape (19, 19, 5, 80)
    threshold -- boxes whose highest class probability score is below this
    value are discarded

    Returns:
    scores -- tensor of shape (None,), class probability score per kept box
    boxes -- tensor of shape (None, 4), (b_x, b_y, b_h, b_w) per kept box
    classes -- tensor of shape (None,), detected class index per kept box

    Note: "None" appears because the number of kept boxes depends on the
    threshold; e.g. scores has shape (10,) if 10 boxes survive.
    """
    # Per-box, per-class score = objectness * class probability.
    box_scores = box_confidence * box_class_probs  # 19 * 19 * 5 * 80

    # Best class and its score for every anchor box.
    best_class = K.argmax(box_scores, axis = -1)
    best_score = K.max(box_scores, axis = -1)

    # Boolean mask, True for boxes whose best score reaches the threshold.
    keep = best_score >= threshold

    # Apply the mask to scores, boxes and classes.
    scores = tf.boolean_mask(best_score, keep)
    boxes = tf.boolean_mask(boxes, keep)
    classes = tf.boolean_mask(best_class, keep)

    return scores, boxes, classes
Example 61
Project: DLToy   Author: Spground   File: keras_yolo.py    GNU General Public License v3.0 5 votes vote down vote up
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    """Filter YOLO boxes based on object and class confidence."""

    # Best class and score per anchor, from the combined score tensor.
    box_scores = box_confidence * box_class_probs
    top_class = K.argmax(box_scores, axis=-1)
    top_score = K.max(box_scores, axis=-1)
    prediction_mask = top_score >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    kept_boxes = tf.boolean_mask(boxes, prediction_mask)
    kept_scores = tf.boolean_mask(top_score, prediction_mask)
    kept_classes = tf.boolean_mask(top_class, prediction_mask)

    return kept_boxes, kept_scores, kept_classes
Example 62
Project: kpi2017   Author: deepmipt   File: layers.py    Apache License 2.0 5 votes vote down vote up
def answer_end_pred(context_encoding, question_attention_vector, context_mask, answer_start_distribution, W, dropout_rate):
    """Answer end prediction layer.

    Builds the end-pointer distribution conditioned on the predicted start
    position: the context encoding at the argmax of the start distribution
    is gathered, broadcast over all timesteps, combined with the context and
    question-attention features, and fed through a small TimeDistributed MLP
    followed by a masked softmax.
    """

    # Answer end prediction depends on the start prediction
    def s_answer_feature(x):
        # Index of the most probable start position for each batch item.
        maxind = K.argmax(
            x,
            axis=1,
        )
        return maxind

    # Predicted start index per batch element, cast for gather_nd below.
    x = Lambda(lambda x: K.tf.cast(s_answer_feature(x), dtype=K.tf.int32))(answer_start_distribution)
    # Gather the context encoding at [batch_index, start_index] pairs.
    start_feature = Lambda(lambda arg: K.tf.gather_nd(arg[0], K.tf.stack(
        [tf.range(K.tf.shape(arg[1])[0]), tf.cast(arg[1], K.tf.int32)], axis=1)))([context_encoding, x])

    # Broadcast the start feature across all context timesteps.
    start_feature = Lambda(lambda q: repeat_vector(q[0], q[1]))([start_feature, context_encoding])

    # Answer end prediction
    # Concatenate context, question attention, start feature, and their
    # element-wise interactions as input to the end-pointer MLP.
    answer_end = Lambda(lambda arg: concatenate([
        arg[0],
        arg[1],
        arg[2],
        multiply([arg[0], arg[1]]),
        multiply([arg[0], arg[2]])
    ]))([context_encoding, question_attention_vector, start_feature])

    answer_end = TimeDistributed(Dense(W, activation='relu'))(answer_end)
    answer_end = Dropout(rate=dropout_rate)(answer_end)
    answer_end = TimeDistributed(Dense(1))(answer_end)

    # apply masking
    # Softmax only over real (unpadded) context positions.
    answer_end = Lambda(lambda q: masked_softmax(q[0], q[1]))([answer_end, context_mask])
    answer_end = Lambda(lambda q: flatten(q))(answer_end)
    return answer_end
Example 63
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics_tf.py    Apache License 2.0 5 votes vote down vote up
def f1_score_semeval(y_true, y_pred):
    """SemEval-style F1: mean of the F1 scores of classes 0 and 2.

    A one-hot matrix of predictions is built by scattering 1s into a
    SparseTensor at (row, argmax) coordinates, then per-class precision,
    recall and F1 are computed with zero-guarded divisions.
    NOTE(review): tf.select, sparse_tensor_to_dense and the `dim=` kwarg
    are legacy TF-0.x/old-Keras APIs (tf.where / `axis=` in later versions).
    """
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
    # Row indices [0..batch) paired with the argmax column per row.
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'),
                              dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    # Scatter a 1 at (row, predicted_class) for every sample.
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_ture=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class (0 where the class has no gold members)
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='racall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='precision_f1_semeval')

    # average F1 over classes 0 and 2 only (class 1, neutral, is skipped)
    return (f1_class[0] + f1_class[2]) / 2.0
Example 64
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics_tf.py    Apache License 2.0 5 votes vote down vote up
def f1_score_task3(y_true, y_pred):
    """F1 score of class 1 only (Task 3 target class).

    Same SparseTensor scatter construction as f1_score_semeval above,
    but returns the F1 of a single class instead of an average.
    NOTE(review): tf.select, sparse_tensor_to_dense and the `dim=` kwarg
    are legacy TF-0.x/old-Keras APIs (tf.where / `axis=` in later versions).
    """
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    # Row indices [0..batch) paired with the argmax column per row.
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'),
                              dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    # Scatter a 1 at (row, predicted_class) for every sample.
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_ture=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class (0 where the class has no gold members)
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='racall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='precision_f1_semeval')

    # return the F1 of class 1 only
    return f1_class[1]
Example 65
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics.py    Apache License 2.0 5 votes vote down vote up
def f1_score_keras(y_true, y_pred):
    """Macro-averaged F1 over all classes.

    Same SparseTensor scatter construction as the f1_score_* variants in
    this file, averaged with K.mean over the per-class F1 values.
    NOTE(review): tf.select, sparse_tensor_to_dense and the `dim=` kwarg
    are legacy TF-0.x/old-Keras APIs (tf.where / `axis=` in later versions).
    """
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
    # Row indices [0..batch) paired with the argmax column per row.
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true)[0], dtype='int64'), dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    # Scatter a 1 at (row, predicted_class) for every sample.
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_ture=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class (0 where the class has no gold members)
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='racall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='precision_f1_semeval')

    # return average f1 score over all classes
    return K.mean(f1_class)
Example 66
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics.py    Apache License 2.0 5 votes vote down vote up
def f1_score_semeval(y_true, y_pred):
    """SemEval-style F1: mean of the F1 scores of classes 0 and 2.

    A one-hot matrix of predictions is built by scattering 1s into a
    SparseTensor at (row, argmax) coordinates, then per-class precision,
    recall and F1 are computed with zero-guarded divisions.
    NOTE(review): tf.select, sparse_tensor_to_dense and the `dim=` kwarg
    are legacy TF-0.x/old-Keras APIs (tf.where / `axis=` in later versions).
    """
    #convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    #y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    #indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
    # Row indices [0..batch) paired with the argmax column per row.
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'), dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    # Scatter a 1 at (row, predicted_class) for every sample.
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    #where y_ture=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    #precision for each class (0 where the class was never predicted)
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred/pred_cnt, name='precision_f1_semeval')

    #recall for each class (0 where the class has no gold members)
    recall = tf.select(K.equal(gold_cnt, 0),  K.zeros_like(y_true_pred),  y_true_pred/gold_cnt, name='racall_f1_semeval')

    #f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0),  K.zeros_like(y_true_pred),  2*(precision*recall)/(precision+recall), name='precision_f1_semeval')

    #average F1 over classes 0 and 2 only (class 1, neutral, is skipped)
    return (f1_class[0] + f1_class[2])/2.0
Example 67
Project: deep-mlsa   Author: spinningbytes   File: evaluation_metrics.py    Apache License 2.0 5 votes vote down vote up
def f1_score_task3(y_true, y_pred):
    """F1 score of class 1 only (Task 3 target class).

    Fixed to match the working sibling implementations in this file
    (f1_score_keras / f1_score_semeval):
    - `K.concatenate(indices_x, indices_y)` passed the second tensor as the
      axis argument; indices are now concatenated as a tuple,
    - `y_pred_ones[:, ...] = 1` attempted item assignment on a tensor, which
      TensorFlow does not support; the one-hot prediction matrix is instead
      built by adding a densified SparseTensor,
    - indices/values/shape now carry the dtypes SparseTensor requires,
    - tf.select with proper tensor branches replaces K.switch with scalar 0.
    """
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # Row indices [0..batch) paired with the argmax column per row.
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'),
                              dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    # Scatter a 1 at (row, predicted_class) for every sample.
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class (0 where the class was never predicted)
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class (0 where the class has no gold members)
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='racall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='precision_f1_semeval')

    # return the F1 of class 1 only
    return f1_class[1]
Example 68
Project: Google-Landmark-Recognition-Retrieval-2019   Author: mayukh18   File: metrics.py    MIT License 5 votes vote down vote up
def accuracy_class(y_true, y_pred):
    # Fraction of samples whose predicted argmax class matches the gold one.
    gold = K.argmax(y_true, axis=1)
    guess = K.argmax(y_pred, axis=1)
    return K.mean(K.equal(gold, guess))
Example 69
Project: Google-Landmark-Recognition-Retrieval-2019   Author: mayukh18   File: metrics.py    MIT License 5 votes vote down vote up
def accuracy_class_numpy(y_true, y_pred):
    # Fraction of rows where predicted and true argmax classes coincide.
    hits = np.argmax(y_true, axis=1) == np.argmax(y_pred, axis=1)
    return np.mean(hits)
Example 70
Project: Google-Landmark-Recognition-Retrieval-2019   Author: mayukh18   File: metrics.py    MIT License 5 votes vote down vote up
def MAP_numpy(y_true, y_pred):
    # Mean average precision over predictions ranked by getOrder.
    matches = np.argmax(y_true, axis=1) == np.argmax(y_pred, axis=1)
    ordered = matches[getOrder(y_pred)]

    n_correct = 0.
    total = 0.
    for rank in range(y_true.shape[0]):
        hit = int(ordered[rank])
        n_correct += hit
        # Precision at this rank contributes only when the item is a hit.
        total += (n_correct / (rank + 1)) * hit
    return total / y_true.shape[0]
Example 71
Project: Google-Landmark-Recognition-Retrieval-2019   Author: mayukh18   File: metrics.py    MIT License 5 votes vote down vote up
def validateMAP(model, valid_x, valid_y):
    """
    Compute (and print) the mean average precision on a validation set.

    :param model: the model to use
    :param valid_x: numpy array of validation images
    :param valid_y: list of landmarks of the validation images
    :return: the MAP value (previously only printed, returning None)
    """
    N = valid_x.shape[0]
    batchsize = 1000
    conf_list = []
    y_pred_list = []
    # Predict in batches to bound memory usage.
    validM = N // batchsize + int(N % batchsize > 0)
    for i in range(validM):
        preds = model.predict(valid_x[i * batchsize:min(N, (i + 1) * batchsize), :, :, :])
        conf_list.extend(list(np.amax(preds, axis=1)))      # confidence of top class
        y_pred_list.extend(list(np.argmax(preds, axis=1)))  # predicted class

    matches = list(np.array(y_pred_list) == np.array(valid_y))

    # Rank predictions by confidence, most confident first.
    order = list(np.argsort(conf_list)[::-1])
    orderedMatches = [matches[o] for o in order]

    correct = 0.
    summ = 0.
    for i in range(len(orderedMatches)):
        correct += int(orderedMatches[i])
        # Precision at rank i contributes only when the item is a hit.
        summ += (correct / (i + 1)) * int(orderedMatches[i])

    print(np.sum(matches))
    print(correct)
    map_value = summ / len(orderedMatches)
    print(map_value)
    return map_value
Example 72
Project: deep-learning-explorer   Author: waspinator   File: metrics.py    Apache License 2.0 5 votes vote down vote up
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    """Pixel accuracy for sparse labels, ignoring the extra "void" class.

    y_true holds integer labels in [0, nb_classes]; label nb_classes (the
    last of nb_classes + 1 one-hot slots) marks positions excluded from
    scoring.  NOTE(review): tf.to_int32/tf.to_float are TF1 APIs (tf.cast
    in TF2).
    """
    nb_classes = KB.int_shape(y_pred)[-1]
    y_pred = KB.reshape(y_pred, (-1, nb_classes))

    # One-hot with one extra slot so the ignore label gets its own channel.
    y_true = KB.one_hot(tf.to_int32(KB.flatten(y_true)),
                        nb_classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    # Legal (scored) positions are those NOT tagged with the extra label.
    legal_labels = ~tf.cast(unpacked[-1], tf.bool)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    # Accuracy = correct legal positions / all legal positions.
    return KB.sum(tf.to_float(legal_labels & KB.equal(KB.argmax(y_true, axis=-1), KB.argmax(y_pred, axis=-1)))) / KB.sum(tf.to_float(legal_labels))
Example 73
Project: DeepLearning-OCR   Author: xingjian-f   File: util.py    Apache License 2.0 5 votes vote down vote up
def one_hot_decoder(data, whole_set):
    """Map each probability vector in *data* to its most likely label in *whole_set*."""
    if data.ndim == 1:  # single un-batched sample (keras quirk)
        data = np.expand_dims(data, 0)
    return [whole_set[int(np.argmax(row))] for row in data]
Example 74
Project: DeepLearning-OCR   Author: xingjian-f   File: util.py    Apache License 2.0 5 votes vote down vote up
def top_one_prob(data):
    """Return the highest probability of each sample's distribution."""
    if data.ndim == 1:  # single un-batched sample (keras quirk)
        data = np.expand_dims(data, 0)
    # the value at the argmax index is simply the row maximum
    return [np.max(row) for row in data]
Example 75
Project: DeepLearning-OCR   Author: xingjian-f   File: util.py    Apache License 2.0 5 votes vote down vote up
def categorical_accuracy_per_sequence(y_true, y_pred):
    """Fraction of sequences whose every timestep is predicted correctly."""
    true_idx = K.argmax(y_true, axis=-1)
    pred_idx = K.argmax(y_pred, axis=-1)
    # a sequence counts only if its minimum per-step match is 1 (all steps correct)
    per_sequence = K.min(K.equal(true_idx, pred_idx), axis=-1)
    return K.mean(per_sequence)
Example 76
Project: keras-contrib   Author: keras-team   File: crf_accuracies.py    MIT License 5 votes vote down vote up
def _get_accuracy(y_true, y_pred, mask, sparse_target=False):
    """Mean token-level accuracy, optionally restricted by a boolean mask."""
    pred_ids = K.argmax(y_pred, -1)
    if sparse_target:
        # sparse labels live in the first (and only) channel
        true_ids = K.cast(y_true[:, :, 0], K.dtype(pred_ids))
    else:
        true_ids = K.argmax(y_true, -1)
    correct = K.cast(K.equal(pred_ids, true_ids), K.floatx())
    if mask is None:
        return K.mean(correct)
    mask_f = K.cast(mask, K.floatx())
    # average only over unmasked positions
    return K.sum(correct * mask_f) / K.sum(mask_f)
Example 77
Project: keras-contrib   Author: keras-team   File: crf_accuracies.py    MIT License 5 votes vote down vote up
def crf_marginal_accuracy(y_true, y_pred):
    """Accuracy using the time-wise marginal argmax as the prediction.

    ``y_pred`` must be an output from a CRF layer built with
    ``learn_mode="marginal"``.
    """
    # Recover the CRF layer and the inbound node that produced y_pred.
    crf_layer, node_idx = y_pred._keras_history[:2]
    node = crf_layer._inbound_nodes[node_idx]
    inputs = node.input_tensors[0]
    input_mask = node.input_masks[0]
    marginals = crf_layer.get_marginal_prob(inputs, input_mask)
    return _get_accuracy(y_true, marginals, input_mask, crf_layer.sparse_target)
Example 78
Project: Fundus_Lesion2018   Author: foamliu   File: utils.py    MIT License 5 votes vote down vote up
def categorical_crossentropy_with_class_rebal(y_true, y_pred):
    """Softmax cross-entropy with per-class rebalancing weights.

    Each pixel's target distribution is scaled by the weight of its
    dominant class, looked up in the module-level ``prior_factor`` table.
    NOTE(review): ``y_pred`` is fed to
    ``softmax_cross_entropy_with_logits_v2`` and is therefore treated as
    logits — confirm the model output is not already softmax-ed.
    """
    flat_true = K.reshape(y_true, (-1, num_classes))
    flat_pred = K.reshape(y_pred, (-1, num_classes))

    # weight of each pixel = prior factor of its argmax class
    dominant = K.argmax(flat_true, axis=1)
    pixel_weights = K.reshape(K.gather(prior_factor, dominant), (-1, 1))
    weighted_true = flat_true * pixel_weights

    loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=weighted_true, logits=flat_pred)
    return K.mean(loss, axis=-1)
Example 79
Project: Fundus_Lesion2018   Author: foamliu   File: utils.py    MIT License 5 votes vote down vote up
def get_best_model():
    """Return the path of the checkpoint with the lowest validation loss.

    Scans ``models/`` for files named ``model.<epoch>-<val_loss>.hdf5``.
    Returns None (and prints a notice) when no checkpoint matches.
    """
    import re
    # raw string: bare '\d' in a normal literal is a DeprecationWarning
    pattern = r'model.(?P<epoch>\d+)-(?P<val_loss>[0-9]*\.?[0-9]*).hdf5'
    p = re.compile(pattern)
    files = [f for f in os.listdir('models/') if p.match(f)]
    filename = None
    if len(files) > 0:
        losses = [float(p.match(f).groups()[1]) for f in files]
        # BUG FIX: the captured group is a *loss*, so the best checkpoint
        # is the one with the minimum value, not the maximum (argmax
        # previously selected the WORST checkpoint).
        best_index = int(np.argmin(losses))
        filename = os.path.join('models', files[best_index])
    print('loading best model: {}'.format(filename))
    return filename
Example 80
Project: digitx   Author: PowerOfDream   File: digitx_model.py    MIT License 5 votes vote down vote up
def test(model, example_num, save_error_image):
    '''
    Evaluate the model on freshly generated examples and print accuracy.

    model            -- trained keras model exposing `predict`
    example_num      -- number of test examples to generate
    save_error_image -- when True, dump misclassified images to ./tmp/
    '''

    X_test, Y_test = cg.gen_batch_examples(example_num, 32, "./font/")
    X_test /= 255.0
    X_test = np.reshape(X_test, (example_num, 32, 32, 1))
    #ohY_test = to_categorical(Y_test, num_classes = 11)

    Y_predict = model.predict(X_test, batch_size = 512)
    # BUG FIX: predict() already returns a numpy array, so take np.argmax
    # directly.  The previous K.argmax + tf.Session().eval() round-trip was
    # needless overhead and breaks under eager-execution TensorFlow.
    result = np.argmax(Y_predict, axis = 1)

    t = Y_test.reshape((example_num,)).astype(int)
    succ = np.sum((t == result))
    print("Accuracy = " + str(succ / len(Y_test)))

    if (save_error_image):
        import os
        if not os.path.exists("./tmp"):
            os.makedirs("./tmp")
        for i in range(len(Y_test)):
            if (Y_test[i] != result[i]):
                # file name encodes: true label - predicted label - index
                fileName = "./tmp/" + str(int(Y_test[i, 0])) + "-" + str(int(result[i])) + "-" + str(i) + ".png"
                pix = X_test[i,:,:,:]
                pix = (pix.reshape(32, 32) * 255.0).astype(np.uint8)

                from PIL import Image
                Image.fromarray(pix, 'L').save(fileName)

    #fea_maps = get_feature_maps(model, X_test)
    #return fea_maps, Y_test