Python keras.backend.clip() Examples

The following are 30 code examples of keras.backend.clip(), each excerpted from an open-source project; the source file, project, and license are noted above every example.
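Most snippets assume the imports of their source files, typically from keras import backend as K, plus import tensorflow as tf and import numpy as np, using the TF1-era standalone keras package. As a minimal hedged sketch of the function itself:

from keras import backend as K

x = K.constant([-2.0, 0.5, 3.0])
clipped = K.clip(x, 0.0, 1.0)   # element-wise max(0.0, min(x, 1.0))
print(K.eval(clipped))          # -> [0.  0.5 1. ]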
Example #1
Source File: training.py    From aiexamples with Apache License 2.0
def softmax_loss(y_true, y_pred):
    """Compute cross entropy loss aka softmax loss.

    # Arguments
        y_true: Ground truth targets,
            tensor of shape (?, num_boxes, num_classes).
        y_pred: Predicted class probabilities (the code clips into
            (eps, 1 - eps) and takes the log, so these are not raw logits),
            tensor of shape (?, num_boxes, num_classes).

    # Returns
        softmax_loss: Softmax loss, tensor of shape (?, num_boxes).
    """
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1. - eps)
    softmax_loss = -tf.reduce_sum(y_true * tf.log(y_pred), axis=-1)
    return softmax_loss 
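As a hedged usage sketch (not part of the original project), a loss with this (y_true, y_pred) signature can be passed straight to compile(); the model here is hypothetical and assumed to output per-box class probabilities:

model.compile(optimizer='adam', loss=softmax_loss)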
Example #2
Source File: fgs.py    From blackbox-attacks with MIT License
def symbolic_fg(x, grad, eps=0.3, clipping=True):
    """
    FG attack
    """
    # Unit vector in direction of gradient
    reduc_ind = list(range(1, len(x.get_shape())))  # Python 3: range replaces Python 2's xrange
    normed_grad = grad / tf.sqrt(tf.reduce_sum(tf.square(grad),
                                                   reduction_indices=reduc_ind,
                                                   keep_dims=True))
    # Multiply by constant epsilon
    scaled_grad = eps * normed_grad

    # Add perturbation to original example to obtain adversarial example
    adv_x = K.stop_gradient(x + scaled_grad)

    if clipping:
        adv_x = K.clip(adv_x, 0, 1)

    return adv_x 
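One hedged way to construct the grad argument (the project's own gen_grad helper is not shown on this page) is to take the symbolic gradient of the loss with respect to the input; model, x, and the one-hot labels y below are assumed TF1-style symbolic tensors:

logits = model(x)
loss = K.categorical_crossentropy(y, logits, from_logits=True)
grad = K.gradients(loss, [x])[0]
adv_x = symbolic_fg(x, grad, eps=0.3)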
Example #3
Source File: fgs.py    From blackbox-attacks with MIT License
def iter_fgs(model, x, y, steps, alpha, eps, clipping=True):
    """
    I-FGSM attack.
    """

    adv_x = x
    # iteratively apply the FGSM with small step size
    for i in range(steps):
        logits = model(adv_x)
        grad = gen_grad(adv_x, logits, y)

        adv_x = symbolic_fgs(adv_x, grad, alpha, True)
        r = adv_x - x
        r = K.clip(r, -eps, eps)  # project the perturbation back into the eps-ball
        adv_x = x + r

    if clipping:
        adv_x = K.clip(adv_x, 0, 1)

    return adv_x 
Example #4
Source File: neural_networks.py    From Quora with MIT License
def recall_score(y_true, y_proba, thres=THRES):
    """
    Recall metric

    Only computes a batch-wise average of recall

    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected
    """
    # get prediction
    y_pred = K.cast(K.greater(y_proba, thres), dtype='float32')
    # calc
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall 
Example #5
Source File: inception_resnet_v2_finetune.py    From bird_species_classification with MIT License
def f1(y_true, y_pred):
    def precision(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    def recall(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))


# Inception_ResNet_V2 model definition 
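As a hedged usage sketch (the model is hypothetical), custom metrics like f1 are passed by reference to compile() and evaluated batch-wise during training:

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy', f1])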
Example #6
Source File: fgs.py    From blackbox-attacks with MIT License
def symbolic_fgs(x, grad, eps=0.3, clipping=True):
    """
    FGSM attack.
    """

    # signed gradient
    normed_grad = K.sign(grad)

    # Multiply by constant epsilon
    scaled_grad = eps * normed_grad

    # Add perturbation to original example to obtain adversarial example
    adv_x = K.stop_gradient(x + scaled_grad)

    if clipping:
        adv_x = K.clip(adv_x, 0, 1)
    return adv_x 
Example #7
Source File: neural_networks.py    From Quora with MIT License
def precision_score(y_true, y_proba, thres=THRES):
    """
    Precision metric

    Only computes a batch-wise average of precision

    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant
    """
    # get prediction
    y_pred = K.cast(K.greater(y_proba, thres), dtype='float32')
    # calc
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision 
Example #8
Source File: breakout_dqn.py    From reinforcement-learning with MIT License
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        # Huber-style loss: quadratic where |error| <= 1, linear beyond
        # (clipping the error splits it into the two regimes)
        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using a convolutional neural network:
    # state is the input and the Q-value of each action is the network's output 
Example #9
Source File: breakout_dueling_ddqn.py    From reinforcement-learning with MIT License
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using a convolutional neural network:
    # state is the input and the Q-value of each action is the network's output;
    # the dueling network's Q-value is the sum of advantages and the state value 
Example #10
Source File: training.py    From aiexamples with Apache License 2.0
def focal_loss(y_true, y_pred, gamma=2, alpha=0.25):
    """Compute focal loss.
    
    # Arguments
        y_true: Ground truth targets,
            tensor of shape (?, num_boxes, num_classes).
        y_pred: Predicted class probabilities (clipped into (eps, 1 - eps)
            before the log, so not raw logits),
            tensor of shape (?, num_boxes, num_classes).
    
    # Returns
        focal_loss: Focal loss, tensor of shape (?, num_boxes).

    # References
        https://arxiv.org/abs/1708.02002
    """
    #y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1. - eps)
    
    pt = tf.where(tf.equal(y_true, 1), y_pred, 1 - y_pred)
    focal_loss = -tf.reduce_sum(alpha * K.pow(1. - pt, gamma) * K.log(pt), axis=-1)
    return focal_loss 
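Because compile() expects a loss with the plain (y_true, y_pred) signature, gamma and alpha can be fixed with a closure first. The wrapper below is a hedged sketch, not part of the original project:

def make_focal_loss(gamma=2, alpha=0.25):
    def loss(y_true, y_pred):
        return focal_loss(y_true, y_pred, gamma=gamma, alpha=alpha)
    return loss

model.compile(optimizer='adam', loss=make_focal_loss(gamma=2, alpha=0.25))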
Example #11
Source File: keras_metrics.py    From vehicle-ReId with Apache License 2.0
def matthews(y_true, y_pred):

    from keras import backend as K
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

    return numerator / (denominator + K.epsilon()) 
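A hedged sanity check against scikit-learn's exact implementation (the values are made up; K.eval runs the symbolic graph):

import numpy as np
from sklearn.metrics import matthews_corrcoef

y_t = np.array([1., 0., 1., 1.], dtype='float32')
y_p = np.array([1., 0., 0., 1.], dtype='float32')
print(matthews_corrcoef(y_t, y_p))                         # exact MCC
print(K.eval(matthews(K.constant(y_t), K.constant(y_p))))  # matches up to K.epsilon()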
Example #12
Source File: wtte.py    From wtte-rnn with MIT License
def loss_function(self, y_true, y_pred):

        y, u, a, b = _keras_split(y_true, y_pred)
        if self.kind == 'discrete':
            loglikelihoods = loglik_discrete(y, u, a, b)
        elif self.kind == 'continuous':
            loglikelihoods = loglik_continuous(y, u, a, b)

        if self.clip_prob is not None:
            # `log` is assumed to be numpy's log, imported in the source file
            loglikelihoods = K.clip(loglikelihoods,
                                    log(self.clip_prob), log(1 - self.clip_prob))
        if self.reduce_loss:
            loss = -1.0 * K.mean(loglikelihoods, axis=-1)
        else:
            loss = -loglikelihoods

        return loss

# For backwards-compatibility 
Example #13
Source File: data_utils.py    From RecurrentGaze with MIT License
def angle_error(gt, pred):
    """
    Average angular error computed by cosine difference
    :param gt: list of ground truth label
    :param pred: list of predicted label
    :return: Average angular error
    """
    vec_gt = angles2vector(gt)
    vec_pred = angles2vector(pred)

    x = K.np.multiply(vec_gt[:, 0], vec_pred[:, 0])
    y = K.np.multiply(vec_gt[:, 1], vec_pred[:, 1])
    z = K.np.multiply(vec_gt[:, 2], vec_pred[:, 2])

    dif = K.np.sum([x, y, z], axis=0) / (tf.norm(vec_gt, axis=1) * tf.norm(vec_pred, axis=1))

    clipped_dif = K.clip(dif, -1.0, 1.0)  # plain floats; np.float() was removed in NumPy 1.24
    loss = (tf.acos(clipped_dif) * 180) / np.pi
    return K.mean(loss, axis=-1) 
Example #14
Source File: data_utils.py    From RecurrentGaze with MIT License
def numpy_angle_error(gt, pred):
    """
    Numpy version of angle_error. Average angular error computed by cosine difference
    :param gt: list of ground truth label
    :param pred: list of predicted label
    :return: Average angular error
    """
    vec_gt = numpy_angles2vector(gt)
    vec_pred = numpy_angles2vector(pred)

    x = np.multiply(vec_gt[:, 0], vec_pred[:, 0])
    y = np.multiply(vec_gt[:, 1], vec_pred[:, 1])
    z = np.multiply(vec_gt[:, 2], vec_pred[:, 2])

    dif = np.sum([x, y, z], axis=0) / (np.linalg.norm(vec_gt, axis=1) * np.linalg.norm(vec_pred, axis=1))

    clipped_dif = np.clip(dif, -1.0, 1.0)  # plain floats; np.float() was removed in NumPy 1.24
    loss = (np.arccos(clipped_dif) * 180) / np.pi
    return np.mean(loss, axis=-1) 
Example #15
Source File: losses.py    From icassp19 with MIT License
def crossentropy_reed_wrap(_beta):
    def crossentropy_reed_core(y_true, y_pred):
        """
        This loss function is proposed in:
        Reed et al. "Training Deep Neural Networks on Noisy Labels with Bootstrapping", 2014

        :param y_true:
        :param y_pred:
        :return:
        """

        # hyper-parameter: bootstrapping weight (debug print kept from the source)
        print(_beta)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # (1) dynamically update the targets based on the current state of the model: bootstrapped target tensor
        # use predicted class proba directly to generate regression targets
        y_true_update = _beta * y_true + (1 - _beta) * y_pred

        # (2) compute loss as always
        _loss = -K.sum(y_true_update * K.log(y_pred), axis=-1)

        return _loss
    return crossentropy_reed_core 
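A hedged usage sketch: the wrapper bakes the bootstrapping weight into a (y_true, y_pred) loss that compile() accepts (the model is hypothetical):

# beta = 0.9: hard labels get weight 0.9, model predictions weight 0.1
model.compile(optimizer='adam', loss=crossentropy_reed_wrap(0.9))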
Example #16
Source File: tf.py    From CSBDeep with BSD 3-Clause "New" or "Revised" License
def tf_normalize_layer(layer, pmin=1, pmax=99.8, clip=True):
    def norm(x,axis):
        return tf_normalize(x, pmin=pmin, pmax=pmax, axis=axis, clip=clip)

    shape = K.int_shape(layer)
    n_channels_out = shape[-1]
    n_dim_out = len(shape)

    if n_dim_out > 4:
        layer = Lambda(lambda x: K.max(x, axis=tuple(range(1,1+n_dim_out-4))))(layer)

    assert 0 < n_channels_out

    if n_channels_out == 1:
        out = Lambda(lambda x: norm(x, axis=(1,2)))(layer)
    elif n_channels_out == 2:
        out = Lambda(lambda x: norm(K.concatenate([x,x[...,:1]], axis=-1), axis=(1,2,3)))(layer)
    elif n_channels_out == 3:
        out = Lambda(lambda x: norm(x, axis=(1,2,3)))(layer)
    else:
        out = Lambda(lambda x: norm(K.max(x, axis=-1, keepdims=True), axis=(1,2,3)))(layer)
    return out 
Example #17
Source File: TrajectoryTools.py    From TrajLib with Apache License 2.0
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        # Recall metric.
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        # Precision metric.
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    # note: no K.epsilon() in the denominator here, so the result is NaN
    # when precision + recall == 0
    return 2 * ((precision * recall) / (precision + recall)) 
Example #18
Source File: utils.py    From Sarcasm-Detection with MIT License
def f1_score(y_true, y_pred):
    # Recall metric. Only computes a batch-wise average of recall,
    # a metric for multi-label classification of how many relevant items are selected.
    def recall(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    # Precision metric. Only computes a batch-wise average of precision,
    # a metric for multi-label classification of how many selected items are relevant.
    def precision(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall))


# This code allows you to see the mislabelled examples 
Example #19
Source File: keras_metrics.py    From talos with MIT License
def matthews(y_true, y_pred):

    from keras import backend as K
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

    return numerator / (denominator + K.epsilon()) 
Example #20
Source File: losses.py    From UnDeepVO with MIT License
def ssim(x, y):
    c1 = 0.01 ** 2
    c2 = 0.03 ** 2

    mu_x = K.mean(x, axis=-1)
    mu_y = K.mean(y, axis=-1)

    sigma_x = K.mean(x ** 2, axis=-1) - mu_x ** 2
    sigma_y = K.mean(y ** 2, axis=-1) - mu_y ** 2
    sigma_xy = K.mean(x * y, axis=-1) - mu_x * mu_y

    ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)
    ssim_d = (mu_x ** 2 + mu_y ** 2 + c1) * (sigma_x + sigma_y + c2)
    ssim_out = ssim_n / ssim_d

    return K.clip((1 - ssim_out) / 2, 0, 1) 
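As a hedged sketch of how such an SSIM term is often combined with an L1 term in image-reconstruction losses; the 0.85 weighting is a common choice in related work, not necessarily what UnDeepVO uses:

def reconstruction_loss(y_true, y_pred, alpha=0.85):
    ssim_term = K.mean(ssim(y_true, y_pred))
    l1_term = K.mean(K.abs(y_true - y_pred))
    return alpha * ssim_term + (1 - alpha) * l1_term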
Example #21
Source File: punctuator.py    From keras-punctuator with MIT License
def fbeta_score(y_true, y_pred, beta=1):
    """Computes the F score.

    The F score is the weighted harmonic mean of precision and recall.
    Here it is only computed as a batch-wise average, not globally.

    This is useful for multi-label classification, where input samples can be
    classified as sets of labels. By only using accuracy (precision) a model
    would achieve a perfect score by simply assigning every class to every
    input. In order to avoid this, a metric should penalize incorrect class
    assignments as well (recall). The F-beta score (ranging from 0.0 to 1.0)
    computes this as a weighted mean of the proportion of correct class
    assignments vs. the proportion of incorrect class assignments.

    With beta = 1, this is equivalent to an F-measure. With beta < 1, assigning
    correct classes becomes more important, and with beta > 1 the metric is
    instead weighted towards penalizing incorrect class assignments.
    """
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')

    # If there are no true positives, fix the F score at 0 like sklearn.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0

    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score 
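As a hedged usage sketch, beta can be fixed with a closure so compile() sees a plain (y_true, y_pred) metric; here beta = 2 weights recall more heavily than precision:

def f2_score(y_true, y_pred):
    return fbeta_score(y_true, y_pred, beta=2)

model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=[f2_score])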
Example #22
Source File: keras_metrics.py    From talos with MIT License
def msle(y_true, y_pred):
    from keras import backend as K
    first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
    second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
    return K.mean(K.square(first_log - second_log), axis=-1) 
Example #23
Source File: keras_metrics.py    From vehicle-ReId with Apache License 2.0
def mape(y_true, y_pred):
    from keras import backend as K
    diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true),
                                            K.epsilon(),
                                            None))
    return 100. * K.mean(diff, axis=-1) 
Example #24
Source File: punctuator.py    From keras-punctuator with MIT License
def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall.

    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall 
Example #25
Source File: punctuator.py    From keras-punctuator with MIT License
def precision(y_true, y_pred):
    """Precision metric.

    Only computes a batch-wise average of precision.

    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision 
Example #26
Source File: keras_metrics.py    From talos with MIT License
def rmsle(y_true, y_pred):
    from keras import backend as K
    first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
    second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
    return K.sqrt(K.mean(K.square(first_log - second_log), axis=-1)) 
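A quick hedged numeric check with made-up values: a perfect prediction gives 0, and because both arguments are log-transformed the metric tracks relative rather than absolute error:

y_true = K.constant([[1.0, 10.0, 100.0]])
y_pred = K.constant([[1.0, 10.0, 100.0]])
print(K.eval(rmsle(y_true, y_pred)))  # -> [0.]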
Example #27
Source File: temporal_mean_rate_theano.py    From snn_toolbox with MIT License
def get_new_mem(self):
        """Add input to membrane potential."""

        # Destroy impulse if in refractory period
        masked_impulse = self.impulse if self.tau_refrac == 0 else \
            k.T.set_subtensor(
                self.impulse[k.T.nonzero(self.refrac_until > self.time)], 0.)

        # Add impulse (`clamp_var` and `v_clip` are module-level config
        # flags defined elsewhere in the source file)
        if clamp_var:
            # Experimental: Clamp the membrane potential to zero until the
            # presynaptic neurons fire at their steady-state rates. This helps
            # avoid a transient response.
            new_mem = theano.ifelse.ifelse(
                k.less(k.mean(self.var), 1e-4) +
                k.greater(self.time, self.duration / 2),
                self.mem + masked_impulse, self.mem)
        elif hasattr(self, 'clamp_idx'):
            # Set clamp-duration by a specific delay from layer to layer.
            new_mem = theano.ifelse.ifelse(k.less(self.time, self.clamp_idx),
                                           self.mem, self.mem + masked_impulse)
        elif v_clip:
            # Clip membrane potential to prevent too strong accumulation.
            new_mem = k.clip(self.mem + masked_impulse, -3, 3)
        else:
            new_mem = self.mem + masked_impulse

        if self.config.getboolean('cell', 'leak'):
            # Todo: Implement more flexible version of leak!
            new_mem = k.T.inc_subtensor(
                new_mem[k.T.nonzero(k.T.gt(new_mem, 0))], -0.1 * self.dt)

        return new_mem 
Example #28
Source File: fastfilter_utilities.py    From ASKCOS with Mozilla Public License 2.0
def real_pos(y_true, y_pred):
	# `gt` is assumed to be an element-wise greater-than helper
	# (e.g. keras.backend.greater) imported in the source file
	real_pos_ct = K.sum(gt((K.clip(y_true, 0, 1)), 0.5))
	return real_pos_ct 
Example #29
Source File: keras_metrics.py    From talos with MIT License
def precision(y_true, y_pred):

    from keras import backend as K
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision 
Example #30
Source File: fastfilter_utilities.py    From ASKCOS with Mozilla Public License 2.0
def true_pos(y_true, y_pred):
	# `gt` as above: an element-wise greater-than helper from the source file
	true_pos_ct = K.sum(gt((K.clip(y_pred * y_true, 0, 1)), 0.5))
	return true_pos_ct