Python keras.backend.clip() Examples

The following are code examples showing how to use keras.backend.clip(). They are drawn from open source Python projects.
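Before the project examples, here is a minimal sketch of the call itself, assuming Keras 2 with the TensorFlow backend: keras.backend.clip(x, min_value, max_value) clamps every element of the tensor x to the interval [min_value, max_value].

from keras import backend as K
import numpy as np

x = K.constant(np.array([-2.0, 0.3, 1.7]))   # sample tensor
clipped = K.clip(x, 0.0, 1.0)                # element-wise clamp to [0, 1]
print(K.eval(clipped))                       # approximately [0.  0.3 1. ]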

Example 1
Project: blackbox-attacks   Author: sunblaze-ucb   File: fgs.py    MIT License
def symbolic_fgs(x, grad, eps=0.3, clipping=True):
    """
    FGSM attack.
    """

    # signed gradient
    normed_grad = K.sign(grad)

    # Multiply by constant epsilon
    scaled_grad = eps * normed_grad

    # Add perturbation to original example to obtain adversarial example
    adv_x = K.stop_gradient(x + scaled_grad)

    if clipping:
        adv_x = K.clip(adv_x, 0, 1)
    return adv_x 
Example 2
Project: blackbox-attacks   Author: sunblaze-ucb   File: fgs.py    MIT License
def symbolic_fg(x, grad, eps=0.3, clipping=True):
    """
    FG attack
    """
    # Unit vector in direction of gradient
    reduc_ind = list(xrange(1, len(x.get_shape())))
    normed_grad = grad / tf.sqrt(tf.reduce_sum(tf.square(grad),
                                                   reduction_indices=reduc_ind,
                                                   keep_dims=True))
    # Multiply by constant epsilon
    scaled_grad = eps * normed_grad

    # Add perturbation to original example to obtain adversarial example
    adv_x = K.stop_gradient(x + scaled_grad)

    if clipping:
        adv_x = K.clip(adv_x, 0, 1)

    return adv_x 
Example 3
Project: blackbox-attacks   Author: sunblaze-ucb   File: fgs.py    MIT License
def iter_fgs(model, x, y, steps, alpha, eps, clipping=True):
    """
    I-FGSM attack.
    """

    adv_x = x
    # iteratively apply the FGSM with small step size
    for i in range(steps):
        logits = model(adv_x)
        grad = gen_grad(adv_x, logits, y)

        adv_x = symbolic_fgs(adv_x, grad, alpha, True)
        r = adv_x - x
        r = K.clip(r, -eps, eps)
        adv_x = x+r

    if clipping:
        adv_x = K.clip(adv_x, 0, 1)


    return adv_x 
Example 4
Project: cbc_networks   Author: saralajew   File: reasoning_layers.py    BSD 3-Clause "New" or "Revised" License
def build(self, input_shape):
        self.input_spec = InputSpec(shape=(None,) + tuple(input_shape[1:]))

        # encoded trainable tensors
        self.reasoning_probabilities = self.add_weight(
            shape=(2,
                   self.n_replicas,
                   input_shape[-1],
                   self.n_classes),
            initializer=self.reasoning_initializer,
            regularizer=self.reasoning_regularizer,
            constraint=lambda x: K.clip(x, 0., 1.),
            name='reasoning_probabilities')

        if self.use_component_probabilities:
            self.component_probabilities = self.add_weight(
                shape=(1, input_shape[-1], 1),
                initializer=self.component_probabilities_initializer,
                regularizer=self.component_probabilities_regularizer,
                constraint=self.component_probabilities_constraint,
                name='component_probabilities')

        self.built = True 
Example 5
Project: reinforcement-learning-kr   Author: rlcode   File: breakout_dqn.py    MIT License
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        prediction = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(prediction * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # build a neural network whose input is the state and whose output is the Q-function
Example 6
Project: reinforcement-learning   Author: buyizhiyou   File: breakout_ddqn.py    MIT License
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network 
Example 7
Project: reinforcement-learning   Author: buyizhiyou   File: breakout_dqn.py    MIT License
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network 
Example 8
Project: reinforcement-learning   Author: buyizhiyou   File: breakout_dueling_ddqn.py    MIT License
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network
    # dueling network's Q Value is sum of advantages and state value 
Example 9
Project: reinforcement-learning   Author: rlcode   File: breakout_ddqn.py    MIT License
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network 
Example 10
Project: reinforcement-learning   Author: rlcode   File: breakout_dqn.py    MIT License
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network 
Example 11
Project: reinforcement-learning   Author: rlcode   File: breakout_dueling_ddqn.py    MIT License
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network
    # dueling network's Q Value is sum of advantages and state value 
Example 12
Project: wtte-rnn   Author: ragulpr   File: wtte.py    MIT License
def loss_function(self, y_true, y_pred):

        y, u, a, b = _keras_split(y_true, y_pred)
        if self.kind == 'discrete':
            loglikelihoods = loglik_discrete(y, u, a, b)
        elif self.kind == 'continuous':
            loglikelihoods = loglik_continuous(y, u, a, b)

        if self.clip_prob is not None:
            loglikelihoods = K.clip(loglikelihoods, 
                log(self.clip_prob), log(1 - self.clip_prob))
        if self.reduce_loss:
            loss = -1.0 * K.mean(loglikelihoods, axis=-1)
        else:
            loss = -loglikelihoods

        return loss

# For backwards-compatibility 
Example 13
Project: RecurrentGaze   Author: crisie   File: data_utils.py    MIT License
def angle_error(gt, pred):
    """
    Average angular error computed by cosine difference
    :param gt: list of ground truth label
    :param pred: list of predicted label
    :return: Average angular error
    """
    vec_gt = angles2vector(gt)
    vec_pred = angles2vector(pred)

    x = K.np.multiply(vec_gt[:, 0], vec_pred[:, 0])
    y = K.np.multiply(vec_gt[:, 1], vec_pred[:, 1])
    z = K.np.multiply(vec_gt[:, 2], vec_pred[:, 2])

    dif = K.np.sum([x, y, z], axis=0) / (tf.norm(vec_gt, axis=1) * tf.norm(vec_pred, axis=1))

    clipped_dif = K.clip(dif, np.float(-1.0), np.float(1.0))
    loss = (tf.acos(clipped_dif) * 180) / np.pi
    return K.mean(loss, axis=-1) 
Example 14
Project: RecurrentGaze   Author: crisie   File: data_utils.py    MIT License
def numpy_angle_error(gt, pred):
    """
    Numpy version of angle_error. Average angular error computed by cosine difference
    :param gt: list of ground truth label
    :param pred: list of predicted label
    :return: Average angular error
    """
    vec_gt = numpy_angles2vector(gt)
    vec_pred = numpy_angles2vector(pred)

    x = np.multiply(vec_gt[:, 0], vec_pred[:, 0])
    y = np.multiply(vec_gt[:, 1], vec_pred[:, 1])
    z = np.multiply(vec_gt[:, 2], vec_pred[:, 2])

    dif = np.sum([x, y, z], axis=0) / (np.linalg.norm(vec_gt, axis=1) * np.linalg.norm(vec_pred, axis=1))

    clipped_dif = np.clip(dif, np.float(-1.0), np.float(1.0))
    loss = (np.arccos(clipped_dif) * 180) / np.pi
    return np.mean(loss, axis=-1) 
Example 15
Project: SPECT-CT-Seg-ResUNet-Keras   Author: junyuchen245   File: custom_losses.py    MIT License
def exp_dice_loss(exp=1.0):
    """
    :param exp: exponent. 1.0 for no exponential effect, i.e. log Dice.
    """

    def inner(y_true, y_pred):
        """Computes the average exponential log Dice coefficients as the loss function.
        :param y_true: one-hot tensor multiplied by label weights, (batch size, number of pixels, number of labels).
        :param y_pred: softmax probabilities, same shape as y_true. Each probability serves as a partial volume.
        :return: average exponential log Dice coefficient.
        """

        dice = dice_coef(y_true, y_pred)
        #dice = generalized_dice(y_true, y_pred, exp)
        dice = K.clip(dice, K.epsilon(), 1 - K.epsilon())  # As log is used
        dice = K.pow(-K.log(dice), exp)
        if K.ndim(dice) == 2:
            dice = K.mean(dice, axis=-1)
        return dice

    return inner 
Example 16
Project: Voiceprint-Recognition   Author: SunYanCN   File: kws.py    Apache License 2.0
def proress(x_train):
    x_train = x_train[:, :, 1:]
    # expand on channel axis because we only have one channel
    x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], x_train.shape[2], 1))
    print('x_train shape:', x_train.shape, 'max', x_train.max(), 'min', x_train.min())

    # fake quantised
    # instead of using the maximum value for quantisation, we allow some saturation to preserve more detail in small values.
    quantise_factor = pow(2, 4)
    print("quantised by", quantise_factor)
    x_train = (x_train / quantise_factor)
    # saturation to -1 to 1
    x_train = np.clip(x_train, -1, 1)
    # -1 to 1 quantised to 256 level (8bit)
    x_train = (x_train * 128).round() / 128
    print('quantised', 'x_train shape:', x_train.shape, 'max', x_train.max(), 'min', x_train.min())
    return x_train 
Example 17
Project: AutoNlp   Author: upwindflys   File: model.py    MIT License
def categorical_focal_loss_fixed(y_true, y_pred):
    """
        :param y_true: A tensor of the same shape as `y_pred`
        :param y_pred: A tensor resulting from a softmax
        :return: Output tensor.
        """

    gamma = 2.
    alpha = .25
    # Scale predictions so that the class probas of each sample sum to 1
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)

    # Clip the prediction value to prevent NaN's and Inf's
    epsilon = K.epsilon()
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)

    # Calculate Cross Entropy
    cross_entropy = -y_true * K.log(y_pred)

    # Calculate Focal Loss
    loss = alpha * K.pow(1 - y_pred, gamma) * cross_entropy

    # Sum the losses in mini_batch
    return K.sum(loss, axis=1) 
Example 18
Project: icassp19   Author: edufonseca   File: losses.py    MIT License
def crossentropy_reed_wrap(_beta):
    def crossentropy_reed_core(y_true, y_pred):
        """
        This loss function is proposed in:
        Reed et al. "Training Deep Neural Networks on Noisy Labels with Bootstrapping", 2014

        :param y_true:
        :param y_pred:
        :return:
        """

        # hyper param
        print(_beta)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # (1) dynamically update the targets based on the current state of the model: bootstrapped target tensor
        # use predicted class proba directly to generate regression targets
        y_true_update = _beta * y_true + (1 - _beta) * y_pred

        # (2) compute loss as always
        _loss = -K.sum(y_true_update * K.log(y_pred), axis=-1)

        return _loss
    return crossentropy_reed_core 
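A hedged usage sketch for the wrapper above: the outer function returns a (y_true, y_pred) closure, so it can be passed directly to model.compile(). The toy model, optimizer and beta value below are illustrative assumptions, not taken from the icassp19 project.

from keras.models import Sequential
from keras.layers import Dense

# hypothetical softmax classifier; only the compile() call matters here
model = Sequential([Dense(10, activation='softmax', input_shape=(20,))])
model.compile(optimizer='adam',
              loss=crossentropy_reed_wrap(0.8),  # beta=0.8 chosen only for illustration
              metrics=['accuracy'])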
Example 19
Project: cdc   Author: ckbjimmy   File: EmbCRNN.py    MIT License
def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision 
Example 20
Project: cdc   Author: ckbjimmy   File: EmbCRNN.py    MIT License
def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall 
Example 21
Project: cdc   Author: ckbjimmy   File: EmbCRNN.py    MIT License
def fbeta_score(y_true, y_pred, beta=1):
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score 
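Assuming the precision, recall and fbeta_score functions above are in scope, a hedged sketch of how such batch-wise metrics are typically registered; the model itself is hypothetical, and Keras evaluates each metric per batch during fit() and evaluate().

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, activation='sigmoid', input_shape=(16,))])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[precision, recall, fbeta_score])  # evaluated batch-wise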
Example 22
Project: cdc   Author: ckbjimmy   File: EmbGRUattention.py    MIT License
def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision 
Example 23
Project: cdc   Author: ckbjimmy   File: EmbGRUattention.py    MIT License
def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall 
Example 24
Project: CalibrationNN   Author: Andres-Hernandez   File: neural_network.py    GNU General Public License v3.0
def logarithmic_mean_squared_error(y_true, y_pred):
    return -K.mean(K.log(1.-K.clip(K.square(y_pred-y_true),0., 1.-K.epsilon())))

#_paper 
Example 25
Project: fancy-cnn   Author: textclf   File: embeddings.py    MIT License
def __call__(self, p):
        if self.skip:
            return self.s * (p / K.clip(K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True)), 0.5, 100))
        return self.s * (p / K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True))) 
Example 26
Project: fancy-cnn   Author: textclf   File: embeddings.py    MIT License
def __call__(self, p):
        if self.skip:
            return self.s * (p / K.clip(K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True)), 0.5, 100))
        return self.s * (p / K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True))) 
Example 27
Project: blackbox-attacks   Author: sunblaze-ucb   File: query_based_attack.py    MIT License
def overall_grad_est(j, logits, prediction, x, curr_sample, curr_target, 
                        p_t, random_indices, num_groups, U=None):
    basis_vec = np.zeros((BATCH_SIZE, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))

    if PCA_FLAG == False:
        if j != num_groups-1:
            curr_indices = random_indices[j*args.group_size:(j+1)*args.group_size]
        elif j == num_groups-1:
            curr_indices = random_indices[j*args.group_size:]
        row = curr_indices/FLAGS.IMAGE_COLS
        col = curr_indices % FLAGS.IMAGE_COLS
        for i in range(len(curr_indices)):
            basis_vec[:, row[i], col[i]] = 1.

    elif PCA_FLAG == True:
        basis_vec[:] = U[:,j].reshape((1, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
        # basis_vec = np.sign(basis_vec)

    x_plus_i = np.clip(curr_sample + args.delta * basis_vec, CLIP_MIN, CLIP_MAX)
    x_minus_i = np.clip(curr_sample - args.delta * basis_vec, CLIP_MIN, CLIP_MAX)

    if args.loss_type == 'cw':
        logit_t_grad_est, logit_max_grad_est = CW_est(logits, x, x_plus_i,
                                        x_minus_i, curr_sample, curr_target)
        if '_un' in args.method:
            single_grad_est = logit_t_grad_est - logit_max_grad_est
        else:
            single_grad_est = logit_max_grad_est - logit_t_grad_est
    elif args.loss_type == 'xent':
        single_grad_est = xent_est(prediction, x, x_plus_i, x_minus_i, curr_target)

    return single_grad_est 
Example 28
Project: cbc_networks   Author: saralajew   File: constraints.py    BSD 3-Clause "New" or "Revised" License
def __call__(self, w):
        w = K.clip(w, self.min_value, self.max_value)
        return w 
Example 29
Project: cbc_networks   Author: saralajew   File: constraints.py    BSD 3-Clause "New" or "Revised" License
def clip(w):
    return Clip()(w) 
Example 30
Project: cbc_networks   Author: saralajew   File: reasoning_layers.py    BSD 3-Clause "New" or "Revised" License
def build(self, input_shape):
        self.input_spec = InputSpec(shape=(None,) + tuple(input_shape[1:]))

        # define kernel_size as full-image if not provided
        if self.kernel_size is None:
            self.kernel_size = input_shape[1:3]

        kernel_shape = (2,) \
                       + self.kernel_size \
                       + (input_shape[-1], self.n_classes * self.n_replicas)

        # encoded trainable tensors
        self.reasoning_probabilities = self.add_weight(
            shape=kernel_shape,
            initializer=self.reasoning_initializer,
            regularizer=self.reasoning_regularizer,
            constraint=lambda x: K.clip(x, 0., 1.),
            name='reasoning_probabilities')

        if self.use_pixel_probabilities:
            self.pixel_probabilities = self.add_weight(
                shape=self.kernel_size + (1, self.n_classes * self.n_replicas),
                initializer=self.pixel_probabilities_initializer,
                regularizer=self.pixel_probabilities_regularizer,
                constraint=self.pixel_probabilities_constraint,
                name='pixel_probabilities')

        if self.use_component_probabilities:
            self.component_probabilities = self.add_weight(
                shape=(1, 1, input_shape[-1], 1),
                initializer=self.component_probabilities_initializer,
                regularizer=self.component_probabilities_regularizer,
                constraint=self.component_probabilities_constraint,
                name='component_probabilities')

        self.built = True 
Example 31
Project: 360_aware_saliency   Author: MikhailStartsev   File: gaussian_prior.py    GNU General Public License v3.0
def call(self, x, mask=None):
        mu_x = self.W[:self.nb_gaussian]
        mu_y = self.W[self.nb_gaussian:self.nb_gaussian*2]
        sigma_x = self.W[self.nb_gaussian*2:self.nb_gaussian*3]
        sigma_y = self.W[self.nb_gaussian*3:]

        self.b_s = x.shape[0]
        self.height = x.shape[2]
        self.width = x.shape[3]

        e = self.height / self.width
        e1 = (1 - e) / 2
        e2 = e1 + e

        mu_x = K.clip(mu_x, 0.25, 0.75)
        mu_y = K.clip(mu_y, 0.35, 0.65)

        sigma_x = K.clip(sigma_x, 0.1, 0.9)
        sigma_y = K.clip(sigma_y, 0.2, 0.8)

        x_t = T.dot(T.ones((self.height, 1)), self._linspace(0, 1.0, self.width).dimshuffle('x', 0))
        y_t = T.dot(self._linspace(e1, e2, self.height).dimshuffle(0, 'x'), T.ones((1, self.width)))

        x_t = K.repeat_elements(K.expand_dims(x_t, dim=-1), self.nb_gaussian, axis=-1)
        y_t = K.repeat_elements(K.expand_dims(y_t, dim=-1), self.nb_gaussian, axis=-1)

        gaussian = 1 / (2 * np.pi * sigma_x * sigma_y + K.epsilon()) * \
                   T.exp(-((x_t - mu_x) ** 2 / (2 * sigma_x ** 2 + K.epsilon()) +
                           (y_t - mu_y) ** 2 / (2 * sigma_y ** 2 + K.epsilon())))

        gaussian = K.permute_dimensions(gaussian, (2, 0, 1))
        max_gauss = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(gaussian, axis=1), axis=1)), self.height, axis=-1)), self.width, axis=-1)
        gaussian = gaussian / max_gauss

        output = K.repeat_elements(K.expand_dims(gaussian, dim=0), self.b_s, axis=0)

        return output 
Example 32
Project: musaic   Author: al165   File: MetaEmbedding.py    GNU General Public License v3.0
def __call__(self, x):
        clipped_x = K.clip(x, 10**-7, 1-10**-7)
        return (-K.sum(clipped_x*K.log(clipped_x)) / self.max) * self.factor 
Example 33
Project: ycml   Author: skylander86   File: neural_networks.py    Apache License 2.0
def keras_f1_score(y_true, y_pred):
    '''Calculates the F score, the weighted harmonic mean of precision and recall.
    This is useful for multi-label classification, where input samples can be
    classified as sets of labels. By only using accuracy (precision) a model
    would achieve a perfect score by simply assigning every class to every
    input. In order to avoid this, a metric should penalize incorrect class
    assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)
    computes this, as a weighted mean of the proportion of correct class
    assignments vs. the proportion of incorrect class assignments.
    With beta = 1, this is equivalent to a F-measure. With beta < 1, assigning
    correct classes becomes more important, and with beta > 1 the metric is
    instead weighted towards penalizing incorrect class assignments.
    '''
    # If there are no true positives, fix the F score at 0 like sklearn.

    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0

    beta = 1.0

    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    p = true_positives / (predicted_positives + K.epsilon())

    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    r = true_positives / (possible_positives + K.epsilon())

    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())

    return fbeta_score
#end def 
Example 34
Project: CapsNet-Fashion-MNIST   Author: subarnop   File: capsulelayers.py    GNU General Public License v3.0
def call(self, inputs, **kwargs):
        # use true label to select target capsule, shape=[batch_size, num_capsule]
        if type(inputs) is list:  # true label is provided with shape = [batch_size, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of vectors of capsules
            x = inputs
            # Enlarge the range of values in x to make max(new_x)=1 and others < 0
            x = (x - K.max(x, 1, True)) / K.epsilon() + 1
            mask = K.clip(x, 0, 1)  # the max value in x clipped to 1 and other to 0

        # masked inputs, shape = [batch_size, dim_vector]
        inputs_masked = K.batch_dot(inputs, mask, [1, 1])
        return inputs_masked 
Example 35
Project: DeepLearn   Author: GauravBh1010tt   File: layers.py    MIT License
def call(self, x, mask=None):
        h1 = x[0]
        h2 = x[1]    
        dif = K.sum(K.abs(h1-h2),axis=1)  
        h = K.exp(-dif)
        #print h.shape
        h=K.clip(h,1e-7,1.0-1e-7)
        h = K.reshape(h, (h.shape[0],1))
        return h 
Example 36
Project: DeepLearn   Author: GauravBh1010tt   File: layers.py    MIT License
def mse(y_true, y_pred):
    #print y_true.shape.eval(),y_pred.shape.eval()
    y_true=K.clip(y_true,1e-7,1.0-1e-7)    
    return K.mean(K.square(y_pred - y_true), axis=-1) 
Example 37
Project: gccaps   Author: tqbl   File: capsnet.py    MIT License
def _merge(inputs):
    """Merge the given pair of inputs across the temporal dimension.

    Args:
        inputs (list): Pair of inputs to merge. Each input should be a
            T x L Keras tensor (excluding batch dimension), where T is
            the temporal dimension and L is the number of classes.

    Returns:
        A Keras tensor (vector) of length L.
    """
    caps, att = inputs
    att = K.clip(att, K.epsilon(), 1.)
    return K.sum(caps * att, axis=1) / K.sum(att, axis=1) 
Example 38
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0
def FScore2(y_true, y_pred):
    '''
    The F score, beta=2
    '''
    B2 = K.variable(4)
    OnePlusB2 = K.variable(5)
    pred = K.round(y_pred)
    tp = K.sum(K.cast(K.less(K.abs(pred - K.clip(y_true, .5, 1.)), 0.01), 'float32'), -1)
    fp = K.sum(K.cast(K.greater(pred - y_true, 0.1), 'float32'), -1)
    fn = K.sum(K.cast(K.less(pred - y_true, -0.1), 'float32'), -1)

    f2 = OnePlusB2 * tp / (OnePlusB2 * tp + B2 * fn + fp)

    return K.mean(f2) 
Example 39
Project: spektral   Author: danielegrattarola   File: base.py    MIT License
def call(self, inputs):
        F = K.int_shape(inputs)[-1]
        minkowski_prod_mat = np.eye(F)
        minkowski_prod_mat[-1, -1] = -1.
        minkowski_prod_mat = K.constant(minkowski_prod_mat)
        output = K.dot(inputs, minkowski_prod_mat)
        output = K.dot(output, K.transpose(inputs))
        output = K.clip(output, -10e9, -1.)

        if self.activation is not None:
            output = self.activation(output)

        return output 
Example 40
Project: deepJDOT   Author: bbdamodaran   File: dnn.py    MIT License
def __call__(self, p):
        desired = K.clip(p, -self.m, self.m)
        return desired 
Example 41
Project: RLDonkeycar   Author: downingbots   File: RLPPO.py    MIT License
def proximal_policy_optimization_loss(advantage, old_prediction):
    def loss(y_true, y_pred):
        prob = K.sum(y_true * y_pred)
        old_prob = K.sum(y_true * old_prediction)
        r = prob/(old_prob + 1e-10)

        return -K.mean(K.minimum(r * advantage, K.clip(r, min_value=1 - LOSS_CLIPPING, max_value=1 + LOSS_CLIPPING) * advantage)) + ENTROPY_LOSS * (prob * K.log(prob + 1e-10))
    return loss 
Example 42
Project: RLDonkeycar   Author: downingbots   File: RLPPO.py    MIT License
def proximal_policy_optimization_loss_continuous(advantage, old_prediction):
    def loss(y_true, y_pred):
        var = K.square(NOISE)
        pi = 3.1415926
        denom = K.sqrt(2 * pi * var)
        prob_num = K.exp(- K.square(y_true - y_pred) / (2 * var))
        old_prob_num = K.exp(- K.square(y_true - old_prediction) / (2 * var))

        prob = prob_num/denom
        old_prob = old_prob_num/denom
        r = prob/(old_prob + 1e-10)

        return -K.mean(K.minimum(r * advantage, K.clip(r, min_value=1 - LOSS_CLIPPING, max_value=1 + LOSS_CLIPPING) * advantage))
    return loss 
Example 43
Project: brats2017   Author: QTIM-Lab   File: model.py    MIT License
def image_categorical_crossentropy(y_true, y_pred):  # compute cross-entropy on 4D tensors
    y_pred = K.clip(y_pred,  1e-5, 1 -  1e-5)
    return -K.mean(y_true * K.log(y_pred) + (1 - y_true) * K.log(1 - y_pred)) 
Example 44
Project: perceptron-benchmark   Author: advboxes   File: keras.py    Apache License 2.0
def _to_logits(self, predictions):
        from keras import backend as K
        eps = 10e-8
        predictions = K.clip(predictions, eps, 1 - eps)
        predictions = K.log(predictions)
        return predictions 
Example 45
Project: SpineFinder   Author: jfm15   File: keras_weighted_categorical_crossentropy.py    GNU General Public License v3.0
def weighted_categorical_crossentropy(weights):
    """
    A weighted version of keras.objectives.categorical_crossentropy

    Variables:
        weights: numpy array of shape (C,) where C is the number of classes

    Usage:
        weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.
        loss = weighted_categorical_crossentropy(weights)
        model.compile(loss=loss,optimizer='adam')
    """

    weights = K.variable(weights)

    def loss(y_true, y_pred):
        # y_true = K.print_tensor(y_true, message='y_true = ')
        # y_pred = K.print_tensor(y_pred, message='y_pred = ')
        # scale predictions so that the class probas of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaN's and Inf's
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # calc
        loss = y_true * K.log(y_pred) * weights
        loss = -K.sum(loss, -1)
        return loss

    return loss 
Example 46
Project: Voiceprint-Recognition   Author: SunYanCN   File: run.py    Apache License 2.0
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision
    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon())) 
Example 47
Project: Voiceprint-Recognition   Author: SunYanCN   File: kws.py    Apache License 2.0
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision
    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon())) 
Example 48
Project: Voiceprint-Recognition   Author: SunYanCN   File: r_model.py    Apache License 2.0
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision
    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon())) 
Example 49
Project: Voiceprint-Recognition   Author: SunYanCN   File: nni_speaker.py    Apache License 2.0
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision
    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon())) 
Example 50
Project: DeepCAGE   Author: kimmo1019   File: 4.Classification.py    MIT License
def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision 
Example 51
Project: DeepCAGE   Author: kimmo1019   File: 4.Classification.py    MIT License
def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall 
Example 52
Project: DeepCAGE   Author: kimmo1019   File: 5.Regression.py    MIT License
def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision 
Example 53
Project: DeepCAGE   Author: kimmo1019   File: 5.Regression.py    MIT License
def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall 
Example 54
Project: CapsNet-Collections   Author: Jiankai-Sun   File: capsulelayers.py    GNU General Public License v3.0
def call(self, inputs, **kwargs):
        # use true label to select target capsule, shape=[batch_size, num_capsule]
        if type(inputs) is list:  # true label is provided with shape = [batch_size, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of vectors of capsules
            x = inputs
            # Enlarge the range of values in x to make max(new_x)=1 and others < 0
            x = (x - K.max(x, 1, True)) / K.epsilon() + 1
            mask = K.clip(x, 0, 1)  # the max value in x clipped to 1 and other to 0

        # masked inputs, shape = [batch_size, dim_vector]
        inputs_masked = K.batch_dot(inputs, mask, [1, 1])
        return inputs_masked 
Example 55
Project: CFP_NeuralNetwork   Author: BreezeWhite   File: Statistics.py    GNU General Public License v3.0
def Precision(label,pred):
    true_positives = K.sum(K.round(K.clip(label * pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(pred, 0, 1)))
    precision = true_positives / predicted_positives
    return precision 
Example 56
Project: CFP_NeuralNetwork   Author: BreezeWhite   File: Statistics.py    GNU General Public License v3.0
def Recall(label,pred):
    true_positives = K.sum(K.round(K.clip(label * pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(label, 0, 1)))
    recall = true_positives / possible_positives
    return recall 
Example 57
Project: CFP_NeuralNetwork   Author: BreezeWhite   File: Statistics.py    GNU General Public License v3.0
def Fscore(label,pred):
    true_positives = K.sum(K.round(K.clip(label * pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(pred, 0, 1)))
    precision = true_positives / predicted_positives
    
    possible_positives = K.sum(K.round(K.clip(label, 0, 1)))
    recall = true_positives / possible_positives
    
    fscore = 2*precision*recall / (precision+recall)
    
    return fscore 
Example 58
Project: icassp19   Author: edufonseca   File: losses.py    MIT License
def crossentropy_max_wrap(_m):
    def crossentropy_max_core(y_true, y_pred):
        """
        This function is based on the one proposed in
        Il-Young Jeong and Hyungui Lim, "AUDIO TAGGING SYSTEM FOR DCASE 2018: FOCUSING ON LABEL NOISE,
         DATA AUGMENTATION AND ITS EFFICIENT LEARNING", Tech Report, DCASE 2018
        https://github.com/finejuly/dcase2018_task2_cochlearai

        :param y_true:
        :param y_pred:
        :return:
        """

        # hyper param
        print(_m)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)

        # threshold
        t_m = K.max(_loss) * _m
        _mask_m = 1 - (K.cast(K.greater(_loss, t_m), 'float32'))
        _loss = _loss * _mask_m

        return _loss
    return crossentropy_max_core 
Example 59
Project: icassp19   Author: edufonseca   File: losses.py    MIT License
def crossentropy_outlier_wrap(_l):
    def crossentropy_outlier_core(y_true, y_pred):

        # hyper param
        print(_l)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)

        def _get_real_median(_v):
            """
            given a tensor with shape (batch_size,), compute and return the median

            :param v:
            :return:
            """
            _val = tf.nn.top_k(_v, 33).values
            return 0.5 * (_val[-1] + _val[-2])

        _mean_loss, _var_loss = tf.nn.moments(_loss, axes=[0])
        _median_loss = _get_real_median(_loss)
        _std_loss = tf.sqrt(_var_loss)

        # threshold
        t_l = _median_loss + _l*_std_loss
        _mask_l = 1 - (K.cast(K.greater(_loss, t_l), 'float32'))
        _loss = _loss * _mask_l

        return _loss
    return crossentropy_outlier_core



#########################################################################
# from here on we distinguish data points in the batch, based on its origin
# we only apply robustness measures to the data points coming from the noisy subset
# Therefore, the next functions are used only when training with the entire train set
######################################################################### 
Example 60
Project: ColiCoords   Author: Jhsmit   File: losses.py    MIT License
def weighted_bce_loss(y_true, y_pred, weight):
    # avoiding overflow
    epsilon = 1e-7
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    logit_y_pred = K.log(y_pred / (1. - y_pred))

    # https://www.tensorflow.org/api_docs/python/tf/nn/weighted_cross_entropy_with_logits
    loss = (1. - y_true) * logit_y_pred + (1. + (weight - 1.) * y_true) * \
                                          (K.log(1. + K.exp(-K.abs(logit_y_pred))) + K.maximum(-logit_y_pred, 0.))
    return K.sum(loss) / K.sum(weight) 
Example 61
Project: pyDNAbinding   Author: nboley   File: keraslib.py    GNU General Public License v2.0
def get_output(self, train=False):
        print "LogAnyBoundOcc", self.output_shape
        X = self.get_input(train)
        log_none_bnd = K.sum(
            K.log(1-K.clip(K.exp(X), 1e-6, 1-1e-6)), axis=3, keepdims=True)
        at_least_1_bnd = 1-K.exp(log_none_bnd)
        max_occ = K.max(K.exp(X), axis=3, keepdims=True)
        # we take the weighted sum because the max is easier to fit, and 
        # thus this helps to regularize the optimization procedure
        rv = K.log(0.05*max_occ + 0.95*at_least_1_bnd)
        return rv 
Example 62
Project: tfbs-prediction   Author: aniketk21   File: ffnn_train.py    MIT License
def precision(y_true, y_pred):		
    """
        Precision metric.		
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))		
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))		
    precision = true_positives / (predicted_positives + K.epsilon())		
    return precision 
Example 63
Project: tfbs-prediction   Author: aniketk21   File: ffnn_train.py    MIT License
def recall(y_true, y_pred):		
    """
        Recall metric.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall 
Example 64
Project: tfbs-prediction   Author: aniketk21   File: ffnn_test.py    MIT License
def precision(y_true, y_pred):		
    """
        Precision metric.		
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))		
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))		
    precision = true_positives / (predicted_positives + K.epsilon())		
    return precision 
Example 65
Project: tfbs-prediction   Author: aniketk21   File: ffnn_test.py    MIT License
def recall(y_true, y_pred):		
    """
        Recall metric.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall 
Example 66
Project: Keras-FCN-template   Author: MchZys   File: losses.py    BSD 3-Clause "New" or "Revised" License
def weighted_bce_loss(y_true, y_pred, weight):
    epsilon = 1e-7
    y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
    logit_y_pred = K.log(y_pred / (1. - y_pred))
    loss = weight * (logit_y_pred * (1. - y_true) +
                     K.log(1. + K.exp(-K.abs(logit_y_pred))) + K.maximum(-logit_y_pred, 0.))
    return K.sum(loss) / K.sum(weight) 
Example 67
Project: foolbox   Author: bethgelab   File: keras.py    MIT License
def _to_logits(self, predictions):  # pragma: no cover
        from keras import backend as K

        eps = 10e-8
        predictions = K.clip(predictions, eps, 1 - eps)
        predictions = K.log(predictions)
        return predictions 
Example 68
Project: onnx-keras   Author: leodestiny   File: backend.py    MIT License
def handle_clip(cls, node, input_dict):
        assert "max" in node.attrs.keys()
        assert "min" in node.attrs.keys()

        max_val = node.attrs["max"]
        min_val = node.attrs["min"]

        return [Lambda(lambda x: K.clip(x, min_val, max_val))(input_dict[node.inputs[0]])] 
Example 69
Project: onnx-keras   Author: leodestiny   File: backend.py    MIT License
def handle_hard_sigmoid(cls, node, input_dict):
        alpha = node.attrs.get('alpha', 0.2)
        beta = node.attrs.get('beta', 0.5)
        return [Lambda(lambda x: K.clip(alpha * x + beta, 0, 1))(input_dict[node.inputs[0]])] 
Example 70
Project: BOLSTM   Author: lasigeBioTM   File: models.py    Apache License 2.0
def precision(y_true, y_pred):
    """Precision metric.

    Only computes a batch-wise average of precision.

    Computes the precision, a metric for multi-label classification of
    how many selected items are relevant.
    """
    # print(y_true, y_pred)
    true_positives = K.sum(K.round(K.clip(y_true[...,1:] * y_pred[...,1:], 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred[...,1:], 0, 1)))
    p = true_positives / (predicted_positives + K.epsilon())
    #p = precision_score(y_true[...,1:], y_pred[...,1:], average='macro')
    return p 
Example 71
Project: BOLSTM   Author: lasigeBioTM   File: models.py    Apache License 2.0
def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall.

    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true[...,1:] * y_pred[...,1:], 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true[...,1:], 0, 1)))
    r = true_positives / (possible_positives + K.epsilon())
    #r = recall_score(y_true[...,1:], y_pred[...,1:], average='macro')
    return r 
Example 72
Project: BOLSTM   Author: lasigeBioTM   File: models.py    Apache License 2.0
def f1(y_true, y_pred):
    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        # print(y_true, y_pred)
        true_positives = K.sum(K.round(K.clip(y_true[..., 1:] * y_pred[..., 1:], 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred[..., 1:], 0, 1)))
        p = true_positives / (predicted_positives + K.epsilon())
        # p = precision_score(y_true[...,1:], y_pred[...,1:], average='macro')
        return p

    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true[..., 1:] * y_pred[..., 1:], 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true[..., 1:], 0, 1)))
        r = true_positives / (possible_positives + K.epsilon())
        # r = recall_score(y_true[...,1:], y_pred[...,1:], average='macro')
        return r
    precision_v = precision(y_true, y_pred)
    recall_v = recall(y_true, y_pred)
    return (2.0*precision_v*recall_v)/(precision_v+recall_v + K.epsilon())
    #return f1_score(y_true[...,1:], y_pred[...,1:], average='macro') 
Example 73
Project: Road_Segmentation_ML   Author: TaoSunVoyage   File: metrics.py    MIT License
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision
    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))

# credits: https://github.com/lyakaap/Kaggle-Carvana-3rd-Place-Solution/blob/master/losses.py 
Example 74
Project: blackbox-attacks   Author: sunblaze-ucb   File: query_based_attack.py    MIT License
def spsa(prediction, logits, x, curr_sample, curr_target, p_t, dim):
    grad_est = np.zeros((BATCH_SIZE, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS,
                         FLAGS.NUM_CHANNELS))
    logits_np = K.get_session().run([logits], feed_dict={x: curr_sample})[0]
    perturb_vec = np.random.normal(size=dim*BATCH_SIZE).reshape((BATCH_SIZE, dim))
    for i in range(BATCH_SIZE):
        perturb_vec[i,:] = perturb_vec[i,:]/np.linalg.norm(perturb_vec[i,:])
    # perturb_vec = perturb_vec/np.linalg.norm(perturb_vec)
    perturb_vec = perturb_vec.reshape((BATCH_SIZE, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))

    x_plus_i = np.clip(curr_sample + args.delta * perturb_vec, CLIP_MIN, CLIP_MAX)
    x_minus_i = np.clip(curr_sample - args.delta * perturb_vec, CLIP_MIN, CLIP_MAX)

    if args.loss_type == 'cw':
        logit_t_grad_est, logit_max_grad_est = CW_est(logits, x, x_plus_i,
                                        x_minus_i, curr_sample, curr_target)
        if '_un' in args.method:
            single_grad_est = logit_t_grad_est - logit_max_grad_est
        else:
            single_grad_est = logit_max_grad_est - logit_t_grad_est
    elif args.loss_type == 'xent':
        single_grad_est = xent_est(prediction, x, x_plus_i, x_minus_i, curr_target)

    for i in range(BATCH_SIZE):
        grad_est[i] = single_grad_est[i]/perturb_vec[i]
    # Getting gradient of the loss
    if args.loss_type == 'xent':
        loss_grad = -1.0 * grad_est/p_t[:, None, None, None]
    elif args.loss_type == 'cw':
        logits_np_t = logits_np[np.arange(BATCH_SIZE), list(curr_target)].reshape(BATCH_SIZE)
        logits_np[np.arange(BATCH_SIZE), list(curr_target)] = -1e4
        max_indices = np.argmax(logits_np, 1)
        logits_np_max = logits_np[np.arange(BATCH_SIZE), list(max_indices)].reshape(BATCH_SIZE)
        logit_diff = logits_np_t - logits_np_max
        if '_un' in args.method:
            zero_indices = np.where(logit_diff + args.conf < 0.0)
        else:
            zero_indices = np.where(-logit_diff + args.conf < 0.0)
        grad_est[zero_indices[0]] = np.zeros((len(zero_indices), FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
        loss_grad = grad_est

    return loss_grad 
Example 75
Project: DeepLearn   Author: GauravBh1010tt   File: p3_cnn.py    MIT License
def trainCNN(obj, dataset_headLines, dataset_body):
    embedding_dim = 300
    LSTM_neurons = 50
    dense_neuron = 16
    dimx = 100
    dimy = 200
    lamda = 0.0
    nb_filter = 100
    filter_length = 4
    vocab_size = 10000
    batch_size = 50
    epochs = 5
    ntn_out = 16
    ntn_in = nb_filter 
    state = False
    
    
    train_head,train_body,embedding_matrix = obj.process_data(sent_Q=dataset_headLines,
                                                     sent_A=dataset_body,dimx=dimx,dimy=dimy,
                                                     wordVec_model = wordVec_model)    
    inpx = Input(shape=(dimx,),dtype='int32',name='inpx')
    #x = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimx)(inpx)
    x = word2vec_embedding_layer(embedding_matrix)(inpx)  
    inpy = Input(shape=(dimy,),dtype='int32',name='inpy')
    #y = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimy)(inpy)
    y = word2vec_embedding_layer(embedding_matrix)(inpy)
    ques = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                         border_mode='valid', activation='relu',
                         subsample_length=1)(x)
                            
    ans = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                        border_mode='valid', activation='relu',
                        subsample_length=1)(y)
            
    #hx = Lambda(max_1d, output_shape=(nb_filter,))(ques)
    #hy = Lambda(max_1d, output_shape=(nb_filter,))(ans)
    hx = GlobalMaxPooling1D()(ques)
    hy = GlobalMaxPooling1D()(ans)
    #wordVec_model = []
    #h =  Merge(mode="concat",name='h')([hx,hy])
    
    h1 = Multiply()([hx,hy])
    h2 = Abs()([hx,hy])

    h =  Merge(mode="concat",name='h')([h1,h2])
    #h = NeuralTensorLayer(output_dim=1,input_dim=ntn_in)([hx,hy])
    #h = ntn_layer(ntn_in,ntn_out,activation=None)([hx,hy])
    #score = h
    wrap = Dense(dense_neuron, activation='relu',name='wrap')(h)
    #score = Dense(1,activation='sigmoid',name='score')(h)
    #wrap = Dense(dense_neuron,activation='relu',name='wrap')(h)
    score = Dense(4,activation='softmax',name='score')(wrap)
    
    #score=K.clip(score,1e-7,1.0-1e-7)
    #corr = CorrelationRegularization(-lamda)([hx,hy])
    #model = Model( [inpx,inpy],[score,corr])
    model = Model( [inpx,inpy],score)
    model.compile( loss='categorical_crossentropy',optimizer="adadelta",metrics=['accuracy'])    
    return model,train_head,train_body 
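The commented-out K.clip call above hints at why clipping matters with a softmax output: categorical crossentropy takes the log of the predictions, so any probability that reaches exactly 0 produces an infinite loss. A minimal sketch of a clipped crossentropy that could be used as a custom loss instead (the 1e-7 bound mirrors the commented-out line; the compile call is illustrative):

from keras import backend as K

def clipped_categorical_crossentropy(y_true, y_pred):
    # Keep predictions strictly inside (0, 1) so K.log never sees 0
    y_pred = K.clip(y_pred, 1e-7, 1.0 - 1e-7)
    return -K.sum(y_true * K.log(y_pred), axis=-1)

# model.compile(loss=clipped_categorical_crossentropy, optimizer="adadelta", metrics=["accuracy"])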
Example 76
Project: RLDonkeycar   Author: downingbots   File: RLKeras.py    MIT License 4 votes vote down vote up
def proximal_policy_optimization_loss_continuous(advantage, old_prediction):
      def loss(y_true, y_pred):

        import tensorflow as tf
        # return K.mean(K.square(y_pred - y_true), axis=-1)

        # adv = K.squeeze(advantage, axis = 1)
        # pred = K.squeeze(old_prediction, axis = 1)
        # adv_sum = K.sum(adv, axis=-1)
        # adv_mean = K.mean(adv, axis=-1)
        # pred_sum = K.sum(pred, axis=-1)
        # pred_mean = K.mean(pred, axis=-1)
        # if (pred_mean == PPO_OUT_OF_RANGE):
        # if (K.sum(adv_sum, -pred_sum) == K.sum(adv_mean, -pred_mean) and adv_sum != adv_mean):
        # if (K.equal(adv_sum, pred_sum) and K.equal(adv_mean, pred_mean) and K.not_equal(adv_sum, adv_mean)):
        # out_of_range = tf.constant(PPO_OUT_OF_RANGE, tf.shape(advantage))
        # out_of_range = K.constant(PPO_OUT_OF_RANGE, dtype=old_prediction.dtype, shape=old_prediction.shape)
        # pred_out_of_range = K.equal(old_prediction, out_of_range)
        # pred_out_of_range = K.equal(old_prediction, PPO_OUT_OF_RANGE)

        mean_sq_err = K.mean(K.square(y_pred - y_true), axis=-1)

        try:
          # Probe old_prediction for the out-of-range sentinel; if this graph
          # construction fails, fall back to a plain mean squared error loss
          PPO_OUT_OF_RANGE = 1000    # negative of -1000
          checkifzero = K.sum(old_prediction, PPO_OUT_OF_RANGE)
          divbyzero = old_prediction / checkifzero
        except:
          return mean_sq_err
          
          
        # pred_out_of_range = K.mean((old_prediction / PPO_OUT_OF_RANGE), axis=-1)
        # pred_out_of_range = K.mean(K.equal(old_prediction , PPO_OUT_OF_RANGE), axis=-1)
        pred_out_of_range = K.mean(old_prediction, axis=-1)

        PPO_NOISE = 1.0
        var = K.square(PPO_NOISE)
        denom = K.sqrt(2 * np.pi * var)
        prob_num = K.exp(- K.square(y_true - y_pred) / (2 * var))
        old_prob_num = K.exp(- K.square(y_true - old_prediction) / (2 * var))

        prob = prob_num/denom
        old_prob = old_prob_num/denom
        r = prob/(old_prob + 1e-10)

        PPO_LOSS_CLIPPING = 0.2
        PPO_ENTROPY_LOSS = 5 * 1e-3 # Does not converge without entropy penalty
        # return -K.mean(K.minimum(r * advantage, K.clip(r, min_value=1 - PPO_LOSS_CLIPPING, max_value=1 + PPO_LOSS_CLIPPING) * advantage))
        # ppo_loss = -K.mean(K.minimum(r * advantage, K.clip(r, min_value=1 - PPO_LOSS_CLIPPING, max_value=1 + PPO_LOSS_CLIPPING) * advantage)) + PPO_ENTROPY_LOSS * (prob * K.log(prob + 1e-10))
        return -K.mean(K.minimum(r * advantage, K.clip(r, min_value=1 - PPO_LOSS_CLIPPING, max_value=1 + PPO_LOSS_CLIPPING) * advantage)) + PPO_ENTROPY_LOSS * (prob * K.log(prob + 1e-10))

        # out = tf.where(tf.equal(pred_out_of_range, PPO_OUT_OF_RANGE), mean_sq_err,  ppo_loss)
        # out = K.switch(K.equal(-1000, PPO_OUT_OF_RANGE), mean_sq_err,  ppo_loss)
        # out = K.switch(K.equal(pred_out_of_range, PPO_OUT_OF_RANGE), mean_sq_err,  ppo_loss)
        # out = K.switch( pred_out_of_range, K.zeros_like(pred_out_of_range),  K.zeros_like(pred_out_of_range))
        # out = K.switch( K.mean(old_prediction/PPO_OUT_OF_RANGE), mean_sq_err,  ppo_loss)
        # return out
      return loss 
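Stripped of the dead code and sentinel probing, the core of the loss above is the standard PPO clipped surrogate objective for a continuous Gaussian policy. A minimal sketch of that core, assuming fixed policy noise and the same clipping and entropy constants as the excerpt (the function and argument names are otherwise illustrative):

import numpy as np
from keras import backend as K

def ppo_continuous_loss(advantage, old_prediction, noise=1.0,
                        loss_clipping=0.2, entropy_beta=5e-3):
    var = noise ** 2
    denom = np.sqrt(2.0 * np.pi * var)

    def loss(y_true, y_pred):
        # Gaussian likelihood of the taken action under the new and old policies
        prob = K.exp(-K.square(y_true - y_pred) / (2.0 * var)) / denom
        old_prob = K.exp(-K.square(y_true - old_prediction) / (2.0 * var)) / denom
        ratio = prob / (old_prob + 1e-10)

        # Clipped surrogate objective plus a small entropy bonus
        surrogate = K.minimum(ratio * advantage,
                              K.clip(ratio, 1.0 - loss_clipping, 1.0 + loss_clipping) * advantage)
        entropy = -(prob * K.log(prob + 1e-10))
        return -K.mean(surrogate + entropy_beta * entropy)
    return loss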
Example 77
Project: icassp19   Author: edufonseca   File: losses.py    MIT License 4 votes vote down vote up
def crossentropy_reed_origin_wrap(_beta):
    def crossentropy_reed_origin_core(y_true, y_pred):
        # hyper param
        print(_beta)

        # 1) determine the origin of the patch, as a boolean vector in y_true_flag
        # (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)

        # 2) convert the input y_true (with flags inside) into a valid y_true one-hot-vector format
        # attenuating factor for data points that need it (those that came with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01

        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')

        # combine 2 masks
        _mask = _mask_reduce + _mask_keep

        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))

        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask

        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # (1) dynamically update the targets based on the current state of the model: bootstrapped target tensor
        # use predicted class proba directly to generate regression targets
        y_true_bootstrapped = _beta * y_true + (1 - _beta) * y_pred

        # at this point we have 2 versions of y_true
        # decide which target label to use for each datapoint
        _mask_noisy = K.cast(_y_true_flag, 'float32')                   # only allows patches from noisy set
        _mask_clean = K.cast(K.equal(_y_true_flag, False), 'float32')   # only allows patches from clean set
        _mask_noisy = K.reshape(_mask_noisy, (_y_true_shape[0], 1))
        _mask_clean = K.reshape(_mask_clean, (_y_true_shape[0], 1))

        # points coming from clean set use the standard true one-hot vector. dim is (batch_size, 1)
        # points coming from noisy set use the Reed bootstrapped target tensor
        y_true_final = y_true * _mask_clean + y_true_bootstrapped * _mask_noisy

        # (2) compute loss as always
        _loss = -K.sum(y_true_final * K.log(y_pred), axis=-1)

        return _loss
    return crossentropy_reed_origin_core 
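A hedged usage sketch for the wrapper above: it returns a closure that Keras can compile against directly. The tiny model, the 64-dimensional input, and the beta value 0.95 are illustrative assumptions, not the project's defaults.

from keras.models import Sequential
from keras.layers import Dense

# Targets from the noisy subset are flagged by summing to 100 instead of 1,
# which is what the wrapped loss uses to pick the bootstrapped target.
demo_model = Sequential([Dense(10, activation='softmax', input_shape=(64,))])
demo_model.compile(optimizer='adam',
                   loss=crossentropy_reed_origin_wrap(0.95),
                   metrics=['accuracy'])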
Example 78
Project: icassp19   Author: edufonseca   File: losses.py    MIT License 4 votes vote down vote up
def lq_loss_origin_wrap(_q):
    def lq_loss_origin_core(y_true, y_pred):

        # hyper param
        print(_q)

        # 1) determine the origin of the patch, as a boolean vector in y_true_flag
        # (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)

        # 2) convert the input y_true (with flags inside) into a valid y_true one-hot-vector format
        # attenuating factor for data points that need it (those that came with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01

        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')

        # combine 2 masks
        _mask = _mask_reduce + _mask_keep

        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))

        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask

        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute two types of losses, for all the data points
        # (1) compute CCE loss for every data point
        _loss_CCE = -K.sum(y_true * K.log(y_pred), axis=-1)

        # (2) compute lq_loss for every data point
        _tmp = y_pred * y_true
        _loss_tmp = K.max(_tmp, axis=-1)
        # compute the Lq loss between the one-hot encoded label and the predictions
        _loss_q = (1 - (_loss_tmp + 10 ** (-8)) ** _q) / _q

        # decide which loss to take for each datapoint
        _mask_noisy = K.cast(_y_true_flag, 'float32')                   # only allows patches from noisy set
        _mask_clean = K.cast(K.equal(_y_true_flag, False), 'float32')   # only allows patches from clean set

        # points coming from clean set contribute with CCE loss
        # points coming from noisy set contribute with lq_loss
        _loss_final = _loss_CCE * _mask_clean + _loss_q * _mask_noisy

        return _loss_final
    return lq_loss_origin_core 
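For intuition, the generalized Lq loss used above, (1 - p_t**q) / q with p_t the predicted probability of the true class, approaches the cross-entropy -log(p_t) as q goes to 0 and becomes the MAE-like 1 - p_t at q = 1. A small NumPy check of that behaviour (the probabilities and q values are illustrative):

import numpy as np

p_t = np.array([0.9, 0.5, 0.1])            # predicted probability of the true class
for q in (1e-6, 0.4, 1.0):
    lq = (1.0 - (p_t + 1e-8) ** q) / q     # same form as _loss_q above
    print(q, np.round(lq, 3))
# As q -> 0 this matches -log(p_t); at q = 1 it is simply 1 - p_t.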
Example 79
Project: icassp19   Author: edufonseca   File: losses.py    MIT License 4 votes vote down vote up
def crossentropy_outlier_origin_wrap(_l):
    def crossentropy_outlier_origin_core(y_true, y_pred):

        # hyper param
        print(_l)

        # 1) determine the origin of the patch, as a boolean vector y_true_flag
        # (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)

        # 2) convert the input y_true (with flags inside) into a valid y_true one-hot-vector format
        # attenuating factor for data points that need it (those that came with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01

        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')

        # combine 2 masks
        _mask = _mask_reduce + _mask_keep

        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))

        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask

        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)

        def _get_real_median(_v):
            """
            given a tensor with shape (batch_size,), compute and return the median.
            Note: top_k with k=33 assumes a fixed batch size of 64, so the median is
            the mean of the 32nd and 33rd largest per-example losses.

            :param _v: 1-D tensor of per-example losses
            :return: scalar median of _v
            """
            _val = tf.nn.top_k(_v, 33).values
            return 0.5 * (_val[-1] + _val[-2])

        _mean_loss, _var_loss = tf.nn.moments(_loss, axes=[0])
        _median_loss = _get_real_median(_loss)
        _std_loss = tf.sqrt(_var_loss)

        # threshold
        t_l = _median_loss + _l*_std_loss

        _mask_l = 1 - (K.cast(K.greater(_loss, t_l), 'float32') * K.cast(_y_true_flag, 'float32'))
        _loss = _loss * _mask_l

        return _loss
    return crossentropy_outlier_origin_core 
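The discard rule above zeroes the loss of noisy-subset patches whose loss exceeds median + l * std of the batch losses, so likely label-noise outliers do not contribute to the gradient. A small NumPy sketch of the same masking for intuition (the losses, flags, and l value are illustrative, and np.median stands in for the batch-size-specific median above):

import numpy as np

losses = np.array([0.2, 0.3, 0.25, 4.0, 0.4, 3.5])    # per-example CCE losses
is_noisy = np.array([0, 0, 1, 1, 0, 1], dtype=float)  # 1 = patch from the noisy subset
l = 1.0

threshold = np.median(losses) + l * losses.std()
keep = 1.0 - ((losses > threshold).astype(float) * is_noisy)
print(losses * keep)   # outlier losses from the noisy subset are zeroed out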
Example 80
Project: FormicID   Author: naturalis   File: optimizer.py    MIT License 4 votes vote down vote up
def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)

        self.updates = [K.update_add(self.iterations, 1)]
        t = self.iterations + 1

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]

        loss_prev = K.variable(0)
        self.updates.append(K.update(loss_prev, loss))

        # Calculate the numerator of the Eve coefficient
        d_num_t = K.abs(loss_prev - loss)
        self.updates.append(K.update(self.d_num, d_num_t))

        # Calculate the denominator of the Eve coefficient
        d_den_t = K.abs(K.minimum(loss_prev, loss) - self.loss_min)
        self.updates.append(K.update(self.d_den, d_den_t))

        # Calculate the Eve coefficient. At the first iteration, it is 1.
        d_tilde_t = K.clip(
            (d_num_t + self.fmin_pos) / (d_den_t + self.fmin_pos),
            1. / self.c,
            self.c,
        )
        d_t = (self.beta_3 * self.d) + (1. - self.beta_3) * d_tilde_t
        d_t = K.switch(K.greater(t, 1), d_t, K.constant(1))
        self.updates.append(K.update(self.d, d_t))

        # Calculate the effective learning rate as lr / (d * decay)
        lr_eff_t = self.lr / (d_t * (1. + (self.iterations * self.decay)))
        self.updates.append(K.update(self.lr_eff, lr_eff_t))

        # Apply bias correction to the learning rate
        lr_hat_t = (
            lr_eff_t
            * K.sqrt(1. - K.pow(self.beta_2, t))
            / (1. - K.pow(self.beta_1, t))
        )

        # Update per parameter
        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            self.updates.append(K.update(m, m_t))

            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            self.updates.append(K.update(v, v_t))

            p_t = p - lr_hat_t * m_t / (K.sqrt(v_t) + self.epsilon)
            new_p = p_t
            # Apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
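The K.clip call above is what keeps the Eve coefficient well behaved: the ratio of the latest loss change to its distance from the best loss seen so far is bounded to [1/c, c] before smoothing, so a single wild loss value cannot blow up or collapse the effective learning rate. A small numeric sketch of that one step (c, fmin_pos, and the loss values are illustrative):

import numpy as np

c = 10.0
fmin_pos = 1e-8                       # small positive constant, as in the optimizer
d_num = abs(2.0 - 150.0)              # |loss_prev - loss|: a sudden loss spike
d_den = abs(min(2.0, 150.0) - 1.5)    # distance of the better loss from the minimum seen so far

d_tilde = np.clip((d_num + fmin_pos) / (d_den + fmin_pos), 1.0 / c, c)
print(d_tilde)   # 10.0 -- the raw ratio (about 296) is capped at c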