Python tensorflow.keras.backend.mean() Examples

The following are 30 code examples of tensorflow.keras.backend.mean(), collected from open-source projects. Follow the link above each example to go to the original project or source file. You may also want to check out all available functions and classes of the module tensorflow.keras.backend.
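As a quick reference before the project examples, here is a minimal, self-contained sketch of K.mean() itself; the tensor values and the axis/keepdims choices are purely illustrative.

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

# A small 2x3 tensor to reduce.
x = tf.constant(np.arange(6, dtype="float32").reshape(2, 3))

# Mean over all elements -> scalar tensor (2.5 for this input).
print(K.mean(x))

# Mean over the last axis, keeping the reduced dimension -> shape (2, 1).
print(K.mean(x, axis=-1, keepdims=True))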
Example #1
Source File: vae-cnn-mnist-8.1.2.py    From Advanced-Deep-Learning-with-Keras with MIT License
def sampling(args):
    """Reparameterization trick by sampling 
        fr an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
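For context, a minimal sketch of how a sampling function like the one above is typically wired into a VAE encoder with a Lambda layer; it reuses the sampling function and the K backend import from the example, and the layer sizes, names and latent_dim are illustrative rather than the book's exact code.

from tensorflow.keras.layers import Dense, Input, Lambda
from tensorflow.keras.models import Model

latent_dim = 2
inputs = Input(shape=(784,))
h = Dense(256, activation='relu')(inputs)
z_mean = Dense(latent_dim, name='z_mean')(h)
z_log_var = Dense(latent_dim, name='z_log_var')(h)

# Wrap sampling() so the reparameterization trick becomes part of the graph.
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')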
Example #2
Source File: metrics.py    From neuron with GNU General Public License v3.0
def mean_dice(self, y_true, y_pred):
        """ weighted mean dice across all patches and labels """

        # compute dice, which will now be [batch_size, nb_labels]
        dice_metric = self.dice(y_true, y_pred)

        # weigh the entries in the dice matrix:
        if self.weights is not None:
            dice_metric *= self.weights
        if self.vox_weights is not None:
            dice_metric *= self.vox_weights

        # return one minus mean dice as loss
        mean_dice_metric = K.mean(dice_metric)
        tf.verify_tensor_all_finite(mean_dice_metric, 'metric not finite')
        return mean_dice_metric 
Example #3
Source File: losses.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def cce_jaccard_loss(gt, pr, cce_weight=1., class_weights=1., smooth=SMOOTH, per_image=True):
    r"""Sum of categorical crossentropy and jaccard losses:
    
    .. math:: L(A, B) = cce_weight * categorical_crossentropy(A, B) + jaccard_loss(A, B)
    
    Args:
        gt: ground truth 4D keras tensor (B, H, W, C)
        pr: prediction 4D keras tensor (B, H, W, C)
        class_weights: 1. or list of class weights for jaccard loss, len(weights) = C
        smooth: value to avoid division by zero
        per_image: if ``True``, jaccard loss is calculated as mean over images in batch (B),
            else over whole batch

    Returns:
        loss
    
    """
    cce = categorical_crossentropy(gt, pr) * class_weights
    cce = K.mean(cce)
    return cce_weight * cce + jaccard_loss(gt, pr, smooth=smooth, class_weights=class_weights, per_image=per_image)


# Update custom objects 
Example #4
Source File: losses.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def dice_loss(gt, pr, class_weights=1., smooth=SMOOTH, per_image=True, beta=1.):
    r"""Dice loss function for imbalanced datasets:

    .. math:: L(precision, recall) = 1 - (1 + \beta^2) \frac{precision \cdot recall}
        {\beta^2 \cdot precision + recall}

    Args:
        gt: ground truth 4D keras tensor (B, H, W, C)
        pr: prediction 4D keras tensor (B, H, W, C)
        class_weights: 1. or list of class weights, len(weights) = C
        smooth: value to avoid division by zero
        per_image: if ``True``, metric is calculated as mean over images in batch (B),
            else over whole batch
        beta: coefficient for precision recall balance

    Returns:
        Dice loss in range [0, 1]

    """
    return 1 - f_score(gt, pr, class_weights=class_weights, smooth=smooth, per_image=per_image, beta=beta) 
Example #5
Source File: arma_conv.py    From spektral with MIT License
def call(self, inputs):
        features = inputs[0]
        fltr = inputs[1]

        # Convolution
        output = []  # Stores the parallel filters
        for k in range(self.order):
            output_k = features
            for i in range(self.iterations):
                output_k = self.gcs([output_k, features, fltr], k, i)
            output.append(output_k)

        # Average stacks
        output = K.stack(output, axis=-1)
        output = K.mean(output, axis=-1)
        output = self.activation(output)

        return output 
Example #6
Source File: losses.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def jaccard_loss(gt, pr, class_weights=1., smooth=SMOOTH, per_image=True):
    r"""Jaccard loss function for imbalanced datasets:

    .. math:: L(A, B) = 1 - \frac{A \cap B}{A \cup B}

    Args:
        gt: ground truth 4D keras tensor (B, H, W, C)
        pr: prediction 4D keras tensor (B, H, W, C)
        class_weights: 1. or list of class weights, len(weights) = C
        smooth: value to avoid division by zero
        per_image: if ``True``, metric is calculated as mean over images in batch (B),
            else over whole batch

    Returns:
        Jaccard loss in range [0, 1]

    """
    return 1 - jaccard_score(gt, pr, class_weights=class_weights, smooth=smooth, per_image=per_image) 
Example #7
Source File: losses.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def bce_dice_loss(gt, pr, bce_weight=1., smooth=SMOOTH, per_image=True, beta=1.):
    r"""Sum of binary crossentropy and dice losses:
    
    .. math:: L(A, B) = bce_weight * binary_crossentropy(A, B) + dice_loss(A, B)
    
    Args:
        gt: ground truth 4D keras tensor (B, H, W, C)
        pr: prediction 4D keras tensor (B, H, W, C)
        class_weights: 1. or list of class weights for dice loss, len(weights) = C 
        smooth: value to avoid division by zero
        per_image: if ``True``, dice loss is calculated as mean over images in batch (B),
            else over whole batch
        beta: coefficient for precision recall balance

    Returns:
        loss
    
    """
    bce = K.mean(binary_crossentropy(gt, pr))
    loss = bce_weight * bce + dice_loss(gt, pr, smooth=smooth, per_image=per_image, beta=beta)
    return loss 
Example #8
Source File: metrics.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def get_f_score(class_weights=1, beta=1, smooth=SMOOTH, per_image=True, threshold=None):
    """Change default parameters of F-score score

    Args:
        class_weights: 1. or list of class weights, len(weights) = C
        smooth: value to avoid division by zero
        beta: f-score coefficient
        per_image: if ``True``, metric is calculated as mean over images in batch (B),
            else over whole batch
        threshold: value to round predictions (use ``>`` comparison); if ``None``, predictions will not be rounded

    Returns:
        ``callable``: F-score
    """
    def score(gt, pr):
        return f_score(gt, pr, class_weights=class_weights, beta=beta, smooth=smooth, per_image=per_image, threshold=threshold)

    return score 
Example #9
Source File: metrics.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def get_iou_score(class_weights=1., smooth=SMOOTH, per_image=True, threshold=None):
    """Change default parameters of IoU/Jaccard score

    Args:
        class_weights: 1. or list of class weights, len(weights) = C
        smooth: value to avoid division by zero
        per_image: if ``True``, metric is calculated as mean over images in batch (B),
            else over whole batch
        threshold: value to round predictions (use ``>`` comparison); if ``None``, predictions will not be rounded

    Returns:
        ``callable``: IoU/Jaccard score
    """
    def score(gt, pr):
        return iou_score(gt, pr, class_weights=class_weights, smooth=smooth, per_image=per_image, threshold=threshold)

    return score 
Example #10
Source File: losses.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def cce_dice_loss(gt, pr, cce_weight=1., class_weights=1., smooth=SMOOTH, per_image=True, beta=1.):
    r"""Sum of categorical crossentropy and dice losses:
    
    .. math:: L(A, B) = cce_weight * categorical_crossentropy(A, B) + dice_loss(A, B)
    
    Args:
        gt: ground truth 4D keras tensor (B, H, W, C)
        pr: prediction 4D keras tensor (B, H, W, C)
        class_weights: 1. or list of class weights for dice loss, len(weights) = C 
        smooth: value to avoid division by zero
        per_image: if ``True``, dice loss is calculated as mean over images in batch (B),
            else over whole batch
        beta: coefficient for precision recall balance

    Returns:
        loss
    
    """
    cce = categorical_crossentropy(gt, pr) * class_weights
    cce = K.mean(cce)
    return cce_weight * cce + dice_loss(gt, pr, smooth=smooth, class_weights=class_weights, per_image=per_image, beta=beta)


# Update custom objects 
Example #11
Source File: operation_layers.py    From onnx2keras with MIT License
def convert_reduce_mean(node, params, layers, lambda_func, node_name, keras_name):
    """
    Convert reduce mean.
    :param node: current operation node
    :param params: operation attributes
    :param layers: available keras layers
    :param lambda_func: function for keras Lambda layer
    :param node_name: internal converter name
    :param keras_name: resulting layer name
    :return: None
    """
    if len(node.input) != 1:
        raise AttributeError('More than 1 input for reduce mean layer.')

    input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

    def target_layer(x, axis=params['axes'], keepdims=params['keepdims']):
        import tensorflow.keras.backend as K
        return K.mean(x, keepdims=(keepdims == 1), axis=axis)

    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
    layers[node_name] = lambda_layer(input_0)
    layers[node_name].set_shape(layers[node_name].shape)
    lambda_func[keras_name] = target_layer 
Example #12
Source File: metrics.py    From neuron with GNU General Public License v3.0
def loss(self, y_true, y_pred):

        if self.crop_indices is not None:
            y_true = utils.batch_gather(y_true, self.crop_indices)
            y_pred = utils.batch_gather(y_pred, self.crop_indices)

        ksq = K.square(y_pred - y_true)

        if self.vox_weights is not None:
            if self.vox_weights == 'y_true':
                ksq *= y_true
            elif self.vox_weights == 'expy_true':
                ksq *= tf.exp(y_true)
            else:
                ksq *= self.vox_weights

        if self.weights is not None:
            ksq *= self.weights

        return K.mean(ksq) 
Example #13
Source File: metrics.py    From neuron with GNU General Public License v3.0
def loss(self, y_true, y_pred):
        """ the loss. Assumes y_pred is prob (in [0,1] and sum_row = 1) """

        # compute dice, which will now be [batch_size, nb_labels]
        dice_metric = self.dice(y_true, y_pred)

        # loss
        dice_loss = 1 - dice_metric

        # weigh the entries in the dice matrix:
        if self.weights is not None:
            dice_loss *= self.weights

        # return one minus mean dice as loss
        mean_dice_loss = K.mean(dice_loss)
        tf.verify_tensor_all_finite(mean_dice_loss, 'Loss not finite')
        return mean_dice_loss 
Example #14
Source File: metrics.py    From neuron with GNU General Public License v3.0
def loss(self, y_true, y_pred):

        # get the value for the true and fake images
        disc_true = self.disc(y_true)
        disc_pred = self.disc(y_pred)

        # sample a x_hat by sampling along the line between true and pred
        # z = tf.placeholder(tf.float32, shape=[None, 1])
        # shp = y_true.get_shape()[0]
        # WARNING: SHOULD REALLY BE shape=[batch_size, 1] !!!
        # self.batch_size does not work, since it's not None!!!
        alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
        diff = y_pred - y_true
        interp = y_true + alpha * diff

        # take gradient of D(x_hat)
        gradients = K.gradients(self.disc(interp), [interp])[0]
        grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1))-1))

        # compute loss
        return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen 
Example #15
Source File: mine-13.8.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def mi_loss(self, y_true, y_pred):
        """ MINE loss function

        Arguments:
            y_true (tensor): Not used since this is
                unsupervised learning
            y_pred (tensor): stack of predictions for
                joint T(x,y) and marginal T(x,y)
        """
        size = self.args.batch_size
        # lower half is pred for joint dist
        pred_xy = y_pred[0: size, :]

        # upper half is pred for marginal dist
        pred_x_y = y_pred[size : y_pred.shape[0], :]
        loss = K.mean(K.exp(pred_x_y))
        loss = K.clip(loss, K.epsilon(), np.finfo(float).max)
        loss = K.mean(pred_xy) - K.log(loss)
        return -loss 
Example #16
Source File: mine-13.8.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def mi_loss(self, y_true, y_pred):
        """ MINE loss function

        Arguments:
            y_true (tensor): Not used since this is
                unsupervised learning
            y_pred (tensor): stack of predictions for
                joint T(x,y) and marginal T(x,y)
        """
        size = self.args.batch_size
        # lower half is pred for joint dist
        pred_xy = y_pred[0: size, :]

        # upper half is pred for marginal dist
        pred_x_y = y_pred[size : y_pred.shape[0], :]
        # implementation of MINE loss (Eq 13.7.3)
        loss = K.mean(pred_xy) \
               - K.log(K.mean(K.exp(pred_x_y)))
        return -loss 
Example #17
Source File: layers.py    From neuron with GNU General Public License v3.0
def build(self, input_shape):
        # Create mean and count
        # These are weights because plain variables don't get saved with the model,
        # and we'd like these numbers to be saved when we save the model.
        # But we need to make sure that the weights are untrainable.
        self.mean = self.add_weight(name='mean', 
                                      shape=input_shape[1:],
                                      initializer='zeros',
                                      trainable=False)
        self.count = self.add_weight(name='count', 
                                      shape=[1],
                                      initializer='zeros',
                                      trainable=False)

        # self.mean = K.zeros(input_shape[1:], name='mean')
        # self.count = K.variable(0.0, name='count')
        super(MeanStream, self).build(input_shape)  # Be sure to call this somewhere! 
Example #18
Source File: layers.py    From neuron with GNU General Public License v3.0
def build(self, input_shape):
        # Create mean, cov and count
        # See note in MeanStream.build()
        self.mean = self.add_weight(name='mean', 
                                    shape=input_shape[1:],
                                    initializer='zeros',
                                    trainable=False)
        v = np.prod(input_shape[1:])
        self.cov = self.add_weight(name='cov', 
                                 shape=[v, v],
                                 initializer='zeros',
                                 trainable=False)
        self.count = self.add_weight(name='count', 
                                      shape=[1],
                                      initializer='zeros',
                                      trainable=False)

        super(CovStream, self).build(input_shape)  # Be sure to call this somewhere! 
Example #19
Source File: layers.py    From neuron with GNU General Public License v3.0
def _mean_update(pre_mean, pre_count, x, pre_cap=None):

    # compute this batch stats
    this_sum = tf.reduce_sum(x, 0)
    this_bs = tf.cast(K.shape(x)[0], 'float32')  # this batch size
    
    # increase count and compute weights
    new_count = pre_count + this_bs
    alpha = this_bs/K.minimum(new_count, pre_cap)
    
    # compute new mean. Note that once we reach self.cap (e.g. 1000), the 'previous mean' matters less
    new_mean = pre_mean * (1-alpha) + (this_sum/this_bs) * alpha

    return (new_mean, new_count)

##########################################
## FFT Layers
########################################## 
Example #20
Source File: utils.py    From aitom with GNU General Public License v3.0
def alignment_eval(y_true, y_pred, image_size):
    """
    y_true is defined in radians [-pi, pi] (ZYZ convention) for rotation, and in voxels for translation
    y_pred is in [0, 1] from sigmoid activation, need to scale y_pred for comparison
    """

    ang_d = []
    loc_d = []

    for i in range(len(y_true)):
        a = angle_zyz_difference(ang1 = y_true[i][:3], ang2 = y_pred[i][:3] * 2 * np.pi - np.pi)
        b = np.linalg.norm(np.round(y_true[i][3:6]) - np.round((y_pred[i][3:6] * 2 - 1) * (image_size/2) ))
        ang_d.append(a)
        loc_d.append(b)

    print('Rotation error: ', np.mean(ang_d), '+/-', np.std(ang_d), 'Translation error: ', np.mean(loc_d), '+/-', np.std(loc_d), '----------') 
Example #21
Source File: metrics.py    From MIScnn with GNU General Public License v3.0
def dice_soft(y_true, y_pred, smooth=0.00001):
    # Identify axis
    axis = identify_axis(y_true.get_shape())

    # Calculate required variables
    intersection = y_true * y_pred
    intersection = K.sum(intersection, axis=axis)
    y_true = K.sum(y_true, axis=axis)
    y_pred = K.sum(y_pred, axis=axis)

    # Calculate Soft Dice Similarity Coefficient
    dice = ((2 * intersection) + smooth) / (y_true + y_pred + smooth)

    # Obtain mean of Dice & return result score
    dice = K.mean(dice)
    return dice 
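dice_soft relies on an identify_axis helper that is not reproduced here. A plausible sketch, assuming it simply returns the spatial axes to reduce over for 2D or 3D segmentation tensors (the actual MIScnn implementation may differ):

def identify_axis(shape):
    # Three spatial dimensions: (B, D, H, W, C) -> reduce over D, H, W.
    if len(shape) == 5:
        return [1, 2, 3]
    # Two spatial dimensions: (B, H, W, C) -> reduce over H, W.
    elif len(shape) == 4:
        return [1, 2]
    raise ValueError('Tensor shape is neither 2D nor 3D.')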
Example #22
Source File: metrics.py    From MIScnn with GNU General Public License v3.0
def dice_crossentropy(y_truth, y_pred):
    # Obtain Soft DSC
    dice = dice_soft_loss(y_truth, y_pred)
    # Obtain Crossentropy
    crossentropy = K.categorical_crossentropy(y_truth, y_pred)
    crossentropy = K.mean(crossentropy)
    # Return sum
    return dice + crossentropy

#-----------------------------------------------------#
#                    Tversky loss                     #
#-----------------------------------------------------#
#                     Reference:                      #
#                Sadegh et al. (2017)                 #
#     Tversky loss function for image segmentation    #
#      using 3D fully convolutional deep networks     #
#-----------------------------------------------------#
# alpha=beta=0.5 : dice coefficient                   #
# alpha=beta=1   : jaccard                            #
# alpha+beta=1   : produces set of F*-scores          #
#-----------------------------------------------------# 
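The Tversky loss referenced in the comment block above is not reproduced in this example. A minimal sketch following Sadegh et al. (2017), assuming the same K backend import and identify_axis convention as dice_soft; the default alpha/beta values are illustrative.

def tversky_loss(y_true, y_pred, alpha=0.5, beta=0.5, smooth=0.000001):
    axis = identify_axis(y_true.get_shape())
    # True positives, false negatives and false positives per class
    tp = K.sum(y_true * y_pred, axis=axis)
    fn = K.sum(y_true * (1 - y_pred), axis=axis)
    fp = K.sum((1 - y_true) * y_pred, axis=axis)
    # alpha = beta = 0.5 recovers the Dice coefficient, alpha = beta = 1 the Jaccard index
    tversky = (tp + smooth) / (tp + alpha * fn + beta * fp + smooth)
    # Average over classes and turn the index into a loss
    return 1 - K.mean(tversky)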
Example #23
Source File: vae-mlp-mnist-8.1.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def sampling(args):
    """Reparameterization trick by sampling 
        fr an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    # K is the keras backend
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Example #24
Source File: cvae-cnn-mnist-8.2.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def sampling(args):
    """Implements reparameterization trick by sampling
    from a gaussian with zero mean and std=1.

    Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    Returns:
        sampled latent vector (tensor)
    """

    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Example #25
Source File: metrics.py    From keras-unet with MIT License
def jaccard_coef(y_true, y_pred):
    intersection = K.sum(y_true * y_pred)
    union = K.sum(y_true + y_pred)
    jac = (intersection + 1.) / (union - intersection + 1.)
    return K.mean(jac) 
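A custom metric such as jaccard_coef is typically passed to compile(); a minimal usage sketch (the model, optimizer and loss are illustrative placeholders):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D

model = Sequential([Conv2D(1, 1, activation='sigmoid', input_shape=(128, 128, 3))])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[jaccard_coef])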
Example #26
Source File: losses.py    From ivis with GNU General Public License v2.0
def cosine_loss(margin=1):
    def _cosine_loss(y_true, y_pred):
        anchor, positive, negative = tf.unstack(y_pred)        
        return K.mean(K.maximum(_cosine_distance(anchor, positive) - _cosine_distance(anchor, negative) + margin, 0))
    return _cosine_loss 
Example #27
Source File: losses.py    From ivis with GNU General Public License v2.0
def softmax_ratio_pn(y_true, y_pred):
    anchor, positive, negative = tf.unstack(y_pred)

    anchor_positive_distance = _euclidean_distance(anchor, positive)
    anchor_negative_distance = _euclidean_distance(anchor, negative)
    positive_negative_distance = _euclidean_distance(positive, negative)

    minimum_distance = K.min(K.concatenate([anchor_negative_distance, positive_negative_distance]), axis=-1, keepdims=True)

    softmax = K.softmax(K.concatenate([anchor_positive_distance, minimum_distance]))
    ideal_distance = K.variable([0, 1])
    return K.mean(K.maximum(softmax - ideal_distance, 0)) 
Example #28
Source File: losses.py    From ivis with GNU General Public License v2.0
def softmax_ratio(y_true, y_pred):
    anchor, positive, negative = tf.unstack(y_pred)

    positive_distance = _euclidean_distance(anchor, positive)
    negative_distance = _euclidean_distance(anchor, negative)

    softmax = K.softmax(K.concatenate([positive_distance, negative_distance]))
    ideal_distance = K.variable([0, 1])
    return K.mean(K.maximum(softmax - ideal_distance, 0)) 
Example #29
Source File: losses.py    From ivis with GNU General Public License v2.0
def cosine_pn_loss(margin=1):
    def _pn_loss(y_true, y_pred):
        anchor, positive, negative = tf.unstack(y_pred)

        anchor_positive_distance = _cosine_distance(anchor, positive)
        anchor_negative_distance = _cosine_distance(anchor, negative)
        positive_negative_distance = _cosine_distance(positive, negative)

        minimum_distance = K.min(tf.stack([anchor_negative_distance, positive_negative_distance]), axis=0, keepdims=True)

        return K.mean(K.maximum(anchor_positive_distance - minimum_distance + margin, 0))

    return _pn_loss 
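The ivis losses above call _euclidean_distance and _cosine_distance helpers that are not shown in these snippets. A plausible sketch of such helpers, returning one distance per row so the results can be concatenated and reduced with K.mean (the library's actual definitions may differ):

def _euclidean_distance(x, y):
    # Row-wise Euclidean distance, kept as a column vector.
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=-1, keepdims=True), K.epsilon()))

def _cosine_distance(x, y):
    # 1 minus the cosine similarity of L2-normalised rows.
    x = K.l2_normalize(x, axis=-1)
    y = K.l2_normalize(y, axis=-1)
    return 1 - K.sum(x * y, axis=-1, keepdims=True)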
Example #30
Source File: FRN.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def frn_layer_keras(x, tau, beta, gamma, epsilon=1e-6):
    # x: Input tensor of shape [BxHxWxC].
    # tau, beta, gamma: Variables of shape [1, 1, 1, C].
    # epsilon: A scalar constant or learnable variable.
    # Compute the mean norm of activations per channel.
    nu2 = K.mean(K.square(x), axis=[1, 2], keepdims=True)
    # Perform FRN.
    x = x * 1 / K.sqrt(nu2 + K.abs(epsilon))
    # Return after applying the Offset-ReLU non-linearity.
    return K.maximum(gamma * x + beta, tau)
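frn_layer_keras expects tau, beta and gamma to already exist as per-channel variables. A minimal sketch of a custom layer that creates them as trainable weights and applies the function above; the class name and initializers are illustrative.

from tensorflow.keras.layers import Layer

class FRNLayer(Layer):
    def build(self, input_shape):
        shape = (1, 1, 1, int(input_shape[-1]))
        self.tau = self.add_weight(name='tau', shape=shape, initializer='zeros', trainable=True)
        self.beta = self.add_weight(name='beta', shape=shape, initializer='zeros', trainable=True)
        self.gamma = self.add_weight(name='gamma', shape=shape, initializer='ones', trainable=True)
        super().build(input_shape)

    def call(self, x):
        return frn_layer_keras(x, self.tau, self.beta, self.gamma)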