Python keras.backend.flatten() Examples

The following are code examples showing how to use keras.backend.flatten(), collected from open source Python projects.
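As quick orientation: K.flatten() reshapes any tensor into a single dimension, batch axis included (use K.batch_flatten() to preserve the batch axis). A minimal sketch, assuming a TensorFlow backend:

import numpy as np
from keras import backend as K

x = K.constant(np.arange(12).reshape(2, 2, 3))  # static shape (2, 2, 3)
flat = K.flatten(x)                             # collapsed to shape (12,)
print(K.int_shape(flat))                        # -> (12,)
print(K.eval(flat))                             # -> [ 0.  1. ... 11.]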

Example 1
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License
def softmax_sparse_crossentropy_ignoring_first_label(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

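    # one-hot the flattened sparse labels with one extra class so that
    # class 0 ("ignore") can be split off below as a validity mask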
    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1]+1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    y_true = tf.stack(unpacked[1:], axis=-1)

    cross_entropy = -K.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = K.sum(cross_entropy) / K.sum(tf.to_float(legal_labels))

    return cross_entropy_mean


# Accuracy for segmentation (ignoring first label) 
Example 2
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License
def sparse_accuracy(y_true, y_pred):
    classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    y_true = tf.stack(unpacked[1:], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))


# Define different models


# 3D-FCN model 
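A hedged usage sketch for the two functions above, assuming a hypothetical segmentation model whose output has shape (batch, height, width, n_classes) and whose sparse labels reserve class 0 for "ignore":

model.compile(optimizer='adam',
              loss=softmax_sparse_crossentropy_ignoring_first_label,
              metrics=[sparse_accuracy])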
Example 3
Project: speech_separation   Author: bill9800   File: model_loss.py    MIT License
def audio_discriminate_loss2(gamma=0.1,beta = 2*0.1,num_speaker=2):
    def loss_func(S_true,S_pred,gamma=gamma,beta=beta,num_speaker=num_speaker):
        sum_mtr = K.zeros_like(S_true[:,:,:,:,0])
        for i in range(num_speaker):
            sum_mtr += K.square(S_true[:,:,:,:,i]-S_pred[:,:,:,:,i])
            for j in range(num_speaker):
                if i != j:
                    sum_mtr -= gamma*(K.square(S_true[:,:,:,:,i]-S_pred[:,:,:,:,j]))

        for i in range(num_speaker):
            for j in range(i+1,num_speaker):
                #sum_mtr -= beta*K.square(S_pred[:,:,:,i]-S_pred[:,:,:,j])
                #sum_mtr += beta*K.square(S_true[:,:,:,:,i]-S_true[:,:,:,:,j])
                pass
        #sum = K.sum(K.maximum(K.flatten(sum_mtr),0))

        loss = K.mean(K.flatten(sum_mtr))

        return loss
    return loss_func 
Example 4
Project: deep-learning-explorer   Author: waspinator   File: losses.py    Apache License 2.0
def softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred):
    '''
    Softmax cross-entropy loss function for Pascal VOC segmentation
    and models which do not perform softmax.
    TensorFlow only.
    '''
    y_pred = KB.reshape(y_pred, (-1, KB.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    y_true = KB.one_hot(tf.to_int32(KB.flatten(y_true)),
                        KB.int_shape(y_pred)[-1]+1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    cross_entropy = -KB.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = KB.mean(cross_entropy)

    return cross_entropy_mean 
Example 5
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: UNET3D_MultiStream_v2.py    MIT License
def dice_coef(y_true, y_pred):
  y_true_f = K.flatten(y_true)
  y_pred_f = K.flatten(y_pred)
  intersection = K.sum(y_true_f * y_pred_f)
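  # note: smooth is a module-level constant in the original project (commonly 1.0)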
  return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 6
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License
def dice_coef_clipped(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(K.round(y_true))
    y_pred_f = K.flatten(K.round(y_pred))
    intersection = K.sum(y_true_f * y_pred_f)
    return 100. * (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 7
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License
def dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 8
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License
def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """ Implements nline Bootstrapping crossentropy loss, to train only on hard pixels,
        see  https://arxiv.org/abs/1605.06885 Bridging Category-level and Instance-level Semantic Image Segmentation
        The implementation is a bit different as we use binary crossentropy instead of softmax
        SUPPORTS ONLY MINIBATCH WITH 1 ELEMENT!
    # Arguments
        y_true: A tensor with labels.

        y_pred: A tensor with predicted probabilities.

        pixels: number of hard pixels to keep

        threshold: confidence threshold, e.g. with threshold 0.7, y_true=1 and prediction=0.65, the pixel counts as hard
    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    difference = K.abs(y_true - y_pred)

    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))

    return K.mean(K.binary_crossentropy(y_true, y_pred)) 
Example 9
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License
def dice_coef_border(y_true, y_pred):
    border = get_border_mask((21, 21), y_true)

    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.tf.gather(y_true_f, K.tf.where(border > 0.5))
    y_pred_f = K.tf.gather(y_pred_f, K.tf.where(border > 0.5))

    return dice_coef(y_true_f, y_pred_f) 
Example 10
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License
def bce_border(y_true, y_pred):
    border = get_border_mask((21, 21), y_true)

    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.tf.gather(y_true_f, K.tf.where(border > 0.5))
    y_pred_f = K.tf.gather(y_pred_f, K.tf.where(border > 0.5))

    return binary_crossentropy(y_true_f, y_pred_f) 
Example 11
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License
def sparse_accuracy(y_true, y_pred):
    classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    y_true = tf.stack(unpacked[1:], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels)) 
Example 12
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License
def softmax_sparse_crossentropy_ignoring_first_label(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log = tf.nn.log_softmax(y_pred)

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1]+1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[1:], axis=-1)

    cross_entropy = -K.sum(y_true * log, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean


# Accuracy for segmentation (ignoring first label) 
Example 13
Project: speech_separation   Author: bill9800   File: model_loss.py    MIT License
def audio_discriminate_loss(gamma=0.1,num_speaker=2):
    def loss_func(S_true,S_pred,gamma=gamma,num_speaker=num_speaker):
        sum = 0
        for i in range(num_speaker):
            sum += K.sum(K.flatten((K.square(S_true[:,:,:,i]-S_pred[:,:,:,i]))))
            for j in range(num_speaker):
                if i != j:
                    sum -= gamma*K.sum(K.flatten((K.square(S_true[:,:,:,i]-S_pred[:,:,:,j]))))

        loss = sum / (num_speaker*298*257*2)
        return loss
    return loss_func 
Example 14
Project: HippMapp3r   Author: mgoubran   File: metrics.py    GNU General Public License v3.0
def dice_coefficient(y_true, y_pred, smooth=1.):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 15
Project: typhon   Author: atmtools   File: qrnn.py    MIT License
def quantile_loss(y_true, y_pred, taus):
    """
    The quantile loss for a list of quantiles. Sums up the error contributions
    from each of the quantile loss functions.
    """
    e = skewed_absolute_error(
        K.flatten(y_true), K.flatten(y_pred[:, 0]), taus[0])
    for i, tau in enumerate(taus[1:]):
        e += skewed_absolute_error(K.flatten(y_true),
                                   K.flatten(y_pred[:, i + 1]),
                                   tau)
    return e 
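The helper skewed_absolute_error() is not shown on this page; the standard quantile (pinball) loss it presumably implements would look like the following sketch, not the project's exact code:

def skewed_absolute_error(y_true, y_pred, tau):
    # pinball loss: under-prediction is weighted by tau, over-prediction by (1 - tau)
    e = y_true - y_pred
    return K.mean(K.maximum(tau * e, (tau - 1) * e))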
Example 16
Project: SkinLesionNeuralNetwork   Author: Neurality   File: external_methods.py    GNU General Public License v3.0
def dice_coef(y_true, y_pred):
    """
        Calculate the Dice coefficient of the two input tensors.
        Reference: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient

        Keyword arguments:
        y_true -- Keras Tensor containing the ground truth
        y_pred -- Keras Tensor containing the prediction
        """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 17
Project: SkinLesionNeuralNetwork   Author: Neurality   File: external_methods.py    GNU General Public License v3.0
def jaccard_coef(y_true, y_pred):
    """
        Calculate the Jaccard coefficient of the two input tensors.
        Reference: https://en.wikipedia.org/wiki/Jaccard_index

        Keyword arguments:
        y_true -- Keras Tensor containing the ground truth
        y_pred -- Keras Tensor containing the prediction
        """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + smooth) 
Example 18
Project: MS-CMR2019   Author: Suiiyu   File: metrics.py    MIT License
def dice_coefficient(y_true, y_pred, smooth=1.):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 19
Project: CarND-Capstone   Author: ooleksyuk   File: tl_detector.py    MIT License
def dice_coef(y_true, y_pred):
    y_true_f = backend.flatten(y_true)
    y_pred_f = backend.flatten(y_pred)
    intersection = backend.sum(y_true_f * y_pred_f)
    return (2. * intersection + SMOOTH) / (backend.sum(y_true_f) + backend.sum(y_pred_f) + SMOOTH) 
Example 20
Project: object-detection   Author: kaka-lin   File: model.py    MIT License
def yolo_head(feats, anchors, num_classes, n):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    conv_dims = K.shape(feats)[1:3]  # assuming channels last
    # In YOLO the height index is the innermost iteration.
    conv_height_index = K.arange(0, stop=conv_dims[0])
    conv_width_index = K.arange(0, stop=conv_dims[1])
    conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

    conv_width_index = K.tile(K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))

    feats = K.reshape(feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    # TODO: It works with +1, don't know why.
    box_xy = (box_xy + conv_index + 1) / conv_dims
    # TODO: Input layer size
    box_wh = box_wh * anchors_tensor / conv_dims / {0:32, 1:16, 2:8}[n]

    return [box_xy, box_wh, box_confidence, box_class_probs] 
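To see what the tile/transpose/flatten sequence builds, here are the index vectors for a hypothetical 2x3 (height x width) feature map:

# conv_height_index after tiling:              [0, 1, 0, 1, 0, 1]
# conv_width_index after tile/transpose/flatten: [0, 0, 1, 1, 2, 2]
# stacked and transposed, conv_index pairs up one (row, col) offset per grid cell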
Example 21
Project: keras-image-segmentation   Author: dhkim0225   File: fcn.py    MIT License
def dice_coef(y_true, y_pred):
    smooth = 1.
    y_true /= 255.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 22
Project: keras-image-segmentation   Author: dhkim0225   File: fcn.py    MIT License
def pixelwise_l2_loss(y_true, y_pred):
    y_true /= 255.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    return K.mean(K.square(y_true_f - y_pred_f)) 
Example 23
Project: keras-image-segmentation   Author: dhkim0225   File: fcn.py    MIT License
def pixelwise_binary_ce(y_true, y_pred):
    y_true /= 255.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    return K.mean(K.binary_crossentropy(y_true_f, y_pred_f)) 
Example 24
Project: keras-image-segmentation   Author: dhkim0225   File: model.py    MIT License
def dice_coef(y_true, y_pred):
    smooth = 1.
    y_true /= 255.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 25
Project: keras-image-segmentation   Author: dhkim0225   File: model.py    MIT License
def pixelwise_l2_loss(y_true, y_pred):
    y_true /= 255.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    return K.mean(K.square(y_true_f - y_pred_f)) 
Example 26
Project: Trident-Segmentation-CNN   Author: YalongLiu   File: metrics.py    MIT License
def dice_coefficient(y_true, y_pred, smooth=0.1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    loss = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return loss 
Example 27
Project: Trident-Segmentation-CNN   Author: YalongLiu   File: metrics.py    MIT License
def dice_coef_metrics(y_true, y_pred, smooth=0.1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_pred_f = tf.to_float(y_pred_f > 0.7)
    intersection = K.sum(y_true_f * y_pred_f)
    loss = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return loss 
Example 28
Project: dep-gan-im   Author: febrianrachmadi   File: RLD44-depgan-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License
def dice_coef(y_true, y_pred, smooth=1e-7):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

# https://github.com/jocicmarko/ultrasound-nerve-segmentation/blob/master/train.py#L19 
Example 29
Project: dep-gan-im   Author: febrianrachmadi   File: RLD441-depgan-twoCritics-im-flair-noSL.py    BSD 3-Clause "New" or "Revised" License
def dice_coef(y_true, y_pred, smooth=1e-7):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

# https://github.com/jocicmarko/ultrasound-nerve-segmentation/blob/master/train.py#L19 
Example 30
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: caltech101_92_generate.py    MIT License
def vae_loss(self, x, x_decoded_mean_squash):
        x = K.flatten(x)
        x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example 31
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: caltech101_128_train.py    MIT License
def vae_loss(self, x, x_decoded_mean_squash):
        x = K.flatten(x)
        x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example 32
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: test.py    MIT License
def vae_loss(self, x, x_decoded_mean_squash):
        x = K.flatten(x)
        x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example 33
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: caltech101_128_generate.py    MIT License
def vae_loss(self, x, x_decoded_mean_squash):
        x = K.flatten(x)
        x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example 34
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: cifar10_train.py    MIT License
def vae_loss(self, x, x_decoded_mean_squash):
        x = K.flatten(x)
        x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example 35
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: caltech101_92_train.py    MIT License
def vae_loss(self, x, x_decoded_mean_squash):
        x = K.flatten(x)
        x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example 36
Project: elmo-bilstm-cnn-crf   Author: UKPLab   File: ChainCRF.py    Apache License 2.0
def batch_gather(reference, indices):
    ref_shape = K.shape(reference)
    batch_size = ref_shape[0]
    n_classes = ref_shape[1]
    flat_indices = K.arange(0, batch_size) * n_classes + K.flatten(indices)
    return K.gather(K.flatten(reference), flat_indices) 
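A hedged usage sketch (values are illustrative): batch_gather picks one entry per batch row by flattening the matrix and offsetting each row's index by its row start.

reference = K.constant([[0.1, 0.9],
                        [0.8, 0.2]])          # shape (batch=2, n_classes=2)
indices = K.constant([1, 0], dtype='int32')   # one column index per row
picked = batch_gather(reference, indices)
print(K.eval(picked))                         # -> [0.9, 0.8]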
Example 37
Project: TCFPN-ISBA   Author: Zephyr-D   File: weak_model.py    MIT License
def dice_coeff(y_true, y_pred):
    smooth = 0.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score 
Example 38
Project: TCFPN-ISBA   Author: Zephyr-D   File: weak_model.py    MIT License
def IOU(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = intersection / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection)
    return score 
Example 39
Project: TCFPN-ISBA   Author: Zephyr-D   File: weak_model.py    MIT License
def sigmoid_cross_entropy(y_true, y_pred):
    z = K.flatten(y_true)
    x = K.flatten(y_pred)
    q = 10
    l = (1 + (q - 1) * z)
    # stable weighted sigmoid cross-entropy: (1 - z) * x + l * (log(1 + exp(-|x|)) + relu(-x))
    loss = (K.sum((1 - z) * x) + K.sum(l * (K.log(1 + K.exp(- K.abs(x))) + K.maximum(-x, 0.)))) / 500
    return loss 
Example 40
Project: Res3DUNET   Author: tgwboers   File: UnetBuilder.py    GNU General Public License v3.0
def create_dice_coef(self,label):
        def dice_coef_c(y_true, y_pred, smooth=1.):
            y_true_f=K.flatten(y_true[:,:,:,:,label+1])
            y_pred_f=K.flatten(y_pred[:,:,:,:,label+1])
            intersection= K.sum(y_true_f * y_pred_f)
            dice = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

            return dice
        return dice_coef_c 
Example 41
Project: brats2017   Author: QTIM-Lab   File: model.py    MIT License
def dice_coef(y_true, y_pred, smooth=1.):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 42
Project: brats2017   Author: QTIM-Lab   File: model.py    MIT License
def neg_dice_coef(y_true, y_pred, smooth=1.):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f_norm = (y_true_f * 2) - 1
    y_pred_f_norm = (y_pred_f * 2) - 1
    intersection = K.sum(y_true_f_norm * y_pred_f_norm)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 43
Project: segmentation-unet-maskrcnn   Author: olgaliak   File: metrics.py    MIT License
def jacard_coef_flat(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (intersection + SMOOTH_LOSS) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + SMOOTH_LOSS) 
Example 44
Project: segmentation-unet-maskrcnn   Author: olgaliak   File: metrics.py    MIT License
def dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 45
Project: FSA-Net   Author: shamangary   File: layers.py    Apache License 2.0
def _make_regular_grids(self, batch_size, height, width):
        # making a single regular grid
        x_linspace = K_linspace(-1., 1., width)
        y_linspace = K_linspace(-1., 1., height)
        x_coordinates, y_coordinates = K_meshgrid(x_linspace, y_linspace)
        x_coordinates = K.flatten(x_coordinates)
        y_coordinates = K.flatten(y_coordinates)
        ones = K.ones_like(x_coordinates)
        grid = K.concatenate([x_coordinates, y_coordinates, ones], 0)

        # repeating grids for each batch
        grid = K.flatten(grid)
        grids = K.tile(grid, K.stack([batch_size]))
        return K.reshape(grids, (batch_size, 3, height * width)) 
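For height = width = 2, the single regular grid assembled above (before tiling across the batch) would contain, assuming K_meshgrid wraps tf.meshgrid with its default 'xy' indexing:

# row 0 (x): [-1,  1, -1,  1]
# row 1 (y): [-1, -1,  1,  1]
# row 2 (1): [ 1,  1,  1,  1]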
Example 46
Project: Dstl-Satellite-Imagery-Feature-Detection   Author: DeepVoltaire   File: training.py    MIT License
def jaccard_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + 1.0) 
Example 47
Project: ColiCoords   Author: Jhsmit   File: losses.py    MIT License
def dice_coeff(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score 
Example 48
Project: Keras-FCN-template   Author: MchZys   File: losses.py    BSD 3-Clause "New" or "Revised" License
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred = K.cast(y_pred, 'float32')
    y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32')
    intersection = y_true_f * y_pred_f
    score = 2. * K.sum(intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))
    return score 
Example 49
Project: Keras-FCN-template   Author: MchZys   File: losses.py    BSD 3-Clause "New" or "Revised" License
def dice_loss(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = y_true_f * y_pred_f
    score = (2. * K.sum(intersection) + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return 1. - score 
Example 50
Project: workspace_2017   Author: nwiizo   File: variational_autoencoder_deconv.py    MIT License
def vae_loss(x, x_decoded_mean):
    # NOTE: binary_crossentropy expects a batch_size by dim
    # for x and x_decoded_mean, so we MUST flatten these!
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = img_rows * img_cols * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss 
Example 51
Project: Cat-Segmentation   Author: ardamavi   File: get_model.py    Apache License 2.0
def dice_coef(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 52
Project: GAMO   Author: SankhaSubhra   File: fashion_mnist_gamo2pix_main.py    GNU General Public License v3.0
def vae_loss(y_true, y_pred):
    mse_loss=28*28*mean_squared_error(K.flatten(y_true), K.flatten(y_pred))
    kl_loss=-0.5*K.sum(1+z_sigma-K.square(z_mean)-K.exp(z_sigma), axis=-1)
    return K.mean(mse_loss+kl_loss)

# For selecting a GPU 
Example 53
Project: Road_Segmentation_ML   Author: TaoSunVoyage   File: metrics.py    MIT License
def dice_coeff(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score 
Example 54
Project: wmh_ibbmTum   Author: hongweilibran   File: submission_sysu_.py    GNU General Public License v3.0
def dice_coef_for_training(y_true, y_pred):
    print(np.shape(y_pred))
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 55
Project: wmh_ibbmTum   Author: hongweilibran   File: train_leave_one_out.py    GNU General Public License v3.0
def dice_coef_for_training(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 56
Project: wmh_ibbmTum   Author: hongweilibran   File: test_leave_one_out.py    GNU General Public License v3.0
def dice_coef_for_training(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 57
Project: async-rl   Author: Grzego   File: train.py    MIT License
def policy_loss(adventage=0., beta=0.01):
    from keras import backend as K

    def loss(y_true, y_pred):
        return -K.sum(K.log(K.sum(y_true * y_pred, axis=-1) + K.epsilon()) * K.flatten(adventage)) + \
               beta * K.sum(y_pred * K.log(y_pred + K.epsilon()))

    return loss 
Example 58
Project: async-rl   Author: Grzego   File: train.py    MIT License
def learn(self, last_observations, actions, rewards, learning_rate=0.001):
        import keras.backend as K
        K.set_value(self.train_net.optimizer.lr, learning_rate)
        frames = len(last_observations)
        self.counter += frames
        # -----
        values, policy = self.train_net.predict([last_observations, self.unroll])
        # -----
        self.targets.fill(0.)
        adventage = rewards - values.flatten()
        self.targets[self.unroll, actions] = 1.
        # -----
        loss = self.train_net.train_on_batch([last_observations, adventage], [rewards, self.targets])
        entropy = np.mean(-policy * np.log(policy + 0.00000001))
        self.pol_loss.append(loss[2])
        self.val_loss.append(loss[1])
        self.entropy.append(entropy)
        self.values.append(np.mean(values))
        min_val, max_val, avg_val = min(self.values), max(self.values), np.mean(self.values)
        print('\rFrames: %8d; Policy-Loss: %10.6f; Avg: %10.6f '
              '--- Value-Loss: %10.6f; Avg: %10.6f '
              '--- Entropy: %7.6f; Avg: %7.6f '
              '--- V-value; Min: %6.3f; Max: %6.3f; Avg: %6.3f' % (
                  self.counter,
                  loss[2], np.mean(self.pol_loss),
                  loss[1], np.mean(self.val_loss),
                  entropy, np.mean(self.entropy),
                  min_val, max_val, avg_val), end='')
        # -----
        self.swap_counter -= frames
        if self.swap_counter < 0:
            self.swap_counter += self.swap_freq
            return True
        return False 
Example 59
Project: ghn   Author: kamwoh   File: experimentutils.py    MIT License
def record_in_tensorboard(self, summary_inputs, postfix_name, tensor):
        min_weight = K.min(tensor)
        max_weight = K.max(tensor)
        mean_weight = K.mean(tensor)

        if postfix_name != '':
            postfix_name = '_' + postfix_name

        summary_inputs.append(tf.summary.scalar('min' + postfix_name, min_weight))
        summary_inputs.append(tf.summary.scalar('max' + postfix_name, max_weight))
        summary_inputs.append(tf.summary.scalar('mean' + postfix_name, mean_weight))

        flatten_weight = K.flatten(tensor)
        summary_inputs.append(tf.summary.histogram('histogram' + postfix_name, flatten_weight)) 
Example 60
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: loss_utils.py    Apache License 2.0
def sparse_categorical_crossentropy_seg(y_true, y_pred):
    """ calculate cross-entropy of the one-hot prediction and the sparse gt.
    :param y_true: tensor of shape (batch_size, height, width)
    :param y_pred: tensor of shape (batch_size, height, width, n_class)
    :return: categorical cross-entropy
    """
    n_class = K.int_shape(y_pred)[-1]

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), n_class)
    y_pred = K.log(K.reshape(y_pred, (-1, n_class)))

    cross_entropy = -K.sum(y_true * y_pred, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean 
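A hedged usage sketch, assuming a hypothetical model whose final layer is a softmax over n_class channels, so y_pred arrives as (batch, height, width, n_class) probabilities:

model.compile(optimizer='adam', loss=sparse_categorical_crossentropy_seg)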
Example 61
Project: CONSNet   Author: MICLab-Unicamp   File: cnn_utils.py    MIT License
def dice_coef(y_true, y_pred):
    
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 62
Project: deep_segment   Author: JoshuaEbenezer   File: metrics.py    GNU General Public License v3.0
def dice_coef(y_true, y_pred, smooth = smooth_default, per_batch = True):
    if not per_batch:
        y_true_f = K.flatten(y_true)
        y_pred_f = K.flatten(y_pred)
        intersection = K.sum(y_true_f * y_pred_f)
        return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    else: 
        y_true_f = K.batch_flatten(y_true)
        y_pred_f = K.batch_flatten(y_pred)
        intersec = 2. * K.sum(y_true_f * y_pred_f, axis=1, keepdims=True) + smooth
        union = K.sum(y_true_f, axis=1, keepdims=True) + K.sum(y_pred_f, axis=1, keepdims=True) + smooth
        return K.mean(intersec / union) 
Example 63
Project: deep_segment   Author: JoshuaEbenezer   File: metrics.py    GNU General Public License v3.0
def jacc_coef(y_true, y_pred, smooth = smooth_default):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + smooth) 
Example 64
Project: thyroid_segmentation   Author: suryatejadev   File: loss.py    MIT License
def dice_coef(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 65
Project: thyroid_segmentation   Author: suryatejadev   File: loss.py    MIT License
def dice_coef_numpy(y_true, y_pred):
    smooth = 1.
    y_true_f = np.ndarray.flatten(y_true)
    y_pred_f = np.ndarray.flatten(y_pred)
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth) 
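A quick sanity check of the NumPy variant (illustrative values):

a = np.array([1., 0., 1., 1.])
b = np.array([1., 0., 0., 1.])
print(dice_coef_numpy(a, b))  # (2*2 + 1) / (3 + 2 + 1) = 5/6 ~ 0.8333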
Example 66
Project: thyroid_segmentation   Author: suryatejadev   File: loss.py    MIT License
def iou_score(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (1. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 67
Project: MMdnn   Author: microsoft   File: utils.py    MIT License
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    conv_dims = K.shape(feats)[1:3]
    conv_height_index = K.arange(0, stop=conv_dims[1])
    conv_width_index = K.arange(0, stop=conv_dims[0])
    conv_height_index = K.tile(conv_height_index, [conv_dims[0]])

    conv_width_index = K.tile(
        K.expand_dims(conv_width_index, 0), [conv_dims[1], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(conv_dims[::-1], K.dtype(feats))

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    # TODO: It works with +1, don't know why.
    box_xy = (box_xy + conv_index + 1) / conv_dims
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(box_wh))

    return box_xy, box_wh, box_confidence, box_class_probs 
Example 68
Project: unet-pspnet-unet2d-segment-implement   Author: qianmingduowan   File: metrics.py    GNU General Public License v3.0
def dice_coefficient(y_true,y_pred,smooth=1.):
    y_true_f=K.flatten(y_true)
    y_pred_f=K.flatten(y_pred)

    intersection= K.sum(y_true_f*y_pred_f)

    return (2. * intersection + smooth)/(K.sum(y_true_f)+K.sum(y_pred_f)+smooth) 
Example 69
Project: WhiteMatterHyperintensities   Author: pabloduque0   File: metrics.py    MIT License
def dice_coefficient(y_true, y_pred, smooth=0.1):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)

    intersection = K.sum(y_pred_f * y_true_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 70
Project: ComboLoss   Author: asgsaeid   File: combo_loss.py    MIT License
def Combo_loss(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    d = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    y_pred_f = K.clip(y_pred_f, e, 1.0 - e)
    out = - (ce_w * y_true_f * K.log(y_pred_f) + (1 - ce_w) * (1.0 - y_true_f) * K.log(1.0 - y_pred_f))
    weighted_ce = K.mean(out, axis=-1)
    combo = (ce_d_w * weighted_ce) - ((1 - ce_d_w) * (1 - d))
    return combo 
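The names smooth, e, ce_w and ce_d_w above come from module scope in the original project; plausible placeholder values (an assumption, not the repo's exact settings) would be:

smooth = 1.        # Dice smoothing term
e = K.epsilon()    # clip bound, avoids log(0)
ce_w = 0.5         # weight of the positive vs. negative class in the CE term
ce_d_w = 0.5       # weight of cross-entropy vs. Dice in the combined loss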
Example 71
Project: DiscriminativeActiveLearning   Author: dsgissin   File: query_methods.py    MIT License
def compute_egls(self, unlabeled, n_classes):

        # create a function for computing the gradient length:
        self.input_placeholder = K.placeholder(self.model.get_input_shape_at(0))
        self.output_placeholder = K.placeholder(self.model.get_output_shape_at(0))
        predict = self.model.call(self.input_placeholder)
        loss = K.mean(categorical_crossentropy(self.output_placeholder, predict))
        weights = [tensor for tensor in self.model.trainable_weights]
        gradient = self.model.optimizer.get_gradients(loss, weights)
        gradient_flat = [K.flatten(x) for x in gradient]
        gradient_flat = K.concatenate(gradient_flat)
        gradient_length = K.sum(K.square(gradient_flat))
        self.get_gradient_length = K.function([K.learning_phase(), self.input_placeholder, self.output_placeholder], [gradient_length])

        # calculate the expected gradient length of the unlabeled set (iteratively, to avoid memory issues):
        unlabeled_predictions = self.model.predict(unlabeled)
        egls = np.zeros(unlabeled.shape[0])
        for i in range(n_classes):
            calculated_so_far = 0
            while calculated_so_far < unlabeled_predictions.shape[0]:
                if calculated_so_far + 100 >= unlabeled_predictions.shape[0]:
                    next = unlabeled_predictions.shape[0] - calculated_so_far
                else:
                    next = 100

                labels = np.zeros((next, n_classes))
                labels[:,i] = 1
                grads = self.get_gradient_length([0, unlabeled[calculated_so_far:calculated_so_far+next, :], labels])[0]
                grads *= unlabeled_predictions[calculated_so_far:calculated_so_far+next, i]
                egls[calculated_so_far:calculated_so_far+next] += grads

                calculated_so_far += next

        return egls 
Example 72
Project: Ultrasound-Nerve-Segmentation   Author: ardamavi   File: get_model.py    Apache License 2.0
def dice_coef(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 73
Project: Urban3d   Author: topcoderinc   File: a02_zf_unet_model.py    MIT License
def dice_coef(y_true, y_pred):
    from keras import backend as K
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1.0) 
Example 74
Project: Urban3d   Author: topcoderinc   File: a02_zf_unet_model.py    MIT License
def jacard_coef(y_true, y_pred):
    from keras import backend as K
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) - intersection + 1.0) 
Example 75
Project: ISML_auto_voter   Author: Hecate2   File: model.py    MIT License
def Classifier(self, name='Classifier'):

        img_ch = self.img_ch  # image channels
        class_num = self.class_num  # output channel
        img_height, img_width = self.img_size[0], self.img_size[1]
        padding = 'same'  #'valid'

        inputs = Input((img_height, img_width, img_ch))

        conv1 = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding=padding)(inputs)
        conv1 = BatchNormalization(scale=False, axis=3)(conv1)
        conv1 = Activation('relu')(conv1)
        conv1 = Conv2D(32, kernel_size=(3, 3), strides=(2, 2), padding=padding)(conv1)
        conv1 = BatchNormalization(scale=False, axis=3)(conv1)
        conv1 = Activation('relu')(conv1)

        conv2 = Conv2D(64, kernel_size=(3, 3), strides=(2, 2), padding=padding)(conv1)
        conv2 = BatchNormalization(scale=False, axis=3)(conv2)
        conv2 = Activation('relu')(conv2)

        flat = Flatten()(conv2)
        fc1 = Dense(512, activation='relu')(flat)
        fc1 = Dropout(0.2)(fc1)

        outputs = Dense(self.class_num, activation='sigmoid')(fc1)

        model = Model(inputs, outputs, name=name)

        def train_loss(y_true, y_pred):
            Loss = objectives.binary_crossentropy(K.flatten(y_true), K.flatten(y_pred))

            return Loss


        model.compile(optimizer=Adam(lr=self.init_lr), loss=train_loss, metrics=['accuracy'])

        return model 
Example 76
Project: MICCAI2018---Complementary_Segmentation_Network-Raw-Code   Author: raun1   File: comp_net_raw.py    MIT License
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

# Negative dice to obtain region of interest (ROI-Branch loss) 
Example 77
Project: deep-learning-explorer   Author: waspinator   File: metrics.py    Apache License 2.0
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    nb_classes = KB.int_shape(y_pred)[-1]
    y_pred = KB.reshape(y_pred, (-1, nb_classes))

    y_true = KB.one_hot(tf.to_int32(KB.flatten(y_true)),
                        nb_classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[-1], tf.bool)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    return KB.sum(tf.to_float(legal_labels & KB.equal(KB.argmax(y_true, axis=-1), KB.argmax(y_pred, axis=-1)))) / KB.sum(tf.to_float(legal_labels)) 
Example 78
Project: dr.b   Author: taoddiao   File: LUNA_train_unet.py    Apache License 2.0
def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 79
Project: Variational-AutoEncoder-For-Novelty-Detection   Author: LordAlucard90   File: model.py    GNU General Public License v3.0
def _get_standard(self):
        input_img = Input(shape=(28, 28, 1))
        encoder = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)

        encoder_branch_left = MaxPooling2D((2, 2), padding='same')(encoder)
        encoder_branch_left = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder_branch_left)
        encoder_branch_left = MaxPooling2D((2, 2), padding='same')(encoder_branch_left)
        encoder_branch_left = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder_branch_left)
        encoder_branch_left = MaxPooling2D((2, 2), padding='same')(encoder_branch_left)

        encoder_branch_right = AveragePooling2D((2, 2), padding='same')(encoder)
        encoder_branch_right = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder_branch_right)
        encoder_branch_right = AveragePooling2D((2, 2), padding='same')(encoder_branch_right)
        encoder_branch_right = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder_branch_right)
        encoder_branch_right = AveragePooling2D((2, 2), padding='same')(encoder_branch_right)

        encoder_out = Flatten()(Concatenate()([encoder_branch_left, encoder_branch_right]))
        encoder_out = Dense(128, activation='relu')(encoder_out)

        if self.vae:
            mean = Dense(self.hidden, name='mean')(encoder_out)
            log_var = Dense(self.hidden, name='log_var')(encoder_out)
            mirror = Lambda(self._sampling)([mean, log_var])
        else:
            mirror = Dense(self.hidden, name='log_var')(encoder_out)

        decoder = Dense(128, activation='relu')(mirror)
        decoder = Dense(16 * 4 * 4, activation='relu')(decoder)
        decoder = Reshape((4, 4, 16))(decoder)

        decoder_branch_left = Conv2D(16, (3, 3), activation='relu', padding='same')(decoder)
        decoder_branch_left = UpSampling2D((2, 2))(decoder_branch_left)
        decoder_branch_left = Conv2D(16, (3, 3), activation='relu', padding='same')(decoder_branch_left)
        decoder_branch_left = UpSampling2D((2, 2))(decoder_branch_left)
        decoder_branch_left = Conv2D(16, (3, 3), activation='relu')(decoder_branch_left)
        decoder_branch_left = UpSampling2D((2, 2))(decoder_branch_left)
        decoder_branch_left = Conv2D(16, (3, 3), activation='relu', padding='same')(decoder_branch_left)

        decoder_branch_right = Conv2DTranspose(16, (3, 3), activation='relu')(decoder)
        decoder_branch_right = UpSampling2D((2, 2))(decoder_branch_right)
        decoder_branch_right = Conv2DTranspose(16, (3, 3), activation='relu')(decoder_branch_right)
        decoder_branch_right = UpSampling2D((2, 2))(decoder_branch_right)
        decoder_branch_right = Conv2DTranspose(16, (3, 3), activation='relu', padding='same')(decoder_branch_right)

        out = Concatenate()([decoder_branch_left, decoder_branch_right])
        out = Conv2D(16, (3, 3), activation='relu', padding='same')(out)
        out_img = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(out)

        self.model = Model(input_img, out_img)

        if self.vae:
            def my_loss(y_true, y_pred):
                xent = 28 * 28 * binary_crossentropy(K.flatten(y_true), K.flatten(y_pred))
                kl = - 0.5 * K.sum(1 + log_var - K.square(mean) - K.exp(log_var), axis=-1)
                return K.mean(xent + kl)
        else:
            def my_loss(y_true, y_pred):
                return 28 * 28 * binary_crossentropy(K.flatten(y_true), K.flatten(y_pred))

        self.model.compile(optimizer='rmsprop', loss=my_loss) 
Example 80
Project: Dstl-Satellite-Imagery-Feature-Detection   Author: DeepVoltaire   File: training.py    MIT License
def calc_jacc(model, logger, dims, visual_name, img, msk, use_sample_weights, N_Cls=10):
    """
    Finds the optimal thresholds for the metric to be maximized: Average Jaccard Index over areas with positive mask
    over all 10 classes.
    """
    ind_scores, trs, trs_bin = [], [], []

    prd = model.predict(img, batch_size=16)
    if use_sample_weights:
        # Output is (None, 160*160, 10), reshape to (None, 10, 160, 160)
        prd = np.rollaxis(prd, 2, 1)
        prd = prd.reshape(prd.shape[0], N_Cls, 160, 160)
        msk = np.rollaxis(msk, 2, 1)
        msk = msk.reshape(msk.shape[0], N_Cls, 160, 160)
    print(prd.shape, msk.shape)
    print(np.sum(prd))
    print(np.max(prd), np.min(prd), np.mean(prd))

    def compute_jaccard(threshold, t_prd, t_msk):
        pred_binary_ys = t_prd >= threshold
        tp, fp, fn = ((pred_binary_ys & t_msk).sum(),
                      (pred_binary_ys & ~t_msk).sum(),
                      (~pred_binary_ys & t_msk).sum())
        jaccard = tp / (tp + fp + fn)
        return jaccard

    for i in range(N_Cls):
        t_msk = msk[:, i, :, :]
        t_prd = prd[:, i, :, :]
        t_msk = t_msk.flatten()
        t_msk = t_msk == 1
        t_prd = t_prd.flatten()

        best_jac, best_thresh = 0, 0
        for k in [0, .01, .02, .03, .04, .05, .075, .1, .125, .15, .175, .2, .225, .25, .275, .3, .325, .35, .375, .4, .45,
              .5, .55, .6, .65, .7, .75, .8, .85, .9, .95]:
            tr = k
            jk = compute_jaccard(tr, t_prd, t_msk)
            if jk > best_jac:
                best_jac = jk
                best_thresh = tr
        print("{}: Max Jaccard von {:.4f} bei >= {:.4f}".format(class_list[i], best_jac, best_thresh))
        logger.info("{}: Max Jaccard von {:.4f} bei >= {:.4f}".format(class_list[i], best_jac, best_thresh))
        # Liste von average Jaccard Scores über alle Validation Crops
        ind_scores.append(best_jac)
        # Liste der besten Thresholds
        trs.append(best_thresh)
    avg_score = sum(ind_scores) / 10.0
    np.save("../data/thresholds_unet_{}_{:.4f}".format(visual_name, avg_score), trs)
    print("Average Jaccard: {:.4f}".format(avg_score))
    logger.info("Average Jaccard: {:.4f}".format(avg_score))
    return avg_score, trs, ind_scores