Python keras.backend.std() Examples

The following are code examples showing how to use keras.backend.std(). They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't.

Example 1
Project: 360_aware_saliency   Author: MikhailStartsev   File: models.py    GNU General Public License v3.0 7 votes vote down vote up
def nss(y_true, y_pred):
    """Negated Normalized Scanpath Saliency between a predicted saliency
    map (`y_pred`) and a ground-truth fixation map (`y_true`).

    `y_pred` is divided by its per-map maximum, then standardized to zero
    mean / unit std over each flattened map; the score is the y_true-weighted
    mean of the standardized map, negated so it can be minimized as a loss.
    NOTE(review): `shape_r_out` / `shape_c_out` are module-level output map
    dimensions assumed to be defined alongside this function — confirm.
    """
    # Per-map maximum, broadcast back to the full (rows, cols) map via
    # expand_dims + repeat_elements so it can divide y_pred elementwise.
    max_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    y_pred /= max_y_pred
    y_pred_flatten = K.batch_flatten(y_pred)

    # Per-map mean, broadcast to the full map shape.
    y_mean = K.mean(y_pred_flatten, axis=-1)
    y_mean = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_mean)), 
                                                               shape_r_out, axis=-1)), shape_c_out, axis=-1)

    # Per-map standard deviation, broadcast likewise.
    y_std = K.std(y_pred_flatten, axis=-1)
    y_std = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_std)), 
                                                              shape_r_out, axis=-1)), shape_c_out, axis=-1)

    # Standardize; K.epsilon() guards against a zero standard deviation.
    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    # y_true-weighted mean of the standardized map, negated for use as a loss.
    return -(K.sum(K.sum(y_true * y_pred, axis=2), axis=2) / K.sum(K.sum(y_true, axis=2), axis=2))


# Gaussian priors initialization 
Example 2
Project: keras-utility-layer-collection   Author: zimmerrol   File: layer_normalization.py    MIT License 6 votes vote down vote up
def call(self, x):
        """Layer normalization over the last axis of `x`.

        Computes per-sample mean/std with the last axis reduced, broadcasts
        them back to `x`'s shape (rank 2 and rank 3 inputs are supported),
        and rescales with the layer's learned gain `self._g` and bias
        `self._b`.
        """
        mean = K.mean(x, axis=-1)
        std = K.std(x, axis=-1)

        if len(x.shape) == 3:
            mean = K.permute_dimensions(
                K.repeat(mean, x.shape.as_list()[-1]),
                [0,2,1]
            )
            std = K.permute_dimensions(
                K.repeat(std, x.shape.as_list()[-1]),
                [0,2,1]
            )

        elif len(x.shape) == 2:
            mean = K.reshape(
                K.repeat_elements(mean, x.shape.as_list()[-1], 0),
                (-1, x.shape.as_list()[-1])
            )
            # BUG FIX: the original repeated `mean` here, so `std` was
            # actually the broadcast mean and the 2-D path divided by
            # (mean + epsilon) instead of the standard deviation.
            std = K.reshape(
                K.repeat_elements(std, x.shape.as_list()[-1], 0),
                (-1, x.shape.as_list()[-1])
            )

        return self._g * (x - mean) / (std + self._epsilon) + self._b
Example 3
Project: Coloring-greyscale-images   Author: emilwallner   File: instance_normalization.py    MIT License 6 votes vote down vote up
def call(self, inputs, training=None):
        """Instance normalization forward pass.

        Standardizes `inputs` over every axis except the batch axis (0)
        and the feature axis `self.axis`, then optionally applies the
        learned scale (`gamma`) and offset (`beta`), broadcast along
        `self.axis`.  `training` is accepted for Keras API compatibility
        but unused here — statistics always come from the current batch.
        """
        input_shape = K.int_shape(inputs)
        reduction_axes = list(range(0, len(input_shape)))

        # Keep the feature axis out of the reduction (normalize per channel).
        if (self.axis is not None):
            del reduction_axes[self.axis]

        # Keep the batch axis out of the reduction (normalize per sample).
        del reduction_axes[0]

        mean = K.mean(inputs, reduction_axes, keepdims=True)
        # epsilon avoids division by zero for constant inputs
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev

        # gamma/beta are stored flat; reshape so they broadcast along
        # `self.axis` only.
        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed
Example 4
Project: DeepFakeTutorial   Author: MITeoRIte   File: instance_normalization.py    GNU General Public License v3.0 6 votes vote down vote up
def call(self, inputs, training=None):
        """Instance normalization: standardize over all axes except the
        batch axis and `self.axis`, then optionally rescale by gamma and
        shift by beta (both broadcast along `self.axis`)."""
        shape = K.int_shape(inputs)
        rank = len(shape)

        axes = list(range(rank))
        if self.axis is not None:
            del axes[self.axis]
        del axes[0]  # never reduce across the batch

        mu = K.mean(inputs, axes, keepdims=True)
        sigma = K.std(inputs, axes, keepdims=True) + self.epsilon
        out = (inputs - mu) / sigma

        # Broadcast shape for gamma/beta: 1 everywhere except `self.axis`.
        bcast = [1] * rank
        if self.axis is not None:
            bcast[self.axis] = shape[self.axis]

        if self.scale:
            out = out * K.reshape(self.gamma, bcast)
        if self.center:
            out = out + K.reshape(self.beta, bcast)
        return out
Example 5
Project: DeepFakeTutorial   Author: MITeoRIte   File: instance_normalization.py    GNU General Public License v3.0 6 votes vote down vote up
def call(self, inputs, training=None):
        """Apply instance normalization to `inputs`.

        The reduction covers every axis except the batch axis (0) and the
        configured feature axis `self.axis`; the result is optionally
        scaled by `gamma` and shifted by `beta`.  The `training` flag is
        part of the Keras layer API but is not used by this layer.
        """
        input_shape = K.int_shape(inputs)
        reduction_axes = list(range(0, len(input_shape)))

        # Exclude the feature axis from the statistics.
        if (self.axis is not None):
            del reduction_axes[self.axis]

        # Exclude the batch axis: each sample is normalized independently.
        del reduction_axes[0]

        mean = K.mean(inputs, reduction_axes, keepdims=True)
        # epsilon keeps the divisor strictly positive
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev

        # Reshape gamma/beta so they broadcast along `self.axis` only.
        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed
Example 6
Project: DeepFakeTutorial   Author: MITeoRIte   File: instance_normalization.py    GNU General Public License v3.0 6 votes vote down vote up
def call(self, inputs, training=None):
        """Per-instance normalization with optional learned scale/offset.

        Reduces over every axis except batch (0) and `self.axis`, then
        applies gamma/beta broadcast along `self.axis` when enabled.
        """
        static_shape = K.int_shape(inputs)
        ndim = len(static_shape)

        reduce_over = list(range(ndim))
        if self.axis is not None:
            del reduce_over[self.axis]
        del reduce_over[0]  # batch axis stays out of the statistics

        center_val = K.mean(inputs, reduce_over, keepdims=True)
        spread = K.std(inputs, reduce_over, keepdims=True) + self.epsilon
        result = (inputs - center_val) / spread

        param_shape = [1] * ndim
        if self.axis is not None:
            param_shape[self.axis] = static_shape[self.axis]

        if self.scale:
            result = result * K.reshape(self.gamma, param_shape)
        if self.center:
            result = result + K.reshape(self.beta, param_shape)
        return result
Example 7
Project: rs-data-fusion   Author: theonegis   File: metrics.py    Apache License 2.0 6 votes vote down vote up
def ssim(y_true, y_pred, data_range=10000):
    """Structural similarity (SSIM) between two image tensors.

    Uses global (whole-tensor) statistics rather than local windows.

    :param y_true: reference image tensor.
    :param y_pred: predicted image tensor.
    :param data_range: dynamic range of the pixel values (default 10000).
    """
    # Small stabilizing constants from the standard SSIM formulation.
    K1 = 0.01
    K2 = 0.03

    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)

    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = cov(y_true, y_pred)  # covariance via a project-local helper

    L = data_range
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2

    # BUG FIX: the original wrote (2 * sig_xy * C2) instead of
    # (2 * sig_xy + C2) and, due to operator precedence, multiplied by the
    # second denominator factor instead of dividing by it.  Standard SSIM:
    #   ((2*mu_x*mu_y + C1) * (2*sig_xy + C2)) /
    #   ((mu_x^2 + mu_y^2 + C1) * (sig_x^2 + sig_y^2 + C2))
    return ((2 * mu_x * mu_y + C1) * (2 * sig_xy + C2) /
            ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2)))
Example 8
Project: keras-contrib   Author: keras-team   File: instancenormalization.py    MIT License 6 votes vote down vote up
def call(self, inputs, training=None):
        """Instance-normalize `inputs`.

        Statistics are computed over all axes except the batch axis (0)
        and the feature axis `self.axis`; `gamma`/`beta` (when `scale` /
        `center` are enabled) are broadcast along `self.axis`.  `training`
        is unused — the layer always normalizes with batch statistics.
        """
        input_shape = K.int_shape(inputs)
        reduction_axes = list(range(0, len(input_shape)))

        # Drop the feature axis from the reduction.
        if self.axis is not None:
            del reduction_axes[self.axis]

        # Drop the batch axis: normalization is per sample.
        del reduction_axes[0]

        mean = K.mean(inputs, reduction_axes, keepdims=True)
        # epsilon prevents division by zero for constant inputs
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev

        # Shape that broadcasts the flat gamma/beta along `self.axis` only.
        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed
Example 9
Project: keras-examples   Author: aidiary   File: dream1.py    MIT License 6 votes vote down vote up
def render_naive(layer_name, filter_index, img0=img_noise, iter_n=20, step=1.0):
    """Gradient-ascent visualization of one filter of a named layer.

    Starting from `img0`, repeatedly adds the std-normalized gradient of
    the filter's mean activation to the image for `iter_n` steps of size
    `step`, printing the activation value each iteration.  Relies on the
    module-level `layer_dict` and `input_tensor` (assumed defined in this
    script).
    """
    if layer_name not in layer_dict:
        print("ERROR: invalid layer name: %s" % layer_name)
        return

    layer = layer_dict[layer_name]

    print("{} < {}".format(filter_index, layer.output_shape[-1]))

    # Objective: mean activation of the chosen filter (channels-last indexing).
    activation = K.mean(layer.output[:, :, :, filter_index])
    grads = K.gradients(activation, input_tensor)[0]

    # Networks containing Dropout or BatchNorm require K.learning_phase()
    iterate = K.function([input_tensor, K.learning_phase()], [activation, grads])

    img = img0.copy()
    for i in range(iter_n):
        # Feed 0 for the learning phase since we are not training
        activation_value, grads_value = iterate([img, 0])
        # Normalize the gradient step by its std; 1e-8 avoids division by zero.
        grads_value /= K.std(grads_value) + 1e-8
        img += grads_value * step
        print(i, activation_value)
Example 10
Project: ASD_classification   Author: MikhailStartsev   File: models.py    GNU General Public License v3.0 6 votes vote down vote up
def nss(y_true, y_pred):
    """Negated Normalized Scanpath Saliency loss.

    `y_pred` is max-normalized per map, standardized to zero mean / unit
    std over each flattened map, then averaged with `y_true` as weights.
    The result is negated so the metric can be minimized.  `shape_r_out`
    and `shape_c_out` are the module-level output map dimensions.
    """
    # Per-map maximum, broadcast back to the full map shape step by step.
    peak = K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2))
    peak = K.repeat_elements(peak, shape_r_out, axis=-1)
    peak = K.repeat_elements(K.expand_dims(peak), shape_c_out, axis=-1)
    y_pred /= peak

    flat = K.batch_flatten(y_pred)

    # Per-map mean, broadcast to the full map shape.
    mu = K.expand_dims(K.expand_dims(K.mean(flat, axis=-1)))
    mu = K.repeat_elements(mu, shape_r_out, axis=-1)
    mu = K.repeat_elements(K.expand_dims(mu), shape_c_out, axis=-1)

    # Per-map standard deviation, broadcast likewise.
    sigma = K.expand_dims(K.expand_dims(K.std(flat, axis=-1)))
    sigma = K.repeat_elements(sigma, shape_r_out, axis=-1)
    sigma = K.repeat_elements(K.expand_dims(sigma), shape_c_out, axis=-1)

    # Standardize; K.epsilon() keeps the divisor positive.
    y_pred = (y_pred - mu) / (sigma + K.epsilon())

    weighted = K.sum(K.sum(y_true * y_pred, axis=2), axis=2)
    total = K.sum(K.sum(y_true, axis=2), axis=2)
    return -(weighted / total)


# Gaussian priors initialization 
Example 11
Project: faceswap   Author: deepfakes   File: losses.py    GNU General Public License v3.0 6 votes vote down vote up
def gmsd_loss(y_true, y_pred):
    """
    Gradient Magnitude Similarity Deviation loss.

    Improved image quality metric over MS-SSIM with easier calc
    http://www4.comp.polyu.edu.hk/~cslzhang/IQA/GMSD/GMSD.htm
    https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf
    """
    eps = 0.0025  # stabilizing constant for the similarity ratio
    edges_true = scharr_edges(y_true, True)
    edges_pred = scharr_edges(y_pred, True)

    # Gradient magnitude similarity map.
    numerator = 2.0 * edges_true * edges_pred
    denominator = K.square(edges_true) + K.square(edges_pred)
    similarity = (numerator + eps) / (denominator + eps)

    # Deviation (std) of the similarity map per sample.
    deviation = K.std(similarity, axis=(1, 2, 3), keepdims=True)
    return K.squeeze(deviation, axis=-1)
Example 12
Project: faceswap   Author: deepfakes   File: normalization.py    GNU General Public License v3.0 6 votes vote down vote up
def call(self, inputs, training=None):
        """Instance normalization forward pass.

        Standardizes each sample over all axes except batch and
        `self.axis`, then applies the optional learned gamma (scale) and
        beta (offset), broadcast along `self.axis`.
        """
        in_shape = K.int_shape(inputs)
        n_axes = len(in_shape)

        stat_axes = list(range(n_axes))
        if self.axis is not None:
            del stat_axes[self.axis]
        del stat_axes[0]  # exclude the batch axis from the reduction

        avg = K.mean(inputs, stat_axes, keepdims=True)
        dev = K.std(inputs, stat_axes, keepdims=True) + self.epsilon
        normalized = (inputs - avg) / dev

        # gamma/beta broadcast shape: singleton everywhere but `self.axis`.
        weight_shape = [1] * n_axes
        if self.axis is not None:
            weight_shape[self.axis] = in_shape[self.axis]

        if self.scale:
            normalized = normalized * K.reshape(self.gamma, weight_shape)
        if self.center:
            normalized = normalized + K.reshape(self.beta, weight_shape)
        return normalized
Example 13
Project: StyleGAN-Keras   Author: manicman1999   File: AdaIN.py    MIT License 6 votes vote down vote up
def call(self, inputs, training=None):
        """Adaptive instance normalization (AdaIN).

        Standardizes inputs[0] over all axes except the batch axis and
        `self.axis`, then scales by inputs[2] (gamma) and shifts by
        inputs[1] (beta).
        """
        content = inputs[0]
        style_beta = inputs[1]
        style_gamma = inputs[2]

        axes = list(range(len(K.int_shape(content))))
        if self.axis is not None:
            del axes[self.axis]
        del axes[0]  # keep the batch axis out of the statistics

        mu = K.mean(content, axes, keepdims=True)
        sigma = K.std(content, axes, keepdims=True) + self.epsilon
        standardized = (content - mu) / sigma

        return standardized * style_gamma + style_beta
Example 14
Project: srcnn   Author: qobilidop   File: metrics.py    MIT License 6 votes vote down vote up
def ssim(y_true, y_pred):
    """Structural similarity (SSIM) score from global tensor statistics.

    Returns a scalar SSIM-style similarity between `y_pred` and `y_true`
    computed from whole-tensor means and standard deviations (no local
    windows).
    """
    ## K1, K2 are two constants, much smaller than 1
    K1 = 0.04
    K2 = 0.06

    ## mean, std, correlation
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)

    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    # NOTE(review): canonical SSIM uses the covariance of the two images
    # here; (sig_x * sig_y) ** 0.5 is only a stand-in — confirm intent.
    sig_xy = (sig_x * sig_y) ** 0.5

    ## L, number of pixels, C1, C2, two constants
    L =  33
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2

    # BUG FIX: the second numerator factor was (2 * sig_xy * C2); the SSIM
    # formula requires (2 * sig_xy + C2).
    ssim = (2 * mu_x * mu_y + C1) * (2 * sig_xy + C2) * 1.0 / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
    return ssim
Example 15
Project: kutils   Author: subpic   File: tensor_ops.py    MIT License 5 votes vote down vote up
def plcc_tf(x, y):
    """Pearson linear correlation coefficient (PLCC) between tensors x and y."""
    dx = x - K.mean(x)
    dy = y - K.mean(y)
    # K.epsilon() guards against a zero product of standard deviations.
    denom = K.std(x) * K.std(y) + K.epsilon()
    return K.mean(dx * dy) / denom
Example 16
Project: darkchem   Author: pnnl   File: network.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _build_encoder(self, x):
        """Build the convolutional VAE encoder on input tensor `x`.

        Returns a tuple ``(vae_loss, z_mean, z_mean_variational)``:
        the Keras loss closure (cross-entropy + KL term), the
        deterministic latent mean, and the reparameterized (noisy)
        latent sample.
        """
        # build filters: stacked Conv1D layers per (filters, kernel) pair
        for i, (f, k) in enumerate(zip(self.filters, self.kernels)):
            if i < 1:
                # first conv layer reads from the input tensor
                h = Conv1D(f, k, activation='relu', padding='same')(x)
            else:
                h = Conv1D(f, k, activation='relu', padding='same')(h)
        h = Flatten()(h)

        # latent space sampling (reparameterization trick)
        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, self.latent_dim), mean=0., stddev=self.epsilon_std)
            return z_mean_ + K.exp(z_log_var_) * epsilon

        # latent dim: linear projections to mean and log-variance
        z_mean = Dense(self.latent_dim, activation='linear')(h)
        z_log_var = Dense(self.latent_dim, activation='linear')(h)

        # custom loss term (closes over z_mean / z_log_var above)
        def vae_loss(y_true, y_pred):
            # reconstruction term
            xent_loss = K.mean(objectives.categorical_crossentropy(y_true, y_pred), axis=-1)
            # KL-style regularizer.  NOTE(review): this uses z_log_var and
            # K.exp(z_log_var) directly rather than the canonical
            # 0.5 * (1 + log var - mu^2 - var) form — confirm intended.
            kl_loss = K.mean(-z_log_var + 0.5 * K.square(z_mean) + K.exp(z_log_var) - 1, axis=-1)

            # # explicit kl
            # mu_loss = K.mean(0.5 * K.square(z_mean))
            # var = K.std(z_mean)
            # var_loss = -K.log(var) + var - 1

            # # decay kl loss by learning rate
            # lr, lr0 = self._get_learning_rate()
            # kl_loss *= (lr / lr0)

            return xent_loss + kl_loss  # + mu_loss + var_loss

        # add noise: sample the latent vector via the reparameterization trick
        z_mean_variational = Lambda(sampling, output_shape=(self.latent_dim,))([z_mean, z_log_var])

        return (vae_loss, z_mean, z_mean_variational)
Example 17
Project: dts   Author: albertogaspar   File: losses.py    MIT License 5 votes vote down vote up
def nrmse_b(y_true, y_pred):
    """RMSE normalized by the standard deviation of y_true.

    If this value is larger than 1, a random time series with the same
    mean and standard deviation as Y would give a better model.
    """
    rmse = K.sqrt(K.mean(K.sum(K.square(y_true - y_pred))))
    return rmse / K.std(K.identity(y_true))
Example 18
Project: graph-representation-learning   Author: vuptran   File: ae.py    MIT License 5 votes vote down vote up
def mvn(tensor):
    """Normalize each row of `tensor` to zero mean and unit variance."""
    eps = 1e-6  # keeps the divisor positive for constant rows
    row_mean = K.mean(tensor, axis=1, keepdims=True)
    row_std = K.std(tensor, axis=1, keepdims=True)
    return (tensor - row_mean) / (row_std + eps)
Example 19
Project: faceswap   Author: deepfakes   File: layers.py    GNU General Public License v3.0 5 votes vote down vote up
def call(self, inputs):
        """Global standard-deviation pooling over the spatial axes.

        The spatial axes are [1, 2] for channels-last data and [2, 3]
        otherwise.
        """
        spatial_axes = [1, 2] if self.data_format == 'channels_last' else [2, 3]
        return K.std(inputs, axis=spatial_axes)
Example 20
Project: faceswap   Author: deepfakes   File: losses.py    GNU General Public License v3.0 5 votes vote down vote up
def __call__(self, y_true, y_pred):
        """DSSIM loss: mean((1 - SSIM) / 2) over extracted image patches.

        Patches of size (kernel_size x kernel_size) are extracted from
        both tensors; SSIM is computed per patch from their means,
        variances and covariance, then averaged into a scalar loss.
        """
        # There are additional parameters for this function
        # Note: some of the 'modes' for edge behavior do not yet have a
        # gradient definition in the Theano tree and cannot be used for
        # learning

        kernel = [self.kernel_size, self.kernel_size]
        # Reshape both tensors to the prediction's static shape so patch
        # extraction sees identical dimensions.
        y_true = K.reshape(y_true, [-1] + list(self.__int_shape(y_pred)[1:]))
        y_pred = K.reshape(y_pred, [-1] + list(self.__int_shape(y_pred)[1:]))

        patches_pred = self.extract_image_patches(y_pred,
                                                  kernel,
                                                  kernel,
                                                  'valid',
                                                  self.dim_ordering)
        patches_true = self.extract_image_patches(y_true,
                                                  kernel,
                                                  kernel,
                                                  'valid',
                                                  self.dim_ordering)

        # Get mean
        u_true = K.mean(patches_true, axis=-1)
        u_pred = K.mean(patches_pred, axis=-1)
        # Get variance
        var_true = K.var(patches_true, axis=-1)
        var_pred = K.var(patches_pred, axis=-1)
        # Get covariance: E[xy] - E[x]E[y]
        covar_true_pred = K.mean(
            patches_true * patches_pred, axis=-1) - u_true * u_pred

        # Per-patch SSIM numerator and denominator (standard formulation).
        ssim = (2 * u_true * u_pred + self.c_1) * (
            2 * covar_true_pred + self.c_2)
        denom = (K.square(u_true) + K.square(u_pred) + self.c_1) * (
            var_pred + var_true + self.c_2)
        ssim /= denom  # no need for clipping, c_1 + c_2 make the denom non-zero
        return K.mean((1.0 - ssim) / 2.0)
Example 21
Project: StyleGAN-Keras   Author: manicman1999   File: AdaIN.py    MIT License 5 votes vote down vote up
def call(self, inputs, training=None):
        """AdaIN over fixed axes [0, 1, 2]: standardize inputs[0], then
        apply the style statistics gamma (inputs[2]) and beta (inputs[1])."""
        content = inputs[0]
        style_beta = inputs[1]
        style_gamma = inputs[2]

        reduce_axes = [0, 1, 2]
        mu = K.mean(content, reduce_axes, keepdims=True)
        sigma = K.std(content, reduce_axes, keepdims=True) + self.epsilon
        standardized = (content - mu) / sigma

        return standardized * style_gamma + style_beta
Example 22
Project: Graph-GEN   Author: RuudFirsa   File: layernormalization.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def call(self, x):
        """Layer-normalize x over its last axis, with learned gamma/beta."""
        mu = K.mean(x, axis=-1, keepdims=True)
        sigma = K.std(x, axis=-1, keepdims=True)
        return self.gamma * (x - mu) / (sigma + self.eps) + self.beta
Example 23
Project: CIKM-AnalytiCup-2018   Author: zake7749   File: layers.py    Apache License 2.0 5 votes vote down vote up
def call(self, x):
        """Apply layer normalization along the final axis of x."""
        center = K.mean(x, axis=-1, keepdims=True)
        spread = K.std(x, axis=-1, keepdims=True)
        # self.eps keeps the divisor strictly positive.
        return self.gamma * (x - center) / (spread + self.eps) + self.beta
Example 24
Project: GewitterGefahr   Author: thunderhoser   File: saliency_maps.py    MIT License 4 votes vote down vote up
def _do_saliency_calculations(
        model_object, loss_tensor, list_of_input_matrices):
    """Does saliency calculations.

    T = number of input tensors to the model
    E = number of examples (storm objects)

    :param model_object: Instance of `keras.models.Model`.
    :param loss_tensor: Keras tensor defining the loss function.
    :param list_of_input_matrices: length-T list of numpy arrays, comprising one
        or more examples (storm objects).  list_of_input_matrices[i] must have
        the same dimensions as the [i]th input tensor to the model.
    :return: list_of_saliency_matrices: length-T list of numpy arrays,
        comprising the saliency map for each example.
        list_of_saliency_matrices[i] has the same dimensions as
        list_of_input_matrices[i] and defines the "saliency" of each value x,
        which is the gradient of the loss function with respect to x.
    """

    # A multi-input model exposes a list of input tensors; wrap the
    # single-input case so the code below is uniform.
    if isinstance(model_object.input, list):
        list_of_input_tensors = model_object.input
    else:
        list_of_input_tensors = [model_object.input]

    list_of_gradient_tensors = K.gradients(loss_tensor, list_of_input_tensors)
    num_input_tensors = len(list_of_input_tensors)

    # Normalize each gradient tensor by its standard deviation; K.epsilon()
    # guards against division by (near-)zero.
    for i in range(num_input_tensors):
        list_of_gradient_tensors[i] /= K.maximum(
            K.std(list_of_gradient_tensors[i]), K.epsilon()
        )

    inputs_to_gradients_function = K.function(
        list_of_input_tensors + [K.learning_phase()], list_of_gradient_tensors
    )

    # The trailing [0] feeds learning phase 0 (test mode) to the function.
    list_of_saliency_matrices = inputs_to_gradients_function(
        list_of_input_matrices + [0]
    )

    # Flip the sign of each gradient matrix to produce the saliency maps.
    for i in range(num_input_tensors):
        list_of_saliency_matrices[i] *= -1

    return list_of_saliency_matrices
Example 25
Project: faceswap   Author: deepfakes   File: losses.py    GNU General Public License v3.0 4 votes vote down vote up
def style_loss(gaussian_blur_radius=0.0, loss_weight=1.0, wnd_size=0, step_size=1):
    """ Style Loss from DeepFaceLab
        https://github.com/iperov/DeepFaceLab

        Returns a loss function comparing the per-channel spatial
        mean/std statistics of a target tensor against a style tensor,
        optionally after Gaussian blurring, and (when wnd_size > 0) over
        extracted image patches instead of whole images. """

    if gaussian_blur_radius > 0.0:
        gblur = gaussian_blur(gaussian_blur_radius)

    # Sum of squared differences between the spatial mean and std of
    # `content` and `style`, scaled by loss_weight / channel count.
    def std(content, style, loss_weight):
        content_nc = K.int_shape(content)[-1]
        style_nc = K.int_shape(style)[-1]
        if content_nc != style_nc:
            raise Exception("style_loss() content_nc != style_nc")

        # Reduce over the spatial axes, keeping batch and channels.
        axes = [1, 2]
        c_mean, c_var = K.mean(content, axis=axes, keepdims=True), K.var(content,
                                                                         axis=axes,
                                                                         keepdims=True)
        s_mean, s_var = K.mean(style, axis=axes, keepdims=True), K.var(style,
                                                                       axis=axes,
                                                                       keepdims=True)
        # 1e-5 stabilizes the sqrt for near-zero variance.
        c_std, s_std = K.sqrt(c_var + 1e-5), K.sqrt(s_var + 1e-5)

        mean_loss = K.sum(K.square(c_mean-s_mean))
        std_loss = K.sum(K.square(c_std-s_std))

        return (mean_loss + std_loss) * (loss_weight / float(content_nc))

    def func(target, style):
        # Whole-image statistics when no window size is given.
        if wnd_size == 0:
            if gaussian_blur_radius > 0.0:
                return std(gblur(target), gblur(style), loss_weight=loss_weight)
            return std(target, style, loss_weight=loss_weight)

        # currently unused
        if K.backend() == "plaidml.keras.backend":
            logger.warning("plaidML backend does not support style_loss. Disabling")
            return 0
        # Patch-based variant: compare statistics over extracted windows.
        shp = K.int_shape(target)[1]
        k = (shp - wnd_size) // step_size + 1
        if gaussian_blur_radius > 0.0:
            target, style = gblur(target), gblur(style)
        target = tf.image.extract_image_patches(target,
                                                [1, k, k, 1],
                                                [1, 1, 1, 1],
                                                [1, step_size, step_size, 1],
                                                "VALID")
        style = tf.image.extract_image_patches(style,
                                               [1, k, k, 1],
                                               [1, 1, 1, 1],
                                               [1, step_size, step_size, 1],
                                               "VALID")
        return std(target, style, loss_weight)

    return func
# <<< END: from DFL >>> #


# <<< START: from Shoanlu GAN >>> #