Python keras.backend.mean() Examples

The following are 30 code examples showing how to use keras.backend.mean(). They are extracted from open source projects; the project, author, source file, and license are listed above each example.

You may also want to check out all other available functions and classes of the module keras.backend.
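
Before diving in, here is a minimal standalone sketch (not taken from any of the projects below) of what keras.backend.mean() computes: it reduces a tensor along the given axis, or over all elements when no axis is given, with keepdims controlling whether the reduced axis is kept.

import numpy as np
from keras import backend as K

x = K.constant(np.array([[1., 2.], [3., 4.]]))
print(K.eval(K.mean(x)))                          # 2.5, mean over all elements
print(K.eval(K.mean(x, axis=0)))                  # [2. 3.], column means
print(K.eval(K.mean(x, axis=-1, keepdims=True)))  # [[1.5] [3.5]], axis kept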

Example 1
Project: deep-learning-note   Author: wdxtub   File: 7_visualize_filters.py    License: MIT License
def generate_pattern(layer_name, filter_index, size=150):
    # Filter visualization: run gradient ascent on the input image
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_index])
    grads = K.gradients(loss, model.input)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    iterate = K.function([model.input], [loss, grads])
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
    
    step = 1
    for _ in range(40):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
    
    img = input_img_data[0]
    return deprocess_image(img) 
Example 2
Project: keras-utility-layer-collection   Author: zimmerrol   File: layer_normalization.py    License: MIT License
def call(self, x):
        mean = K.mean(x, axis=-1)
        std = K.std(x, axis=-1)

        if len(x.shape) == 3:
            mean = K.permute_dimensions(
                K.repeat(mean, x.shape.as_list()[-1]),
                [0,2,1]
            )
            std = K.permute_dimensions(
                K.repeat(std, x.shape.as_list()[-1]),
                [0,2,1] 
            )
            
        elif len(x.shape) == 2:
            mean = K.reshape(
                K.repeat_elements(mean, x.shape.as_list()[-1], 0),
                (-1, x.shape.as_list()[-1])
            )
            std = K.reshape(
                K.repeat_elements(std, x.shape.as_list()[-1], 0),
                (-1, x.shape.as_list()[-1])
            )
        
        return self._g * (x - mean) / (std + self._epsilon) + self._b 
Example 3
Project: Keras-GAN   Author: eriklindernoren   File: wgan_gp.py    License: MIT License
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
        """
        Computes gradient penalty based on prediction and weighted real / fake samples
        """
        gradients = K.gradients(y_pred, averaged_samples)[0]
        # compute the euclidean norm by squaring ...
        gradients_sqr = K.square(gradients)
        #   ... summing over the rows ...
        gradients_sqr_sum = K.sum(gradients_sqr,
                                  axis=np.arange(1, len(gradients_sqr.shape)))
        #   ... and sqrt
        gradient_l2_norm = K.sqrt(gradients_sqr_sum)
        # compute lambda * (1 - ||grad||)^2 still for each single sample
        gradient_penalty = K.square(1 - gradient_l2_norm)
        # return the mean as loss over all the batch samples
        return K.mean(gradient_penalty) 
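A Keras loss function only receives (y_true, y_pred), so the extra averaged_samples argument has to be bound before compiling. A minimal sketch of the usual pattern, assuming a hypothetical interpolated_img tensor that randomly weights real and fake samples:

from functools import partial

# `interpolated_img` is an assumed tensor mixing real and fake samples
partial_gp_loss = partial(self.gradient_penalty_loss,
                          averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty'  # Keras losses need a __name__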
Example 4
Project: CalibrationNN   Author: Andres-Hernandez   File: neural_network.py    License: GNU General Public License v3.0
def test_helper(func, exponent, layer, lr, dropout_first, dropout_middle, 
                dropout_last, alpha, prefix='SWO GBP ', postfix='',
                with_comparison=False):
    print('Test %s, %s, %s, %s, %s, %s, %s' % (exponent, layer, lr, dropout_first,
                                               dropout_middle, dropout_last, alpha))
    model = func(exponent=exponent, lr=lr, layers=layer, 
                 dropout_first=dropout_first, dropout_middle=dropout_middle,
                 dropout_last=dropout_last, prefix=prefix, postfix=postfix, 
                 alpha=alpha)
    model.train(200)
    val_loss = np.mean(model.history['history']['val_loss'][-5:])
    
#    if with_comparison:
#        swo = inst.get_swaptiongen(inst.hullwhite_analytic)
#        _, values = swo.compare_history(model, dates=dates)
#        
    
    return (val_loss, layer, exponent, lr, dropout_first, dropout_middle, 
            dropout_last, alpha) 
Example 5
Project: speech_separation   Author: bill9800   File: model_loss.py    License: MIT License
def audio_discriminate_loss2(gamma=0.1,beta = 2*0.1,num_speaker=2):
    def loss_func(S_true,S_pred,gamma=gamma,beta=beta,num_speaker=num_speaker):
        sum_mtr = K.zeros_like(S_true[:,:,:,:,0])
        for i in range(num_speaker):
            sum_mtr += K.square(S_true[:,:,:,:,i]-S_pred[:,:,:,:,i])
            for j in range(num_speaker):
                if i != j:
                    sum_mtr -= gamma*(K.square(S_true[:,:,:,:,i]-S_pred[:,:,:,:,j]))

        for i in range(num_speaker):
            for j in range(i+1,num_speaker):
                #sum_mtr -= beta*K.square(S_pred[:,:,:,i]-S_pred[:,:,:,j])
                #sum_mtr += beta*K.square(S_true[:,:,:,:,i]-S_true[:,:,:,:,j])
                pass
        #sum = K.sum(K.maximum(K.flatten(sum_mtr),0))

        loss = K.mean(K.flatten(sum_mtr))

        return loss
    return loss_func 
Example 6
Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    License: MIT License
def prepareSamples(self, cnum = 0, num = 1000): #8x8 images, bottom row is constant

        try:
            os.mkdir("Results/Samples-c" + str(cnum))
        except FileExistsError:
            pass

        im = self.im.get_class(cnum)
        e = self.GAN.E.predict(im, batch_size = BATCH_SIZE * k_images)

        mean = np.mean(e, axis = 0)
        std = np.std(e, axis = 0)

        n = noise(num)
        nc = nClass(num, mean, std)

        im = self.GAN.G.predict([n, nc], batch_size = BATCH_SIZE)

        for i in range(im.shape[0]):

            x = Image.fromarray(np.uint8(im[i]*255), mode = 'RGB')

            x.save("Results/Samples-c" + str(cnum) + "/im ("+str(i+1)+").png") 
Example 7
Project: dataiku-contrib   Author: dataiku   File: model.py    License: Apache License 2.0
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss 
Example 8
Project: blackbox-attacks   Author: sunblaze-ucb   File: attack_utils.py    License: MIT License
def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """

    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    elif loss == 'logloss':
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    else:
        raise ValueError("Unknown loss: {}".format(loss))

    if mean:
        out = K.mean(out)
    # else:
    #     out = K.sum(out)
    return out 
Example 9
Project: typhon   Author: atmtools   File: qrnn.py    License: MIT License
def predict(self, x):
        r"""
        Predict quantiles of the conditional distribution P(y|x).

        Forward propagates the inputs in `x` through the network to
        obtain the predicted quantiles `y`.

        Arguments:

            x(np.array): Array of shape `(n, m)` containing `n` m-dimensional inputs
                         for which to predict the conditional quantiles.

        Returns:

             Array of shape `(n, k)` with the columns corresponding to the k
             quantiles of the network.

        """
        predictions = np.stack(
            [m.predict((x - self.x_mean) / self.x_sigma) for m in self.models])
        return np.mean(predictions, axis=0) 
Example 10
Project: typhon   Author: atmtools   File: qrnn.py    License: MIT License
def posterior_mean(self, x):
        r"""
        Computes the posterior mean by computing the first moment of the
        estimated posterior CDF.

        Arguments:

            x(np.array): Array of shape `(n, m)` containing `n` inputs for which
                         to predict the posterior mean.
        Returns:

            Array containing the posterior means for the provided inputs.
        """
        y_pred, qs = self.cdf(x)
        mus = y_pred[-1] - np.trapz(qs, x=y_pred)
        return mus 
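The `y_pred[-1] - np.trapz(qs, x=y_pred)` line follows from integration by parts: E[y] = ∫ y dF(y) = y_max - ∫ F(y) dy, assuming the predicted quantiles span the full CDF. A standalone numpy check (my own sketch, not from the project) on the standard uniform distribution, whose q-th quantile is simply q, so the mean should come out to 0.5:

import numpy as np

qs = np.linspace(0.0, 1.0, 101)   # quantile levels, i.e. CDF values
y_pred = qs.copy()                # uniform distribution: q-th quantile == q
mean = y_pred[-1] - np.trapz(qs, x=y_pred)
print(mean)  # ~0.5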
Example 11
Project: Python-Deep-Learning-SE   Author: ivan-vasilev   File: chapter_06_001.py    License: MIT License
def sampling(args: tuple):
    """
    Reparameterization trick by sampling z from unit Gaussian
    :param args: (tensor, tensor) mean and log of variance of q(z|x)
    :returns tensor: sampled latent vector z
    """

    # unpack the input tuple
    z_mean, z_log_var = args

    # mini-batch size
    mb_size = K.shape(z_mean)[0]

    # latent space size
    dim = K.int_shape(z_mean)[1]

    # random normal vector with mean=0 and std=1.0
    epsilon = K.random_normal(shape=(mb_size, dim))

    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
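To use `sampling` inside a model, the usual pattern is to wrap it in a Lambda layer so that z stays differentiable with respect to the encoder outputs. A minimal sketch, assuming hypothetical encoder tensors `z_mean` and `z_log_var` of shape `(batch, latent_dim)`:

from keras.layers import Lambda

# `z_mean` and `z_log_var` are assumed outputs of an encoder network
z = Lambda(sampling)([z_mean, z_log_var])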
Example 12
Project: reinforcement-learning   Author: rlcode   File: breakout_dqn.py    License: MIT License
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using a Convolutional Neural Network
    # state is the input and the Q value of each action is the output of the network
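The quadratic/linear split above implements the Huber loss with delta = 1 (quadratic for errors up to 1, linear beyond), which keeps gradient magnitudes bounded during Q-learning. A standalone numpy check of that equivalence (my own sketch, not part of the project):

import numpy as np

error = np.abs(np.array([0.3, 2.0]))       # |y - q_value| for two samples
quadratic_part = np.clip(error, 0.0, 1.0)
linear_part = error - quadratic_part
huber = 0.5 * quadratic_part ** 2 + linear_part
print(huber)  # [0.045 1.5] == 0.5*e^2 for e <= 1, e - 0.5 for e > 1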
Example 13
Project: reinforcement-learning   Author: rlcode   File: breakout_dueling_ddqn.py    License: MIT License
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using a Convolutional Neural Network
    # state is the input and the Q value of each action is the output of the network
    # the dueling network's Q value is the sum of the advantage and the state value
Example 14
Project: PanopticSegmentation   Author: dmechea   File: model.py    License: MIT License
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss 
Example 15
Project: models   Author: kipoi   File: model.py    License: MIT License
def profile_contrib(p):
    return kl.Lambda(lambda p:
                     K.mean(K.sum(K.stop_gradient(tf.nn.softmax(p, dim=-2)) * p, axis=-2), axis=-1)
                     )(p) 
Example 16
Project: deep-learning-note   Author: wdxtub   File: 7_visualize_filters.py    License: MIT License
def deprocess_image(x):
    # Convert a tensor into a valid image
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    x += 0.5
    x = np.clip(x, 0, 1)

    x *= 255
    x = np.clip(x, 0, 255).astype('uint8')
    return x 
Example 17
Project: deep-models   Author: LaurentMazare   File: lstm_ln.py    License: Apache License 2.0
def norm(self, xs, norm_id):
    mu = K.mean(xs, axis=-1, keepdims=True)
    sigma = K.sqrt(K.var(xs, axis=-1, keepdims=True) + 1e-3)
    xs = self.gs[norm_id] * (xs - mu) / (sigma + 1e-3) + self.bs[norm_id]
    return xs 
Example 18
Project: steppy-toolkit   Author: minerva-ml   File: contrib.py    License: MIT License
def pair_loss(y_true, y_pred):
    y_true = tf.cast(y_true, tf.int32)
    parts = tf.dynamic_partition(y_pred, y_true, 2)
    y_pos = parts[1]
    y_neg = parts[0]
    y_pos = tf.expand_dims(y_pos, 0)
    y_neg = tf.expand_dims(y_neg, -1)
    out = K.sigmoid(y_neg - y_pos)
    return K.mean(out) 
Example 19
Project: gandlf   Author: codekansas   File: losses.py    License: MIT License
def negative_binary_crossentropy(y_true, y_pred):
    """Instead of minimizing log(1-D), maximize log(D).

    Note that when using this loss function, you should not change the target.
    For example, if you want G -> 0 and D -> 1, then you should replace your
    binary_crossentropy loss with negative_binary_crossentropy loss without
    changing to G -> 1.
    """

    return -K.mean(K.binary_crossentropy(y_pred, 1 - y_true), axis=-1) 
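A minimal usage sketch, assuming a hypothetical `combined` model that runs the generator's output through a frozen discriminator: keep the usual target (G -> 0 here) and let the loss flip the sign instead of the labels.

# `combined` is an assumed generator-through-discriminator model
combined.compile(optimizer='adam', loss=negative_binary_crossentropy)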
Example 20
Project: gandlf   Author: codekansas   File: losses.py    License: MIT License
def maximize(_, y_pred):
    """Maximizes y_pred, regardless of y_true."""

    return -K.mean(y_pred) 
Example 21
Project: gandlf   Author: codekansas   File: losses.py    License: MIT License
def minimize(_, y_pred):
    """Minimizes y_pred, regardless of y_true."""

    return K.mean(y_pred) 
Example 22
Project: gandlf   Author: codekansas   File: similarities.py    License: MIT License
def cosine(a, b):
    """Cosine similarity. Maximum is 1 (a == b), minimum is -1 (a == -b)."""

    a = K.l2_normalize(a)
    b = K.l2_normalize(b)
    return 1 - K.mean(a * b, axis=-1) 
Example 23
Project: gandlf   Author: codekansas   File: similarities.py    License: MIT License
def geometric(a, b):
    """Geometric mean of sigmoid and euclidian similarity."""

    return sigmoid(a, b) * euclidean(a, b) 
Example 24
Project: kaggle-carvana-2017   Author: killthekitten   File: ensemble_gpu.py    License: MIT License
def create_model(gpu):
    with tf.device(gpu):
        input = Input((1280, 1918, len(dirs)))
        x = Lambda(lambda x: K.mean(x, axis=-1, keepdims=True))(input)
        model = Model(input, x)
        model.summary()
    return model 
Example 25
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    License: MIT License
def bootstrapped_crossentropy(y_true, y_pred, bootstrap_type='hard', alpha=0.95):
    target_tensor = y_true
    prediction_tensor = y_pred
    _epsilon = _to_tensor(K.epsilon(), prediction_tensor.dtype.base_dtype)
    prediction_tensor = K.tf.clip_by_value(prediction_tensor, _epsilon, 1 - _epsilon)
    prediction_tensor = K.tf.log(prediction_tensor / (1 - prediction_tensor))

    if bootstrap_type == 'soft':
        bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * K.tf.sigmoid(prediction_tensor)
    else:
        bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * K.tf.cast(
            K.tf.sigmoid(prediction_tensor) > 0.5, K.tf.float32)
    return K.mean(K.tf.nn.sigmoid_cross_entropy_with_logits(
        labels=bootstrap_target_tensor, logits=prediction_tensor)) 
Example 26
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    License: MIT License
def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """ Implements nline Bootstrapping crossentropy loss, to train only on hard pixels,
        see  https://arxiv.org/abs/1605.06885 Bridging Category-level and Instance-level Semantic Image Segmentation
        The implementation is a bit different as we use binary crossentropy instead of softmax
        SUPPORTS ONLY MINIBATCH WITH 1 ELEMENT!
    # Arguments
        y_true: A tensor with labels.

        y_pred: A tensor with predicted probabilites.

        pixels: number of hard pixels to keep

        threshold: confidence to use, i.e. if threshold is 0.7, y_true=1, prediction=0.65 then we consider that pixel as hard
    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    difference = K.abs(y_true - y_pred)

    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))

    return K.mean(K.binary_crossentropy(y_true, y_pred)) 
Example 27
Project: Keras-GAN   Author: eriklindernoren   File: infogan.py    License: MIT License
def mutual_info_loss(self, c, c_given_x):
        """The mutual information metric we aim to minimize"""
        eps = 1e-8
        conditional_entropy = K.mean(- K.sum(K.log(c_given_x + eps) * c, axis=1))
        entropy = K.mean(- K.sum(K.log(c + eps) * c, axis=1))

        return conditional_entropy + entropy 
Example 28
Project: Keras-GAN   Author: eriklindernoren   File: wgan.py    License: MIT License
def wasserstein_loss(self, y_true, y_pred):
        return K.mean(y_true * y_pred) 
Example 29
Project: Keras-GAN   Author: eriklindernoren   File: wgan_gp.py    License: MIT License
def wasserstein_loss(self, y_true, y_pred):
        return K.mean(y_true * y_pred) 
Example 30
Project: Keras-GAN   Author: eriklindernoren   File: dualgan.py    License: MIT License
def wasserstein_loss(self, y_true, y_pred):
        return K.mean(y_true * y_pred)
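All three wasserstein_loss copies depend on a label convention rather than 0/1 targets. A sketch of the pairing these scripts commonly use (the -1/+1 labels are an assumption, not shown in the excerpts): real samples get -1 and fakes +1, so minimizing K.mean(y_true * y_pred) drives the critic's score up on real samples and down on fakes.

import numpy as np

batch_size = 32                     # hypothetical mini-batch size
valid = -np.ones((batch_size, 1))   # targets for real images
fake = np.ones((batch_size, 1))     # targets for generated images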