Python keras.backend.mean() Examples

The following are code examples showing how to use keras.backend.mean(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.
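
Before the project examples, a minimal self-contained sketch of the call itself may help; the sample array, the variable x, and the printed results are illustrative assumptions rather than code taken from any project below. K.mean() averages over all elements by default, over a single dimension when axis is given, and keeps that dimension as size 1 when keepdims=True.

from keras import backend as K
import numpy as np

# Illustrative 2x3 input tensor (assumed values)
x = K.constant(np.array([[1., 2., 3.],
                         [4., 5., 6.]]))

print(K.eval(K.mean(x)))                          # mean over all elements -> 3.5
print(K.eval(K.mean(x, axis=-1)))                 # mean over the last axis -> [2. 5.]
print(K.eval(K.mean(x, axis=-1, keepdims=True)))  # reduced axis kept -> shape (2, 1)

Most of the examples below use exactly these options, typically to collapse a per-element loss tensor into a scalar loss.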

Example 1
Project: Scene-Understanding   Author: foamliu   File: utils.py    MIT License 7 votes
def categorical_crossentropy_with_class_rebal(y_true, y_pred):
    y_true = K.reshape(y_true, (-1, num_classes))
    y_pred = K.reshape(y_pred, (-1, num_classes))

    idx_max = K.argmax(y_true, axis=1)
    weights = K.gather(factor, idx_max)
    weights = K.reshape(weights, (-1, 1))

    # multiply y_true by weights
    y_true = y_true * weights

    cross_ent = K.categorical_crossentropy(y_pred, y_true)
    cross_ent = K.mean(cross_ent, axis=-1)

    return cross_ent


# getting the number of GPUs 
Example 2
Project: 360_aware_saliency   Author: MikhailStartsev   File: models.py    GNU General Public License v3.0 7 votes
def nss(y_true, y_pred):
    max_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    y_pred /= max_y_pred
    y_pred_flatten = K.batch_flatten(y_pred)

    y_mean = K.mean(y_pred_flatten, axis=-1)
    y_mean = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_mean)), 
                                                               shape_r_out, axis=-1)), shape_c_out, axis=-1)

    y_std = K.std(y_pred_flatten, axis=-1)
    y_std = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_std)), 
                                                              shape_r_out, axis=-1)), shape_c_out, axis=-1)

    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    return -(K.sum(K.sum(y_true * y_pred, axis=2), axis=2) / K.sum(K.sum(y_true, axis=2), axis=2))


# Gaussian priors initialization 
Example 3
Project: MODS_ConvNet   Author: santiagolopezg   File: filter_visualize.py    MIT License 6 votes
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_dim_ordering() == 'th':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x



# build the network with best weights 
Example 4
Project: MODS_ConvNet   Author: santiagolopezg   File: filter_visualize.py    MIT License 6 votes
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_dim_ordering() == 'th':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x



# build the network with best weights 
Example 5
Project: deep-learning-note   Author: wdxtub   File: 7_visualize_filters.py    MIT License 6 votes
def generate_pattern(layer_name, filter_index, size=150):
    # Filter visualization function
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_index])
    grads = K.gradients(loss, model.input)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    iterate = K.function([model.input], [loss, grads])
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
    
    step = 1
    for _ in range(40):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
    
    img = input_img_data[0]
    return deprocess_image(img) 
Example 6
Project: CapsAttnNet   Author: rstager   File: train.py    MIT License 6 votes
def margin_loss(y_true, y_pred):
    """
    Margin loss for Eq.(4). This loss should also work when y_true[i, :] contains more than one `1`, although that case has not been tested.
    :param y_true: [None, n_classes, n_instance]
    :param y_pred: [None, n_classes, n_instance]
    :return: a scalar loss value.
    """

    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))

    loss = K.mean(K.sum(L, 1))

    acc = K.equal(K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1))

    # loss = tf.Print(loss,[tf.shape(y_true)],message=" margin loss y_true shape",summarize=6,first_n=1)
    # loss = tf.Print(loss,[tf.shape(y_pred)],message=" margin loss y_pred shape",summarize=6,first_n=1)
    # loss = tf.Print(loss,[tf.shape(L)],message=" margin loss L shape",summarize=6,first_n=1)
    # loss = tf.Print(loss,[tf.shape(acc)],message=" margin loss acc shape",summarize=6,first_n=1)
    # loss = tf.Print(loss,[y_true[0,0,:],y_pred[0,0,:]],message=" margin loss y_true/y_pred",summarize=20)
    # loss = tf.Print(loss,[L[0,0,:]],message=" margin loss L",summarize=6)
    # loss = tf.Print(loss,[loss],message=" margin loss loss",summarize=6)
    # loss = tf.Print(loss,[acc[0,0]],message=" margin loss acc",summarize=6)

    return loss 
Example 7
Project: keras-utility-layer-collection   Author: zimmerrol   File: layer_normalization.py    MIT License 6 votes
def call(self, x):
        mean = K.mean(x, axis=-1)
        std = K.std(x, axis=-1)

        if len(x.shape) == 3:
            mean = K.permute_dimensions(
                K.repeat(mean, x.shape.as_list()[-1]),
                [0,2,1]
            )
            std = K.permute_dimensions(
                K.repeat(std, x.shape.as_list()[-1]),
                [0,2,1] 
            )
            
        elif len(x.shape) == 2:
            mean = K.reshape(
                K.repeat_elements(mean, x.shape.as_list()[-1], 0),
                (-1, x.shape.as_list()[-1])
            )
            std = K.reshape(
                K.repeat_elements(std, x.shape.as_list()[-1], 0),
                (-1, x.shape.as_list()[-1])
            )
        
        return self._g * (x - mean) / (std + self._epsilon) + self._b 
Example 8
Project: phoneticSimilarity   Author: ronggong   File: eval_grad_cam.py    GNU Affero General Public License v3.0 6 votes
def grad_cam(model, x, layer_name):

    teacher_output = model.output[:, 0]

    last_conv_layer = model.get_layer(layer_name)

    # gradient of the 0th variable
    grads = K.gradients(teacher_output, last_conv_layer.output)[0]

    pooled_grads = K.mean(grads, axis=(0, 2, 3))

    iterate = K.function([model.input],
                         [pooled_grads, last_conv_layer.output[0]])

    pooled_grads_value, conv_layer_output_value = iterate([x])

    for i in range(len(pooled_grads_value)):
        conv_layer_output_value[i, :, :] *= pooled_grads_value[i]

    heatmap = np.mean(conv_layer_output_value, axis=0)

    heatmap = np.maximum(heatmap, 0)

    return heatmap 
Example 9
Project: apex_dqn   Author: omurammm   File: learner.py    MIT License 6 votes
def build_network(self):
        l_input = Input(shape=(4,84,84))
        conv2d = Conv2D(32,8,strides=(4,4),activation='relu', data_format="channels_first")(l_input)
        conv2d = Conv2D(64,4,strides=(2,2),activation='relu', data_format="channels_first")(conv2d)
        conv2d = Conv2D(64,3,strides=(1,1),activation='relu', data_format="channels_first")(conv2d)
        fltn = Flatten()(conv2d)
        v = Dense(512, activation='relu', name="dense_v1")(fltn)
        v = Dense(1, name="dense_v2")(v)
        adv = Dense(512, activation='relu', name="dense_adv1")(fltn)
        adv = Dense(self.num_actions, name="dense_adv2")(adv)
        y = concatenate([v,adv])
        l_output = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - tf.stop_gradient(K.mean(a[:,1:],keepdims=True)), output_shape=(self.num_actions,))(y)
        model = Model(input=l_input,output=l_output)

        s = tf.placeholder(tf.float32, [None, self.state_length, self.frame_width, self.frame_height])
        q_values = model(s)

        return s, q_values, model 
Example 10
Project: apex_dqn   Author: omurammm   File: test_agent.py    MIT License 6 votes
def build_network(self):
        l_input = Input(shape=(4,84,84))
        conv2d = Conv2D(32,8,strides=(4,4),activation='relu', data_format="channels_first")(l_input)
        conv2d = Conv2D(64,4,strides=(2,2),activation='relu', data_format="channels_first")(conv2d)
        conv2d = Conv2D(64,3,strides=(1,1),activation='relu', data_format="channels_first")(conv2d)
        fltn = Flatten()(conv2d)
        v = Dense(512, activation='relu', name="dense_v1")(fltn)
        v = Dense(1, name="dense_v2")(v)
        adv = Dense(512, activation='relu', name="dense_adv1")(fltn)
        adv = Dense(self.num_actions, name="dense_adv2")(adv)
        y = concatenate([v,adv])
        l_output = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - tf.stop_gradient(K.mean(a[:,1:],keepdims=True)), output_shape=(self.num_actions,))(y)
        model = Model(input=l_input,output=l_output)

        s = tf.placeholder(tf.float32, [None, self.state_length, self.frame_width, self.frame_height])
        q_values = model(s)

        return s, q_values, model 
Example 11
Project: apex_dqn   Author: omurammm   File: actor.py    MIT License 6 votes
def build_network(self):
        l_input = Input(shape=(4,84,84))
        conv2d = Conv2D(32,8,strides=(4,4),activation='relu', data_format="channels_first")(l_input)
        conv2d = Conv2D(64,4,strides=(2,2),activation='relu', data_format="channels_first")(conv2d)
        conv2d = Conv2D(64,3,strides=(1,1),activation='relu', data_format="channels_first")(conv2d)
        fltn = Flatten()(conv2d)
        v = Dense(512, activation='relu', name="dense_v1_"+str(self.num))(fltn)
        v = Dense(1, name="dense_v2_"+str(self.num))(v)
        adv = Dense(512, activation='relu', name="dense_adv1_"+str(self.num))(fltn)
        adv = Dense(self.num_actions, name="dense_adv2_"+str(self.num))(adv)
        y = concatenate([v,adv])
        l_output = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - tf.stop_gradient(K.mean(a[:,1:],keepdims=True)), output_shape=(self.num_actions,))(y)
        model = Model(input=l_input,output=l_output)

        s = tf.placeholder(tf.float32, [None, self.state_length, self.frame_width, self.frame_height])
        q_values = model(s)

        return s, q_values, model 
Example 12
Project: Keras-GAN   Author: eriklindernoren   File: wgan_gp.py    MIT License 6 votes
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
        """
        Computes gradient penalty based on prediction and weighted real / fake samples
        """
        gradients = K.gradients(y_pred, averaged_samples)[0]
        # compute the euclidean norm by squaring ...
        gradients_sqr = K.square(gradients)
        #   ... summing over the rows ...
        gradients_sqr_sum = K.sum(gradients_sqr,
                                  axis=np.arange(1, len(gradients_sqr.shape)))
        #   ... and sqrt
        gradient_l2_norm = K.sqrt(gradients_sqr_sum)
        # compute lambda * (1 - ||grad||)^2 still for each single sample
        gradient_penalty = K.square(1 - gradient_l2_norm)
        # return the mean as loss over all the batch samples
        return K.mean(gradient_penalty) 
Example 13
Project: CalibrationNN   Author: Andres-Hernandez   File: neural_network.py    GNU General Public License v3.0 6 votes
def test_helper(func, exponent, layer, lr, dropout_first, dropout_middle, 
                dropout_last, alpha, prefix='SWO GBP ', postfix='',
                with_comparison=False):
    print('Test %s, %s, %s, %s, %s %s %s' % (exponent, layer, lr, dropout_first,
                                       dropout_middle, dropout_last, alpha))
    model = func(exponent=exponent, lr=lr, layers=layer, 
                 dropout_first=dropout_first, dropout_middle=dropout_middle,
                 dropout_last=dropout_last, prefix=prefix, postfix=postfix, 
                 alpha=alpha)
    model.train(200)
    val_loss = np.mean(model.history['history']['val_loss'][-5:])
    
#    if with_comparison:
#        swo = inst.get_swaptiongen(inst.hullwhite_analytic)
#        _, values = swo.compare_history(model, dates=dates)
#        
    
    return (val_loss, layer, exponent, lr, dropout_first, dropout_middle, 
            dropout_last, alpha) 
Example 14
Project: Colorful-Image-Colorization   Author: foamliu   File: utils.py    MIT License 6 votes
def categorical_crossentropy_color(y_true, y_pred):
    q = 313
    y_true = K.reshape(y_true, (-1, q))
    y_pred = K.reshape(y_pred, (-1, q))

    idx_max = K.argmax(y_true, axis=1)
    weights = K.gather(prior_factor, idx_max)
    weights = K.reshape(weights, (-1, 1))

    # multiply y_true by weights
    y_true = y_true * weights

    cross_ent = K.categorical_crossentropy(y_pred, y_true)
    cross_ent = K.mean(cross_ent, axis=-1)

    return cross_ent


# getting the number of GPUs 
Example 15
Project: speech_separation   Author: bill9800   File: model_loss.py    MIT License 6 votes
def audio_discriminate_loss2(gamma=0.1,beta = 2*0.1,num_speaker=2):
    def loss_func(S_true,S_pred,gamma=gamma,beta=beta,num_speaker=num_speaker):
        sum_mtr = K.zeros_like(S_true[:,:,:,:,0])
        for i in range(num_speaker):
            sum_mtr += K.square(S_true[:,:,:,:,i]-S_pred[:,:,:,:,i])
            for j in range(num_speaker):
                if i != j:
                    sum_mtr -= gamma*(K.square(S_true[:,:,:,:,i]-S_pred[:,:,:,:,j]))

        for i in range(num_speaker):
            for j in range(i+1,num_speaker):
                #sum_mtr -= beta*K.square(S_pred[:,:,:,i]-S_pred[:,:,:,j])
                #sum_mtr += beta*K.square(S_true[:,:,:,:,i]-S_true[:,:,:,:,j])
                pass
        #sum = K.sum(K.maximum(K.flatten(sum_mtr),0))

        loss = K.mean(K.flatten(sum_mtr))

        return loss
    return loss_func 
Example 16
Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    MIT License 6 votes
def prepareSamples(self, cnum = 0, num = 1000): #8x8 images, bottom row is constant

        try:
            os.mkdir("Results/Samples-c" + str(cnum))
        except:
            x = 0

        im = self.im.get_class(cnum)
        e = self.GAN.E.predict(im, batch_size = BATCH_SIZE * k_images)

        mean = np.mean(e, axis = 0)
        std = np.std(e, axis = 0)

        n = noise(num)
        nc = nClass(num, mean, std)

        im = self.GAN.G.predict([n, nc], batch_size = BATCH_SIZE)

        for i in range(im.shape[0]):

            x = Image.fromarray(np.uint8(im[i]*255), mode = 'RGB')

            x.save("Results/Samples-c" + str(cnum) + "/im ("+str(i+1)+").png") 
Example 17
Project: deepflying   Author: dslab-deepflying   File: wgan_gp.py    GNU General Public License v3.0 6 votes
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
        """
        Computes gradient penalty based on prediction and weighted real / fake samples
        """
        gradients = K.gradients(y_pred, averaged_samples)[0]
        # compute the euclidean norm by squaring ...
        gradients_sqr = K.square(gradients)
        #   ... summing over the rows ...
        gradients_sqr_sum = K.sum(gradients_sqr,
                                  axis=np.arange(1, len(gradients_sqr.shape)))
        #   ... and sqrt
        gradient_l2_norm = K.sqrt(gradients_sqr_sum)
        # compute lambda * (1 - ||grad||)^2 still for each single sample
        gradient_penalty = K.square(1 - gradient_l2_norm)
        # return the mean as loss over all the batch samples
        return K.mean(gradient_penalty) 
Example 18
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 6 votes
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss 
Example 19
Project: blackbox-attacks   Author: sunblaze-ucb   File: attack_utils.py    MIT License 6 votes
def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """

    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    elif loss == 'logloss':
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    else:
        raise ValueError("Unknown loss: {}".format(loss))

    if mean:
        out = K.mean(out)
    # else:
    #     out = K.sum(out)
    return out 
Example 20
Project: reinforcement-learning-kr   Author: rlcode   File: breakout_dqn.py    MIT License 6 votes
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        prediction = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(prediction * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # Build a neural network that takes the state as input and outputs the Q-function
Example 21
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet.py    MIT License 5 votes
def margin_loss(y_true, y_pred):
    """
    Margin loss for Eq.(4). This loss should also work when y_true[i, :] contains more than one `1`, although that case has not been tested.
    :param y_true: [None, n_classes]
    :param y_pred: [None, num_capsule]
    :return: a scalar loss value.
    """
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))

    return K.mean(K.sum(L, 1)) 
Example 22
Project: MODS_ConvNet   Author: santiagolopezg   File: filter_visualize.py    MIT License 5 votes
def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5) 
Example 23
Project: MODS_ConvNet   Author: santiagolopezg   File: filter_visualize.py    MIT License 5 votes
def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5) 
Example 24
Project: models   Author: kipoi   File: model.py    MIT License 5 votes
def profile_contrib(p):
    return kl.Lambda(lambda p:
                     K.mean(K.sum(K.stop_gradient(tf.nn.softmax(p, dim=-2)) * p, axis=-2), axis=-1)
                     )(p) 
Example 25
Project: deep-learning-note   Author: wdxtub   File: 7_visualize_filters.py    MIT License 5 votes
def deprocess_image(x):
    # Function to convert a tensor into a valid image
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    x += 0.5
    x = np.clip(x, 0, 1)

    x *= 255
    x = np.clip(x, 0, 255).astype('uint8')
    return x 
Example 26
Project: deep-models   Author: LaurentMazare   File: lstm_ln.py    Apache License 2.0 5 votes
def norm(self, xs, norm_id):
    mu = K.mean(xs, axis=-1, keepdims=True)
    sigma = K.sqrt(K.var(xs, axis=-1, keepdims=True) + 1e-3)
    xs = self.gs[norm_id] * (xs - mu) / (sigma + 1e-3) + self.bs[norm_id]
    return xs 
Example 27
Project: phoneticSimilarity   Author: ronggong   File: losses.py    GNU Affero General Public License v3.0 5 votes
def triplet_loss(inputs, margin=0.5):
    """calculate triplet loss"""
    anchor, same, diff = inputs
    same_dist = cosine_distance(anchor, same, vects_are_normalized=False)
    diff_dist = cosine_distance(anchor, diff, vects_are_normalized=False)

    loss = K.maximum(K.constant(0), margin + same_dist/2 - diff_dist/2)

    return K.mean(loss) 
Example 28
Project: phoneticSimilarity   Author: ronggong   File: models_siamese_tripletloss.py    GNU Affero General Public License v3.0 5 votes
def calculate_loss(triplet_model,
                   generator,
                   iter_time,
                   batch_size,
                   N_diff,
                   margin):
    """calculate the max loss during Ndiff iterations"""
    max_loss = -np.inf
    ii_Ndiff = 0
    list_loss = []
    ii_counter = 0
    for input_batch in generator:
        outputs_batch = triplet_model.predict_on_batch(input_batch)
        loss_batch = K.eval(K.mean(triplet_loss(outputs_batch, margin=margin)))
        # print('predict on iter', ii_counter, loss_batch)

        if loss_batch > max_loss:
            max_loss = loss_batch

        ii_Ndiff += 1
        if ii_Ndiff >= N_diff: # every Ndiff iterations append and reset max_loss
            # print(max_loss)
            list_loss.append(max_loss)
            max_loss = -np.inf
            ii_Ndiff = 0

        ii_counter += 1
        if ii_counter >= iter_time: # after iterating all samples, return mean loss
            return np.mean(list_loss) 
Example 29
Project: gandlf   Author: codekansas   File: losses.py    MIT License 5 votes
def negative_binary_crossentropy(y_true, y_pred):
    """Instead of minimizing log(1-D), maximize log(D).

    Note that when using this loss function, you should not change the target.
    For example, if you want G -> 0 and D -> 1, then you should replace your
    binary_crossentropy loss with negative_binary_crossentropy loss without
    changing to G -> 1.
    """

    return -K.mean(K.binary_crossentropy(y_pred, 1 - y_true), axis=-1) 
Example 30
Project: gandlf   Author: codekansas   File: losses.py    MIT License 5 votes
def maximize(_, y_pred):
    """Maximizes y_pred, regardless of y_true."""

    return -K.mean(y_pred) 
Example 31
Project: gandlf   Author: codekansas   File: losses.py    MIT License 5 votes
def minimize(_, y_pred):
    """Minimizes y_pred, regardless of y_true."""

    return K.mean(y_pred) 
Example 32
Project: gandlf   Author: codekansas   File: similarities.py    MIT License 5 votes
def cosine(a, b):
    """Cosine similarity. Maximum is 1 (a == b), minimum is -1 (a == -b)."""

    a = K.l2_normalize(a)
    b = K.l2_normalize(b)
    return 1 - K.mean(a * b, axis=-1) 
Example 33
Project: gandlf   Author: codekansas   File: similarities.py    MIT License 5 votes
def geometric(a, b):
    """Geometric mean of sigmoid and euclidian similarity."""

    return sigmoid(a, b) * euclidean(a, b) 
Example 34
Project: kaggle-carvana-2017   Author: killthekitten   File: ensemble_gpu.py    MIT License 5 votes
def create_model(gpu):
    with tf.device(gpu):
        input = Input((1280, 1918, len(dirs)))
        x = Lambda(lambda x: K.mean(x, axis=-1, keepdims=True))(input)
        model = Model(input, x)
        model.summary()
    return model 
Example 35
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License 5 votes
def bootstrapped_crossentropy(y_true, y_pred, bootstrap_type='hard', alpha=0.95):
    target_tensor = y_true
    prediction_tensor = y_pred
    _epsilon = _to_tensor(K.epsilon(), prediction_tensor.dtype.base_dtype)
    prediction_tensor = K.tf.clip_by_value(prediction_tensor, _epsilon, 1 - _epsilon)
    prediction_tensor = K.tf.log(prediction_tensor / (1 - prediction_tensor))

    if bootstrap_type == 'soft':
        bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * K.tf.sigmoid(prediction_tensor)
    else:
        bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * K.tf.cast(
            K.tf.sigmoid(prediction_tensor) > 0.5, K.tf.float32)
    return K.mean(K.tf.nn.sigmoid_cross_entropy_with_logits(
        labels=bootstrap_target_tensor, logits=prediction_tensor)) 
Example 36
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License 5 votes
def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """ Implements nline Bootstrapping crossentropy loss, to train only on hard pixels,
        see  https://arxiv.org/abs/1605.06885 Bridging Category-level and Instance-level Semantic Image Segmentation
        The implementation is a bit different as we use binary crossentropy instead of softmax
        SUPPORTS ONLY MINIBATCH WITH 1 ELEMENT!
    # Arguments
        y_true: A tensor with labels.

        y_pred: A tensor with predicted probabilities.

        pixels: number of hard pixels to keep

        threshold: confidence to use, i.e. if threshold is 0.7, y_true=1, prediction=0.65 then we consider that pixel as hard
    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    difference = K.abs(y_true - y_pred)

    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))

    return K.mean(K.binary_crossentropy(y_true, y_pred)) 
Example 37
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 5 votes
def softmax_sparse_crossentropy_ignoring_first_label(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log = tf.nn.log_softmax(y_pred)

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1]+1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[1:], axis=-1)

    cross_entropy = -K.sum(y_true * log, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean


# Accuracy for segmentation (ignoring first label) 
Example 38
Project: Keras-GAN   Author: eriklindernoren   File: infogan.py    MIT License 5 votes
def mutual_info_loss(self, c, c_given_x):
        """The mutual information metric we aim to minimize"""
        eps = 1e-8
        conditional_entropy = K.mean(- K.sum(K.log(c_given_x + eps) * c, axis=1))
        entropy = K.mean(- K.sum(K.log(c + eps) * c, axis=1))

        return conditional_entropy + entropy 
Example 39
Project: Keras-GAN   Author: eriklindernoren   File: wgan.py    MIT License 5 votes
def wasserstein_loss(self, y_true, y_pred):
        return K.mean(y_true * y_pred) 
Example 40
Project: Keras-GAN   Author: eriklindernoren   File: wgan_gp.py    MIT License 5 votes
def wasserstein_loss(self, y_true, y_pred):
        return K.mean(y_true * y_pred) 
Example 41
Project: Keras-GAN   Author: eriklindernoren   File: dualgan.py    MIT License 5 votes
def wasserstein_loss(self, y_true, y_pred):
        return K.mean(y_true * y_pred) 
Example 42
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License 5 votes
def margin_loss(y, pred):
    """
    For the first part of loss(classification loss)
    """
    return K.mean(K.sum(y * K.square(K.maximum(0.9 - pred, 0)) + \
        0.5 *  K.square((1 - y) * K.maximum(pred - 0.1, 0)), axis=1)) 
Example 43
Project: FasterRCNN_KERAS   Author: akshaylamba   File: losses.py    Apache License 2.0 5 votes
def class_loss_cls(y_true, y_pred):
	return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :])) 
Example 44
Project: ccm-aae   Author: danielegrattarola   File: mnist.py    MIT License 5 votes
def mean_pred(y_true, y_pred):
    return K.mean(y_pred)


# MNIST dataset 
Example 45
Project: 3DGCN   Author: blackmints   File: loss.py    MIT License 5 votes
def std_mae(std=1):
    def mae(y_true, y_pred):
        return K.mean(K.abs(y_pred - y_true)) * std

    return mae 
Example 46
Project: 3DGCN   Author: blackmints   File: loss.py    MIT License 5 votes
def std_rmse(std=1):
    def rmse(y_true, y_pred):
        return K.sqrt(K.mean(K.square((y_pred - y_true)))) * std

    return rmse 
Example 47
Project: 3DGCN   Author: blackmints   File: loss.py    MIT License 5 votes
def std_r2(std=1):
    def r2(y_true, y_pred):
        ss_res = K.sum(K.square((y_true - y_pred) * std))
        ss_tot = K.sum(K.square((y_true - K.mean(y_true)) * std))
        return 1 - ss_res / (ss_tot + K.epsilon())

    return r2 
Example 48
Project: CalibrationNN   Author: Andres-Hernandez   File: neural_network.py    GNU General Public License v3.0 5 votes
def logarithmic_mean_squared_error(y_true, y_pred):
    return -K.mean(K.log(1.-K.clip(K.square(y_pred-y_true),0., 1.-K.epsilon())))

#_paper 
Example 49
Project: CalibrationNN   Author: Andres-Hernandez   File: neural_network.py    GNU General Public License v3.0 5 votes
def test_helper_cnn(func, dropout, lr, exponent, exp_filter_ir,
                    exp_filter_swo, nb_conv_ir, nb_conv_swo,
                    prefix='SWO GBP ', postfix=''):
    print('Test %s, %s, %s, %s, %s, %s, %s' % (dropout, lr, exponent, exp_filter_ir, 
                                   exp_filter_swo, nb_conv_ir, nb_conv_swo))
    model = func(lr=lr, exponent=exponent, dropout_conv=dropout, 
                 dropout_dense=dropout, nb_filters_swo=2**exp_filter_swo, 
                 nb_filters_ir=2**exp_filter_ir, nb_conv_swo=nb_conv_swo, 
                 nb_conv_ir=nb_conv_ir, prefix=prefix, postfix=postfix)
    model.train(500)
    loss = np.mean(model.history['history']['val_loss'][-5:])
    return (loss, dropout, lr, exponent, exp_filter_ir, exp_filter_swo,
            nb_conv_ir, nb_conv_swo) 
Example 50
Project: focal-loss-keras   Author: mkocabas   File: focal_loss.py    MIT License 5 votes
def focal_loss(gamma=2., alpha=.25):
	def focal_loss_fixed(y_true, y_pred):
		pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
		pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
		return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
	return focal_loss_fixed 
Example 51
Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    MIT License 5 votes
def gradient_penalty_loss(y_true, y_pred, averaged_samples, weight):
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr = K.square(gradients)
    gradient_penalty = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))

    # (weight / 2) * ||grad||^2
    # Penalize the gradient norm
    return K.mean(gradient_penalty) * (weight / 2) 
Example 52
Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    MIT License 5 votes
def hinge_d(y_true, y_pred):
    return K.mean(K.relu(1.0 - (y_true * y_pred))) 
Example 53
Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    MIT License 5 votes
def w_loss(y_true, y_pred):
    return K.mean(y_true * y_pred) 
Example 54
Project: cyclegan-keras-art-attrs   Author: hollygrimm   File: cyclegan_attr_model.py    MIT License 5 votes
def mse_loss(self, y_true, y_pred):
        loss = K.mean(K.square(y_true - y_pred))
        return loss 
Example 55
Project: deepflying   Author: dslab-deepflying   File: wgan.py    GNU General Public License v3.0 5 votes
def wasserstein_loss(self, y_true, y_pred):
        return K.mean(y_true * y_pred) 
Example 56
Project: deepflying   Author: dslab-deepflying   File: wgan_gp.py    GNU General Public License v3.0 5 votes
def wasserstein_loss(self, y_true, y_pred):
        return K.mean(y_true * y_pred) 
Example 57
Project: deepflying   Author: dslab-deepflying   File: vae.py    GNU General Public License v3.0 5 votes
def build_vae(self):

        x = Input(shape=self.img_shape,name='def')

        h = self.encoder(x)

        z_mean = Dense(self.latent_dim)(h)
        z_log_var = Dense(self.latent_dim)(h)

        def sampling(args):
            z_mean, z_log_var = args
            epsilon = K.random_normal(shape=(K.shape(z_mean)[0], self.latent_dim), mean=0.)
            return z_mean + K.exp(z_log_var / 2) * epsilon

        z = Lambda(sampling, output_shape=(self.latent_dim,))([z_mean, z_log_var])

        decoder_h = self.generator

        decoder_mean = Dense(self.channels, activation='sigmoid')
        h_decoded = decoder_h(z)
        x_decoded_mean = decoder_mean(h_decoded)

        vae = Model(x, x_decoded_mean)

        print('VAE :')
        xent_loss = self.img_rows * self.img_cols * metrics.binary_crossentropy(x, x_decoded_mean)
        kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        vae_loss = K.mean(xent_loss + kl_loss)

        vae.add_loss(vae_loss)
        vae.compile(optimizer='rmsprop')
        vae.summary()

        return vae 
Example 58
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unsed bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))

    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)

    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)

    # TODO: use smooth_l1_loss() rather than reimplementing here
    #       to reduce code duplication
    diff = K.abs(target_bbox - rpn_bbox)
    less_than_one = K.cast(K.less(diff, 1.0), "float32")
    loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)

    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss 
Example 59
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.
    """
    # During model building, Keras calls this function with
    # target_class_ids of type float32. Unclear why. Cast it
    # to int to get around it.
    target_class_ids = tf.cast(target_class_ids, 'int64')

    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    #       images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)

    # Loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)

    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active

    # Computer loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss 
Example 60
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss 
Example 61
Project: keras-ctpn   Author: yizt   File: losses.py    Apache License 2.0 5 votes
def ctpn_regress_loss(predict_deltas, deltas, indices):
    """
    Regression loss for the vertical center-point offset and height scaling.
    :param predict_deltas: predicted regression targets, (batch_num, anchors_num, 2)
    :param deltas: ground-truth regression targets, (batch_num, ctpn_train_anchors, 3+1); the last element is a tag, tag=0 means padding
    :param indices: indices of positive/negative samples, (batch_num, ctpn_train_anchors, (idx, tag));
             idx is the anchor index, the last element is a tag: tag=0 means padding, 1 a positive sample, -1 a negative sample
    :return:
    """
    # Drop padding and negative samples
    positive_indices = tf.where(tf.equal(indices[:, :, -1], 1))
    deltas = tf.gather_nd(deltas[..., :-2], positive_indices)  # (n,(dy,dh,dx,tag))
    true_positive_indices = tf.gather_nd(indices[..., 0], positive_indices)  # 1-D, indices of positive anchors

    # batch indices
    batch_indices = positive_indices[:, 0]
    # 2-D indices of the positive anchors
    train_indices_2d = tf.stack([batch_indices, tf.cast(true_positive_indices, dtype=tf.int64)], axis=1)
    # predicted regression targets for the positive anchors
    predict_deltas = tf.gather_nd(predict_deltas, train_indices_2d, name='ctpn_regress_loss_predict_deltas')

    # Smooth-L1 # Very important, otherwise the loss becomes NaN
    loss = K.switch(tf.size(deltas) > 0,
                    smooth_l1_loss(deltas, predict_deltas),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss 
Example 62
Project: keras-ctpn   Author: yizt   File: losses.py    Apache License 2.0 5 votes
def side_regress_loss(predict_deltas, deltas, indices):
    """
    Side-refinement regression loss.
    :param predict_deltas: predicted x-axis offset regression targets, (batch_num, anchors_num, 1)
    :param deltas: ground-truth regression targets, (batch_num, ctpn_train_anchors, 3+1); the last element is a tag, tag=0 means padding
    :param indices: indices of positive/negative samples, (batch_num, ctpn_train_anchors, (idx, tag));
             idx is the anchor index, the last element is a tag: tag=0 means padding, 1 a positive sample, -1 a negative sample
    :return:
    """
    # Drop padding and negative samples
    positive_indices = tf.where(tf.equal(indices[:, :, -1], 1))
    deltas = tf.gather_nd(deltas[..., 2:3], positive_indices)  # (n,(dy,dh,dx,tag))  take dx
    true_positive_indices = tf.gather_nd(indices[..., 0], positive_indices)  # 1-D, indices of positive anchors

    # batch indices
    batch_indices = positive_indices[:, 0]
    # 2-D indices of the positive anchors
    train_indices_2d = tf.stack([batch_indices, tf.cast(true_positive_indices, dtype=tf.int64)], axis=1)
    # predicted regression targets for the positive anchors
    predict_deltas = tf.gather_nd(predict_deltas, train_indices_2d, name='ctpn_regress_loss_predict_side_deltas')

    # Smooth-L1 # Very important, otherwise the loss becomes NaN
    loss = K.switch(tf.size(deltas) > 0,
                    smooth_l1_loss(deltas, predict_deltas),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss 
Example 63
Project: BlurbGenreCollection-HMC   Author: uhh-lt   File: networks.py    Apache License 2.0 5 votes
def margin_loss(y_true, y_pred):
    """
    Margin loss as described in Sabour et al. (2017)
    """
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))

    return K.mean(K.sum(L, 1)) 
Example 64
Project: HippMapp3r   Author: mgoubran   File: metrics.py    GNU General Public License v3.0 5 votes
def weighted_dice_coefficient(y_true, y_pred, axis=(-3, -2, -1), smooth=0.00001):
    """
    Weighted dice coefficient. Default axis assumes a "channels first" data structure
    :param smooth:
    :param y_true:
    :param y_pred:
    :param axis:
    :return:
    """
    return K.mean(2. * (K.sum(y_true * y_pred,
                              axis=axis) + smooth/2)/(K.sum(y_true,
                                                            axis=axis) + K.sum(y_pred,
                                                                               axis=axis) + smooth)) 
Example 65
Project: ODENet   Author: uqyge   File: utils.py    MIT License 5 votes
def cubic_loss(y_true, y_pred):
    return K.mean(K.square(y_true - y_pred)*K.abs(y_true - y_pred), axis=-1) 
Example 66
Project: ODENet   Author: uqyge   File: utils.py    MIT License 5 votes
def coeff_r2(y_true, y_pred):
    from keras import backend as K
    SS_res = K.sum(K.square(y_true-y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return (1 - SS_res/(SS_tot + K.epsilon())) 
Example 67
Project: trVAE   Author: theislab   File: _losses.py    MIT License 5 votes
def kl_recon(mu, log_var, alpha=0.1, eta=1.0):
    def kl_recon_loss(y_true, y_pred):
        kl_loss = 0.5 * K.mean(K.exp(log_var) + K.square(mu) - 1. - log_var, 1)
        y_true_min, y_true_max = K.min(y_true), K.max(y_true)
        recon_loss = K.switch(K.equal(y_true_min, y_true_max),
                              then_expression=lambda: 0.5 * K.sum(K.zeros_like(y_true), axis=1),
                              else_expression=lambda: 0.5 * K.sum(K.square((y_true - y_pred)), axis=1)
                              )
        return _nan2inf(eta * recon_loss + alpha * kl_loss)

    return kl_recon_loss 
Example 68
Project: trVAE   Author: theislab   File: _losses.py    MIT License 5 votes
def kl_loss(mu, log_var, alpha=0.1):
    def kl_recon_loss(y_true, y_pred):
        kl_loss = 0.5 * K.mean(K.exp(log_var) + K.square(mu) - 1. - log_var, 1)
        return _nan2inf(alpha * kl_loss)

    return kl_recon_loss 
Example 69
Project: trVAE   Author: theislab   File: _losses.py    MIT License 5 votes
def perceptual_loss(x_dim, gamma=1.0):
    def percept_loss(input_image, reconstructed_image):
        vggface = VGGFace(include_top=False, input_shape=x_dim, model='vgg16')
        vgg_layers = ['conv1_1']
        outputs = [vggface.get_layer(l).output for l in vgg_layers]
        model = Model(inputs=vggface.input, outputs=outputs)

        for layer in model.layers:
            layer.trainable = False

        input_image *= 255.0
        reconstructed_image *= 255.0

        input_image = preprocess_input(input_image, mode='tf', data_format='channels_last')
        reconstructed_image = preprocess_input(reconstructed_image, mode='tf', data_format='channels_last')

        h1_list = model(input_image)
        h2_list = model(reconstructed_image)

        if not isinstance(h1_list, list):
            h1_list = [h1_list]
            h2_list = [h2_list]

        p_loss = 0.0
        for h1, h2 in zip(h1_list, h2_list):
            h1 = K.batch_flatten(h1)
            h2 = K.batch_flatten(h2)
            p_loss += K.mean(K.square(h1 - h2), axis=-1)

        return gamma * p_loss

    return percept_loss 
Example 70
Project: trVAE   Author: theislab   File: _losses.py    MIT License 5 votes
def loss(self, y_true, y_pred, mean=True):
        scale_factor = self.scale_factor
        eps = self.eps

        with tf.name_scope(self.scope):
            y_true = tf.cast(y_true, tf.float32)
            y_pred = tf.cast(y_pred, tf.float32) * scale_factor

            if self.masking:
                nelem = _nelem(y_true)
                y_true = _nan2zero(y_true)

            # Clip theta
            theta = tf.minimum(self.theta, 1e6)

            t1 = tf.lgamma(theta + eps) + tf.lgamma(y_true + 1.0) - tf.lgamma(y_true + theta + eps)
            t2 = (theta + y_true) * tf.log(1.0 + (y_pred / (theta + eps))) + (
                    y_true * (tf.log(theta + eps) - tf.log(y_pred + eps)))
            final = t1 + t2

            final = _nan2inf(final)

            if mean:
                if self.masking:
                    final = tf.divide(tf.reduce_sum(final), nelem)
                else:
                    final = tf.reduce_mean(final)

        return final 
Example 71
Project: trVAE   Author: theislab   File: _losses.py    MIT License 5 votes
def loss(self, y_true, y_pred, mean=True):
        scale_factor = self.scale_factor
        eps = self.eps

        with tf.name_scope(self.scope):
            # reuse existing NB neg.log.lik.
            # mean is always False here, because everything is calculated
            # element-wise. we take the mean only in the end
            nb_case = super().loss(y_true, y_pred, mean=False) - tf.log(1.0 - self.pi + eps)

            y_true = tf.cast(y_true, tf.float32)
            y_pred = tf.cast(y_pred, tf.float32) * scale_factor
            theta = tf.minimum(self.theta, 1e6)

            zero_nb = tf.pow(theta / (theta + y_pred + eps), theta)
            zero_case = -tf.log(self.pi + ((1.0 - self.pi) * zero_nb) + eps)
            result = tf.where(tf.less(y_true, 1e-8), zero_case, nb_case)
            ridge = self.ridge_lambda * tf.square(self.pi)
            result += ridge

            if mean:
                if self.masking:
                    result = _reduce_mean(result)
                else:
                    result = tf.reduce_mean(result)

            result = _nan2inf(result)

        return result 
Example 72
Project: trVAE   Author: theislab   File: _utils.py    MIT License 5 votes
def compute_mmd(x, y, kernel, **kwargs):  # [batch_size, z_dim] [batch_size, z_dim]
    """
        Computes the Maximum Mean Discrepancy (MMD) between x and y.
        # Parameters
            x: Tensor
                Tensor with shape [batch_size, z_dim]
            y: Tensor
                Tensor with shape [batch_size, z_dim]
        # Returns
            returns the computed MMD between x and y
    """
    x_kernel = compute_kernel(x, x, kernel=kernel, **kwargs)
    y_kernel = compute_kernel(y, y, kernel=kernel, **kwargs)
    xy_kernel = compute_kernel(x, y, kernel=kernel, **kwargs)
    return K.mean(x_kernel) + K.mean(y_kernel) - 2 * K.mean(xy_kernel) 
Example 73
Project: Neural_Temporality_Adaptation   Author: xiaoleihuang   File: model_helper.py    Apache License 2.0 5 votes
def myloss(y_true, y_pred, weights):
    """Customized loss function
	"""
    from keras import backend as K
    return K.mean(K.square(y_pred - y_true), axis=-1) + K.sum(0.001 * K.square(weights)) 
Example 74
Project: reinforcement-learning-kr   Author: rlcode   File: breakout_a3c.py    MIT License 5 votes
def critic_optimizer(self):
        discounted_prediction = K.placeholder(shape=(None,))

        value = self.critic.output

        # Use the square of [return - value] as the loss function
        loss = K.mean(K.square(discounted_prediction - value))

        optimizer = RMSprop(lr=self.critic_lr, rho=0.99, epsilon=0.01)
        updates = optimizer.get_updates(self.critic.trainable_weights, [],loss)
        train = K.function([self.critic.input, discounted_prediction],
                           [loss], updates=updates)
        return train 
Example 75
Project: reinforcement-learning-kr   Author: rlcode   File: cartpole_a2c.py    MIT License 5 votes
def critic_optimizer(self):
        target = K.placeholder(shape=[None, ])

        loss = K.mean(K.square(target - self.critic.output))

        optimizer = Adam(lr=self.critic_lr)
        updates = optimizer.get_updates(self.critic.trainable_weights, [], loss)
        train = K.function([self.critic.input, target], [], updates=updates)

        return train

    # Update the policy network and value network at every timestep
Example 76
Project: typhon   Author: atmtools   File: qrnn.py    MIT License 5 votes
def skewed_absolute_error(y_true, y_pred, tau):
    """
    The quantile loss function for a given quantile tau:

    L(y_true, y_pred) = (tau - I(y_pred < y_true)) * (y_pred - y_true)

    Where I is the indicator function.
    """
    dy = y_pred - y_true
    return K.mean((1.0 - tau) * K.relu(dy) + tau * K.relu(-dy), axis=-1) 
Example 77
Project: keras_mixnets   Author: titu1994   File: mixnets.py    MIT License 4 votes
def SEBlock(input_filters, se_ratio, expand_ratio, activation_fn, data_format=None):
    if data_format is None:
        data_format = K.image_data_format()

    num_reduced_filters = max(
        1, int(input_filters * se_ratio))
    filters = input_filters * expand_ratio

    if data_format == 'channels_first':
        channel_axis = 1
        spatial_dims = [2, 3]
    else:
        channel_axis = -1
        spatial_dims = [1, 2]

    def block(inputs):
        x = inputs
        x = layers.Lambda(lambda a: K.mean(a, axis=spatial_dims, keepdims=True))(x)
        x = GroupedConv2D(
            num_reduced_filters,
            kernel_size=[1],
            strides=[1, 1],
            kernel_initializer=MixNetConvInitializer(),
            padding='same',
            use_bias=True)(x)

        x = activation_fn()(x)

        # Excite
        x = GroupedConv2D(
            filters,
            kernel_size=[1],
            strides=[1, 1],
            kernel_initializer=MixNetConvInitializer(),
            padding='same',
            use_bias=True)(x)
        x = layers.Activation('sigmoid')(x)
        out = layers.Multiply()([x, inputs])
        return out

    return block


# Obtained from 
Example 78
Project: phoneticSimilarity   Author: ronggong   File: models_siamese_tripletloss.py    GNU Affero General Public License v3.0 4 votes
def train_embedding_siamese_batch_teacher_student(list_feature_fold_train,
                                                  labels_fold_train,
                                                  list_feature_fold_val,
                                                  labels_fold_val,
                                                  batch_size,
                                                  input_shape,
                                                  output_shape,
                                                  margin,
                                                  file_path_model,
                                                  filename_log,
                                                  patience,
                                                  reverse_anchor=False):
    """siamese teacher student labels"""

    print("organizing features...")

    generator_train = generator_triplet(list_feature=list_feature_fold_train,
                                        labels=labels_fold_train,
                                        batch_size=1,
                                        shuffle=True,
                                        reverse_anchor=reverse_anchor)

    generator_val = generator_triplet(list_feature=list_feature_fold_val,
                                      labels=labels_fold_val,
                                      batch_size=1,
                                      shuffle=True,
                                      reverse_anchor=reverse_anchor)

    if output_shape == 2:
        base_model = embedding_1_lstm_base  # best model for 2 class
    else:
        base_model = embedding_2_lstm_1_dense_base  # best model for 54 class

    embedding_model, triplet_model, outputs = embedding_triplet_model(input_shape, output_shape, base_model)

    triplet_model.add_loss(K.mean(triplet_loss(outputs, margin=margin)))
    triplet_model.compile(loss=None, optimizer='adam')

    callbacks = [ModelCheckpoint(file_path_model, monitor='val_loss', verbose=0, save_best_only=True),
                 EarlyStopping(monitor='val_loss', patience=patience, verbose=0),
                 CSVLogger(filename=filename_log, separator=';')]

    print("start training with validation...")

    triplet_model.fit_generator(generator=generator_train,
                                steps_per_epoch=len(list_feature_fold_train)/batch_size,
                                validation_data=generator_val,
                                validation_steps=len(list_feature_fold_val)/batch_size,
                                callbacks=callbacks,
                                epochs=500,
                                verbose=2) 
Example 79
Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    MIT License 4 votes
def evaluate(self, num = 0):

        n1 = noise(32)

        generated_images = self.GAN.G.predict(n1, batch_size = BATCH_SIZE)

        real_images = self.im.get_test_batch(16)
        latent_codes = self.GAN.E.predict(real_images, batch_size = BATCH_SIZE)
        reconstructed_images = self.GAN.G.predict(latent_codes, batch_size = BATCH_SIZE)

        print("E Mean: " + str(np.mean(latent_codes)))
        print("E Std: " + str(np.std(latent_codes)))
        print("E Std Featurewise: " + str(np.mean(np.std(latent_codes, axis = 0))))
        print()

        r = []

        for i in range(0, 32, 8):
            r.append(np.concatenate(generated_images[i:i+8], axis = 1))

        hline = np.zeros([16, 8 * im_size, 3])
        r.append(hline)

        for i in range(0, 16, 8):
            r.append(np.concatenate(real_images[i:i+8], axis = 1))
            r.append(np.concatenate(reconstructed_images[i:i+8], axis = 1))

        c1 = np.concatenate(r, axis = 0)

        x = Image.fromarray(np.uint8(c1*255))

        x.save("Results/i"+str(num)+".png")

        # Moving Average

        n1 = noise(32)

        generated_images = self.GAN.GE.predict(n1, batch_size = BATCH_SIZE)

        latent_codes = self.GAN.EE.predict(real_images, batch_size = BATCH_SIZE)
        reconstructed_images = self.GAN.GE.predict(latent_codes, batch_size = BATCH_SIZE)

        r = []

        for i in range(0, 32, 8):
            r.append(np.concatenate(generated_images[i:i+8], axis = 1))

        hline = np.zeros([16, 8 * im_size, 3])
        r.append(hline)

        for i in range(0, 16, 8):
            r.append(np.concatenate(real_images[i:i+8], axis = 1))
            r.append(np.concatenate(reconstructed_images[i:i+8], axis = 1))

        c1 = np.concatenate(r, axis = 0)

        x = Image.fromarray(np.uint8(c1*255))

        x.save("Results/i"+str(num)+"-ema.png") 
Example 80
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 4 votes
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
                with values from 0 to 1.
    """
    # Reshape for simplicity. Merge first two dimensions into one.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    mask_shape = tf.shape(target_masks)
    target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
    pred_shape = tf.shape(pred_masks)
    pred_masks = K.reshape(pred_masks,
                           (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Permute predicted masks to [N, num_classes, height, width]
    pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])

    # Only positive ROIs contribute to the loss. And only
    # the class specific mask of each ROI.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    indices = tf.stack([positive_ix, positive_class_ids], axis=1)

    # Gather the masks (predicted and true) that contribute to loss
    y_true = tf.gather(target_masks, positive_ix)
    y_pred = tf.gather_nd(pred_masks, indices)

    # Compute binary cross entropy. If no positive ROIs, then return 0.
    # shape: [batch, roi, num_classes]
    loss = K.switch(tf.size(y_true) > 0,
                    K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss


############################################################
#  Data Generator
############################################################