Python keras.backend.sum() Examples

The following code examples show how to use keras.backend.sum(). They are drawn from open-source Python projects. You can vote up the examples you find useful or vote down the ones you don't.
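
Before diving into the project code, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic K.sum() call: it reduces a tensor along one or more axes, optionally keeping the reduced dimension.

from keras import backend as K

x = K.constant([[1., 2.], [3., 4.]])
total = K.sum(x)                                  # scalar: 10.0
per_row = K.sum(x, axis=1)                        # shape (2,): [3., 7.]
per_row_kd = K.sum(x, axis=1, keepdims=True)      # shape (2, 1)
print(K.eval(total), K.eval(per_row), K.eval(per_row_kd))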

Example 1
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulelayers.py    MIT License 6 votes vote down vote up
def call(self, inputs, **kwargs):
        if type(inputs) is list:  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
            # compute lengths of capsules
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask which is a one-hot code.
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked 
Example 2
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lillabcrossval_network.py    MIT License 6 votes vote down vote up
def sens(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    se = tp / (tp + fn)
    return se 
Example 3
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lillabcrossval_network.py    MIT License 6 votes vote down vote up
def spec(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    sp = tn / (fp + tn)
    return sp 
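
The sens and spec functions above follow the standard Keras metric signature (y_true, y_pred), so they can be passed directly to model.compile. A hedged usage sketch follows (the tiny model is illustrative, not from the MODS_ConvNet project); note that adding K.epsilon() to the denominators, as several later examples do, avoids NaNs on batches that contain no positive (or no negative) samples.

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, activation='sigmoid', input_shape=(4,))])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[sens, spec])  # sens/spec as defined above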
Example 4
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License 6 votes vote down vote up
def sens(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    se = tp / (tp + fn)
    return se 
Example 5
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License 6 votes vote down vote up
def spec(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    sp = tn / (fp + tn)
    return sp 
Example 6
Project: MODS_ConvNet   Author: santiagolopezg   File: test_labcrossval_network.py    MIT License 6 votes vote down vote up
def sens(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    se = tp / (tp + fn)
    return se 
Example 7
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lilfoo.py    MIT License 6 votes vote down vote up
def sens(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    se = tp / (tp + fn)
    return se 
Example 8
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lilfoo.py    MIT License 6 votes vote down vote up
def spec(y_true, y_pred):
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos

    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)

    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)

    sp = tn / (fp + tn)
    return sp 
Example 9
Project: CapsAttnNet   Author: rstager   File: train.py    MIT License 6 votes vote down vote up
def margin_loss(y_true, y_pred):
    """
    Margin loss for Eq.(4). This should also work when y_true[i, :] contains more than one `1`, but that case has not been tested.
    :param y_true: [None, n_classes, n_instance]
    :param y_pred: [None, n_classes, n_instance]
    :return: a scalar loss value.
    """

    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))

    loss = K.mean(K.sum(L, 1))

    acc = K.equal(K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1))

    # loss = tf.Print(loss,[tf.shape(y_true)],message=" margin loss y_true shape",summarize=6,first_n=1)
    # loss = tf.Print(loss,[tf.shape(y_pred)],message=" margin loss y_pred shape",summarize=6,first_n=1)
    # loss = tf.Print(loss,[tf.shape(L)],message=" margin loss L shape",summarize=6,first_n=1)
    # loss = tf.Print(loss,[tf.shape(acc)],message=" margin loss acc shape",summarize=6,first_n=1)
    # loss = tf.Print(loss,[y_true[0,0,:],y_pred[0,0,:]],message=" margin loss y_true/y_pred",summarize=20)
    # loss = tf.Print(loss,[L[0,0,:]],message=" margin loss L",summarize=6)
    # loss = tf.Print(loss,[loss],message=" margin loss loss",summarize=6)
    # loss = tf.Print(loss,[acc[0,0]],message=" margin loss acc",summarize=6)

    return loss 
Example 10
Project: CapsAttnNet   Author: rstager   File: train.py    MIT License 6 votes vote down vote up
def pose_loss(y_true, y_pred):
    """.
    :param y_true: [None, n_classes, n_instance,pose]
    :param y_pred: [None, n_classes, n_instance,pose]
    :return: a scalar loss value.
    """
    loss = K.sum( K.square(y_true-y_pred),-1)

    # loss = tf.Print(loss,[tf.shape(y_true)],message=" pose loss y_true shape",summarize=6,first_n=1)
    # loss = tf.Print(loss,[tf.shape(y_pred)],message=" pose loss y_pred shape",summarize=6,first_n=1)
    # idx=8
    # loss = tf.Print(loss,[loss[idx,0]],message=" pose loss loss",summarize=6)
    # loss = tf.Print(loss,[y_true[idx,0,0]],message=" pose true y_true",summarize=20)
    # loss = tf.Print(loss,[y_pred[idx,0,0]],message=" pose loss y_pred",summarize=20)
    # loss = tf.Print(loss,[loss[idx,0]],message=" pose loss loss",summarize=6)

    return loss 
Example 11
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    MIT License 6 votes vote down vote up
def step(self, x, states):   
        h = states[0]
        # states[1] necessary?

        # equals K.dot(X, self._W1) + self._b2 with X.shape=[bs, T, input_dim]
        total_x_prod = states[-1]
        # comes from the constants (equals the input sequence)
        X = states[-2]
        
        # expand dims to add the vector which is only valid for this time step
        # to total_x_prod which is valid for all time steps
        hw = K.expand_dims(K.dot(h, self._W2), 1)
        additive_atn = total_x_prod + hw
        attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
        x_weighted = K.sum(attention * X, [1])

        x = K.dot(K.concatenate([x, x_weighted], 1), self._W3) + self._b3
        
        h, new_states = self.layer.cell.call(x, states[:-2])
        
        return h, new_states 
Example 12
Project: keras-utility-layer-collection   Author: zimmerrol   File: attention.py    MIT License 6 votes vote down vote up
def step(self, x, states):  
        h = states[0]
        # states[1] necessary?
        
        # comes from the constants
        X_static = states[-2]
        # equals K.dot(static_x, self._W1) + self._b2 with X.shape=[bs, L, static_input_dim]
        total_x_static_prod = states[-1]

        # expand dims to add the vector which is only valid for this time step
        # to total_x_prod which is valid for all time steps
        hw = K.expand_dims(K.dot(h, self._W2), 1)
        additive_atn = total_x_static_prod + hw
        attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
        static_x_weighted = K.sum(attention * X_static, [1])
        
        x = K.dot(K.concatenate([x, static_x_weighted], 1), self._W3) + self._b3

        h, new_states = self.layer.cell.call(x, states[:-2])
        
        # append attention to the states to "smuggle" it out of the RNN wrapper
        attention = K.squeeze(attention, -1)
        h = K.concatenate([h, attention])

        return h, new_states 
Example 13
Project: cdc   Author: ckbjimmy   File: Attention.py    MIT License 6 votes vote down vote up
def call(self, x, mask=None):
        eij = dot_product(x, self.W)

        if self.bias:
            eij += self.b

        eij = K.tanh(eij)

        a = K.exp(eij)

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1) 
Example 14
Project: gandlf   Author: codekansas   File: reversing_gan.py    MIT License 6 votes vote down vote up
def build_generator():
    """Builds the big generator model."""

    latent = keras.layers.Input((100,), name='latent')

    image_class = keras.layers.Input((10,), dtype='float32',
                                     name='image_class')
    d = keras.layers.Dense(100)(image_class)
    merged = keras.layers.merge([latent, d], mode='sum')

    hidden = keras.layers.Dense(512)(merged)
    hidden = keras.layers.LeakyReLU()(hidden)

    hidden = keras.layers.Dense(512)(hidden)
    hidden = keras.layers.LeakyReLU()(hidden)

    output_layer = keras.layers.Dense(28 * 28,
        activation='tanh')(hidden)
    fake_image = keras.layers.Reshape((28, 28, 1))(output_layer)

    return keras.models.Model(input=[latent, image_class],
                              output=fake_image) 
Example 15
Project: gandlf   Author: codekansas   File: reversing_gan.py    MIT License 6 votes vote down vote up
def reverse_generator(generator, X_sample, y_sample, title):
    """Gradient descent to map images back to their latent vectors."""

    latent_vec = np.random.normal(size=(1, 100))

    # Function for figuring out how to bump the input.
    target = K.placeholder()
    loss = K.sum(K.square(generator.outputs[0] - target))
    grad = K.gradients(loss, generator.inputs[0])[0]
    update_fn = K.function(generator.inputs + [target], [grad])

    # Repeatedly apply the update rule.
    xs = []
    for i in range(60):
        print('%d: latent_vec mean=%f, std=%f'
              % (i, np.mean(latent_vec), np.std(latent_vec)))
        xs.append(generator.predict_on_batch([latent_vec, y_sample]))
        for _ in range(10):
            update_vec = update_fn([latent_vec, y_sample, X_sample])[0]
            latent_vec -= update_vec * update_rate

    # Plots the samples.
    xs = np.concatenate(xs, axis=0)
    plot_as_gif(xs, X_sample, title) 
Example 16
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License 6 votes vote down vote up
def softmax_sparse_crossentropy_ignoring_first_label(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1]+1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    y_true = tf.stack(unpacked[1:], axis=-1)

    cross_entropy = -K.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = K.sum(cross_entropy) / K.sum(tf.to_float(legal_labels))

    return cross_entropy_mean


# Accuracy for segmentation (ignoring first label) 
Example 17
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 6 votes vote down vote up
def sparse_accuracy(y_true, y_pred):
    classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    y_true = tf.stack(unpacked[1:], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))


# Define different models


# 3D-FCN model 
Example 18
Project: Keras-GAN   Author: eriklindernoren   File: wgan_gp.py    MIT License 6 votes vote down vote up
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
        """
        Computes gradient penalty based on prediction and weighted real / fake samples
        """
        gradients = K.gradients(y_pred, averaged_samples)[0]
        # compute the euclidean norm by squaring ...
        gradients_sqr = K.square(gradients)
        #   ... summing over the rows ...
        gradients_sqr_sum = K.sum(gradients_sqr,
                                  axis=np.arange(1, len(gradients_sqr.shape)))
        #   ... and sqrt
        gradient_l2_norm = K.sqrt(gradients_sqr_sum)
        # compute lambda * (1 - ||grad||)^2 still for each single sample
        gradient_penalty = K.square(1 - gradient_l2_norm)
        # return the mean as loss over all the batch samples
        return K.mean(gradient_penalty) 
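
Because Keras loss functions only receive (y_true, y_pred), the extra averaged_samples argument above is typically bound before compiling, for example with functools.partial. A hedged sketch (interpolated_img and the surrounding model are hypothetical placeholders, not shown in this excerpt):

from functools import partial

# `interpolated_img` stands in for the random-weighted average of real and
# fake samples produced elsewhere in the model (hypothetical name here).
partial_gp_loss = partial(self.gradient_penalty_loss,
                          averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty'  # Keras needs a name for logging
# partial_gp_loss can now be passed to model.compile like any other loss.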
Example 19
Project: FasterRCNN_KERAS   Author: akshaylamba   File: losses.py    Apache License 2.0 6 votes vote down vote up
def rpn_loss_regr(num_anchors):
	def rpn_loss_regr_fixed_num(y_true, y_pred):
		if K.image_dim_ordering() == 'th':
			x = y_true[:, 4 * num_anchors:, :, :] - y_pred
			x_abs = K.abs(x)
			x_bool = K.less_equal(x_abs, 1.0)
			return lambda_rpn_regr * K.sum(
				y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
		else:
			x = y_true[:, :, :, 4 * num_anchors:] - y_pred
			x_abs = K.abs(x)
			x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)

			return lambda_rpn_regr * K.sum(
				y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])

	return rpn_loss_regr_fixed_num 
Example 20
Project: speech_separation   Author: bill9800   File: model_loss.py    MIT License 6 votes vote down vote up
def audio_discriminate_loss2(gamma=0.1,beta = 2*0.1,num_speaker=2):
    def loss_func(S_true,S_pred,gamma=gamma,beta=beta,num_speaker=num_speaker):
        sum_mtr = K.zeros_like(S_true[:,:,:,:,0])
        for i in range(num_speaker):
            sum_mtr += K.square(S_true[:,:,:,:,i]-S_pred[:,:,:,:,i])
            for j in range(num_speaker):
                if i != j:
                    sum_mtr -= gamma*(K.square(S_true[:,:,:,:,i]-S_pred[:,:,:,:,j]))

        for i in range(num_speaker):
            for j in range(i+1,num_speaker):
                #sum_mtr -= beta*K.square(S_pred[:,:,:,i]-S_pred[:,:,:,j])
                #sum_mtr += beta*K.square(S_true[:,:,:,:,i]-S_true[:,:,:,:,j])
                pass
        #sum = K.sum(K.maximum(K.flatten(sum_mtr),0))

        loss = K.mean(K.flatten(sum_mtr))

        return loss
    return loss_func 
Example 21
Project: VisualNN   Author: angelhunt   File: cifar10_cnn_capsule.py    GNU General Public License v3.0 5 votes vote down vote up
def squash(x, axis=-1):
    s_squared_norm = K.sum(K.square(x), axis, keepdims=True) + K.epsilon()
    scale = K.sqrt(s_squared_norm) / (0.5 + s_squared_norm)
    return scale * x


# define our own softmax function instead of K.softmax
# because K.softmax can not specify axis. 
Example 22
Project: VisualNN   Author: angelhunt   File: cifar10_cnn_capsule.py    GNU General Public License v3.0 5 votes vote down vote up
def softmax(x, axis=-1):
    ex = K.exp(x - K.max(x, axis=axis, keepdims=True))
    return ex / K.sum(ex, axis=axis, keepdims=True)


# define the margin loss like hinge loss 
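
As a quick, hedged sanity check (illustrative only, not part of the project), the custom softmax normalizes along the chosen axis, so summing the result over that axis yields ones:

import numpy as np
from keras import backend as K

logits = K.constant(np.random.randn(2, 3, 4).astype('float32'))
probs = softmax(logits, axis=1)       # softmax as defined above
print(K.eval(K.sum(probs, axis=1)))   # every entry is ~1.0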
Example 23
Project: VisualNN   Author: angelhunt   File: cifar10_cnn_capsule.py    GNU General Public License v3.0 5 votes vote down vote up
def margin_loss(y_true, y_pred):
    lamb, margin = 0.5, 0.1
    return K.sum(y_true * K.square(K.relu(1 - margin - y_pred)) + lamb * (
        1 - y_true) * K.square(K.relu(y_pred - margin)), axis=-1) 
Example 24
Project: VisualNN   Author: angelhunt   File: cifar10_cnn_capsule.py    GNU General Public License v3.0 5 votes vote down vote up
def call(self, inputs):
        """Following the routing algorithm from Hinton's paper,
        but replace b = b + <u,v> with b = <u,v>.

        This change can improve the feature representation of Capsule.

        However, you can replace
            b = K.batch_dot(outputs, hat_inputs, [2, 3])
        with
            b += K.batch_dot(outputs, hat_inputs, [2, 3])
        to realize a standard routing.
        """

        if self.share_weights:
            hat_inputs = K.conv1d(inputs, self.kernel)
        else:
            hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

        batch_size = K.shape(inputs)[0]
        input_num_capsule = K.shape(inputs)[1]
        hat_inputs = K.reshape(hat_inputs,
                               (batch_size, input_num_capsule,
                                self.num_capsule, self.dim_capsule))
        hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

        b = K.zeros_like(hat_inputs[:, :, :, 0])
        for i in range(self.routings):
            c = softmax(b, 1)
            o = self.activation(K.batch_dot(c, hat_inputs, [2, 2]))
            if i < self.routings - 1:
                b = K.batch_dot(o, hat_inputs, [2, 3])
                if K.backend() == 'theano':
                    o = K.sum(o, axis=1)

        return o 
Example 25
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulelayers.py    MIT License 5 votes vote down vote up
def call(self, inputs, **kwargs):
        return K.sqrt(K.sum(K.square(inputs), -1)) 
Example 26
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulelayers.py    MIT License 5 votes vote down vote up
def squash(vectors, axis=-1):
    """
    The non-linear activation used in Capsule. It drives the length of a large vector to near 1 and a small vector to near 0
    :param vectors: some vectors to be squashed, N-dim tensor
    :param axis: the axis to squash
    :return: a Tensor with same shape as input vectors
    """
    s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
    scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())
    return scale * vectors 
Example 27
Project: deep-learning-note   Author: wdxtub   File: 3_nerual_style_transfer.py    MIT License 5 votes vote down vote up
def content_loss(base, combination):
    return K.sum(K.square(combination - base)) 
Example 28
Project: deep-learning-note   Author: wdxtub   File: 3_nerual_style_transfer.py    MIT License 5 votes vote down vote up
def style_loss(style, combination):
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_height * img_width
    return K.sum(K.square(S - C)) / ( 4. * (channels ** 2) * (size ** 2)) 
Example 29
Project: deep-learning-note   Author: wdxtub   File: 3_nerual_style_transfer.py    MIT License 5 votes vote down vote up
def total_variation_loss(x):
    a = K.square(
        x[:, :img_height-1, :img_width-1, :] -
        x[:, 1:, :img_width-1, :])
    b = K.square(
        x[:, :img_height-1, :img_width-1, :] -
        x[:, :img_height-1, 1:, :])
    return K.sum(K.pow(a+b, 1.25)) 
Example 30
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: UNET3D_MultiStream_v2.py    MIT License 5 votes vote down vote up
def dice_coef(y_true, y_pred):
  y_true_f = K.flatten(y_true)
  y_pred_f = K.flatten(y_pred)
  intersection = K.sum(y_true_f * y_pred_f)
  return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
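
dice_coef above reads a module-level smooth constant. A common, hedged companion (not shown in the original file) is to optimize its complement directly:

smooth = 1.0  # assumed value; defined elsewhere in the original script

def dice_coef_loss(y_true, y_pred):
    # Minimizing 1 - Dice maximizes the overlap between prediction and target.
    return 1.0 - dice_coef(y_true, y_pred)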
Example 31
Project: CapsAttnNet   Author: rstager   File: canlayer.py    MIT License 5 votes vote down vote up
def squash_scale(vectors, axis=-1):
    """
    The non-linear activation used in Capsule. It drives the length of a large vector to near 1 and a small vector to near 0
    :param vectors: some vectors to be squashed, N-dim tensor
    :param axis: the axis to squash
    :return: a Tensor with same shape as input vectors
    """
    s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
    scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())
    return scale 
Example 32
Project: CapsAttnNet   Author: rstager   File: canlayer.py    MIT License 5 votes vote down vote up
def _best_guess(self, c, inputs_hat):
        '''
        Combine the predicted poses 'input_hats' weighted by c to come up with best_guess of the capsule poses

        :param c: weights to apply to the input poses
        :param inputs_hat: input poses
        :return: best guess at pose
        '''
        # c.shape=[None, num_capsule * num_instance, num_part * input_num_capsule * input_num_instance]
        # inputs_hat.shape = [None,num_instance * num_capsule, num_parts, input_num_capsule * input_num_instance, dim_capsule]
        # guess.shape = [None,num_instance * num_capsule,dim_capsule]

        # take the mean probability
        probability = tf.reduce_mean(inputs_hat[:,:,:,0:1],axis=2)

        # find the mean weighted geometric pose
        sum_weighted_geoms = K.batch_dot(c,inputs_hat[:,:,:,1:dim_geom+1], [2, 2])
        one_over_weight_sums = tf.tile(tf.expand_dims(tf.reciprocal(K.sum(c,axis=-1)),-1),[1,1,dim_geom])
        mean_geom =  one_over_weight_sums*sum_weighted_geoms

        # squash the weighted sum of attributes
        weighted_attrs = K.batch_dot(c,inputs_hat[:,:,:,dim_geom+1:], [2, 2])
        scale = squash_scale(weighted_attrs)

        # use the magnitude of the squashed weighted sum of attributes for probability
        probability = scale

        guess = layers.concatenate([probability,mean_geom,weighted_attrs])
        return guess 
Example 33
Project: phoneticSimilarity   Author: ronggong   File: attentionWithContext.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def call(self, x, mask=None):
        uit = dot_product(x, self.W)

        if self.bias:
            uit += self.b

        uit = K.tanh(uit)
        ait = dot_product(uit, self.u)

        a = K.exp(ait)

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        result = K.sum(weighted_input, axis=1)

        if self.return_attention:
            return [result, a]
        return result 
Example 34
Project: phoneticSimilarity   Author: ronggong   File: attention.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def call(self, x, mask=None):
        eij = dot_product(x, self.W)  # (samples, steps)

        if self.bias:
            eij += self.b

        eij = K.tanh(eij)

        a = K.exp(eij)

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a_expand = K.expand_dims(a)

        # element wise
        weighted_input = x * a_expand

        result = K.sum(weighted_input, axis=1)

        if self.return_attention:
            return [result, a]
        return result 
Example 35
Project: cdc   Author: ckbjimmy   File: EmbCRNN.py    MIT License 5 votes vote down vote up
def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision 
Example 36
Project: cdc   Author: ckbjimmy   File: EmbCRNN.py    MIT License 5 votes vote down vote up
def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall 
Example 37
Project: cdc   Author: ckbjimmy   File: EmbCRNN.py    MIT License 5 votes vote down vote up
def fbeta_score(y_true, y_pred, beta=1):
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score 
Example 38
Project: cdc   Author: ckbjimmy   File: Attention.py    MIT License 5 votes vote down vote up
def call(self, x, mask=None):
        uit = K.dot(x, self.W)
        
        if self.bias:
            uit += self.b
        
        uit = K.tanh(uit)
        
        mul_a = uit  * self.u # with this
        ait = K.sum(mul_a, axis=2) # and this
        
        a = K.exp(ait)

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1) 
Example 39
Project: cdc   Author: ckbjimmy   File: EmbGRUattention.py    MIT License 5 votes vote down vote up
def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall 
Example 40
Project: cdc   Author: ckbjimmy   File: EmbGRUattention.py    MIT License 5 votes vote down vote up
def fbeta_score(y_true, y_pred, beta=1):
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score 
Example 41
Project: gandlf   Author: codekansas   File: losses.py    MIT License 5 votes vote down vote up
def rbf_moment_matching(y_true, y_pred, sigmas=[2, 5, 10, 20, 40, 80]):
    """Generative moment matching loss with RBF kernel.

    Reference: https://arxiv.org/abs/1502.02761
    """

    warnings.warn('Moment matching loss is still in development.')

    if len(K.int_shape(y_pred)) != 2 or len(K.int_shape(y_true)) != 2:
        raise ValueError('RBF Moment Matching function currently only works '
                         'for outputs with shape (batch_size, num_features).'
                         'Got y_true="%s" and y_pred="%s".' %
                         (str(K.int_shape(y_pred)), str(K.int_shape(y_true))))

    sigmas = list(sigmas) if isinstance(sigmas, (list, tuple)) else [sigmas]

    x = K.concatenate([y_pred, y_true], 0)

    # Performs dot product between all combinations of rows in X.
    xx = K.dot(x, K.transpose(x))  # (batch_size, batch_size)

    # Performs dot product of all rows with themselves.
    x2 = K.sum(x * x, 1, keepdims=True)  # (batch_size, None)

    # Gets exponent entries of the RBF kernel (without sigmas).
    exponent = xx - 0.5 * x2 - 0.5 * K.transpose(x2)

    # Applies all the sigmas.
    total_loss = None
    for sigma in sigmas:
        kernel_val = K.exp(exponent / sigma)
        loss = K.sum(kernel_val)
        total_loss = loss if total_loss is None else loss + total_loss

    return total_loss 
Example 42
Project: gandlf   Author: codekansas   File: similarities.py    MIT License 5 votes vote down vote up
def l1(a, b):
    """L1 similarity. Maximum is 0 (a == b), minimum is -inf."""

    return -K.sum(K.abs(a - b), axis=-1) 
Example 43
Project: gandlf   Author: codekansas   File: similarities.py    MIT License 5 votes vote down vote up
def l2(a, b):
    """L2 similarity. Maximum is 0 (a == b), minimum is -inf."""

    return -K.sum(K.square(a - b), axis=-1) 
Example 44
Project: gandlf   Author: codekansas   File: similarities.py    MIT License 5 votes vote down vote up
def sigmoid(a, b):
    """Sigmoid similarity. Maximum is 1 (a == b), minimum is 0."""

    return K.sigmoid(K.sum(a * b, axis=-1)) 
Example 45
Project: gandlf   Author: codekansas   File: similarities.py    MIT License 5 votes vote down vote up
def euclidean(a, b):
    """Euclidian similarity. Maximum is 1 (a == b), minimum is 0 (a == -b)."""

    x = K.sum(K.square(a - b), axis=-1)
    return 1. / (1. + x) 
Example 46
Project: AI_Competition   Author: Decalogue   File: attention.py    MIT License 5 votes vote down vote up
def call(self, x, mask=None):
        input_shape = K.int_shape(x)

        features_dim = self.features_dim
        # step_dim = self.step_dim
        step_dim = input_shape[1]

        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim))

        if self.bias:
            eij += self.b[:input_shape[1]]

        eij = K.tanh(eij)

        a = K.exp(eij)

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        # print(weighted_input.shape)
        return K.sum(weighted_input, axis=1) 
Example 47
Project: AI_Competition   Author: Decalogue   File: attention.py    MIT License 5 votes vote down vote up
def call(self, inputs, mask=None):
        en = inputs[0]
        de = inputs[1]
        de_shape = K.int_shape(de)
        step_dim = de_shape[1]

        hid_en = K.dot(en, self.W_en1)
        hid_de = K.dot(de, self.W_en2)
        if self.bias:
            hid_en += self.b_en1
            hid_de += self.b_en2
        hid = K.tanh(K.expand_dims(hid_en, axis=1) + hid_de)
        eij = K.reshape(K.dot(hid, K.reshape(self.W_de, (self.hid_size, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b_de[:step_dim]

        a = K.exp(eij - K.max(eij, axis=-1, keepdims=True))

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask[1], K.floatx())

        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = de * a
        return K.sum(weighted_input, axis=1) 
Example 48
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License 5 votes vote down vote up
def dice_coef_clipped(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(K.round(y_true))
    y_pred_f = K.flatten(K.round(y_pred))
    intersection = K.sum(y_true_f * y_pred_f)
    return 100. * (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 49
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License 5 votes vote down vote up
def dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) 
Example 50
Project: Quora-Question-Pairs   Author: rupak-118   File: MaLSTM_train.py    MIT License 5 votes vote down vote up
def exponent_neg_manhattan_distance(left, right):
    ''' 
    Purpose : Helper function for the similarity estimate of the LSTM outputs
    Inputs : Two n-dimensional vectors
    Output : Exponent of the negative Manhattan distance between the input vectors
    
    '''
    return K.exp(-K.sum(K.abs(left-right), axis=1, keepdims=True))


# Applying the pre-processing function on the combined text corpus 
Example 51
Project: Quora-Question-Pairs   Author: rupak-118   File: test.py    MIT License 5 votes vote down vote up
def exponent_neg_manhattan_distance(left, right):
    ''' 
    Purpose : Helper function for the similarity estimate of the LSTM outputs
    Inputs : Two n-dimensional vectors
    Output : Exponent of the negative Manhattan distance between the input vectors
    
    '''
    return K.exp(-K.sum(K.abs(left-right), axis=1, keepdims=True))


#print("\n Helper functions loaded")

# Based on the training set, a keep list of common dot words was prepared 
Example 52
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License 5 votes vote down vote up
def sparse_accuracy(y_true, y_pred):
    classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)),
                       classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[0], tf.bool)
    y_true = tf.stack(unpacked[1:], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels)) 
Example 53
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License 5 votes vote down vote up
def softmax_sparse_crossentropy_ignoring_first_label(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log = tf.nn.log_softmax(y_pred)

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1]+1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[1:], axis=-1)

    cross_entropy = -K.sum(y_true * log, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean


# Accuracy for segmentation (ignoring first label) 
Example 54
Project: Keras-GAN   Author: eriklindernoren   File: infogan.py    MIT License 5 votes vote down vote up
def mutual_info_loss(self, c, c_given_x):
        """The mutual information metric we aim to minimize"""
        eps = 1e-8
        conditional_entropy = K.mean(- K.sum(K.log(c_given_x + eps) * c, axis=1))
        entropy = K.mean(- K.sum(K.log(c + eps) * c, axis=1))

        return conditional_entropy + entropy 
Example 55
Project: visual_turing_test-tutorial   Author: mateuszmalinowski   File: keras_extensions.py    MIT License 5 votes vote down vote up
def time_distributed_masked_ave(x, m):
    """
    Computes average along the first (time) dimension.
    
    In:
        x - input; a 3D tensor
        m - mask
    """
    tmp = K.sum(x, axis=1)
    nonzeros = K.sum(m, axis=-1)
    return tmp / K.expand_dims(K.cast(nonzeros, tmp.dtype)) 
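
A hedged, illustrative check of the masked time average (shapes chosen arbitrarily): the time-sum of x is divided by the number of unmasked steps in each sample.

import numpy as np
from keras import backend as K

x = K.constant(np.ones((2, 4, 5), dtype='float32'))      # (batch, time, features)
m = K.constant([[1., 1., 0., 0.], [1., 1., 1., 1.]])     # 1 = valid step, 0 = padded
print(K.eval(time_distributed_masked_ave(x, m)))
# each row of x sums to 4 over time; rows are divided by 2 and 4 valid steps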
Example 56
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License 5 votes vote down vote up
def softmax(x, axis=-1):
    """
    Self-defined softmax function
    """
    x = K.exp(x - K.max(x, axis=axis, keepdims=True))
    x /= K.sum(x, axis=axis, keepdims=True)
    return x 
Example 57
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License 5 votes vote down vote up
def margin_loss(y, pred):
    """
    For the first part of loss(classification loss)
    """
    return K.mean(K.sum(y * K.square(K.maximum(0.9 - pred, 0)) + \
        0.5 *  K.square((1 - y) * K.maximum(pred - 0.1, 0)), axis=1)) 
Example 58
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License 5 votes vote down vote up
def squash(s, axis=-1):
    """
    Squash function. This could be viewed as one kind of activations.
    """
    squared_s = K.sum(K.square(s), axis=axis, keepdims=True)
    scale = squared_s / (1 + squared_s) / K.sqrt(squared_s + K.epsilon())
    return scale * s 
Example 59
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License 5 votes vote down vote up
def call(self, inputs, **kwargs):
        return K.sqrt(K.sum(K.square(inputs), axis=-1)) 
Example 60
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License 5 votes vote down vote up
def call(self, inputs, **kwargs):
        # inputs -> (X, y), then output the mask of y
        # inputs -> X, then output the mask of prediction
        if isinstance(inputs, (list, tuple)):
            inputs, mask = inputs
        else:
            pred = K.sqrt(K.sum(K.square(inputs), axis=-1) + K.epsilon())
            mask = K.one_hot(indices=K.argmax(pred, 1), num_classes=pred.get_shape().as_list()[1])
        return K.batch_flatten(inputs * K.expand_dims(mask, axis=-1)) 
Example 61
Project: FasterRCNN_KERAS   Author: akshaylamba   File: losses.py    Apache License 2.0 5 votes vote down vote up
def rpn_loss_cls(num_anchors):
	def rpn_loss_cls_fixed_num(y_true, y_pred):
		if K.image_dim_ordering() == 'tf':
			return lambda_rpn_class * K.sum(y_true[:, :, :, :num_anchors] * K.binary_crossentropy(y_pred[:, :, :, :], y_true[:, :, :, num_anchors:])) / K.sum(epsilon + y_true[:, :, :, :num_anchors])
		else:
			return lambda_rpn_class * K.sum(y_true[:, :num_anchors, :, :] * K.binary_crossentropy(y_pred[:, :, :, :], y_true[:, num_anchors:, :, :])) / K.sum(epsilon + y_true[:, :num_anchors, :, :])

	return rpn_loss_cls_fixed_num 
Example 62
Project: FasterRCNN_KERAS   Author: akshaylamba   File: losses.py    Apache License 2.0 5 votes vote down vote up
def class_loss_regr(num_classes):
	def class_loss_regr_fixed_num(y_true, y_pred):
		x = y_true[:, :, 4*num_classes:] - y_pred
		x_abs = K.abs(x)
		x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
		return lambda_cls_regr * K.sum(y_true[:, :, :4*num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4*num_classes])
	return class_loss_regr_fixed_num 
Example 63
Project: ccm-aae   Author: danielegrattarola   File: geometry.py    MIT License 5 votes vote down vote up
def hyperbolic_uniform(size, dim=3, r=-1., low=-1., high=1., projection='upper'):
    """
    Samples points from a uniform distribution on a hyperbolic manifold. Uniform
    sampling on a hyperbolic CCM can be achieved by sampling from a uniform
    distribution in the ambient space of the CCM, and then projecting the
    samples onto the CCM.
    :param size: number of points to sample;
    :param dim: dimension of the ambient space;
    :param r: negative float, the radius of the CCM;
    :param low: lower bound of the uniform distribution from which to sample;
    :param high: upper bound of the uniform distribution from which to sample;
    :param projection: 'upper', 'lower', or 'both'. Whether to project points
    always on the upper or lower branch of the hyperboloid, or on both based
    on the sign of the last coordinate.
    :return: np.array of shape (size, dim).
    """
    samples = np.random.uniform(low, high, (size, dim))
    if projection == 'both':
        sign = np.sign(samples[..., -1:])
    elif projection == 'upper':
        sign = 1
    elif projection == 'lower':
        sign = -1
    else:
        raise NotImplementedError('Possible projection modes: \'both\', '
                                  '\'upper\', \'lower\'.')
    samples[..., -1:] = sign * np.sqrt((samples[..., :-1] ** 2).sum(-1, keepdims=True) + r ** 2)

    return samples 
Example 64
Project: ccm-aae   Author: danielegrattarola   File: geometry.py    MIT License 5 votes vote down vote up
def is_spherical(x, r=1.):
    """
    Boolean membership to spherical manifold.
    :param x: np.array, coordinates are assumed to be in the last axis;
    :param r: positive float, the radius of the CCM;
    :return: boolean np.array, True if the points are on the CCM.
    """
    return (x ** 2).sum(-1).astype(np.float32) == r ** 2 
Example 65
Project: ccm-aae   Author: danielegrattarola   File: geometry.py    MIT License 5 votes vote down vote up
def is_hyperbolic(x, r=-1.):
    """
    Boolean membership to hyperbolic manifold.
    :param x: np.array, coordinates are assumed to be in the last axis;
    :param r: negative float, the radius of the CCM;
    :return: boolean np.array, True if the points are on the CCM.
    """
    return ((x[..., :-1] ** 2).sum(-1) - x[..., -1] ** 2).astype(np.float32) == - r ** 2 
Example 66
Project: ccm-aae   Author: danielegrattarola   File: geometry.py    MIT License 5 votes vote down vote up
def hyperbolic_clip(x, r=-1., axis=-1):
    """
    Clips points in the ambient space to a hyperbolic CCM of radius `r`, by
    forcing the `axis` coordinate of the points to be
    \(X_{axis} = \sqrt{\sum\limits_{i \neq {axis}} X_{i}^{2} + r^{2}}\).
    :param x: np.array, coordinates are assumed to be in the last axis;
    :param r: negative float, the radius of the CCM;
    :param axis: int, the axis along which to clip;
    :return: np.array of same shape as x.
    """
    x = x.copy()
    free_components_idxs = np.delete(np.arange(x.shape[-1]), axis)
    x[..., axis] = np.sqrt(np.sum(x[..., free_components_idxs] ** 2, -1) + (r ** 2))
    return x 
Example 67
Project: ccm-aae   Author: danielegrattarola   File: geometry.py    MIT License 5 votes vote down vote up
def call(self, inputs):
        output_part = []
        manifold_size = K.int_shape(inputs)[-1] // len(self.r)

        for idx, r_ in enumerate(self.r):
            start = idx * manifold_size
            stop = start + manifold_size
            part = inputs[..., start:stop]
            sign = np.sign(r_)
            if sign == 0.:
                # This is weird but necessary to make the layer differentiable
                output_pre = K.sum(inputs, -1, keepdims=True) * 0. + 1.
            else:
                free_components = part[..., :-1] ** 2
                bound_component = sign * part[..., -1:] ** 2
                all_components = K.concatenate((free_components, bound_component), -1)
                ext_product = K.sum(all_components, -1, keepdims=True)
                output_pre = K.exp(-(ext_product - sign * r_ ** 2) ** 2 / (2 * self.sigma ** 2))

            output_part.append(output_pre)

        if len(output_part) >= 2:
            if self.mode == 'average':
                output = Average()(output_part)
            elif self.mode == 'concat':
                output = Concatenate()(output_part)
            else:
                raise ValueError()  # Never gets here
        else:
            output = output_part[0]

        return output 
Example 68
Project: 3DGCN   Author: blackmints   File: loss.py    MIT License 5 votes vote down vote up
def std_r2(std=1):
    def r2(y_true, y_pred):
        ss_res = K.sum(K.square((y_true - y_pred) * std))
        ss_tot = K.sum(K.square((y_true - K.mean(y_true) * std)))
        return 1 - ss_res / (ss_tot + K.epsilon())

    return r2 
Example 69
Project: ismir2018-artist   Author: jongpillee   File: train.py    MIT License 5 votes vote down vote up
def hinge_loss(y_true,y_pred):
	# hinge loss
	y_pos = y_pred[:,:1]
	y_neg = y_pred[:,1:]
	loss = K.sum(K.maximum(0., args.margin - y_pos + y_neg))
	#print(loss.shape)
	return loss 
Example 70
Project: eye-in-the-sky   Author: manideep2510   File: iou.py    Apache License 2.0 5 votes vote down vote up
def iou(y_true, y_pred, smooth = 100):
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    union = K.sum(y_true,-1) + K.sum(y_pred,-1) - intersection
    #sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
    iou_acc = (intersection + smooth) / (union + smooth)
    return iou_acc 
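
A hedged companion (not in the original repository): the IoU metric above is sometimes turned into a trainable objective by subtracting it from one.

def iou_loss(y_true, y_pred, smooth=100):
    # Higher IoU means better overlap, so minimize its complement.
    return 1.0 - iou(y_true, y_pred, smooth)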
Example 71
Project: eye-in-the-sky   Author: manideep2510   File: main_unet.py    Apache License 2.0 5 votes vote down vote up
def iou(y_true, y_pred, smooth = 100):
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    union = K.sum(y_true,-1) + K.sum(y_pred,-1) - intersection
    #sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
    iou_acc = (intersection + smooth) / (union + smooth)
    return iou_acc 
Example 72
Project: speech_separation   Author: bill9800   File: model_loss.py    MIT License 5 votes vote down vote up
def audio_discriminate_loss(gamma=0.1,num_speaker=2):
    def loss_func(S_true,S_pred,gamma=gamma,num_speaker=num_speaker):
        sum = 0
        for i in range(num_speaker):
            sum += K.sum(K.flatten((K.square(S_true[:,:,:,i]-S_pred[:,:,:,i]))))
            for j in range(num_speaker):
                if i != j:
                    sum -= gamma*K.sum(K.flatten((K.square(S_true[:,:,:,i]-S_pred[:,:,:,j]))))

        loss = sum / (num_speaker*298*257*2)
        return loss
    return loss_func 
Example 73
Project: isl-gaze-demo   Author: djpetti   File: metrics.py    MIT License 5 votes vote down vote up
def distance_metric(y_true, y_pred):
  """ Calculates the euclidean distance between the two labels and the
  predictions.
  Args:
    y_true: The true labels.
    y_pred: The predictions.
  Returns:
    The element-wise euclidean distance between the labels and the predictions.
  """
  diff = y_true - y_pred
  sqr = K.square(diff)
  total = K.sum(sqr, axis=1)
  return K.sqrt(total) 
Example 74
Project: isl-gaze-demo   Author: djpetti   File: train_eyes.py    MIT License 5 votes vote down vote up
def distance_metric(y_true, y_pred):
  """ Calculates the euclidean distance between the two labels and the
  predictions.
  Args:
    y_true: The true labels.
    y_pred: The predictions.
  Returns:
    The element-wise euclidean distance between the labels and the predictions.
  """
  diff = y_true - y_pred
  sqr = K.square(diff)
  total = K.sum(sqr, axis=1)
  return K.sqrt(total) 
Example 75
Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    MIT License 5 votes vote down vote up
def gradient_penalty_loss(y_true, y_pred, averaged_samples, weight):
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr = K.square(gradients)
    gradient_penalty = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))

    # (weight / 2) * ||grad||^2
    # Penalize the gradient norm
    return K.mean(gradient_penalty) * (weight / 2) 
Example 76
Project: deepflying   Author: dslab-deepflying   File: styleTransfer.py    GNU General Public License v3.0 5 votes vote down vote up
def content_loss(base, combination):
    return K.sum(K.square(combination - base)) 
Example 77
Project: deepflying   Author: dslab-deepflying   File: styleTransfer.py    GNU General Public License v3.0 5 votes vote down vote up
def style_loss(style, combination):
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_height * img_width
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2)) 
Example 78
Project: deepflying   Author: dslab-deepflying   File: styleTransfer.py    GNU General Public License v3.0 5 votes vote down vote up
def total_variation_loss(x):
    a = K.square(
        x[:, :img_height-1, :img_width-1, :] -
        x[:, 1:, :img_width-1, :])
    b = K.square(
        x[:, :img_height-1, :img_width-1, :] -
        x[:, :img_height-1, 1:, :])
    return K.sum(K.pow(a+b, 1.25)) 
Example 79
Project: deepflying   Author: dslab-deepflying   File: neural_style_transfer.py    GNU General Public License v3.0 5 votes vote down vote up
def style_loss(style, combination):
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_nrows * img_ncols
    return K.sum(K.square(S - C)) / (4.0 * (channels ** 2) * (size ** 2))

# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image 
Example 80
Project: deepflying   Author: dslab-deepflying   File: neural_style_transfer.py    GNU General Public License v3.0 5 votes vote down vote up
def content_loss(base, combination):
    return K.sum(K.square(combination - base))

# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent