Python keras.backend.greater() Examples

The following code examples show how to use keras.backend.greater(). They are drawn from open-source Python projects.
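As a quick orientation before the examples: K.greater compares element-wise and returns a boolean tensor, which is usually cast back to a float to act as a mask. A minimal sketch (the tensor values are illustrative):

import numpy as np
from keras import backend as K

x = K.constant(np.array([0.2, 0.8, 1.5]))
mask = K.greater(x, 0.5)               # boolean tensor: [False, True, True]
float_mask = K.cast(mask, 'float32')   # [0., 1., 1.], usable as a multiplicative mask
print(K.eval(float_mask))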

Example 1
Project: FaceLandmarks   Author: JACKYLUO1991   File: loss.py    Apache License 2.0
def wing_loss(y_true, y_pred, w=10.0, epsilon=2.0):
    """
    Arguments:
        y_true, y_pred: float tensors with shape [batch_size, num_landmarks, 2].
        w, epsilon: float hyperparameters of the wing loss.
    Returns:
        a float tensor with shape [] (a scalar).
    """
    # tf, math and N_LANDMARK are module-level imports/constants in the original file.
    y_true = tf.reshape(y_true, [-1, N_LANDMARK, 2])
    y_pred = tf.reshape(y_pred, [-1, N_LANDMARK, 2])

    x = y_true - y_pred
    c = w * (1.0 - math.log(1.0 + w / epsilon))
    absolute_x = tf.abs(x)
    losses = tf.where(
        tf.greater(w, absolute_x),
        w * tf.log(1.0 + absolute_x/epsilon),
        absolute_x - c
    )
    loss = tf.reduce_mean(tf.reduce_sum(losses, axis=[1, 2]), axis=0)

    return loss 
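For reference, this is the piecewise function the code implements (the wing loss of "Wing Loss for Robust Facial Landmark Localisation with Convolutional Neural Networks"), for a residual x:

\mathrm{wing}(x) =
\begin{cases}
  w \ln\!\left(1 + |x|/\epsilon\right) & \text{if } |x| < w \\
  |x| - C & \text{otherwise}
\end{cases}
\qquad C = w\left(1 - \ln\left(1 + w/\epsilon\right)\right)

The log branch handles small residuals while the linear branch keeps large outliers from dominating; c in the code is the continuity constant C.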
Example 2
Project: cdt-ccm-aae   Author: danielegrattarola   File: layers.py    MIT License
def call(self, inputs):
        zero = K.constant(0.)

        # Spherical clip
        spherical_clip = self.radius * K.l2_normalize(inputs, -1)
        # Hyperbolic clip
        free_components = inputs[..., :-1]
        bound_component = K.sqrt(K.sum(free_components ** 2, -1)[..., None] + (self.radius ** 2))
        hyperbolic_clip = K.concatenate((free_components, bound_component), -1)

        lt_cond = K.less(self.radius, zero)
        lt_check = K.switch(lt_cond, hyperbolic_clip, inputs)

        gt_cond = K.greater(self.radius, zero)
        output = K.switch(gt_cond, spherical_clip, lt_check)

        return output 
Example 3
Project: neural-candlestick   Author: Gab0   File: neuralGenesis.py    MIT License
def pricevariation(Y_true, Y_predicted):
    def D(VAL):
        # Price variation of the first candle: close minus open.
        return VAL[0][0][3] - VAL[0][0][0]

    tC = D(Y_true)
    pC = D(Y_predicted)

    TCG = K.greater(tC, 0)
    PCG = K.greater(pC, 0)

    # K.greater returns symbolic boolean tensors, so Python's
    # `if TCG and PCG:` cannot branch on them; the equivalent graph logic
    # is: loss 100 when true and predicted directions agree, else 0.
    same_direction = K.equal(TCG, PCG)
    return K.switch(same_direction,
                    K.constant(100.0, dtype='float64', name='loss'),
                    K.constant(0.0, dtype='float64'))
Example 4
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0
def FScore2(y_true, y_pred):
    '''
    The F score, beta=2
    '''
    B2 = K.variable(4)         # beta^2
    OnePlusB2 = K.variable(5)  # 1 + beta^2
    pred = K.round(y_pred)
    # True positives: rounded prediction matches a positive label.
    tp = K.sum(K.cast(K.less(K.abs(pred - K.clip(y_true, .5, 1.)), 0.01), 'float32'), -1)
    # False positives / false negatives from the sign of the residual.
    fp = K.sum(K.cast(K.greater(pred - y_true, 0.1), 'float32'), -1)
    fn = K.sum(K.cast(K.less(pred - y_true, -0.1), 'float32'), -1)

    f2 = OnePlusB2 * tp / (OnePlusB2 * tp + B2 * fn + fp)

    return K.mean(f2)
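The constants in B2 and OnePlusB2 come from the general F_beta formula with beta = 2:

F_\beta = \frac{(1+\beta^2)\,tp}{(1+\beta^2)\,tp + \beta^2\,fn + fp}

so 1 + \beta^2 = 5 and \beta^2 = 4, exactly the two K.variable values above.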
Example 5
Project: FaceLandmarks   Author: JACKYLUO1991   File: loss.py    Apache License 2.0
def normalized_mean_error(y_true, y_pred):
    '''
    normalised mean error
    '''
    y_pred = K.reshape(y_pred, (-1, N_LANDMARK, 2))
    y_true = K.reshape(y_true, (-1, N_LANDMARK, 2))
    # Distance between pupils
    interocular_distance = K.sqrt(
        K.sum((y_true[:, 38, :] - y_true[:, 92, :]) ** 2, axis=-1))
    return K.mean(K.sum(K.sqrt(K.sum((y_pred - y_true) ** 2, axis=-1)), axis=-1)) / \
        K.mean((interocular_distance * N_LANDMARK))


Example 6
Project: FaceLandmarks   Author: JACKYLUO1991   File: loss.py    Apache License 2.0
def smoothL1(y_true, y_pred):
    """
    More robust to noise than L2.
    """
    THRESHOLD = K.variable(1.0)
    mae = K.abs(y_true - y_pred)
    flag = K.greater(mae, THRESHOLD)
    # Linear branch above the threshold, quadratic branch below it.
    # (The textbook smooth-L1 uses 0.5 * mae^2 on the quadratic branch so the
    # branches meet at mae = 1; this code keeps the original mae^2.)
    loss = K.mean(K.switch(flag, (mae - 0.5), K.pow(mae, 2)), axis=-1)

    return loss
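For comparison, the textbook smooth-L1 definition with threshold 1 is:

\mathrm{smoothL1}(x) =
\begin{cases}
  0.5\,x^2 & \text{if } |x| < 1 \\
  |x| - 0.5 & \text{otherwise}
\end{cases}

which is continuous at |x| = 1; the example above drops the 0.5 factor on the quadratic branch.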
Example 7
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelorient.py    MIT License
def orientation_loss2(y_true, y_pred, obj_mask, mf):
    # Count anchors whose true (cos, sin) pair has squared norm > 0.5
    # (computed here but not used in the final loss).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # Since cos^2 + sin^2 = 1, the dot product of the true and predicted
    # (cos, sin) pairs lies in [-1, 1]; 1 - dot is the per-bin loss.
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    loss = 1 - loss

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Average over objects, guarding against division by an empty mask.
    loss = tf.cond(allobj > 0, lambda: losssum / allobj, lambda: 0.0)
    return loss
Example 8
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modeltree.py    MIT License
def orientation_loss(y_true, y_pred, obj_mask, mf):
    y_true = K.reshape(y_true * obj_mask, [-1, BIN, 2])
    y_pred = K.reshape(y_pred * obj_mask, [-1, BIN, 2])
    # Count anchors whose true (cos, sin) pair has squared norm > 0.5
    # (computed here but not used in the final loss).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # Since cos^2 + sin^2 = 1, the dot product of the true and predicted
    # (cos, sin) pairs lies in [-1, 1].
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Scaled (1 - mean dot product), guarding against an empty mask.
    loss = tf.cond(allobj > 0, lambda: 10.0 * (1 - losssum / allobj), lambda: 0.0)
    return loss
Example 9
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmerge.py    MIT License
def orientation_loss(y_true, y_pred, obj_mask, mf):
    y_true = K.reshape(y_true * obj_mask, [-1, BIN, 2])
    y_pred = K.reshape(y_pred * obj_mask, [-1, BIN, 2])
    # Count anchors whose true (cos, sin) pair has squared norm > 0.5
    # (computed here but not used in the final loss).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # Since cos^2 + sin^2 = 1, the dot product of the true and predicted
    # (cos, sin) pairs lies in [-1, 1].
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Scaled (1 - mean dot product), guarding against an empty mask.
    loss = tf.cond(allobj > 0, lambda: 3.0 * (1 - losssum / allobj), lambda: 0.0)
    return loss
Example 10
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmergetree.py    MIT License
def orientation_loss2(y_true, y_pred, obj_mask, mf):
    # Count anchors whose true (cos, sin) pair has squared norm > 0.5
    # (computed here but not used in the final loss).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # Since cos^2 + sin^2 = 1, the dot product of the true and predicted
    # (cos, sin) pairs lies in [-1, 1]; 1 - dot is the per-bin loss.
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    loss = 1 - loss

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Average over objects, guarding against division by an empty mask.
    loss = tf.cond(allobj > 0, lambda: losssum / allobj, lambda: 0.0)
    return loss
Example 11
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmergetree.py    MIT License
def orientation_loss2(y_true, y_pred, obj_mask, mf):
    # Count anchors whose true (cos, sin) pair has squared norm > 0.5
    # (computed here but not used in the final loss).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # Since cos^2 + sin^2 = 1, the dot product of the true and predicted
    # (cos, sin) pairs lies in [-1, 1]; 1 - dot is the per-bin loss.
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    loss = 1 - loss

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Same as the previous variant, but using K.switch rather than tf.cond.
    loss = K.switch(allobj > 0, losssum / allobj, 0.0)
    return loss
Example 12
Project: icassp19   Author: edufonseca   File: losses.py    MIT License
def crossentropy_max_wrap(_m):
    def crossentropy_max_core(y_true, y_pred):
        """
        This function is based on the one proposed in
        Il-Young Jeong and Hyungui Lim, "AUDIO TAGGING SYSTEM FOR DCASE 2018: FOCUSING ON LABEL NOISE,
         DATA AUGMENTATION AND ITS EFFICIENT LEARNING", Tech Report, DCASE 2018
        https://github.com/finejuly/dcase2018_task2_cochlearai

        :param y_true:
        :param y_pred:
        :return:
        """

        # hyper param
        print(_m)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)

        # threshold
        t_m = K.max(_loss) * _m
        _mask_m = 1 - (K.cast(K.greater(_loss, t_m), 'float32'))
        _loss = _loss * _mask_m

        return _loss
    return crossentropy_max_core 
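In effect, crossentropy_max_core discards the hardest examples in the batch: each per-example cross-entropy loss is kept only when it does not exceed the fraction m of the batch maximum,

\ell_i \leftarrow \ell_i \cdot \mathbb{1}\!\left[\ell_i \le m \cdot \max_j \ell_j\right],

which is exactly the mask the K.greater call builds above.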
Example 13
Project: icassp19   Author: edufonseca   File: losses.py    MIT License
def crossentropy_outlier_wrap(_l):
    def crossentropy_outlier_core(y_true, y_pred):

        # hyper param
        print(_l)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)

        def _get_real_median(_v):
            """
            given a tensor with shape (batch_size,), compute and return the median
            (assumes batch_size = 64: tf.nn.top_k(_v, 33) takes the 33 largest
            values, and the median is the mean of the 32nd and 33rd of them)

            :param _v:
            :return:
            """
            _val = tf.nn.top_k(_v, 33).values
            return 0.5 * (_val[-1] + _val[-2])

        _mean_loss, _var_loss = tf.nn.moments(_loss, axes=[0])
        _median_loss = _get_real_median(_loss)
        _std_loss = tf.sqrt(_var_loss)

        # threshold
        t_l = _median_loss + _l*_std_loss
        _mask_l = 1 - (K.cast(K.greater(_loss, t_l), 'float32'))
        _loss = _loss * _mask_l

        return _loss
    return crossentropy_outlier_core



#########################################################################
# From here on we distinguish data points in the batch based on their origin:
# robustness measures are applied only to the data points coming from the
# noisy subset. Therefore, the following functions are used only when
# training with the entire train set.
#########################################################################
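To make the origin flag concrete, here is a hypothetical sketch of the encoding the following functions assume: patches from the noisy subset arrive with their one-hot label scaled to 100, and are detected with K.greater on the row sum (the 3-class labels below are illustrative):

import numpy as np
from keras import backend as K

# Two label rows: a clean one-hot vector and a noisy-subset vector scaled by 100.
y_true = K.constant(np.array([[0., 1., 0.],
                              [0., 100., 0.]]))

# True for rows whose sum exceeds 90, i.e. patches from the noisy subset.
flag = K.greater(K.sum(y_true, axis=-1), 90)

# Scale flagged rows by 0.01 to recover a valid one-hot vector.
mask = K.cast(flag, 'float32') * 0.01 + K.cast(K.equal(flag, False), 'float32')
y_true_valid = y_true * K.expand_dims(mask, -1)
print(K.eval(y_true_valid))   # both rows are now [0., 1., 0.]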
Example 14
Project: icassp19   Author: edufonseca   File: losses.py    MIT License
def crossentropy_max_origin_wrap(_m):
    def crossentropy_max_origin_core(y_true, y_pred):

        # hyper param
        print(_m)

        # 1) determine the origin of the patch, as a boolean vector y_true_flag
        # (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)

        # 2) convert the input y_true (with flags inside) into a valid y_true one-hot-vector format
        # attenuating factor for data points that need it (those that came with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01

        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')

        # combine 2 masks
        _mask = _mask_reduce + _mask_keep

        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))

        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask

        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute loss for every data point
        _loss = -K.sum(y_true * K.log(y_pred), axis=-1)

        # threshold m
        t_m = K.max(_loss) * _m

        _mask_m = 1 - (K.cast(K.greater(_loss, t_m), 'float32') * K.cast(_y_true_flag, 'float32'))
        _loss = _loss * _mask_m

        return _loss
    return crossentropy_max_origin_core 
Example 15
Project: onnx-keras   Author: leodestiny   File: backend.py    MIT License
def handle_greater(cls, node, input_dict):
        return [cls._bin_op(node, input_dict, Lambda(lambda x, y: K.greater(x, y)), inputlist=False)] 
Example 16
Project: 2D-Vnet-Keras   Author: FENGShuanglang   File: model.py    GNU General Public License v3.0
def dice_coef(y_true, y_pred, smooth, thresh):
    #y_pred = K.cast((K.greater(y_pred, thresh)), dtype='float32')  # cast to float
    #y_pred = y_pred[y_pred > thresh] = 1.0
    y_true_f = y_true  # K.flatten(y_true)
    y_pred_f = y_pred  # K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f, axis=(0, 1, 2))
    denom = K.sum(y_true_f, axis=(0, 1, 2)) + K.sum(y_pred_f, axis=(0, 1, 2))
    return K.mean((2. * intersection + smooth) / (denom + smooth))
Example 17
Project: mfom_dcase16_task4   Author: Vanova   File: mfom.py    MIT License
def _non_zero_mean(self, x):
        # Mean over the non-zero entries only: n counts values with |x| > 0.
        mask = K.greater(K.abs(x), 0)
        n = K.sum(K.cast(mask, 'float32'), axis=1, keepdims=True)
        return K.sum(x, axis=-1, keepdims=True) / n
Example 18
Project: mfom_dcase16_task4   Author: Vanova   File: objectives.py    MIT License
def _non_zero_mean(x):
    # Mean over the non-zero entries only: n counts values with |x| > 0.
    mask = K.greater(K.abs(x), 0)
    n = K.sum(K.cast(mask, 'float32'), axis=1, keepdims=True)
    return K.sum(x, axis=-1, keepdims=True) / n
Example 19
Project: cyclegan_keras   Author: alecGraves   File: losses.py    The Unlicense
def discriminator_loss(y_true, y_pred):
    loss = mean_squared_error(y_true, y_pred)
    is_large = k.greater(loss, k.constant(_disc_train_thresh)) # threshold
    is_large = k.cast(is_large, k.floatx())
    return loss * is_large # binary threshold the loss to prevent overtraining the discriminator 
Example 20
Project: onto-lstm   Author: pdasigi   File: preposition_predictors.py    Apache License 2.0
def get_split_averages(input_tensor, input_mask, indices):
        # Splits input tensor into three parts based on the indices and
        # returns average of values prior to index, values at the index and
        # average of values after the index.
        # input_tensor: (batch_size, input_length, input_dim)
        # input_mask: (batch_size, input_length)
        # indices: (batch_size, 1)
        # (1, input_length)
        length_range = K.expand_dims(K.arange(K.shape(input_tensor)[1]), dim=0)
        # (batch_size, input_length)
        batched_range = K.repeat_elements(length_range, K.shape(input_tensor)[0], 0)
        tiled_indices = K.repeat_elements(indices, K.shape(input_tensor)[1], 1)  # (batch_size, input_length)
        greater_mask = K.greater(batched_range, tiled_indices)  # (batch_size, input_length)
        lesser_mask = K.lesser(batched_range, tiled_indices)  # (batch_size, input_length)
        equal_mask = K.equal(batched_range, tiled_indices)  # (batch_size, input_length)

        # We also need to mask these masks using the input mask.
        # (batch_size, input_length)
        if input_mask is not None:
            greater_mask = switch(input_mask, greater_mask, K.zeros_like(greater_mask))
            lesser_mask = switch(input_mask, lesser_mask, K.zeros_like(lesser_mask))

        post_sum = K.sum(switch(K.expand_dims(greater_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        pre_sum = K.sum(switch(K.expand_dims(lesser_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)
        values_at_indices = K.sum(switch(K.expand_dims(equal_mask), input_tensor, K.zeros_like(input_tensor)), axis=1)  # (batch_size, input_dim)

        post_normalizer = K.expand_dims(K.sum(greater_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)
        pre_normalizer = K.expand_dims(K.sum(lesser_mask, axis=1) + K.epsilon(), dim=1)  # (batch_size, 1)

        return K.cast(pre_sum / pre_normalizer, 'float32'), values_at_indices, K.cast(post_sum / post_normalizer, 'float32') 
Example 21
Project: DeepPavlov   Author: deepmipt   File: bilstm_siamese_network.py    Apache License 2.0
def _batch_all_triplet_loss(self, y_true: Tensor, pairwise_dist: Tensor) -> Tensor:
        # Broadcast to all (anchor, positive, negative) triplets in the batch.
        anchor_positive_dist = K.expand_dims(pairwise_dist, 2)
        anchor_negative_dist = K.expand_dims(pairwise_dist, 1)
        triplet_loss = anchor_positive_dist - anchor_negative_dist + self.margin
        mask = self._get_triplet_mask(y_true, pairwise_dist)
        triplet_loss = mask * triplet_loss
        triplet_loss = K.clip(triplet_loss, 0.0, None)
        # Count triplets with a strictly positive loss and average over them.
        valid_triplets = K.cast(K.greater(triplet_loss, 1e-16), K.dtype(triplet_loss))
        num_positive_triplets = K.sum(valid_triplets)
        triplet_loss = K.sum(triplet_loss) / (num_positive_triplets + 1e-16)
        return triplet_loss
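The quantity averaged here is the standard batch-all triplet loss: for each (anchor, positive, negative) combination,

L(a, p, n) = \max\!\big(d(a, p) - d(a, n) + \mathrm{margin},\ 0\big),

and K.greater restricts the mean to the triplets with strictly positive loss.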
Example 22
Project: DeepPavlov   Author: deepmipt   File: bilstm_siamese_network.py    Apache License 2.0
def _get_semihard_anchor_negative_triplet_mask(self, negative_dist: Tensor,
                                                   hardest_positive_dist: Tensor,
                                                   mask_negative: Tensor) -> Tensor:
        # mask max(dist(a,p)) < dist(a,n)
        mask = K.greater(negative_dist, hardest_positive_dist)
        mask = K.cast(mask, K.dtype(negative_dist))
        mask_semihard = K.cast(K.expand_dims(K.greater(K.sum(mask, 1), 0.0), 1), K.dtype(negative_dist))
        mask = mask_negative * (1 - mask_semihard) + mask * mask_semihard
        return mask 
Example 23
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelorient.py    MIT License
def orientation_loss(y_true, y_pred, obj_mask, mf):
    y_true = K.reshape(y_true * obj_mask, [-1, BIN, 2])
    y_pred = y_pred * obj_mask
    # L2-normalise each predicted (cos, sin) pair.
    y_pred = K.l2_normalize(K.reshape(y_pred, [-1, BIN, 2]), 2)
    obj_mask = K.reshape(obj_mask, [-1, 1])

    # Since cos^2 + sin^2 = 1, the dot product of the true and predicted
    # (cos, sin) pairs lies in [-1, 1]; 1 - dot is the per-bin loss.
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    loss = 1 - loss
    loss = K.reshape(loss, [-1, 2])
    loss = loss * obj_mask

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Average over objects, guarding against division by an empty mask.
    loss = tf.cond(allobj > 0, lambda: losssum / allobj, lambda: 0.0)
    return loss
Example 24
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelorient.py    MIT License
def orientation_loss3(y_true, y_pred, obj_mask, mf):
    y_true = K.reshape(y_true * obj_mask, [-1, BIN, 2])
    y_pred = y_pred * obj_mask
    # L2-normalise each predicted (cos, sin) pair.
    y_pred = K.l2_normalize(K.reshape(y_pred, [-1, BIN, 2]), 2)
    obj_mask = K.reshape(obj_mask, [-1, 1])

    # Squared error on the cos and sin components separately.
    cosd = K.square(y_true[:, :, 0] - y_pred[:, :, 0])
    sind = K.square(y_true[:, :, 1] - y_pred[:, :, 1])
    loss = cosd + sind
    loss = loss * obj_mask

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Average over objects, guarding against division by an empty mask.
    loss = tf.cond(allobj > 0, lambda: losssum / allobj, lambda: 0.0)
    return loss
Example 25
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmergetree.py    MIT License
def orientation_loss3(y_true, y_pred, obj_mask, mf):
    y_true = K.reshape(y_true * obj_mask, [-1, BIN, 2])
    y_pred = y_pred * obj_mask
    # L2-normalise each predicted (cos, sin) pair.
    y_pred = K.l2_normalize(K.reshape(y_pred, [-1, BIN, 2]), 2)
    obj_mask = K.reshape(obj_mask, [-1, 1])

    # Squared error on the cos and sin components separately.
    cosd = K.square(y_true[:, :, 0] - y_pred[:, :, 0])
    sind = K.square(y_true[:, :, 1] - y_pred[:, :, 1])
    loss = cosd + sind
    loss = loss * obj_mask

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Average over objects, guarding against division by an empty mask.
    loss = tf.cond(allobj > 0, lambda: losssum / allobj, lambda: 0.0)
    return loss
Example 26
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmergetree.py    MIT License
def orientation_loss(y_true, y_pred, obj_mask, mf):
    y_true = K.reshape(y_true * obj_mask, [-1, BIN, 2])
    y_pred = y_pred * obj_mask
    # L2-normalise each predicted (cos, sin) pair.
    y_pred = K.l2_normalize(K.reshape(y_pred, [-1, BIN, 2]), 2)
    obj_mask = K.reshape(obj_mask, [-1, 1])
    # Count anchors whose true (cos, sin) pair has squared norm > 0.5
    # (computed here but not used in the final loss).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, 0.5)
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # Since cos^2 + sin^2 = 1, the dot product of the true and predicted
    # (cos, sin) pairs lies in [-1, 1]; 1 - dot is the per-bin loss.
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    loss = 1 - loss
    loss = K.reshape(loss, [-1, 2])
    loss = loss * obj_mask

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Average over objects, guarding against division by an empty mask.
    loss = tf.cond(allobj > 0, lambda: losssum / allobj, lambda: 0.0)
    return loss
Example 27
Project: icassp19   Author: edufonseca   File: losses.py    MIT License
def crossentropy_reed_origin_wrap(_beta):
    def crossentropy_reed_origin_core(y_true, y_pred):
        # hyper param
        print(_beta)

        # 1) determine the origin of the patch, as a boolean vector in y_true_flag
        # (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)

        # 2) convert the input y_true (with flags inside) into a valid y_true one-hot-vector format
        # attenuating factor for data points that need it (those that came with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01

        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')

        # combine 2 masks
        _mask = _mask_reduce + _mask_keep

        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))

        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask

        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # (1) dynamically update the targets based on the current state of the model: bootstrapped target tensor
        # use predicted class proba directly to generate regression targets
        y_true_bootstrapped = _beta * y_true + (1 - _beta) * y_pred

        # at this point we have 2 versions of y_true
        # decide which target label to use for each datapoint
        _mask_noisy = K.cast(_y_true_flag, 'float32')                   # only allows patches from noisy set
        _mask_clean = K.cast(K.equal(_y_true_flag, False), 'float32')   # only allows patches from clean set
        _mask_noisy = K.reshape(_mask_noisy, (_y_true_shape[0], 1))
        _mask_clean = K.reshape(_mask_clean, (_y_true_shape[0], 1))

        # points coming from clean set use the standard true one-hot vector. dim is (batch_size, 1)
        # points coming from noisy set use the Reed bootstrapped target tensor
        y_true_final = y_true * _mask_clean + y_true_bootstrapped * _mask_noisy

        # (2) compute loss as always
        _loss = -K.sum(y_true_final * K.log(y_pred), axis=-1)

        return _loss
    return crossentropy_reed_origin_core 
Example 28
Project: icassp19   Author: edufonseca   File: losses.py    MIT License
def lq_loss_origin_wrap(_q):
    def lq_loss_origin_core(y_true, y_pred):

        # hyper param
        print(_q)

        # 1) determine the origin of the patch, as a boolean vector in y_true_flag
        # (True = patch from noisy subset)
        _y_true_flag = K.greater(K.sum(y_true, axis=-1), 90)

        # 2) convert the input y_true (with flags inside) into a valid y_true one-hot-vector format
        # attenuating factor for data points that need it (those that came with a one-hot of 100)
        _mask_reduce = K.cast(_y_true_flag, 'float32') * 0.01

        # identity factor for standard one-hot vectors
        _mask_keep = K.cast(K.equal(_y_true_flag, False), 'float32')

        # combine 2 masks
        _mask = _mask_reduce + _mask_keep

        _y_true_shape = K.shape(y_true)
        _mask = K.reshape(_mask, (_y_true_shape[0], 1))

        # applying mask to have a valid y_true that we can use as always
        y_true = y_true * _mask

        y_true = K.clip(y_true, K.epsilon(), 1)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute two types of losses, for all the data points
        # (1) compute CCE loss for every data point
        _loss_CCE = -K.sum(y_true * K.log(y_pred), axis=-1)

        # (2) compute lq_loss for every data point
        _tmp = y_pred * y_true
        _loss_tmp = K.max(_tmp, axis=-1)
        # compute the Lq loss between the one-hot encoded label and the predictions
        _loss_q = (1 - (_loss_tmp + 10 ** (-8)) ** _q) / _q

        # decide which loss to take for each datapoint
        _mask_noisy = K.cast(_y_true_flag, 'float32')                   # only allows patches from noisy set
        _mask_clean = K.cast(K.equal(_y_true_flag, False), 'float32')   # only allows patches from clean set

        # points coming from clean set contribute with CCE loss
        # points coming from noisy set contribute with lq_loss
        _loss_final = _loss_CCE * _mask_clean + _loss_q * _mask_noisy

        return _loss_final
    return lq_loss_origin_core