Python keras.backend.categorical_crossentropy() Examples

The following are code examples showing how to use keras.backend.categorical_crossentropy(), drawn from open source Python projects.
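
Most snippets below omit their imports: they assume from keras import backend as K and, where TensorFlow ops appear, import tensorflow as tf. Undefined names such as num_classes, factor and prior_factor are module-level globals in the source projects. One caveat when reading older projects: early Keras releases defined this backend function as categorical_crossentropy(output, target), whereas current Keras 2.x uses categorical_crossentropy(target, output, from_logits=False), so several examples below (e.g. 1, 3, 6, 8, 14, 24, 52 and 56) still pass (y_pred, y_true). A minimal, hypothetical sketch of the current call:

import numpy as np
from keras import backend as K

y_true = K.constant(np.array([[0., 1., 0.], [1., 0., 0.]]))        # one-hot labels
y_pred = K.constant(np.array([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]]))  # softmax outputs

# target first, output second; from_logits=False because y_pred holds probabilities
loss = K.categorical_crossentropy(y_true, y_pred)
print(K.eval(loss))  # per-sample loss, shape (2,)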

Example 1
Project: Scene-Understanding   Author: foamliu   File: utils.py    MIT License 7 votes
def categorical_crossentropy_with_class_rebal(y_true, y_pred):
    y_true = K.reshape(y_true, (-1, num_classes))
    y_pred = K.reshape(y_pred, (-1, num_classes))

    idx_max = K.argmax(y_true, axis=1)
    weights = K.gather(factor, idx_max)
    weights = K.reshape(weights, (-1, 1))

    # multiply y_true by weights
    y_true = y_true * weights

    cross_ent = K.categorical_crossentropy(y_pred, y_true)
    cross_ent = K.mean(cross_ent, axis=-1)

    return cross_ent


Example 2
Project: cloudFCN   Author: aliFrancis   File: weighted_loss.py    Apache License 2.0 7 votes
def w_categorical_crossentropy(y_true, y_pred, weights):
    """
    Keras-style categorical crossentropy loss function, with weighting for each class.

    Parameters
    ----------
    y_true : Tensor
        Truth labels.
    y_pred : Tensor
        Predicted values.
    weights: Tensor
        Multiplicative factor for loss per class.

    Returns
    -------
    loss : Tensor
        Weighted crossentropy loss between labels and predictions.

    """
    y_true_max = K.argmax(y_true, axis=-1)
    weighted_true = K.gather(weights, y_true_max)
    loss = K.categorical_crossentropy(y_pred, y_true) * weighted_true
    return loss 
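
A Keras loss must accept exactly (y_true, y_pred), so a three-argument loss like this is usually bound with functools.partial before compiling. A sketch, assuming a hypothetical 3-class model:

import functools
import numpy as np
from keras import backend as K

class_weights = K.constant(np.array([1.0, 2.0, 0.5], dtype='float32'))  # hypothetical per-class weights

weighted_loss = functools.partial(w_categorical_crossentropy, weights=class_weights)
weighted_loss.__name__ = 'w_categorical_crossentropy'  # Keras expects the loss to have a __name__

# model.compile(optimizer='adam', loss=weighted_loss)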
Example 3
Project: Colorful-Image-Colorization   Author: foamliu   File: utils.py    MIT License 6 votes
def categorical_crossentropy_color(y_true, y_pred):
    q = 313
    y_true = K.reshape(y_true, (-1, q))
    y_pred = K.reshape(y_pred, (-1, q))

    idx_max = K.argmax(y_true, axis=1)
    weights = K.gather(prior_factor, idx_max)
    weights = K.reshape(weights, (-1, 1))

    # multiply y_true by weights
    y_true = y_true * weights

    cross_ent = K.categorical_crossentropy(y_pred, y_true)
    cross_ent = K.mean(cross_ent, axis=-1)

    return cross_ent


Example 4
Project: blackbox-attacks   Author: sunblaze-ucb   File: attack_utils.py    MIT License 6 votes
def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """

    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    elif loss == 'logloss':
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    else:
        raise ValueError("Unknown loss: {}".format(loss))

    if mean:
        out = K.mean(out)
    # else:
    #     out = K.sum(out)
    return out 
Example 5
Project: blackbox-attacks   Author: sunblaze-ucb   File: attack_utils.py    MIT License 6 votes
def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """

    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        out = K.categorical_crossentropy(logits, y, from_logits=True)
    elif loss == 'logloss':
        # out = K.categorical_crossentropy(logits, y, from_logits=True)
        out = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
        out = tf.reduce_mean(out)
    else:
        raise ValueError("Unknown loss: {}".format(loss))

    if mean:
        out = tf.reduce_mean(out)  # fixed: the original called tf.mean, which does not exist
    # else:
    #     out = K.sum(out)
    return out 
Example 6
Project: GlyphsScripts   Author: simoncozens   File: Autokern.py    MIT License 6 votes
def w_categorical_crossentropy(self, y_true, y_pred):
    nb_cl = len(self.weights)
    final_mask = K.zeros_like(y_pred[..., 0])
    y_pred_max = K.max(y_pred, axis=-1)
    y_pred_max = K.expand_dims(y_pred_max, axis=-1)
    y_pred_max_mat = K.equal(y_pred, y_pred_max)
    for c_p, c_t in itertools.product(range(nb_cl), range(nb_cl)):
        w = K.cast(self.weights[c_t, c_p], K.floatx())
        y_p = K.cast(y_pred_max_mat[..., c_p], K.floatx())
        y_t = K.cast(y_true[..., c_t], K.floatx())
        final_mask += w * y_p * y_t
    return K.categorical_crossentropy(y_pred, y_true) * final_mask 
Example 7
Project: MCF-3D-CNN   Author: xyj77   File: liver_model.py    MIT License 6 votes
def build_3dcnn_model(self, fusion_type, Fusion):
        if len(Fusion[0]) == 1: 
            input_shape = (32, 32, len(Fusion))
            model_in,model = self.cnn_2D(input_shape) 
        else:
            input_shape = (32, 32, 5, len(Fusion))
            model_in,model = self.cnn_3D(input_shape) 
        model = Dropout(0.5)(model)
        model = Dense(32, activation='relu', name = 'fc2')(model)
        model = Dense(self.config.classes, activation='softmax', name = 'fc3')(model) 
        model = Model(input=model_in, output=model)  # Keras 1.x kwargs; Keras 2.x uses inputs=/outputs=
        # parameter summary
        # model.summary()
        plot_model(model, to_file='experiments/img/' + str(Fusion) + fusion_type + r'_model.png', show_shapes=True)
        print('    Saving model architecture')
        
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        # model.compile(optimizer=adam, loss=self.mycrossentropy, metrics=['accuracy'])  # improves results, but unstable
        model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy']) 
        
        return model 
Example 8
Project: isic2018-skin-lesion-classifier-tensorflow   Author: abhishekrana   File: w_categorical_crossentropy.py    Apache License 2.0 6 votes
def w_categorical_crossentropy(y_true, y_pred, weights):
    print(y_true)
    nb_cl = len(weights)
    final_mask = K.zeros_like(y_pred[:, 0])
    y_pred_max = K.max(y_pred, axis=1)
    y_pred_max = K.reshape(y_pred_max, (K.shape(y_pred)[0], 1))
    y_pred_max_mat = K.equal(y_pred, y_pred_max)
    for c_p, c_t in product(range(nb_cl), range(nb_cl)):
        final_mask += (weights[c_t, c_p] * y_pred_max_mat[:, c_p] * y_true[:, c_t])
    return K.categorical_crossentropy(y_pred, y_true) * final_mask 
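
Here weights is an (nb_cl, nb_cl) matrix that prices every (true class, predicted class) pair, so particular confusions can be penalised more heavily than others. A sketch of a hypothetical cost matrix for 3 classes, bound the same way as in Example 2:

import functools
import numpy as np

w = np.ones((3, 3), dtype='float32')  # hypothetical pairwise costs
w[0, 2] = 5.0  # mistaking true class 0 for class 2 costs 5x
w[2, 0] = 5.0

pair_loss = functools.partial(w_categorical_crossentropy, weights=w)
pair_loss.__name__ = 'w_categorical_crossentropy'
# model.compile(optimizer='adam', loss=pair_loss)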
Example 9
Project: iust_deep_fuzz   Author: m-zakeri   File: learn_and_fuzz_2.py    MIT License 6 votes
def load_model_and_generate(self, model_name='model7_laf', epochs=10):
        dt = datetime.datetime.now().strftime('_date_%Y-%m-%d_%H-%M-%S')
        dir_name = './generated_results/pdfs/' + model_name + dt + 'epochs_' + str(epochs) + '/'
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)

        model = load_model('./model_checkpoint/best_models/'
                           'model7_laf_date_2018-06-19_12-23-39_epoch_30_val_loss_0.8395.h5',
                           compile=False)
        optimizer = Adam(lr=0.0001)  # Reduce from 0.001 to 0.0001 for model_10
        model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      # metrics=['accuracy']
                      metrics=['accuracy'])

        seq = self.generate_and_fuzz_new_samples(model=model,
                                      model_name=model_name,
                                      epochs=epochs,
                                      current_epoch=10,
                                      dir_name=dir_name)

        list_of_obj = preprocess.get_list_of_object(seq=seq, is_sort=False)
        return list_of_obj 
Example 10
Project: iust_deep_fuzz   Author: m-zakeri   File: metadata_neural_fuzz_pdf_obj.py    MIT License 6 votes
def load_model_and_generate(self, model_name='model_7', epochs=38):
        dt = datetime.datetime.now().strftime('_date_%Y-%m-%d_%H-%M-%S')
        dir_name = './generated_results/pdfs/' + model_name + dt + 'epochs_' + str(epochs) + '/'
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)

        model = load_model('./model_checkpoint/best_models/'
                           'model_7_date_2018-05-14_21-44-21_epoch_38_val_loss_0.3300.h5',
                           compile=False)
        optimizer = Adam(lr=0.001)  # Reduce from 0.001 to 0.0001 just for model_10

        model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      # metrics=['accuracy']
                      metrics=['accuracy'])

        seq = self.generate_and_fuzz_new_samples(model=model,
                                      model_name=model_name,
                                      epochs=epochs,
                                      current_epoch=38,
                                      dir_name=dir_name)

        list_of_obj = preprocess.get_list_of_object(seq=seq, is_sort=False)
        return list_of_obj 
Example 11
Project: iust_deep_fuzz   Author: m-zakeri   File: learn_and_fuzz_3_sample_fuzz.py    MIT License 6 votes
def load_model_and_generate(self, model_name='model7_laf', epochs=50):
        dt = datetime.datetime.now().strftime('_date_%Y-%m-%d_%H-%M-%S')
        dir_name = './generated_results/pdfs/' + model_name + dt + 'epochs_' + str(epochs) + '/'
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)

        model = load_model('./model_checkpoint/best_models/'
                           'model7_laf_date_2018-06-19_12-23-39_epoch_50_val_loss_0.7242.h5',
                           compile=False)
        optimizer = Adam(lr=0.0001)  # Reduce from 0.001 to 0.0001 for model_10
        model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      # metrics=['accuracy']
                      metrics=['accuracy'])

        seq = self.generate_and_fuzz_new_samples(model=model,
                                      model_name=model_name,
                                      epochs=epochs,
                                      current_epoch=50,
                                      dir_name=dir_name)

        list_of_obj = preprocess.get_list_of_object(seq=seq, is_sort=False)
        return list_of_obj 
Example 12
Project: tying-wv-and-wc   Author: icoxfog417   File: augmented_model.py    MIT License 6 votes
def augmented_loss(self, y_true, y_pred):
        _y_pred = Activation("softmax")(y_pred)
        loss = K.categorical_crossentropy(_y_pred, y_true)

        # y is (batch x seq x vocab)
        y_indexes = K.argmax(y_true, axis=2)  # turn one hot to index. (batch x seq)
        y_vectors = self.embedding(y_indexes)  # lookup the vector (batch x seq x vector_length)

        #v_length = self.setting.vector_length
        #y_vectors = K.reshape(y_vectors, (-1, v_length))
        #y_t = K.map_fn(lambda v: K.dot(self.embedding.embeddings, K.reshape(v, (-1, 1))), y_vectors)
        #y_t = K.squeeze(y_t, axis=2)  # unknown but necessary operation
        #y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))

        # vector x embedding dot products (batch x seq x vocab)
        y_t = tf.tensordot(y_vectors, K.transpose(self.embedding.embeddings), 1)
        y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))  # explicitly set shape
        y_t = K.softmax(y_t / self.temperature)
        _y_pred_t = Activation("softmax")(y_pred / self.temperature)
        aug_loss = kullback_leibler_divergence(y_t, _y_pred_t)
        loss += (self.gamma * self.temperature) * aug_loss
        return loss 
Example 13
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 6 votes
def constrained_loss(mat_list, weights=1.0, loss_type='orthogonal'):
    
    def loss(y_true, y_pred):
        constraint = 0

        for i in range(len(mat_list)):
            if loss_type == 'orthogonal':
                constraint0 = (K.dot(K.transpose(mat_list[i]), mat_list[i] ) - 
                                    K.eye(mat_list[i].get_shape().as_list()[1]) )**2 
                

            elif loss_type == 'softbinary':
                constraint0 = K.mean( K.abs(mat_list[i]**2 - 1.0))

            constraint += K.sum(constraint0)

        return weights*(constraint) + K.categorical_crossentropy(y_true, y_pred)

    return loss 
Example 14
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari_multi.py    MIT License 5 votes
def categoricalCrossentropyWithWeights(ytrueWithWeights, ypred):
    '''Like regular categorical cross entropy, but with sample weights for every row.
    ytrueWithWeights is a matrix where the first columns are one hot encoder for the
    classes, while the last column contains the sample weights.
    '''
    return K.categorical_crossentropy(ypred, ytrueWithWeights[:, :-1]) * ytrueWithWeights[:, -1] 
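
To train with this loss, the per-sample weights are packed as one extra final column of the label matrix. A small sketch with hypothetical numpy arrays:

import numpy as np

y_onehot = np.array([[0., 1., 0.], [1., 0., 0.]], dtype='float32')  # one-hot labels
sample_w = np.array([[1.0], [0.5]], dtype='float32')                # per-row weights
y_with_w = np.hstack([y_onehot, sample_w])                          # shape (n, classes + 1)

# model.compile(optimizer='adam', loss=categoricalCrossentropyWithWeights)
# model.fit(x_train, y_with_w, ...)  # x_train is the hypothetical input array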
Example 15
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari_multi.py    MIT License 5 votes
def entropyLoss(ypred):
    '''Entropy loss.
    Loss = - sum(pred * log(pred))
    '''
    return K.categorical_crossentropy(ypred, ypred) 
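
Passing the predictions as both target and output turns the cross-entropy into the entropy of the prediction itself, which is what the docstring states. A quick hypothetical check:

import numpy as np
from keras import backend as K

p = K.constant(np.array([[0.5, 0.5]], dtype='float32'))
print(K.eval(entropyLoss(p)))  # ln 2 ~= 0.693, the entropy of a fair coin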
Example 16
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari.py    MIT License 5 votes
def setupModel(self):
        '''Setup models:
        self.actionModel is the action predictions.
        self.valueModel is the prediction of the value function.
        self.model is the model with both outputs
        '''
        if self.resume:
            self.model = load_model(self.modelFileName)
            # Need the other models as well...
            return
        inputShape = (self.D, self.D, self.nbImgInState)
        model = self.deepMindAtariNet(self.nbClasses, inputShape, includeTop=False)
        inp = Input(shape=inputShape)
        x = model(inp)
        x = Flatten()(x)
        x = Dense(512, activation='relu', name='dense1')(x)

        action = Dense(self.nbClasses, activation='softmax', name='action')(x)
        self.actionModel = Model(inp, action)
        # Should we compile model?

        value = Dense(1, activation='linear', name='value')(x)
        self.valueModel = Model(inp, value)
        # Should we compile model?

        self.model = Model(inp, [action, value])
        # loss = {'action': 'categorical_crossentropy', 'value': 'mse'}
        # loss = {'action': categoricalCrossentropyWithWeights, 'value': 'mse'}
        actionAndEntropyLoss = makeActionAndEntropyLossA3C(self.entropyBeta)
        loss = {'action': actionAndEntropyLoss, 'value': 'mse'}
        loss_weights = {'action': 1, 'value': self.mseBeta}
        optim = RMSprop(self.learningRate, self.decayRate)
        self.model.compile(optim, loss) # Need to make it possible to set other optimizers 
Example 17
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari.py    MIT License 5 votes
(Identical to Example 14.)
Example 18
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari.py    MIT License 5 votes
(Identical to Example 15.)
Example 19
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_a2c.py    MIT License 5 votes
(Identical to Example 15.)
Example 20
Project: blackbox-attacks   Author: sunblaze-ucb   File: attack_utils.py    MIT License 5 votes
def gen_grad_ens(x, logits, y):
    adv_loss = K.categorical_crossentropy(logits[0], y, from_logits=True)
    if len(logits) >= 1:  # effectively always true; the loop sums losses over the rest of the ensemble
        for i in range(1, len(logits)):
            adv_loss += K.categorical_crossentropy(logits[i], y, from_logits=True)

    grad = K.gradients(adv_loss, [x])[0]
    return adv_loss, grad 
Example 21
Project: trVAE   Author: theislab   File: _losses.py    MIT License 5 votes
def categrical_crossentropy(gamma):
    def cce_loss(real_classes, pred_classes):
        return gamma * K.categorical_crossentropy(real_classes, pred_classes)

    return cce_loss 
Example 22
Project: zlyang-seq2seq-gec   Author: young-zonglin   File: basic_model.py    MIT License 5 votes
def _masked_loss(self, target, preds):
        y_mask = GetPadMask(self.batch_size)(target)
        cross_entropy = K.categorical_crossentropy(target, preds)
        assert K.ndim(cross_entropy) == 2
        loss = K.sum(cross_entropy * y_mask, axis=1, keepdims=True) / K.sum(y_mask, axis=1, keepdims=True)
        return K.reshape(loss, [self.batch_size, -1])

Example 23
Project: MCF-3D-CNN   Author: xyj77   File: liver_model.py    MIT License 5 votes
def build_fusion_model(self, fusion_type, Fusion):
        model_list = []
        input_list = []
        for modual in Fusion:
            if len(modual) == 1: 
                input_shape = (32, 32, 1)
                signle_input,single_model = self.cnn_2D(input_shape, modual) 
            else:
                input_shape = (32, 32, 5, 1)
                signle_input,single_model = self.cnn_3D(input_shape, modual) 
                  
            model_list.append(single_model)
            input_list.append(signle_input)
        # build the fusion model
        model = self.nn_fusion(input_list, model_list, self.config.classes, fusion_type)
        # parameter summary
        model.summary()
        plot_model(model, to_file='experiments/img/' + str(Fusion) + fusion_type + r'_model.png', show_shapes=True)
        print('    Saving model architecture')
        # raw_input()
        
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
        # model.compile(optimizer=adam, loss=self.mycrossentropy, metrics=['accuracy'])  # improves results, but unstable
        model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy']) 
        
        return model 
Example 24
Project: MCF-3D-CNN   Author: xyj77   File: liver_model.py    MIT License 5 votes
def mycrossentropy(self, y_true, y_pred):
        e = 0.3
        # for i in range(y_true.shape[0]):
            # for j in range(3):
                # sum += 0.1*(-1**y_true(i,j))*exp(abs(np.argmax(y_true[i,:])-j))*log(y_pred(i,j))
        # return sum/len

        # y = np.argmax(y_true, axis=1)
        # y_ = np.argmax(y_pred, axis=1)
        # print '*****************',y_pred
                
        # return (1-e)*K.categorical_crossentropy(y_pred,y_true) - e*K.categorical_crossentropy(y_pred, (1-y_true)/(self.config.classes-1)) 
        return (1-e)*K.categorical_crossentropy(y_pred,y_true) + e*K.categorical_crossentropy(y_pred, K.ones_like(y_pred)/2) 
Example 25
Project: deep-spell-checkr   Author: vuptran   File: model.py    MIT License 5 votes
def truncated_loss(y_true, y_pred):
    y_true = y_true[:, :VAL_MAXLEN, :]
    y_pred = y_pred[:, :VAL_MAXLEN, :]
    
    loss = K.categorical_crossentropy(
        target=y_true, output=y_pred, from_logits=False)
    return K.mean(loss, axis=-1) 
Example 26
Project: Voiceprint-Recognition   Author: SunYanCN   File: run.py    Apache License 2.0 5 votes
def amsoftmax_loss(y_true, y_pred, scale=10, margin=10.0):
    y_pred = y_true * (y_pred - margin) + (1 - y_true) * y_pred
    y_pred *= scale
    return K.categorical_crossentropy(y_true, y_pred, from_logits=True) 
Example 27
Project: Voiceprint-Recognition   Author: SunYanCN   File: kws.py    Apache License 2.0 5 votes
(Identical to Example 26.)
Example 28
Project: Voiceprint-Recognition   Author: SunYanCN   File: kws.py    Apache License 2.0 5 votes
def train_cnn(selected_lable):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    K.set_session(session)
    dest = app.config['PROCESSED_TRAIN_FOLDER']
    train_data_path = os.path.join(dest,'train_data.npy')
    train_label_path = os.path.join(dest,'train_label.npy')

    x_train = np.load(train_data_path, allow_pickle=True)
    y_train = np.load(train_label_path ,allow_pickle=True)

    epochs = 80
    batch_size = 64
    num_type = len(selected_lable)
    x_train = proress(x_train)  # preprocessing helper defined elsewhere in the project
    # word label to number label
    y_train = label_to_category(y_train, selected_lable)

    # number label to onehot
    y_train = keras.utils.to_categorical(y_train, num_type)

    # shuffle data
    permutation = np.random.permutation(x_train.shape[0])
    x_train = x_train[permutation, :]
    y_train = y_train[permutation]

    history = train(x_train, y_train, type=num_type, batch_size=batch_size,
                    epochs=epochs, labels=selected_lable)

    # reload the best model
    model = load_model(model_path, custom_objects={'f1': f1, 'amsoftmax_loss': amsoftmax_loss})

    layerName = "flatten"
    targetModel = Model(inputs=model.input, outputs=model.get_layer(layerName).output)
    targetModel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    targetModel.save("dvector.h5")

    del model
    del targetModel
    K.clear_session() 
Example 29
Project: Voiceprint-Recognition   Author: SunYanCN   File: r_model.py    Apache License 2.0 5 votes
def amsoftmax_loss(y_true, y_pred, scale=10, margin=5.0):
    y_pred = y_true * (y_pred - margin) + (1 - y_true) * y_pred
    y_pred *= scale
    return K.categorical_crossentropy(y_true, y_pred, from_logits=True) 
Example 30
Project: Voiceprint-Recognition   Author: SunYanCN   File: nni_speaker.py    Apache License 2.0 5 votes
(Identical to Example 26.)
Example 31
Project: Semantic_Segmentation_Keras   Author: liuph0119   File: loss_utils.py    Apache License 2.0 5 votes
def log_loss(y_true, y_pred):
    return K.categorical_crossentropy(y_true, y_pred)


Example 32
Project: nips-2017-adversarial   Author: sangxia   File: network_utils.py    MIT License 5 votes
def call(self, x):
        # label, logit
        return K.categorical_crossentropy(x[1], x[0], from_logits=self._from_logits) 
Example 33
Project: nips-2017-adversarial   Author: sangxia   File: network_utils.py    MIT License 5 votes
(Identical to Example 32.)
Example 34
Project: nips-2017-adversarial   Author: sangxia   File: network_utils.py    MIT License 5 votes
(Identical to Example 32.)
Example 35
Project: nips-2017-adversarial   Author: sangxia   File: network_utils.py    MIT License 5 votes
(Identical to Example 32.)
Example 36
Project: iust_deep_fuzz   Author: m-zakeri   File: learn_and_fuzz_2.py    MIT License 5 votes
def cross_entropy(y_true, y_pred):
    """
    Compute cross_entropy loss metric

    :param y_true:
    :param y_pred:
    :return:
    """
    return K.categorical_crossentropy(y_true, y_pred) 
Example 37
Project: iust_deep_fuzz   Author: m-zakeri   File: learn_and_fuzz_2.py    MIT License 5 votes
def perplexity(y_true, y_pred):
    """
    Compute perplexity metric

    :param y_true:
    :param y_pred:
    :return:
    """
    ce = K.categorical_crossentropy(y_true, y_pred)
    # pp = K.pow(np.e, ce)  # Or 2?
    # pp = K.pow(2., ce)  # Or np.e
    pp = K.exp(ce)
    # print('Perplexity value in perplexity function: ', K.eval(pp))
    return pp 
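
Both functions follow the (y_true, y_pred) metric signature, so they can be passed straight to model.compile(..., metrics=[cross_entropy, perplexity]). A quick hypothetical check of the perplexity value:

import numpy as np
from keras import backend as K

y_true = K.constant(np.array([[0., 1.]], dtype='float32'))
y_pred = K.constant(np.array([[0.5, 0.5]], dtype='float32'))
print(K.eval(perplexity(y_true, y_pred)))  # exp(ln 2) = 2.0 for a uniform two-way guess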
Example 38
Project: iust_deep_fuzz   Author: m-zakeri   File: neural_fuzz_pdf_obj.py    MIT License 5 votes
(Identical to Example 36.)
Example 39
Project: iust_deep_fuzz   Author: m-zakeri   File: neural_fuzz_pdf_obj.py    MIT License 5 votes
(Identical to Example 37.)
Example 40
Project: iust_deep_fuzz   Author: m-zakeri   File: metadata_neural_fuzz_pdf_obj.py    MIT License 5 votes
(Identical to Example 36.)
Example 41
Project: iust_deep_fuzz   Author: m-zakeri   File: metadata_neural_fuzz_pdf_obj.py    MIT License 5 votes
(Identical to Example 37.)
Example 42
Project: iust_deep_fuzz   Author: m-zakeri   File: data_neural_fuzz_pdf_obj.py    MIT License 5 votes
(Identical to Example 36.)
Example 43
Project: iust_deep_fuzz   Author: m-zakeri   File: data_neural_fuzz_pdf_obj.py    MIT License 5 votes
(Identical to Example 37.)
Example 44
Project: iust_deep_fuzz   Author: m-zakeri   File: learn_and_fuzz_3_sample_fuzz.py    MIT License 5 votes
(Identical to Example 36.)
Example 45
Project: iust_deep_fuzz   Author: m-zakeri   File: learn_and_fuzz_3_sample_fuzz.py    MIT License 5 votes
(Identical to Example 37.)
Example 46
Project: show-attend-and-tell-keras   Author: zimmerrol   File: train.py    MIT License 5 votes
def masked_categorical_crossentropy(y_true, y_pred):
    mask_value = le._word_index_map["<NULL>"]
    y_true_id = K.argmax(y_true)
    mask = K.cast(K.equal(y_true_id, mask_value), K.floatx())
    mask = 1.0 - mask
    loss = K.categorical_crossentropy(y_true, y_pred) * mask

    # take average w.r.t. the number of unmasked entries
    return K.sum(loss) / K.sum(mask) 
Example 47
Project: AutoSpeech2019   Author: DeepWisdom   File: module.py    Apache License 2.0 5 votes
def amsoftmax_loss(y_true, y_pred, scale=30, margin=0.35):
    y_pred = y_true * (y_pred - margin) + (1 - y_true) * y_pred
    y_pred *= scale
    return K.categorical_crossentropy(y_true, y_pred, from_logits=True) 
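
Because y_true is one-hot, the margin is subtracted from the target-class score only, and the scale sharpens the scores before the softmax cross-entropy is applied. A hypothetical check with made-up cosine similarities:

import numpy as np
from keras import backend as K

y_true = K.constant(np.array([[0., 1., 0.]], dtype='float32'))
cosine = K.constant(np.array([[0.3, 0.9, 0.1]], dtype='float32'))  # hypothetical cosine scores
print(K.eval(amsoftmax_loss(y_true, cosine)))  # only the 0.9 target entry is shifted by the margin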
Example 48
Project: Nucleus-Segmentation   Author: ananthrn   File: wunet_model.py    MIT License 5 votes
def weighted_cross_entropy(y_true_and_w, y_pred):
    truth_mask, weights = splitMaskAndWeight(y_true_and_w)
    return weights*K.categorical_crossentropy(truth_mask,y_pred)
Example 49
Project: nlp_toolkit   Author: stevewyl   File: custom_loss.py    MIT License 5 votes
def custom_categorical_crossentropy(y_true, y_pred, n):
    return K.categorical_crossentropy(y_true, y_pred[:, :n]) 
Example 50
Project: graph-representation-learning   Author: vuptran   File: ae.py    MIT License 5 votes
def masked_categorical_crossentropy(y_true, y_pred):
    """ Categorical/softmax cross-entropy loss with masking """
    mask = y_true[:, -1]
    y_true = y_true[:, :-1]
    loss = K.categorical_crossentropy(target=y_true,
                                      output=y_pred,
                                      from_logits=True)
    mask = K.cast(mask, dtype=np.float32)
    loss *= mask
    return K.mean(loss, axis=-1) 
Example 51
Project: mfom_dcase16_task4   Author: Vanova   File: objectives.py    MIT License 5 votes
def mfom_eer_embed(y_true, y_pred):
    """
    MFoM embedding: use MFoM scores as new "soft labels", aka Dark Knowledge by Hinton
    We apply MFoM n_embed = 2 times. We notice that it works better.
       y_true: [batch_sz, nclasses]
       y_pred: sigmoid scores, we preprocess these to d_k and l_k, i.e. loss function l_k(Z)
   """
    alpha = 3.
    beta = 0.
    n_embed = 2
    l = _uvz_loss_scores(y_true, y_pred, alpha, beta)
    l_score = 1 - l
    for t in range(n_embed):  # xrange in the original (Python 2)
        l = _uvz_loss_scores(y_true=y_true, y_pred=l_score, alpha=alpha, beta=beta)
        l_score = 1 - l
    # ===
    # MSE(y_pred - l_score), AvgEER = 13.02, NOTE: does not correlate!!!
    # ===
    # mse = K.mean(K.square(y_pred - l_score3), axis=-1)
    # ===
    # binXent(y_pred - l_score), NOTE: higher than baseline
    # ===
    binxent = K.mean(K.binary_crossentropy(y_pred, l_score), axis=-1)  # NOTE: 11.5 EER
    # ===
    # Xent(y_pred - l_score), NOTE: normal func value, EER is not decreasing
    # ===
    # xent = K.categorical_crossentropy(y_pred, l_score)
    return binxent 
Example 52
Project: EARL   Author: AskNowQA   File: ent_rel_predictor.py    GNU General Public License v3.0 5 votes
def w_categorical_crossentropy(y_true, y_pred, weights):
    nb_cl = len(weights)
    final_mask = K.zeros_like(y_pred[:, 0])
    y_pred_max = K.max(y_pred, axis=1)
    y_pred_max = K.expand_dims(y_pred_max, 1)
    y_pred_max_mat = K.equal(y_pred, y_pred_max)
    for c_p, c_t in product(range(nb_cl), range(nb_cl)):
        final_mask += (K.cast(weights[c_t, c_p], K.floatx()) *
                       K.cast(y_pred_max_mat[:, c_p], K.floatx()) *
                       K.cast(y_true[:, c_t], K.floatx()))
    return K.categorical_crossentropy(y_pred, y_true) * final_mask 
Example 53
Project: goban-image-reader   Author: chaossy   File: model.py    MIT License 5 votes
def loss(y_true, y_pred):
    loss_list = []
    for i in range(BOARD_SIZE * BOARD_SIZE):
        loss = K.categorical_crossentropy(y_true[:, i*3:(i+1)*3], y_pred[:, i*3:(i+1)*3])
        loss_list.append(loss)
    total_loss = add(loss_list)
    return total_loss 
Example 54
Project: unet_keras_tensorboard   Author: YadavKapil   File: main.py    MIT License 5 votes
def categorical_crossentropy_with_logit(y_true, y_pred):
    return K.categorical_crossentropy(y_true, y_pred, from_logits=True) 
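
With from_logits=True the softmax is applied inside the backend, so the model's final layer can stay linear. A hypothetical check that the two formulations agree:

import numpy as np
from keras import backend as K

y_true = K.constant(np.array([[1., 0., 0.]], dtype='float32'))
logits = K.constant(np.array([[2.0, 1.0, 0.1]], dtype='float32'))

from_logits = K.categorical_crossentropy(y_true, logits, from_logits=True)
from_probs = K.categorical_crossentropy(y_true, K.softmax(logits))
print(K.eval(from_logits), K.eval(from_probs))  # ~0.417 in both cases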
Example 55
Project: alphagozero   Author: Narsil   File: model.py    MIT License 5 votes
def loss(y_true, y_pred):
    mse = K.mean(K.square(y_pred - y_true), axis=-1)
    categorical_crossentropy = K.categorical_crossentropy(y_true, y_pred)
    return mse + categorical_crossentropy 
Example 56
Project: Look-Into-Person   Author: foamliu   File: utils.py    MIT License 5 votes
def cross_entropy(y_true, y_pred):
    y_true = K.reshape(y_true, (-1, num_classes))
    y_pred = K.reshape(y_pred, (-1, num_classes))

    idx_max = K.argmax(y_true, axis=1)
    weights = K.gather(prior_factor, idx_max)
    weights = K.reshape(weights, (-1, 1))

    # multiply y_true by weights
    y_true = y_true * weights

    cross_ent = K.categorical_crossentropy(y_pred, y_true)
    cross_ent = K.mean(cross_ent, axis=-1)

    return cross_ent 
Example 57
Project: C3AE   Author: StevenBanama   File: utils.py    BSD 2-Clause "Simplified" License 5 votes
def focal_loss(classes_num, gamma=2., alpha=.25, e=0.1):
    # classes_num contains sample number of each classes
    # copy from https://github.com/maozezhong/focal_loss_multi_class/blob/master/focal_loss.py
    def focal_loss_fixed(target_tensor, prediction_tensor):
        '''
        prediction_tensor is the output tensor with shape [None, 100], where 100 is the number of classes
        target_tensor is the label tensor, with the same shape as prediction_tensor
        '''
        import tensorflow as tf
        from tensorflow.python.ops import array_ops
        from keras import backend as K

        #1# get the focal loss without the balanced weight, as presented in Eq. (4) of the paper
        zeros = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)
        one_minus_p = array_ops.where(tf.greater(target_tensor,zeros), target_tensor - prediction_tensor, zeros)
        FT = -1 * (one_minus_p ** gamma) * tf.log(tf.clip_by_value(prediction_tensor, 1e-8, 1.0))

        #2# get balanced weight alpha
        classes_weight = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)

        total_num = float(sum(classes_num))
        classes_w_t1 = [ total_num / ff for ff in classes_num ]
        sum_ = sum(classes_w_t1)
        classes_w_t2 = [ ff/sum_ for ff in classes_w_t1 ]   #scale
        classes_w_tensor = tf.convert_to_tensor(classes_w_t2, dtype=prediction_tensor.dtype)
        classes_weight += classes_w_tensor

        alpha = array_ops.where(tf.greater(target_tensor, zeros), classes_weight, zeros)

        #3# get balanced focal loss
        balanced_fl = alpha * FT
        balanced_fl = tf.reduce_mean(balanced_fl)

        #4# add another term to prevent overfitting
        # reference : https://spaces.ac.cn/archives/4493
        nb_classes = len(classes_num)
        final_loss = (1-e) * balanced_fl + e * K.categorical_crossentropy(K.ones_like(prediction_tensor)/nb_classes, prediction_tensor)

        return final_loss
    return focal_loss_fixed 
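
The factory returns a Keras-compatible closure, so the class counts are bound at compile time. A hypothetical smoke test (assuming a TF1-era stack, matching the source project's tf.log usage):

import numpy as np
from keras import backend as K

loss_fn = focal_loss(classes_num=[500, 1500, 3000])  # hypothetical per-class sample counts
y_true = K.constant(np.array([[0., 1., 0.]], dtype='float32'))
y_pred = K.constant(np.array([[0.2, 0.7, 0.1]], dtype='float32'))
print(K.eval(loss_fn(y_true, y_pred)))  # scalar: balanced focal term plus smoothing term

# model.compile(optimizer='adam', loss=focal_loss(classes_num=[500, 1500, 3000]))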
Example 58
Project: 2019-Kakao-Arena-Shopping-Classification   Author: junwoopark92   File: network.py    Apache License 2.0 5 votes
def masked_loss_function_s(y_true, y_pred):
    mask = K.max(K.cast(K.not_equal(y_true, mask_value_s), K.floatx()), axis=1)
    loss = K.categorical_crossentropy(y_true, y_pred) * mask * batchsize / K.sum(mask)
    return loss 
Example 59
Project: 2019-Kakao-Arena-Shopping-Classification   Author: junwoopark92   File: network.py    Apache License 2.0 5 votes
def masked_loss_function_d(y_true, y_pred):
    mask = K.max(K.cast(K.not_equal(y_true, mask_value_d), K.floatx()), axis=1)
    loss = K.categorical_crossentropy(y_true, y_pred) * mask * batchsize / K.sum(mask)
    return loss 
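
mask_value_s, mask_value_d and batchsize are module-level globals in the source project; the mask keeps only rows whose label differs from the mask value in at least one position. A sketch of the masking step with hypothetical values:

import numpy as np
from keras import backend as K

mask_value = -1.0  # hypothetical "ignore this row" label
y_true = K.constant(np.array([[0., 1., 0.], [-1., -1., -1.]], dtype='float32'))
mask = K.max(K.cast(K.not_equal(y_true, mask_value), K.floatx()), axis=1)
print(K.eval(mask))  # [1., 0.] -- the fully masked second row contributes no loss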
Example 60
Project: 3D-CNNs-for-Liver-Classification   Author: wcfzl   File: focal_loss.py    Apache License 2.0 5 votes
def focal_loss_fixed(target_tensor, prediction_tensor):
    '''
    prediction_tensor is the output tensor with shape [None, 100], where 100 is the number of classes
    target_tensor is the label tensor, with the same shape as prediction_tensor
    '''
    import tensorflow as tf
    from tensorflow.python.ops import array_ops
    from keras import backend as K
    classes_num=[1]
    gamma = 2.
    alpha = .75
    e = 0.1
    #1# get the focal loss without the balanced weight, as presented in Eq. (4) of the paper
    zeros = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)
    one_minus_p = array_ops.where(target_tensor > zeros, target_tensor - prediction_tensor, zeros)
    FT = -1 * (one_minus_p ** gamma) * tf.log(tf.clip_by_value(prediction_tensor, 1e-8, 1.0))

    #2# get balanced weight alpha
    classes_weight = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)

    total_num = float(sum(classes_num))
    classes_w_t1 = [ total_num / ff for ff in classes_num ]
    sum_ = sum(classes_w_t1)
    classes_w_t2 = [ ff/sum_ for ff in classes_w_t1 ]   #scale
    classes_w_tensor = tf.convert_to_tensor(classes_w_t2, dtype=prediction_tensor.dtype)
    classes_weight += classes_w_tensor

    alpha = array_ops.where(target_tensor > zeros, classes_weight, zeros)

    #3# get balanced focal loss
    balanced_fl = alpha * FT
    balanced_fl = tf.reduce_sum(balanced_fl)

    #4# add another term to prevent overfitting
    # reference : https://spaces.ac.cn/archives/4493
    nb_classes = len(classes_num)
    final_loss = (1-e) * balanced_fl + e * K.categorical_crossentropy(K.ones_like(prediction_tensor)/nb_classes, prediction_tensor)

    return final_loss 
Example 61
Project: deep-spell-checkr   Author: vuptran   File: model.py    MIT License 4 votes
def seq2seq(hidden_size, nb_input_chars, nb_target_chars):
    """Adapted from:
    https://github.com/keras-team/keras/blob/master/examples/lstm_seq2seq.py
    """
    
    # Define the main model consisting of encoder and decoder.
    encoder_inputs = Input(shape=(None, nb_input_chars),
                           name='encoder_data')
    encoder_lstm = LSTM(hidden_size, recurrent_dropout=0.2,
                        return_sequences=True, return_state=False,
                        name='encoder_lstm_1')
    encoder_outputs = encoder_lstm(encoder_inputs)
    
    encoder_lstm = LSTM(hidden_size, recurrent_dropout=0.2,
                        return_sequences=False, return_state=True,
                        name='encoder_lstm_2')
    encoder_outputs, state_h, state_c = encoder_lstm(encoder_outputs)
    # We discard `encoder_outputs` and only keep the states.
    encoder_states = [state_h, state_c]

    # Set up the decoder, using `encoder_states` as initial state.
    decoder_inputs = Input(shape=(None, nb_target_chars),
                           name='decoder_data')
    # We set up our decoder to return full output sequences,
    # and to return internal states as well. We don't use the return
    # states in the training model, but we will use them in inference.
    decoder_lstm = LSTM(hidden_size, dropout=0.2, return_sequences=True,
                        return_state=True, name='decoder_lstm')
    decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                         initial_state=encoder_states)
    decoder_softmax = Dense(nb_target_chars, activation='softmax',
                            name='decoder_softmax')
    decoder_outputs = decoder_softmax(decoder_outputs)

    # The main model will turn `encoder_input_data` & `decoder_input_data`
    # into `decoder_target_data`
    model = Model(inputs=[encoder_inputs, decoder_inputs],
                  outputs=decoder_outputs)
    
    adam = optimizers.Adam(lr=0.001, decay=0.0)
    model.compile(optimizer=adam, loss='categorical_crossentropy',
                  metrics=['accuracy', truncated_acc, truncated_loss])  # truncated_loss is shown in Example 25
    
    # Define the encoder model separately.
    encoder_model = Model(inputs=encoder_inputs, outputs=encoder_states)

    # Define the decoder model separately.
    decoder_state_input_h = Input(shape=(hidden_size,))
    decoder_state_input_c = Input(shape=(hidden_size,))
    decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
    decoder_outputs, state_h, state_c = decoder_lstm(
        decoder_inputs, initial_state=decoder_states_inputs)
    decoder_states = [state_h, state_c]
    decoder_outputs = decoder_softmax(decoder_outputs)
    decoder_model = Model(inputs=[decoder_inputs] + decoder_states_inputs,
                          outputs=[decoder_outputs] + decoder_states)

    return model, encoder_model, decoder_model 
Example 62
Project: 3D-CNNs-for-Liver-Classification   Author: wcfzl   File: focal_loss.py    Apache License 2.0 4 votes
(A factory-wrapped version of Example 60: the same focal_loss_fixed body inside def focal_loss(classes_num, gamma=2., alpha=.75, e=0.1). Note that the inner function immediately re-assigns classes_num, gamma, alpha and e to hard-coded values, so the factory arguments are ignored.)