Python keras.objectives.categorical_crossentropy() Examples

The following are 20 code examples of keras.objectives.categorical_crossentropy(), drawn from open-source projects. The source project, author, file, and license are listed above each example. You may also want to check out the other available functions and classes of the keras.objectives module.
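For orientation, here is a minimal, hedged sketch of calling the function directly. It assumes a Keras version that still ships the keras.objectives module (renamed keras.losses in Keras 2, with the old name kept as an alias for a while) and a TensorFlow backend:

import numpy as np
from keras import backend as K
from keras.objectives import categorical_crossentropy

y_true = K.constant(np.array([[0.0, 1.0, 0.0]]))   # one-hot target
y_pred = K.constant(np.array([[0.1, 0.8, 0.1]]))   # softmax output
loss = categorical_crossentropy(y_true, y_pred)    # per-sample loss tensor
print(K.eval(loss))                                # ~[0.223], i.e. -log(0.8)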
Example #1
Source Project: FasterRCNN_KERAS   Author: akshaylamba   File: losses.py    License: Apache License 2.0
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
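This classifier-head loss recurs across the Faster R-CNN ports below. lambda_cls_class is a module-level weighting constant in each repository's losses.py (1.0 in the ones checked; treat the exact value as an assumption), and the [0, :, :] slice drops the batch axis because the classifier head is run on a single image whose second axis indexes region proposals. A self-contained sketch:

from keras import backend as K
from keras.objectives import categorical_crossentropy

lambda_cls_class = 1.0  # classification-loss weight; defined at module level in the original losses.py

def class_loss_cls(y_true, y_pred):
    # y_true/y_pred: (1, num_rois, num_classes); slice away the batch axis,
    # then average the per-ROI crossentropy.
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))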
Example #2
Source Project: keras-frcnn   Author: kbardool   File: losses.py    License: Apache License 2.0
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
Example #3
Source Project: Keras-FasterRCNN   Author: you359   File: losses.py    License: MIT License
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :])) 
Example #4
Source Project: Keras_object_detection   Author: Abhijit-2592   File: losses.py    License: Apache License 2.0
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
Example #5
Source Project: Pseudo-Label-Keras   Author: koshian2   File: mobilenet_pseudo_cifar.py    License: MIT License
def loss_function(self, y_true, y_pred):
    y_true_item = y_true[:, :self.n_classes]
    unlabeled_flag = y_true[:, self.n_classes]
    entropies = categorical_crossentropy(y_true_item, y_pred)
    coefs = 1.0 - unlabeled_flag + self.alpha_t * unlabeled_flag  # 1 if labeled, else alpha_t
    return coefs * entropies
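The trick here is that y_true is widened by one column: the first n_classes entries hold the (possibly pseudo) one-hot label, and the last entry flags unlabeled samples, so a single loss function can reweight pseudo-labeled data by the scheduled coefficient alpha_t. A hedged sketch of packing such targets (variable names are illustrative, not from the repository):

import numpy as np
from keras.utils import to_categorical

n_classes = 10
y_onehot = to_categorical([3, 7], num_classes=n_classes)  # two CIFAR-10 samples
flags = np.array([[0.0], [1.0]])                          # 0 = labeled, 1 = pseudo-labeled
y_true_packed = np.hstack([y_onehot, flags])              # shape (2, n_classes + 1)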
Example #6
Source Project: Pseudo-Label-Keras   Author: koshian2   File: pseudo_pretrain_cifar.py    License: MIT License
def loss_function(self, y_true, y_pred):
    y_true_item = y_true[:, :self.n_classes]
    unlabeled_flag = y_true[:, self.n_classes]
    entropies = categorical_crossentropy(y_true_item, y_pred)
    coefs = 1.0 - unlabeled_flag + self.alpha_t * unlabeled_flag  # 1 if labeled, else alpha_t
    return coefs * entropies
Example #7
Source Project: Pseudo-Label-Keras   Author: koshian2   File: pseudo_pretrain_cifar.py    License: MIT License
def train(n_labeled_data):
    model = create_cnn()
    
    pseudo = PseudoCallback(model, n_labeled_data, min(512, n_labeled_data))

    # pretrain
    model.compile("adam", loss="categorical_crossentropy", metrics=["acc"])
    model.fit(pseudo.X_train_labeled/255.0, to_categorical(pseudo.y_train_labeled),
              batch_size=pseudo.batch_size, epochs=30,
              validation_data=(pseudo.X_test/255.0, to_categorical(pseudo.y_test)))
    pseudo.y_train_unlabeled_prediction = np.argmax(
            model.predict(pseudo.X_train_unlabeled), axis=-1).reshape(-1, 1)

    # main training with the pseudo-label loss
    model.compile("adam", loss=pseudo.loss_function, metrics=[pseudo.accuracy])

    if not os.path.exists("result_pseudo"):
        os.mkdir("result_pseudo")

    hist = model.fit_generator(pseudo.train_generator(), steps_per_epoch=pseudo.train_steps_per_epoch,
                               validation_data=pseudo.test_generator(), callbacks=[pseudo],
                               validation_steps=pseudo.test_stepes_per_epoch, epochs=100).history
    hist["labeled_accuracy"] = pseudo.labeled_accuracy
    hist["unlabeled_accuracy"] = pseudo.unlabeled_accuracy

    with open(f"result_pseudo/history_{n_labeled_data:05}.dat", "wb") as fp:
        pickle.dump(hist, fp) 
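The two compile() calls implement the usual pseudo-label schedule: pretrain on labeled data with plain categorical crossentropy, predict pseudo-labels for the unlabeled pool, then fine-tune with the flag-aware loss above. Assuming the module is run as a script, a typical driver would sweep the labeled-set size (the values below are illustrative):

if __name__ == "__main__":
    for n_labeled in [500, 1000, 5000]:  # labeled-sample budgets (illustrative)
        train(n_labeled)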
Example #8
Source Project: Pseudo-Label-Keras   Author: koshian2   File: mobilenet_transfer_pseudo_cifar.py    License: MIT License
def loss_function(self, y_true, y_pred):
    y_true_item = y_true[:, :self.n_classes]
    unlabeled_flag = y_true[:, self.n_classes]
    entropies = categorical_crossentropy(y_true_item, y_pred)
    coefs = 1.0 - unlabeled_flag + self.alpha_t * unlabeled_flag  # 1 if labeled, else alpha_t
    return coefs * entropies
Example #9
Source Project: Pseudo-Label-Keras   Author: koshian2   File: pseudo_cifar.py    License: MIT License
def loss_function(self, y_true, y_pred):
    y_true_item = y_true[:, :self.n_classes]
    unlabeled_flag = y_true[:, self.n_classes]
    entropies = categorical_crossentropy(y_true_item, y_pred)
    coefs = 1.0 - unlabeled_flag + self.alpha_t * unlabeled_flag  # 1 if labeled, else alpha_t
    return coefs * entropies
Example #10
Source Project: keras-faster-rcnn   Author: moyiliyi   File: losses.py    License: Apache License 2.0
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
Example #11
Source Project: nlp_xiaojiang   Author: yongzhuo   File: keras_bert_classify_bi_lstm.py    License: MIT License
def compile_model(self):
    self.model.compile(optimizer=args.optimizers,
                       loss=categorical_crossentropy,
                       metrics=args.metrics)
Example #12
Source Project: nlp_xiaojiang   Author: yongzhuo   File: keras_bert_classify_text_cnn.py    License: MIT License
def compile_model(self):
    self.model.compile(optimizer=args.optimizers,
                       loss=categorical_crossentropy,
                       metrics=args.metrics)
Example #13
Source Project: keras-frcnn   Author: small-yellow-duck   File: losses.py    License: Apache License 2.0
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * categorical_crossentropy(y_true, y_pred)
Example #14
Source Project: ssbm_fox_detector   Author: AdamSpannbauer   File: losses.py    License: MIT License
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
Example #15
Source Project: gymexperiments   Author: tambetm   File: a2c.py    License: MIT License
def create_model(env, args):
    h = x = Input(shape=(None,) + env.observation_space.shape, name="x")

    # policy network
    for i in range(args.layers):
        h = TimeDistributed(Dense(args.hidden_size, activation=args.activation), name="h%d" % (i + 1))(h)
    p = TimeDistributed(Dense(env.action_space.n, activation='softmax'), name="p")(h)

    # baseline network
    h = TimeDistributed(Dense(args.hidden_size, activation=args.activation), name="hb")(h)
    b = TimeDistributed(Dense(1), name="b")(h)

    # advantage is additional input
    A = Input(shape=(None,))

    # policy gradient loss and entropy bonus
    def policy_gradient_loss(l_sampled, l_predicted):
        return K.mean(A * categorical_crossentropy(l_sampled, l_predicted), axis=1) \
            - args.beta * K.mean(categorical_crossentropy(l_predicted, l_predicted), axis=1)

    # inputs to the model are observation and total reward,
    # outputs are action probabilities and baseline
    model = Model(input=[x, A], output=[p, b])

    # baseline is optimized with MSE
    model.compile(optimizer=args.optimizer, loss=[policy_gradient_loss, 'mse'])
    model.optimizer.lr = args.optimizer_lr

    return model 
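Two distinct uses of the same function sit inside policy_gradient_loss: with a one-hot l_sampled, categorical_crossentropy(l_sampled, l_predicted) reduces to -log p(a), the REINFORCE term scaled by the advantage A, while categorical_crossentropy(l_predicted, l_predicted) is the entropy H(p) of the policy itself, subtracted (weighted by args.beta) to encourage exploration. A quick numeric check in plain NumPy:

import numpy as np

p = np.array([0.7, 0.2, 0.1])   # policy output for one state
a = np.array([1.0, 0.0, 0.0])   # one-hot sampled action
print(-np.sum(a * np.log(p)))   # 0.357 = -log(0.7), the policy-gradient term
print(-np.sum(p * np.log(p)))   # 0.802 = H(p), the entropy bonus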
Example #16
Source Project: gymexperiments   Author: tambetm   File: pg.py    License: MIT License
def policy_gradient_loss(l_sampled, l_predicted):
    return A * categorical_crossentropy(l_sampled, l_predicted)[:, np.newaxis]

# inputs to the model are observation and advantage,
# outputs are action probabilities and baseline
Example #17
Source Project: gymexperiments   Author: tambetm   File: a2c_atari.py    License: MIT License
def create_model(env, batch_size, num_steps):
    # network inputs are observations and advantages
    h = x = Input(batch_shape=(batch_size, num_steps) + env.observation_space.shape, name="x")
    A = Input(batch_shape=(batch_size, num_steps), name="A")

    # convolutional layers
    h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c1')(h)
    h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c2')(h)
    h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c3')(h)
    h = TimeDistributed(Convolution2D(64, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c4')(h)
    h = TimeDistributed(Flatten(), name="fl")(h)

    # recurrent layer
    h = LSTM(32, return_sequences=True, stateful=True, name="r1")(h)

    # policy network
    p = TimeDistributed(Dense(env.action_space.n, activation='softmax'), name="p")(h)

    # baseline network
    b = TimeDistributed(Dense(1), name="b")(h)

    # inputs to the model are observation and advantages,
    # outputs are action probabilities and baseline
    model = Model(input=[x, A], output=[p, b])

    # policy gradient loss and entropy bonus
    def policy_gradient_loss(l_sampled, l_predicted):
        return K.mean(A * categorical_crossentropy(l_sampled, l_predicted), axis=1) \
            - 0.01 * K.mean(categorical_crossentropy(l_predicted, l_predicted), axis=1)

    # baseline is optimized with MSE
    model.compile(optimizer='adam', loss=[policy_gradient_loss, 'mse'])

    return model 
Example #18
Source Project: Hands-On-Deep-Learning-for-Games   Author: PacktPublishing   File: pitch-generator.py    License: MIT License
def vae_loss(y_true, y_pred):
    xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
    kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var))
    loss = xent_loss + kl_loss
    return loss  

# create the vocabulary 
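Note that z_mean and z_log_var are not arguments: they are tensors produced by the encoder at module level and captured by vae_loss as a closure, which is why the loss only makes sense when compiled against the model that defined them. The kl_loss line is the closed-form KL divergence KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * (1 + log sigma^2 - mu^2 - sigma^2), averaged by K.mean. A minimal sketch of the closure pattern (input and layer sizes are illustrative, not from the book's code):

from keras import backend as K, objectives
from keras.layers import Input, Dense

x = Input(shape=(784,))                  # illustrative input size
h = Dense(256, activation='relu')(x)
z_mean = Dense(2)(h)                     # captured below by vae_loss
z_log_var = Dense(2)(h)                  # captured below by vae_loss

def vae_loss(y_true, y_pred):
    xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
    kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var))
    return xent_loss + kl_loss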
Example #19
Source Project: Hands-On-Deep-Learning-for-Games   Author: PacktPublishing   File: note-generator.py    License: MIT License
def vae_p_loss(y_true, y_pred):
    xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
    kl_loss = - 0.5 * K.mean(1 + z_log_var_p - K.square(z_mean_p) - K.exp(z_log_var_p))
    loss = xent_loss + kl_loss
    return loss

# durations VAE loss 
Example #20
Source Project: Hands-On-Deep-Learning-for-Games   Author: PacktPublishing   File: note-generator.py    License: MIT License
def vae_d_loss(y_true, y_pred):
    xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
    kl_loss = - 0.5 * K.mean(1 + z_log_var_d - K.square(z_mean_d) - K.exp(z_log_var_d))
    loss = xent_loss + kl_loss
    return loss

# load Bach chorales