Python keras.objectives.categorical_crossentropy() Examples

The following are 20 code examples of keras.objectives.categorical_crossentropy(), drawn from open-source projects; each example names its source file, project, and license. Note that keras.objectives was renamed keras.losses in Keras 2, so these snippets target the older Keras 1 API.
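For orientation, here is a minimal, self-contained sketch of what the function computes; it returns one crossentropy value per sample, not a scalar. This snippet is illustrative only and assumes a standard Keras 1 installation:

import numpy as np
from keras import backend as K
from keras.objectives import categorical_crossentropy

# one-hot targets and softmax predictions: two samples, three classes
y_true = K.variable(np.array([[1.0, 0.0, 0.0],
                              [0.0, 1.0, 0.0]]))
y_pred = K.variable(np.array([[0.7, 0.2, 0.1],
                              [0.1, 0.8, 0.1]]))

loss = categorical_crossentropy(y_true, y_pred)  # shape (2,): one value per sample
print(K.eval(loss))  # approximately [0.357, 0.223], i.e. [-log 0.7, -log 0.8]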
Example #1
Source File: keras_bert_classify_bi_lstm.py    From nlp_xiaojiang with MIT License
def compile_model(self):
    # args is a hyperparameter namespace defined elsewhere in the project
    self.model.compile(optimizer=args.optimizers,
                       loss=categorical_crossentropy,
                       metrics=args.metrics)
Example #2
Source File: note-generator.py    From Hands-On-Deep-Learning-for-Games with MIT License
def vae_d_loss(y_true, y_pred):
    # reconstruction term: per-sample categorical crossentropy
    xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
    # KL divergence of N(z_mean_d, exp(z_log_var_d)) from N(0, 1);
    # z_mean_d and z_log_var_d are encoder outputs defined elsewhere in the script
    kl_loss = -0.5 * K.mean(1 + z_log_var_d - K.square(z_mean_d) - K.exp(z_log_var_d))
    return xent_loss + kl_loss

# load Bach chorales 
Example #3
Source File: note-generator.py    From Hands-On-Deep-Learning-for-Games with MIT License
def vae_p_loss(y_true, y_pred):
    # reconstruction term: per-sample categorical crossentropy
    xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
    # KL divergence of N(z_mean_p, exp(z_log_var_p)) from N(0, 1);
    # z_mean_p and z_log_var_p are encoder outputs defined elsewhere in the script
    kl_loss = -0.5 * K.mean(1 + z_log_var_p - K.square(z_mean_p) - K.exp(z_log_var_p))
    return xent_loss + kl_loss

# durations VAE loss 
Example #4
Source File: pitch-generator.py    From Hands-On-Deep-Learning-for-Games with MIT License
def vae_loss(y_true, y_pred):
    # reconstruction term: per-sample categorical crossentropy
    xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
    # KL divergence of N(z_mean, exp(z_log_var)) from N(0, 1);
    # z_mean and z_log_var are encoder outputs defined elsewhere in the script
    kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var))
    return xent_loss + kl_loss

# create the vocabulary 
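Examples #2-#4 are the same pattern: a VAE loss adding the closed-form KL divergence KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2) to the reconstruction crossentropy, with the encoder tensors captured from the enclosing scope. Below is a self-contained variant, sketched with a hypothetical make_vae_loss factory that takes those tensors explicitly:

from keras import backend as K
from keras import objectives

def make_vae_loss(z_mean, z_log_var):
    # z_mean and z_log_var are the encoder's output tensors
    def vae_loss(y_true, y_pred):
        xent_loss = objectives.categorical_crossentropy(y_true, y_pred)
        kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var))
        return xent_loss + kl_loss
    return vae_loss

# usage: model.compile(optimizer='adam', loss=make_vae_loss(z_mean, z_log_var))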
Example #5
Source File: a2c_atari.py    From gymexperiments with MIT License
def create_model(env, batch_size, num_steps):
    # network inputs are observations and advantages
    h = x = Input(batch_shape=(batch_size, num_steps) + env.observation_space.shape, name="x")
    A = Input(batch_shape=(batch_size, num_steps), name="A")

    # convolutional layers (Keras 1 API: subsample = strides, border_mode = padding)
    h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c1')(h)
    h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c2')(h)
    h = TimeDistributed(Convolution2D(32, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c3')(h)
    h = TimeDistributed(Convolution2D(64, 3, 3, subsample=(2, 2), border_mode="same", activation='elu', dim_ordering='tf'), name='c4')(h)
    h = TimeDistributed(Flatten(), name="fl")(h)

    # recurrent layer
    h = LSTM(32, return_sequences=True, stateful=True, name="r1")(h)

    # policy network
    p = TimeDistributed(Dense(env.action_space.n, activation='softmax'), name="p")(h)

    # baseline network
    b = TimeDistributed(Dense(1), name="b")(h)

    # inputs to the model are observation and advantages,
    # outputs are action probabilities and baseline
    model = Model(input=[x, A], output=[p, b])

    # policy gradient loss plus entropy bonus:
    # the crossentropy of a distribution with itself is its entropy
    def policy_gradient_loss(l_sampled, l_predicted):
        return K.mean(A * categorical_crossentropy(l_sampled, l_predicted), axis=1) \
            - 0.01 * K.mean(categorical_crossentropy(l_predicted, l_predicted), axis=1)

    # baseline is optimized with MSE
    model.compile(optimizer='adam', loss=[policy_gradient_loss, 'mse'])

    return model 
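The entropy bonus in Examples #5 and #7 reuses categorical_crossentropy in a neat way: the crossentropy of a distribution with itself, -sum(p * log p), is exactly its entropy, so subtracting it penalizes overly peaked policies and encourages exploration. A quick numerical check, illustrative only:

import numpy as np
from keras import backend as K
from keras.objectives import categorical_crossentropy

p = K.variable(np.array([[0.5, 0.25, 0.25]]))
# crossentropy of p with itself equals the entropy of p
print(K.eval(categorical_crossentropy(p, p)))  # ~1.0397 == -sum(p * log(p))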
Example #6
Source File: pg.py    From gymexperiments with MIT License
def policy_gradient_loss(l_sampled, l_predicted):
    # A is the advantage placeholder defined elsewhere; the added axis lets the
    # per-sample crossentropy broadcast against it
    return A * categorical_crossentropy(l_sampled, l_predicted)[:, np.newaxis]

# inputs to the model are observation and advantage,
# outputs are action probabilities and baseline 
Example #7
Source File: a2c.py    From gymexperiments with MIT License
def create_model(env, args):
    h = x = Input(shape=(None,) + env.observation_space.shape, name="x")

    # policy network
    for i in range(args.layers):
        h = TimeDistributed(Dense(args.hidden_size, activation=args.activation), name="h%d" % (i + 1))(h)
    p = TimeDistributed(Dense(env.action_space.n, activation='softmax'), name="p")(h)

    # baseline network
    h = TimeDistributed(Dense(args.hidden_size, activation=args.activation), name="hb")(h)
    b = TimeDistributed(Dense(1), name="b")(h)

    # advantage is additional input
    A = Input(shape=(None,))

    # policy gradient loss and entropy bonus
    def policy_gradient_loss(l_sampled, l_predicted):
        return K.mean(A * categorical_crossentropy(l_sampled, l_predicted), axis=1) \
            - args.beta * K.mean(categorical_crossentropy(l_predicted, l_predicted), axis=1)

    # inputs to the model are observation and advantage,
    # outputs are action probabilities and baseline
    model = Model(input=[x, A], output=[p, b])

    # baseline is optimized with MSE
    model.compile(optimizer=args.optimizer, loss=[policy_gradient_loss, 'mse'])
    model.optimizer.lr = args.optimizer_lr

    return model 
Example #8
Source File: losses.py    From ssbm_fox_detector with MIT License
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
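Examples #8, #11, #12, and #18-#20 are near-identical Faster R-CNN classifier losses. lambda_cls_class is a module-level weighting constant in those repositories, and the [0, :, :] indexing selects the single image in the batch (these pipelines train with batch size 1), averaging the crossentropy over its ROIs. A self-contained sketch, with the constant's value assumed:

from keras import backend as K
from keras.objectives import categorical_crossentropy

lambda_cls_class = 1.0  # assumed value; defined at module level in the repos

def class_loss_cls(y_true, y_pred):
    # y_true/y_pred have shape (1, num_rois, num_classes); average over ROIs
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))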
Example #9
Source File: losses.py    From keras-frcnn with Apache License 2.0
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * categorical_crossentropy(y_true, y_pred)
Example #10
Source File: keras_bert_classify_text_cnn.py    From nlp_xiaojiang with MIT License
def compile_model(self):
    # args is a hyperparameter namespace defined elsewhere in the project
    self.model.compile(optimizer=args.optimizers,
                       loss=categorical_crossentropy,
                       metrics=args.metrics)
Example #11
Source File: losses.py    From FasterRCNN_KERAS with Apache License 2.0
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
Example #12
Source File: losses.py    From keras-faster-rcnn with Apache License 2.0
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
Example #13
Source File: pseudo_cifar.py    From Pseudo-Label-Keras with MIT License
def loss_function(self, y_true, y_pred):
    # y_true packs the one-hot class vector in the first n_classes columns
    # and a labeled/unlabeled flag in the last column
    y_true_item = y_true[:, :self.n_classes]
    unlabeled_flag = y_true[:, self.n_classes]
    entropies = categorical_crossentropy(y_true_item, y_pred)
    coefs = 1.0 - unlabeled_flag + self.alpha_t * unlabeled_flag  # 1 if labeled, else alpha_t
    return coefs * entropies
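The Pseudo-Label losses (Examples #13, #14, #16, #17) expect y_true to carry the n_classes one-hot columns plus a final labeled/unlabeled flag, so unlabeled samples get weight alpha_t instead of 1. A sketch of how such a target row could be packed; pack_target is a hypothetical helper, not part of the repository:

import numpy as np

def pack_target(class_index, n_classes, is_unlabeled):
    # [one-hot class vector | flag], flag = 1.0 for unlabeled, 0.0 for labeled
    row = np.zeros(n_classes + 1, dtype="float32")
    row[class_index] = 1.0
    row[n_classes] = 1.0 if is_unlabeled else 0.0
    return row

print(pack_target(2, 10, False))  # labeled sample of class 2
print(pack_target(7, 10, True))   # pseudo-labeled sample of class 7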
Example #14
Source File: mobilenet_transfer_pseudo_cifar.py    From Pseudo-Label-Keras with MIT License
def loss_function(self, y_true, y_pred):
    y_true_item = y_true[:, :self.n_classes]
    unlabeled_flag = y_true[:, self.n_classes]
    entropies = categorical_crossentropy(y_true_item, y_pred)
    coefs = 1.0 - unlabeled_flag + self.alpha_t * unlabeled_flag  # 1 if labeled, else alpha_t
    return coefs * entropies
Example #15
Source File: pseudo_pretrain_cifar.py    From Pseudo-Label-Keras with MIT License
def train(n_labeled_data):
    model = create_cnn()
    
    pseudo = PseudoCallback(model, n_labeled_data, min(512, n_labeled_data))

    # pretrain on the labeled subset only
    model.compile("adam", loss="categorical_crossentropy", metrics=["acc"])
    model.fit(pseudo.X_train_labeled / 255.0, to_categorical(pseudo.y_train_labeled),
              batch_size=pseudo.batch_size, epochs=30,
              validation_data=(pseudo.X_test / 255.0, to_categorical(pseudo.y_test)))
    # pseudo-labels for the unlabeled set: the model's current class predictions
    pseudo.y_train_unlabeled_prediction = np.argmax(
            model.predict(pseudo.X_train_unlabeled), axis=-1).reshape(-1, 1)

    # main training with the pseudo-label loss
    model.compile("adam", loss=pseudo.loss_function, metrics=[pseudo.accuracy])

    if not os.path.exists("result_pseudo"):
        os.mkdir("result_pseudo")

    hist = model.fit_generator(pseudo.train_generator(), steps_per_epoch=pseudo.train_steps_per_epoch,
                               validation_data=pseudo.test_generator(), callbacks=[pseudo],
                               validation_steps=pseudo.test_stepes_per_epoch, epochs=100).history
    hist["labeled_accuracy"] = pseudo.labeled_accuracy
    hist["unlabeled_accuracy"] = pseudo.unlabeled_accuracy

    with open(f"result_pseudo/history_{n_labeled_data:05}.dat", "wb") as fp:
        pickle.dump(hist, fp) 
Example #16
Source File: pseudo_pretrain_cifar.py    From Pseudo-Label-Keras with MIT License
def loss_function(self, y_true, y_pred):
    y_true_item = y_true[:, :self.n_classes]
    unlabeled_flag = y_true[:, self.n_classes]
    entropies = categorical_crossentropy(y_true_item, y_pred)
    coefs = 1.0 - unlabeled_flag + self.alpha_t * unlabeled_flag  # 1 if labeled, else alpha_t
    return coefs * entropies
Example #17
Source File: mobilenet_pseudo_cifar.py    From Pseudo-Label-Keras with MIT License
def loss_function(self, y_true, y_pred):
    y_true_item = y_true[:, :self.n_classes]
    unlabeled_flag = y_true[:, self.n_classes]
    entropies = categorical_crossentropy(y_true_item, y_pred)
    coefs = 1.0 - unlabeled_flag + self.alpha_t * unlabeled_flag  # 1 if labeled, else alpha_t
    return coefs * entropies
Example #18
Source File: losses.py    From Keras_object_detection with Apache License 2.0
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))
Example #19
Source File: losses.py    From Keras-FasterRCNN with MIT License
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :])) 
Example #20
Source File: losses.py    From keras-frcnn with Apache License 2.0
def class_loss_cls(y_true, y_pred):
    return lambda_cls_class * K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))