Python keras.losses.binary_crossentropy() Examples

The following code examples show how to use keras.losses.binary_crossentropy(). They are drawn from open source Python projects.
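Before the project examples, here is a minimal, self-contained sketch of the function itself (an illustrative snippet, assuming the standalone keras package with the TensorFlow backend; the data values are made up):

from keras import backend as K
from keras.losses import binary_crossentropy

# Ground-truth labels and predicted probabilities for four samples.
y_true = K.constant([0., 1., 1., 0.])
y_pred = K.constant([0.1, 0.9, 0.8, 0.3])

# binary_crossentropy averages the per-element crossentropy over the last axis;
# K.eval materializes the symbolic result as a NumPy value.
print(K.eval(binary_crossentropy(y_true, y_pred)))  # a single scalar for 1-D inputs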

Example 1
Project: deep-learning-keras   Author: arnaudvl   File: nn.py    MIT License
def _calc_metric(self,y_true,y_pred):
        """
        Calculate evaluation metric.
        
        Supports: "roc-auc","norm-gini","mean_squared_error","mean_absolute_error",
                  "categorical_crossentropy","binary_crossentropy".
        """
        if self._val_loss=='roc-auc':
            metric = roc_auc_score(y_true, y_pred)
        elif self._val_loss=='norm-gini':
            metric = (2 * roc_auc_score(y_true, y_pred)) - 1
        elif self._val_loss=='mean_squared_error':
            metric = K.eval(mean_squared_error(K.variable(y_true), K.variable(y_pred)))
        elif self._val_loss=='mean_absolute_error':
            metric = K.eval(mean_absolute_error(K.variable(y_true), K.variable(y_pred)))
        elif self._val_loss=='categorical_crossentropy':
            metric = K.eval(categorical_crossentropy(K.variable(y_true), K.variable(y_pred)))
        elif self._val_loss=='binary_crossentropy':
            metric = K.eval(binary_crossentropy(K.variable(y_true), K.variable(y_pred)))
        else:
            raise ValueError('Invalid value for custom_eval_stopping["name"]: "roc-auc", "norm-gini", '
                             '"mean_squared_error", "mean_absolute_error", "categorical_crossentropy" '
                             'and "binary_crossentropy" are supported.')
        return metric 
Example 2
Project: deep-learning-keras   Author: arnaudvl   File: nn.py    MIT License
def _get_metric(self,y_true,y_pred):
        """
        Calculate metric being logged.
        
        Supports: "roc-auc","norm-gini","mean_squared_error","mean_absolute_error",
                  "categorical_crossentropy","binary_crossentropy".
        """
        if self._metric=='roc-auc':
            metric = roc_auc_score(y_true, y_pred)
        elif self._metric=='norm-gini':
            metric = (2 * roc_auc_score(y_true, y_pred)) - 1
        elif self._metric=='mean_squared_error':
            metric = K.eval(mean_squared_error(K.variable(y_true), K.variable(y_pred)))
        elif self._metric=='mean_absolute_error':
            metric = K.eval(mean_absolute_error(K.variable(y_true), K.variable(y_pred)))
        elif self._metric=='categorical_crossentropy':
            metric = K.eval(categorical_crossentropy(K.variable(y_true), K.variable(y_pred)))
        elif self._metric=='binary_crossentropy':
            metric = K.eval(binary_crossentropy(K.variable(y_true), K.variable(y_pred)))
        else:
            raise ValueError('Invalid value for custom_eval_stopping["name"]: "roc-auc", "norm-gini", '
                             '"mean_squared_error", "mean_absolute_error", "categorical_crossentropy" '
                             'and "binary_crossentropy" are supported.')
        return metric 
Example 3
Project: kaggle-competitions-framework   Author: sergeyshilin   File: keras_nn.py    MIT License
def dense_nn_model():
    layer_size = [16, 8, 8, 4]
    model = Sequential()

    for i, nodes in enumerate(layer_size):
        model.add(Dense(
            nodes,
            activation='tanh',
            kernel_initializer='glorot_uniform',
            kernel_regularizer=regularizers.l2(0.01),
            name='dense_{}'.format(i)))
        model.add(Dropout(rate=0.1))

    model.add(Dense(1, kernel_initializer='glorot_uniform',
        activation='sigmoid', name='output'))

    model.compile(loss='binary_crossentropy', optimizer='adam',
        metrics=[keras_auc, 'accuracy'])
    return model 
Example 4
Project: kpi2017   Author: deepmipt   File: model.py    Apache License 2.0
def _init_from_saved(self, fname):

        with open(fname + '_opt.json', 'r') as opt_file:
            self.opt = json.load(opt_file)

        if self.model_type == 'nn':
            if self.model_name == 'cnn_word':
                self.model = self.cnn_word_model()
            if self.model_name == 'lstm_word':
                self.model = self.lstm_word_model()

            optimizer = Adam(lr=self.opt['learning_rate'], decay=self.opt['learning_decay'])
            self.model.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['binary_accuracy'])
            print('[ Loading model weights %s ]' % fname)
            self.model.load_weights(fname + '.h5')

        if self.model_type == 'ngrams':
            with open(fname + '_cls.pkl', 'rb') as model_file:
                self.model = pickle.load(model_file)
            print('CLS:', self.model) 
Example 5
Project: kpi2017   Author: deepmipt   File: model.py    Apache License 2.0
def update(self, batch):
        x, y = batch
        y = np.array(y)
        y_pred = None

        if self.model_type == 'nn':
            self.train_loss, self.train_acc = self.model.train_on_batch(x, y)
            y_pred = self.model.predict_on_batch(x).reshape(-1)
            self.train_auc = roc_auc_score(y, y_pred)

        if self.model_type == 'ngrams':
            x = vectorize_select_from_data(x, self.vectorizers, self.selectors)
            self.model.fit(x, y.reshape(-1))
            y_pred = np.array(self.model.predict_proba(x)[:,1]).reshape(-1)
            y_pred_tensor = K.constant(y_pred, dtype='float64')
            self.train_loss = K.eval(binary_crossentropy(y.astype('float'), y_pred_tensor))
            self.train_acc = K.eval(binary_accuracy(y.astype('float'), y_pred_tensor))
            self.train_auc = roc_auc_score(y, y_pred)
        self.updates += 1
        return y_pred 
Example 6
Project: talos   Author: autonomio   File: params.py    MIT License
def breast_cancer():

    from keras.optimizers import Adam, Nadam, RMSprop
    from keras.losses import logcosh, binary_crossentropy
    from keras.activations import relu, elu, sigmoid

    # then we can go ahead and set the parameter space
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16, 32, 64],
         'hidden_layers': [0, 1, 2],
         'batch_size': (2, 30, 10),
         'epochs': [50, 100, 150],
         'dropout': (0, 0.5, 5),
         'shapes': ['brick', 'triangle', 'funnel'],
         'optimizer': [Adam, Nadam, RMSprop],
         'losses': [logcosh, binary_crossentropy],
         'activation': [relu, elu],
         'last_activation': [sigmoid]}

    return p 
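In talos, tuples such as (0.5, 5, 10) are range specifications, conventionally (min, max, steps), rather than literal value lists. A hedged usage sketch of feeding this dictionary into a scan follows; breast_cancer_model is a hypothetical model-building function written against talos's (x_train, y_train, x_val, y_val, params) convention, the placeholder data is made up, and exact Scan keyword names vary across talos versions:

import numpy as np
import talos as ta
from keras.models import Sequential
from keras.layers import Dense, Dropout

# Placeholder data purely for illustration.
x = np.random.rand(200, 30)
y = np.random.randint(0, 2, 200)

def breast_cancer_model(x_train, y_train, x_val, y_val, params):
    # A toy model; the 'lr', 'hidden_layers' and 'shapes' entries are ignored here.
    model = Sequential()
    model.add(Dense(params['first_neuron'], input_dim=x_train.shape[1],
                    activation=params['activation']))
    model.add(Dropout(params['dropout']))
    model.add(Dense(1, activation=params['last_activation']))
    model.compile(optimizer=params['optimizer'](), loss=params['losses'],
                  metrics=['acc'])
    history = model.fit(x_train, y_train, validation_data=(x_val, y_val),
                        batch_size=params['batch_size'], epochs=params['epochs'],
                        verbose=0)
    # talos expects the History object and the fitted model back.
    return history, model

ta.Scan(x=x, y=y, params=breast_cancer(), model=breast_cancer_model)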
Example 7
Project: ae-review-resources   Author: fdavidcl   File: utils.py    Mozilla Public License 2.0
def contractive_loss(model, rec_err = losses.binary_crossentropy, tanh = True, lam = 2e-4):
    # derivative of tanh or sigmoid, expressed in terms of the activation h
    der_act = (lambda h: 1 - h * h) if tanh else (lambda h: h * (1 - h))
    
    # Keras calls loss functions as loss(y_true, y_pred)
    def loss(y_true, y_pred):
        rec = rec_err(y_true, y_pred)

        W = K.variable(value=model.get_layer('encoded').get_weights()[0])  # N x N_hidden
        W = K.transpose(W)  # N_hidden x N
        h = model.get_layer('encoded').output
        dh = der_act(h)  # N_batch x N_hidden
        
        # N_batch x N_hidden * N_hidden x 1 = N_batch x 1
        contractive = lam * K.sum(dh**2 * K.sum(W**2, axis=1), axis=1)
        
        return rec + contractive

    return loss 
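For reference, the penalty term above is the contractive autoencoder regularizer: lam times the (approximate) squared Frobenius norm of the encoder Jacobian. For an elementwise activation with derivative sigma'(h_j) it factorizes exactly as in the code; a LaTeX sketch, with W and h matching the variables above:

\mathcal{L}_{\mathrm{CAE}}(x) = \mathcal{L}_{\mathrm{rec}}(x, \hat{x})
    + \lambda \, \lVert J_f(x) \rVert_F^2,
\qquad
\lVert J_f(x) \rVert_F^2 = \sum_j \bigl( \sigma'(h_j) \bigr)^2 \sum_i W_{ji}^2

with sigma'(h) = 1 - h^2 for tanh and sigma'(h) = h(1 - h) for the sigmoid.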
Example 8
Project: deepArt-generation   Author: tuanle618   File: vae.py    MIT License
def vae_loss(self, use_mse):
        if use_mse:
            reconstruction_loss = mse(K.flatten(self.vae_input),
                                      K.flatten(self.vae_output))
        else:
            reconstruction_loss = binary_crossentropy(K.flatten(self.vae_input),
                                                      K.flatten(self.vae_output))

        reconstruction_loss *= self.rows * self.cols
        ## Kullback-Leibler divergence in closed form
        kl_loss = 1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        #kl_loss = K.mean(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(reconstruction_loss + kl_loss)

        return vae_loss
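The closed-form KL term used above is the divergence between the approximate posterior and the standard normal prior, with z_mean as \mu and z_log_var as \log \sigma^2:

D_{\mathrm{KL}}\bigl( \mathcal{N}(\mu, \sigma^2) \,\|\, \mathcal{N}(0, I) \bigr)
    = -\tfrac{1}{2} \sum_j \bigl( 1 + \log \sigma_j^2 - \mu_j^2 - \sigma_j^2 \bigr)

which is exactly the -0.5 * sum(1 + z_log_var - z_mean**2 - exp(z_log_var)) computed in the code.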

Example 9
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License
def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """ Implements nline Bootstrapping crossentropy loss, to train only on hard pixels,
        see  https://arxiv.org/abs/1605.06885 Bridging Category-level and Instance-level Semantic Image Segmentation
        The implementation is a bit different as we use binary crossentropy instead of softmax
        SUPPORTS ONLY MINIBATCH WITH 1 ELEMENT!
    # Arguments
        y_true: A tensor with labels.

        y_pred: A tensor with predicted probabilities.

        pixels: number of hard pixels to keep

        threshold: confidence cut-off, e.g. with threshold 0.7, y_true=1 and prediction 0.65, we consider that pixel hard
    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    difference = K.abs(y_true - y_pred)

    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))

    return K.mean(K.binary_crossentropy(y_true, y_pred)) 
Example 10
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License
def bce_border(y_true, y_pred):
    border = get_border_mask((21, 21), y_true)

    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.tf.gather(y_true_f, K.tf.where(border > 0.5))
    y_pred_f = K.tf.gather(y_pred_f, K.tf.where(border > 0.5))

    return binary_crossentropy(y_true_f, y_pred_f) 
Example 11
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    MIT License
def make_loss(loss_name):
    if loss_name == 'crossentropy':
        return K.binary_crossentropy
    elif loss_name == 'crossentropy_boot':
        def loss(y, p):
            return bootstrapped_crossentropy(y, p, 'hard', 0.9)
        return loss
    elif loss_name == 'dice':
        return dice_coef_loss
    elif loss_name == 'bce_dice':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='soft', alpha=1)

        return loss
    elif loss_name == 'boot_soft':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='soft', alpha=0.95)

        return loss
    elif loss_name == 'boot_hard':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='hard', alpha=0.95)

        return loss
    elif loss_name == 'online_bootstrapping':
        def loss(y, p):
            return online_bootstrapping(y, p, pixels=512 * 64, threshold=0.7)

        return loss
    elif loss_name == 'dice_coef_loss_border':
        return dice_coef_loss_border
    elif loss_name == 'bce_dice_loss_border':
        return bce_dice_loss_border
    else:
        ValueError("Unknown loss.") 
Example 12
Project: ColiCoords   Author: Jhsmit   File: losses.py    MIT License
def bce_dice_loss(y_true, y_pred):
    loss = binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
    return loss 
Example 13
Project: Keras-FCN-template   Author: MchZys   File: losses.py    BSD 3-Clause "New" or "Revised" License
def bce_dice_loss(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred) 
Example 14
Project: Keras-FCN-template   Author: MchZys   File: losses.py    BSD 3-Clause "New" or "Revised" License
def bce_logdice_loss(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) - K.log(1. - dice_loss(y_true, y_pred)) 
Example 15
Project: Road_Segmentation_ML   Author: TaoSunVoyage   File: losses.py    MIT License
def bce_dice_loss(y_true, y_pred):
    loss = binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
    return loss 
Example 16
Project: adversarial-variational-bayes   Author: gdikov   File: losses.py    MIT License
def discriminator_loss(discrim_output_prior, discrim_output_posterior, from_logits=False):
        if from_logits:
            discrim_output_posterior = ker.sigmoid(discrim_output_posterior)
            discrim_output_prior = ker.sigmoid(discrim_output_prior)
        # The discriminator loss is the GAN loss with inputs from the prior and posterior distributions
        discriminator_loss = ker.mean(binary_crossentropy(y_pred=discrim_output_posterior,
                                                          y_true=ker.ones_like(discrim_output_posterior))
                                      + binary_crossentropy(y_pred=discrim_output_prior,
                                                            y_true=ker.zeros_like(discrim_output_prior)))
        return discriminator_loss 
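In equation form this is the standard GAN discriminator objective, with posterior samples labeled 1 and prior samples labeled 0 (D denotes the discriminator output after the optional sigmoid):

\mathcal{L}_D = -\,\mathbb{E}_{z \sim q}\bigl[ \log D(z) \bigr]
    - \mathbb{E}_{z \sim p}\bigl[ \log \bigl( 1 - D(z) \bigr) \bigr]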
Example 17
Project: thyroid_segmentation   Author: suryatejadev   File: loss.py    MIT License
def bin_crossentropy_loss(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) 
Example 18
Project: kpi2017   Author: deepmipt   File: model.py    Apache License 2.0
def _init_from_scratch(self):
        if self.model_name == 'log_reg':
            self.model = self.log_reg_model()
        if self.model_name == 'svc':
            self.model = self.svc_model()
        if self.model_name == 'cnn_word':
            self.model = self.cnn_word_model()
        if self.model_name == 'lstm_word':
            self.model = self.lstm_word_model()

        if self.model_type == 'nn':
            optimizer = Adam(lr=self.opt['learning_rate'], decay=self.opt['learning_decay'])
            self.model.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['binary_accuracy']) 
Example 19
Project: keras-pandas   Author: bjherger   File: Boolean.py    MIT License
def output_suggested_loss(self):
        self._check_output_support()
        suggested_loss = losses.binary_crossentropy
        return suggested_loss 
Example 20
Project: image-segmentation   Author: nearthlab   File: semantic_model_wrapper.py    MIT License
def bce_loss_graph(gt, pr):
    return K.mean(binary_crossentropy(gt, pr)) 
Example 21
Project: ECG_Heartbeat_Classification   Author: CVxTz   File: baseline_ptbdb_transfer_fullupdate.py    MIT License
def get_model():
    nclass = 1
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt, loss=losses.binary_crossentropy, metrics=['acc'])
    model.summary()
    return model 
Example 22
Project: ECG_Heartbeat_Classification   Author: CVxTz   File: baseline_ptbdb.py    MIT License
def get_model():
    nclass = 1
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt, loss=losses.binary_crossentropy, metrics=['acc'])
    model.summary()
    return model 
Example 23
Project: ECG_Heartbeat_Classification   Author: CVxTz   File: baseline_ptbdb_transfer_freeze.py    MIT License
def get_model():
    nclass = 1
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid", trainable=False)(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt, loss=losses.binary_crossentropy, metrics=['acc'])
    model.summary()
    return model 
Example 24
Project: DeepMicro   Author: minoh0201   File: DNN_models.py    MIT License
def mlp_model(input_dim, numHiddenLayers=3, numUnits=64, dropout_rate=0.5):

    model = Sequential()

    #Check number of hidden layers
    if numHiddenLayers >= 1:
        # First Hidden layer
        model.add(Dense(numUnits, input_dim=input_dim, activation='relu'))
        model.add(Dropout(dropout_rate))

        # Second to the last hidden layers
        for i in range(numHiddenLayers - 1):
            numUnits = numUnits // 2
            model.add(Dense(numUnits, activation='relu'))
            model.add(Dropout(dropout_rate))

        # output layer
        model.add(Dense(1, activation='sigmoid'))

    else:
        # output layer
        model.add(Dense(1, input_dim=input_dim, activation='sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adam')  # metrics=['accuracy']

    return model


Example 25
Project: talos   Author: autonomio   File: params.py    MIT License
def titanic():

    # here use a standard 2d dictionary for inputting the param boundaries
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16],
         'batch_size': [20, 30, 40],
         'dropout': (0, 0.5, 5),
         'optimizer': ['Adam', 'Nadam'],
         'losses': ['logcosh', 'binary_crossentropy'],
         'activation': ['relu', 'elu'],
         'last_activation': ['sigmoid']}

    return p 
Example 26
Project: dama_ml   Author: elaeon   File: w_keras.py    Apache License 2.0
def vae_loss(num_features=None, z_log_var=None, z_mean=None):
    def vae_loss(x, outputs):
        reconstruction_loss = binary_crossentropy(x, outputs)# * num_features
        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1) * -0.5
        return K.mean(reconstruction_loss + kl_loss)
    return vae_loss 
Example 27
Project: deid-training-data   Author: maxfriedrich   File: adversarial.py    MIT License
def adversarial_objective(y_true, y_pred):
    loss = binary_crossentropy(y_true, y_pred)
    random_guessing = -K.log(0.5)
    return K.abs(loss - random_guessing) 
Example 28
Project: deid-training-data   Author: maxfriedrich   File: discriminator.py    MIT License
def discriminator_loss(y_true, y_pred):
    """ Compares the actual binary crossentropy loss to the random guessing loss (0.6931..., accuracy 0.5) and returns
    the maximum. This is motivated by the fact that our adversarial discriminators should not be worse than random
    guessing, otherwise we could just flip every prediction and get a better discriminator.
    """
    loss = binary_crossentropy(y_true, y_pred)
    random_guessing = -K.log(0.5)
    return K.maximum(loss, random_guessing) 
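The random-guessing constant in both snippets is just the crossentropy of predicting 0.5 for every label; a quick standalone check (the label values are illustrative):

import numpy as np
from keras import backend as K
from keras.losses import binary_crossentropy

y_true = K.constant([0., 1., 0., 1.])
y_half = K.constant([0.5, 0.5, 0.5, 0.5])

# -(y*log(0.5) + (1-y)*log(0.5)) = -log(0.5), regardless of y.
print(K.eval(binary_crossentropy(y_true, y_half)))  # ~0.6931
print(-np.log(0.5))                                 # 0.6931...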
Example 29
Project: hyperparameter_hunter   Author: HunterMcGushion   File: test_keras_helper.py    MIT License
def dummy_0_build_fn(input_shape=(30,)):
    model = Sequential(
        [
            Dense(50, kernel_initializer="uniform", input_shape=input_shape, activation="relu"),
            Dropout(0.5),
            Dense(1, kernel_initializer="uniform", activation="sigmoid"),
        ]
    )
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model 
Example 30
Project: malignancy_detection   Author: CVxTz   File: models.py    MIT License
def get_model_classif_nasnet():
    inputs = Input((96, 96, 3))
    base_model = NASNetMobile(include_top=False, input_shape=(96, 96, 3))#, weights=None
    x = base_model(inputs)
    out1 = GlobalMaxPooling2D()(x)
    out2 = GlobalAveragePooling2D()(x)
    out3 = Flatten()(x)
    out = Concatenate(axis=-1)([out1, out2, out3])
    out = Dropout(0.5)(out)
    out = Dense(1, activation="sigmoid", name="3_")(out)
    model = Model(inputs, out)
    model.compile(optimizer=Adam(0.0001), loss=binary_crossentropy, metrics=['acc'])
    model.summary()

    return model 
Example 31
Project: ae-review-resources   Author: fdavidcl   File: cancer.py    Mozilla Public License 2.0
def train(self, optimizer = "rmsprop", loss = losses.binary_crossentropy, epochs = 50):
        # Here we use binary crossentropy as the loss function,
        # since the output of our model lies in the interval [0,1]
        # and our data is normalized.
        # Otherwise we could use 'mean_squared_error'.
        if self.autoencoder.robust:
            loss = correntropy_loss()
            
        if self.autoencoder.contractive:
            loss = contractive_loss(self.autoencoder.model, rec_err = loss)

        self.autoencoder.model.compile(optimizer = optimizer,
                                       loss = loss)

        # train
        history = LossHistory()
        if self.autoencoder.denoising:
            for ep in range(epochs):
                noisy_train = noise_input(self.x_train)
                self.autoencoder.model.fit(noisy_train, self.x_train,
                                           epochs = 1,
                                           batch_size = 256,
                                           shuffle = True,
                                           callbacks=[history])
        else:
            self.autoencoder.model.fit(self.x_train, self.x_train,
                                       epochs = epochs,
                                       batch_size = 256,
                                       shuffle = True,
                                       callbacks=[history])

        self.name = "{}-{}".format(
            optimizer,
            "mse" if loss == losses.mean_squared_error else ("xent" if loss == losses.binary_crossentropy or self.autoencoder.contractive else "corr")
        )

        with open("cancer-{}-{}.csv".format(self.autoencoder.name, self.name), "w") as out_file:
            out_file.write(",".join(("{}".format(x) for x in history.losses)))

        return self 
Example 32
Project: chazzbot   Author: RamboFisk   File: dnn_model.py    GNU General Public License v3.0
def model_creator(leng=448, layer_size=512):
  #linear network
  model = Sequential()

  #model.add(Embedding(3+1, 8, input_length=leng))
  #model.add(Flatten())

  model.add(Dense(units=layer_size, kernel_initializer='normal', activation="relu", input_dim=leng))

  model.add(BatchNormalization())
  #model.add(Dropout(0.2))  
  model.add(Dense(units=layer_size, kernel_initializer='normal', activation="relu"))
  #model.add(Dropout(0.2))
  model.add(Dense(units=layer_size, kernel_initializer='normal', activation="relu"))
  #model.add(Dropout(0.2))
  model.add(Dense(units=layer_size, kernel_initializer='normal', activation="relu"))
  model.add(Dense(units=1, activation="sigmoid"))

  #opt = SGD(lr=0.0001, momentum=0.9, nesterov=True) 
  opt = Adam(lr=0.001)
  model.compile(loss=losses.binary_crossentropy,
                optimizer=opt,
                metrics=['accuracy'])

  print(model.summary())
  return model 
Example 33
Project: DeepPavlov   Author: deepmipt   File: keras_siamese_model.py    Apache License 2.0
def compile(self) -> None:
        optimizer = Adam(lr=self.learning_rate)
        loss = losses.binary_crossentropy
        self.model.compile(loss=loss, optimizer=optimizer) 
Example 34
Project: DeepPavlov   Author: deepmipt   File: bilstm_siamese_network.py    Apache License 2.0
def compile(self) -> None:
        optimizer = Adam(lr=self.learning_rate)
        if self.triplet_mode:
            loss = self._triplet_loss
        else:
            loss = losses.binary_crossentropy
        self.model.compile(loss=loss, optimizer=optimizer)
        self.score_model = self.create_score_model() 
Example 35
Project: Python-Deep-Learning-SE   Author: ivan-vasilev   File: chapter_06_001.py    MIT License
def build_vae(intermediate_dim=512, latent_dim=2):
    """
    Build VAE
    :param intermediate_dim: size of hidden layers of the encoder/decoder
    :param latent_dim: latent space size
    :returns tuple: the encoder, the decoder, and the full vae
    """

    # encoder first
    inputs = Input(shape=(image_size,), name='encoder_input')
    x = Dense(intermediate_dim, activation='relu')(inputs)

    # latent mean and variance
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)

    # reparametrization trick for random sampling
    # Note the use of the Lambda layer
    # At runtime, it will call the sampling function
    z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

    # full encoder model
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    encoder.summary()

    # decoder
    latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
    x = Dense(intermediate_dim, activation='relu')(latent_inputs)
    outputs = Dense(image_size, activation='sigmoid')(x)

    # full decoder model
    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()

    # VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name='vae')

    # Loss function
    # we start with the reconstruction loss
    reconstruction_loss = binary_crossentropy(inputs, outputs) * image_size

    # next is the KL divergence
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5

    # we combine them in a total loss
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)

    return encoder, decoder, vae 
Example 36
Project: musical-onset-efficient   Author: ronggong   File: bock_crnn_basecode.py    GNU Affero General Public License v3.0
def loss_cal(fns, data_path, scaler, model, len_seq):
    """
    Calculate loss
    :param fns:
    :param data_path:
    :param scaler:
    :param model:
    :return:
    """
    y_pred_val_all = np.array([], dtype='float32')
    label_val_all = np.array([], dtype='int')

    for fn in fns:

        mfcc_line, label, sample_weights = featureLabelSampleWeightsLoad(data_path,
                                                                         fn,
                                                                         scaler)

        # pad sequence
        mfcc_line_pad, label_pad, sample_weights_pad, len_padded = \
            featureLabelSampleWeightsPad(mfcc_line, label, sample_weights, len_seq)

    iter_time = len(mfcc_line_pad) // len_seq  # integer division, so range() receives an int
        for ii_iter in range(iter_time):

            # create tensor from the padded line
            mfcc_line_tensor, label_tensor, _ = \
                createInputTensor(mfcc_line_pad, label_pad, sample_weights_pad, len_seq, ii_iter)

            y_pred = model.predict_on_batch(mfcc_line_tensor)

            # remove the padded samples
            if ii_iter == iter_time - 1 and len_padded > 0:
                y_pred = y_pred[:, :len_seq - len_padded, :]
                label_tensor = label_tensor[:, :len_seq - len_padded, :]

            # reduce the label dimension
            y_pred = y_pred.reshape((y_pred.shape[1],))
            label_tensor = label_tensor.reshape((label_tensor.shape[1],))

            y_pred_val_all = np.append(y_pred_val_all, y_pred)
            label_val_all = np.append(label_val_all, label_tensor)

    y_true = K.variable(label_val_all)
    y_pred = K.variable(y_pred_val_all)

    loss = K.eval(binary_crossentropy(y_true, y_pred))

    return loss 
Example 37
Project: musical-onset-efficient   Author: ronggong   File: jingju_crnn_basecode.py    GNU Affero General Public License v3.0
def loss_cal(fns, data_path, scaler, model, len_seq):
    """
    Calculate loss
    :param fns:
    :param data_path:
    :param scaler:
    :param model:
    :return:
    """
    y_pred_val_all = np.array([], dtype='float32')
    label_val_all = np.array([], dtype='int')

    for fn in fns:

        mfcc_line, label, sample_weights = featureLabelSampleWeightsLoad(data_path,
                                                                         fn,
                                                                         scaler)

        # pad sequence
        mfcc_line_pad, label_pad, sample_weights_pad, len_padded = \
            featureLabelSampleWeightsPad(mfcc_line, label, sample_weights, len_seq)

    iter_time = len(mfcc_line_pad) // len_seq  # integer division, so range() receives an int
        for ii_iter in range(iter_time):

            # create tensor from the padded line
            mfcc_line_tensor, label_tensor, _ = \
                createInputTensor(mfcc_line_pad, label_pad, sample_weights_pad, len_seq, ii_iter)

            y_pred = model.predict_on_batch(mfcc_line_tensor)

            # remove the padded samples
            if ii_iter == iter_time - 1 and len_padded > 0:
                y_pred = y_pred[:, :len_seq - len_padded, :]
                label_tensor = label_tensor[:, :len_seq - len_padded, :]

            # reduce the label dimension
            y_pred = y_pred.reshape((y_pred.shape[1],))
            label_tensor = label_tensor.reshape((label_tensor.shape[1],))

            y_pred_val_all = np.append(y_pred_val_all, y_pred)
            label_val_all = np.append(label_val_all, label_tensor)

    y_true = K.variable(label_val_all)
    y_pred = K.variable(y_pred_val_all)

    loss = K.eval(binary_crossentropy(y_true, y_pred))

    return loss 
Example 38
Project: Variational-AutoEncoder-For-Novelty-Detection   Author: LordAlucard90   File: model.py    GNU General Public License v3.0
def _get_standard(self):
        input_img = Input(shape=(28, 28, 1))
        encoder = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)

        encoder_branch_left = MaxPooling2D((2, 2), padding='same')(encoder)
        encoder_branch_left = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder_branch_left)
        encoder_branch_left = MaxPooling2D((2, 2), padding='same')(encoder_branch_left)
        encoder_branch_left = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder_branch_left)
        encoder_branch_left = MaxPooling2D((2, 2), padding='same')(encoder_branch_left)

        encoder_branch_right = AveragePooling2D((2, 2), padding='same')(encoder)
        encoder_branch_right = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder_branch_right)
        encoder_branch_right = AveragePooling2D((2, 2), padding='same')(encoder_branch_right)
        encoder_branch_right = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder_branch_right)
        encoder_branch_right = AveragePooling2D((2, 2), padding='same')(encoder_branch_right)

        encoder_out = Flatten()(Concatenate()([encoder_branch_left, encoder_branch_right]))
        encoder_out = Dense(128, activation='relu')(encoder_out)

        if self.vae:
            mean = Dense(self.hidden, name='mean')(encoder_out)
            log_var = Dense(self.hidden, name='log_var')(encoder_out)
            mirror = Lambda(self._sampling)([mean, log_var])
        else:
            mirror = Dense(self.hidden, name='log_var')(encoder_out)

        decoder = Dense(128, activation='relu')(mirror)
        decoder = Dense(16 * 4 * 4, activation='relu')(decoder)
        decoder = Reshape((4, 4, 16))(decoder)

        decoder_branch_left = Conv2D(16, (3, 3), activation='relu', padding='same')(decoder)
        decoder_branch_left = UpSampling2D((2, 2))(decoder_branch_left)
        decoder_branch_left = Conv2D(16, (3, 3), activation='relu', padding='same')(decoder_branch_left)
        decoder_branch_left = UpSampling2D((2, 2))(decoder_branch_left)
        decoder_branch_left = Conv2D(16, (3, 3), activation='relu')(decoder_branch_left)
        decoder_branch_left = UpSampling2D((2, 2))(decoder_branch_left)
        decoder_branch_left = Conv2D(16, (3, 3), activation='relu', padding='same')(decoder_branch_left)

        decoder_branch_right = Conv2DTranspose(16, (3, 3), activation='relu')(decoder)
        decoder_branch_right = UpSampling2D((2, 2))(decoder_branch_right)
        decoder_branch_right = Conv2DTranspose(16, (3, 3), activation='relu')(decoder_branch_right)
        decoder_branch_right = UpSampling2D((2, 2))(decoder_branch_right)
        decoder_branch_right = Conv2DTranspose(16, (3, 3), activation='relu', padding='same')(decoder_branch_right)

        out = Concatenate()([decoder_branch_left, decoder_branch_right])
        out = Conv2D(16, (3, 3), activation='relu', padding='same')(out)
        out_img = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(out)

        self.model = Model(input_img, out_img)

        if self.vae:
            def my_loss(y_true, y_pred):
                xent = 28 * 28 * binary_crossentropy(K.flatten(y_true), K.flatten(y_pred))
                kl = - 0.5 * K.sum(1 + log_var - K.square(mean) - K.exp(log_var), axis=-1)
                return K.mean(xent + kl)
        else:
            def my_loss(y_true, y_pred):
                return 28 * 28 * binary_crossentropy(K.flatten(y_true), K.flatten(y_pred))

        self.model.compile(optimizer='rmsprop', loss=my_loss) 
Example 39
Project: medical_image_segmentation   Author: CVxTz   File: baseline_aug.py    MIT License
def get_unet(do=0, activation=ReLU):
    inputs = Input((None, None, 3))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(inputs)))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv1)))
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(pool1)))
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv2)))
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(pool2)))
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv3)))
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(pool3)))
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv4)))
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(pool4)))
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(conv5)))

    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(up6)))
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv6)))

    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(up7)))
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv7)))

    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(up8)))
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv8)))

    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(up9)))
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv9)))

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])

    model.compile(optimizer=Adam(lr=1e-3), loss=losses.binary_crossentropy, metrics=['accuracy'])


    return model 
Example 40
Project: medical_image_segmentation   Author: CVxTz   File: baseline.py    MIT License
def get_unet(do=0, activation=ReLU):
    inputs = Input((None, None, 3))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(inputs)))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv1)))
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(pool1)))
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv2)))
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(pool2)))
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv3)))
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(pool3)))
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv4)))
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(pool4)))
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(conv5)))

    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(up6)))
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv6)))

    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(up7)))
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv7)))

    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(up8)))
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv8)))

    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(up9)))
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv9)))

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])

    model.compile(optimizer=Adam(lr=1e-3), loss=losses.binary_crossentropy, metrics=['accuracy'])


    return model 
Example 41
Project: Python-Deep-Learning-Second-Edition   Author: PacktPublishing   File: chapter_06_001.py    MIT License
def build_vae(intermediate_dim=512, latent_dim=2):
    """
    Build VAE
    :param intermediate_dim: size of hidden layers of the encoder/decoder
    :param latent_dim: latent space size
    :returns tuple: the encoder, the decoder, and the full vae
    """

    # encoder first
    inputs = Input(shape=(image_size,), name='encoder_input')
    x = Dense(intermediate_dim, activation='relu')(inputs)

    # latent mean and variance
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)

    # reparametrization trick for random sampling
    # Note the use of the Lambda layer
    # At runtime, it will call the sampling function
    z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

    # full encoder model
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    encoder.summary()

    # decoder
    latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
    x = Dense(intermediate_dim, activation='relu')(latent_inputs)
    outputs = Dense(image_size, activation='sigmoid')(x)

    # full decoder model
    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()

    # VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name='vae')

    # Loss function
    # we start with the reconstruction loss
    reconstruction_loss = binary_crossentropy(inputs, outputs) * image_size

    # next is the KL divergence
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5

    # we combine them in a total loss
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)

    return encoder, decoder, vae 
Example 42
Project: ae-review-resources   Author: fdavidcl   File: mnist.py    Mozilla Public License 2.0
def train(self, optimizer = "rmsprop", loss = losses.binary_crossentropy, epochs = 50):
        # Here we use binary crossentropy as the loss function,
        # since the output of our model lies in the interval [0,1]
        # and our data is normalized.
        # Otherwise we could use 'mean_squared_error'.
        if self.autoencoder.robust:
            loss = correntropy_loss()
            
        if self.autoencoder.contractive:
            loss = contractive_loss(self.autoencoder.model, rec_err = loss)

        self.autoencoder.model.compile(optimizer = optimizer,
                                       loss = loss)

        # train
        history = LossHistory()
        if self.autoencoder.denoising:
            for ep in range(epochs):
                noisy_train = noise_input(self.x_train)
                self.autoencoder.model.fit(noisy_train, self.x_train,
                                           epochs = 1,
                                           batch_size = 256,
                                           shuffle = True,
                                           callbacks=[history],
                                           validation_data = (self.x_test, self.x_test))
        else:
            self.autoencoder.model.fit(self.x_train, self.x_train,
                                       epochs = epochs,
                                       batch_size = 256,
                                       shuffle = True,
                                       callbacks=[history],
                                       validation_data = (self.x_test, self.x_test))

        self.name = "{}-{}".format(
            optimizer,
            "mse" if loss == losses.mean_squared_error else ("xent" if loss == losses.binary_crossentropy or self.autoencoder.contractive else "corr")
        )

        with open("{}-{}.csv".format(self.autoencoder.name, self.name), "w") as out_file:
            out_file.write(",".join(("{}".format(x) for x in history.losses)))

        return self