Python keras.losses.binary_crossentropy() Examples

The following are 30 code examples showing how to use keras.losses.binary_crossentropy(). They are extracted from open-source projects; the originating project, author, source file, and license are noted with each example where available.

You may also want to check out all available functions and classes of the module keras.losses.
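
As a quick orientation before the project examples: the function can be passed directly to model.compile or called on tensors. The short sketch below is a minimal, self-contained illustration (assuming a TensorFlow-backed Keras install); it is not taken from any of the projects listed here.

import numpy as np
from keras import backend as K
from keras.losses import binary_crossentropy
from keras.models import Sequential
from keras.layers import Dense

# Called directly: returns one value per sample (mean crossentropy over the last axis).
y_true = K.constant(np.array([[1.0, 0.0], [0.0, 1.0]]))
y_pred = K.constant(np.array([[0.9, 0.1], [0.2, 0.8]]))
print(K.eval(binary_crossentropy(y_true, y_pred)))  # shape (2,)

# Passed as a compile-time loss for a sigmoid output.
model = Sequential([Dense(8, activation='relu', input_shape=(4,)),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss=binary_crossentropy, metrics=['accuracy'])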

Example 1
Project: talos   Author: autonomio   File: params.py    License: MIT License
def breast_cancer():

    from keras.optimizers import Adam, Nadam, RMSprop
    from keras.losses import logcosh, binary_crossentropy
    from keras.activations import relu, elu, sigmoid

    # then we can go ahead and set the parameter space
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16, 32, 64],
         'hidden_layers': [0, 1, 2],
         'batch_size': (2, 30, 10),
         'epochs': [50, 100, 150],
         'dropout': (0, 0.5, 5),
         'shapes': ['brick', 'triangle', 'funnel'],
         'optimizer': [Adam, Nadam, RMSprop],
         'losses': [logcosh, binary_crossentropy],
         'activation': [relu, elu],
         'last_activation': [sigmoid]}

    return p 
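
How such a parameter dictionary is consumed is not part of this file. As a hedged illustration only, a hypothetical Talos-style model builder would read the 'losses' entry (a loss function object such as binary_crossentropy) and hand it straight to compile:

from keras.models import Sequential
from keras.layers import Dense

def breast_cancer_model(x_train, y_train, x_val, y_val, params):
    # Hypothetical builder, not part of params.py; shows how the entries above are used.
    model = Sequential()
    model.add(Dense(params['first_neuron'], input_dim=x_train.shape[1],
                    activation=params['activation']))
    model.add(Dense(1, activation=params['last_activation']))
    # 'losses' holds a Keras loss function object, accepted directly by compile().
    model.compile(optimizer=params['optimizer'](), loss=params['losses'],
                  metrics=['acc'])
    history = model.fit(x_train, y_train, batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        validation_data=(x_val, y_val), verbose=0)
    return history, model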
Example 2
Project: cs-ranking   Author: kiudee   File: fate_linear.py    License: Apache License 2.0
def __init__(
        self,
        n_hidden_set_units=32,
        learning_rate=1e-3,
        batch_size=256,
        loss_function=binary_crossentropy,
        epochs_drop=300,
        drop=0.1,
        random_state=None,
        **kwargs,
    ):
        self.n_hidden_set_units = n_hidden_set_units
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.random_state = random_state
        self.loss_function = loss_function
        self.epochs_drop = epochs_drop
        self.drop = drop
        self.current_lr = None
        self.weight1 = None
        self.bias1 = None
        self.weight2 = None
        self.bias2 = None
        self.optimizer = None 
Example 3
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    License: MIT License
def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """ Implements nline Bootstrapping crossentropy loss, to train only on hard pixels,
        see  https://arxiv.org/abs/1605.06885 Bridging Category-level and Instance-level Semantic Image Segmentation
        The implementation is a bit different as we use binary crossentropy instead of softmax
        SUPPORTS ONLY MINIBATCH WITH 1 ELEMENT!
    # Arguments
        y_true: A tensor with labels.

        y_pred: A tensor with predicted probabilities.

        pixels: number of hard pixels to keep

        threshold: confidence to use, i.e. if threshold is 0.7, y_true=1 and prediction=0.65, then that pixel is considered hard
    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    difference = K.abs(y_true - y_pred)

    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))

    return K.mean(K.binary_crossentropy(y_true, y_pred)) 
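
Since this loss flattens the entire batch, it is only meaningful with a minibatch of one element. A hedged usage sketch (the toy model below is for illustration only and is not part of losses.py):

from keras.models import Sequential
from keras.layers import Conv2D

def hard_pixel_loss(y_true, y_pred):
    # fixes the custom arguments at compile time
    return online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5)

# Toy sigmoid-output segmentation head, standing in for the real network.
model = Sequential([Conv2D(1, (1, 1), activation='sigmoid', input_shape=(None, None, 3))])
model.compile(optimizer='adam', loss=hard_pixel_loss)
# Training must use batch_size=1, as stated in the docstring.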
Example 4
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    License: MIT License
def bce_border(y_true, y_pred):
    border = get_border_mask((21, 21), y_true)

    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.tf.gather(y_true_f, K.tf.where(border > 0.5))
    y_pred_f = K.tf.gather(y_pred_f, K.tf.where(border > 0.5))

    return binary_crossentropy(y_true_f, y_pred_f) 
Example 5
Project: kaggle-carvana-2017   Author: killthekitten   File: losses.py    License: MIT License
def make_loss(loss_name):
    if loss_name == 'crossentropy':
        return K.binary_crossentropy
    elif loss_name == 'crossentropy_boot':
        def loss(y, p):
            return bootstrapped_crossentropy(y, p, 'hard', 0.9)
        return loss
    elif loss_name == 'dice':
        return dice_coef_loss
    elif loss_name == 'bce_dice':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='soft', alpha=1)

        return loss
    elif loss_name == 'boot_soft':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='soft', alpha=0.95)

        return loss
    elif loss_name == 'boot_hard':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='hard', alpha=0.95)

        return loss
    elif loss_name == 'online_bootstrapping':
        def loss(y, p):
            return online_bootstrapping(y, p, pixels=512 * 64, threshold=0.7)

        return loss
    elif loss_name == 'dice_coef_loss_border':
        return dice_coef_loss_border
    elif loss_name == 'bce_dice_loss_border':
        return bce_dice_loss_border
    else:
        raise ValueError("Unknown loss.")
Example 6
Project: keras-pandas   Author: bjherger   File: Boolean.py    License: MIT License
def output_suggested_loss(self):
        self._check_output_support()
        suggested_loss = losses.binary_crossentropy
        return suggested_loss 
Example 7
Project: image-segmentation   Author: nearthlab   File: semantic_model_wrapper.py    License: MIT License
def bce_loss_graph(gt, pr):
    return K.mean(binary_crossentropy(gt, pr)) 
Example 8
Project: image-segmentation   Author: nearthlab   File: semantic_model_wrapper.py    License: MIT License
def bce_loss_graph(gt, pr):
    return K.mean(binary_crossentropy(gt, pr))

############################################################
#  Semantic Segmentation Model Class
############################################################ 
Example 9
def get_model():
    nclass = 1
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt, loss=losses.binary_crossentropy, metrics=['acc'])
    model.summary()
    return model 
Example 10
Project: ECG_Heartbeat_Classification   Author: CVxTz   File: baseline_ptbdb.py    License: MIT License
def get_model():
    nclass = 1
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt, loss=losses.binary_crossentropy, metrics=['acc'])
    model.summary()
    return model 
Example 11
def get_model():
    nclass = 1
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid", trainable=False)(inp)
    img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid", trainable=False)(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt, loss=losses.binary_crossentropy, metrics=['acc'])
    model.summary()
    return model 
Example 12
Project: costar_plan   Author: jhu-lcsr   File: test_grasp_loss.py    License: Apache License 2.0
def test_single_pixel_measurement_index(self):
        with self.test_session() as sess:

            def test_different_input(sess, test_shape_height, test_shape_width):
                random_x_true = np.random.randint(0, test_shape_width)
                random_y_true = np.random.randint(0, test_shape_height)
                random_x_false = np.random.randint(0, test_shape_width)
                random_y_false = np.random.randint(0, test_shape_height)
                test_true_np = np.array([[1, random_y_true, random_x_true],
                                        [0, random_y_false, random_x_false]],
                                        dtype=np.float32)
                test_pred_np = np.zeros((2, test_shape_height, test_shape_width, 1), dtype=np.float32)
                test_pred_np[0, random_y_true, random_x_true, 0] = 1.0
                test_pred_np[0, random_y_false, random_x_false, 0] = 0.0
                test_pred_tf = tf.convert_to_tensor(test_pred_np, tf.float32)
                test_true_tf = tf.convert_to_tensor(test_true_np, tf.float32)

                measure_tf_true = grasp_loss.segmentation_single_pixel_binary_crossentropy(test_true_tf, test_pred_np)
                measure_tf_true = sess.run(measure_tf_true)

                direct_call_result = binary_crossentropy(test_true_tf[:, :1], tf.constant([[1.0], [0.0]], tf.float32))
                direct_call_result = sess.run(direct_call_result)

                assert np.allclose(measure_tf_true, np.array([0.0], dtype=np.float32), atol=1e-06)
                assert np.allclose(direct_call_result, measure_tf_true)

            test_different_input(sess, 30, 20)
            test_different_input(sess, 40, 50)
            test_different_input(sess, 25, 30)
            test_different_input(sess, 35, 35) 
Example 13
Project: costar_plan   Author: jhu-lcsr   File: multi_gan_model.py    License: Apache License 2.0
def _makeModel(self, features, arm, gripper, arm_cmd, gripper_cmd, label,
            example, *args, **kwargs):

        img_shape = features.shape[1:]
        arm_size = arm.shape[1]
        if len(gripper.shape) > 1:
            gripper_size = gripper.shape[1]
        else:
            gripper_size = 1


        enc_ins, enc = GetEncoder(img_shape,
                arm_size,
                gripper_size,
                self.generator_dim,
                self.dropout_rate,
                self.img_num_filters,
                pre_tiling_layers=0,
                post_tiling_layers=2,
                discriminator=True)
        dec_ins, dec = GetDecoder(self.generator_dim,
                            img_shape,
                            arm_size,
                            gripper_size,
                            dropout_rate=self.dropout_rate,
                            filters=self.img_num_filters,)

        self.make([dec_ins, enc_ins], [dec, enc], loss="binary_crossentropy")

        self.discriminator.trainable = False
        self.generator.trainable = True
        self.adversarial.summary()
        self.discriminator.summary() 
Example 14
Project: talos   Author: autonomio   File: params.py    License: MIT License
def titanic():

    # here use a standard 2d dictionary for inputting the param boundaries
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16],
         'batch_size': [20, 30, 40],
         'dropout': (0, 0.5, 5),
         'optimizer': ['Adam', 'Nadam'],
         'losses': ['logcosh', 'binary_crossentropy'],
         'activation': ['relu', 'elu'],
         'last_activation': ['sigmoid']}

    return p 
Example 15
Project: Kaggle-Carvana-Image-Masking-Challenge   Author: petrosgk   File: losses.py    License: MIT License
def bce_dice_loss(y_true, y_pred):
    loss = binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
    return loss 
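
The dice_loss helper referenced above is defined elsewhere in the project and not shown in this excerpt. A common soft-Dice formulation, given here as an assumption rather than the project's exact implementation, looks like this:

from keras import backend as K

def dice_loss(y_true, y_pred, smooth=1.0):
    # soft Dice: 1 - 2*|X ∩ Y| / (|X| + |Y|), smoothed to handle empty masks
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return 1.0 - (2.0 * intersection + smooth) / (
        K.sum(y_true_f) + K.sum(y_pred_f) + smooth)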
Example 16
Project: hyperparameter_hunter   Author: HunterMcGushion   File: test_keras_helper.py    License: MIT License
def dummy_0_build_fn(input_shape=(30,)):
    model = Sequential(
        [
            Dense(50, kernel_initializer="uniform", input_shape=input_shape, activation="relu"),
            Dropout(0.5),
            Dense(1, kernel_initializer="uniform", activation="sigmoid"),
        ]
    )
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model 
Example 17
Project: deephar   Author: dluvizon   File: losses.py    License: MIT License
def elasticnet_bincross_loss_on_valid_joints(y_true, y_pred):
    idx = K.cast(K.greater(y_true, 0.), 'float32')
    num_joints = K.clip(K.sum(idx, axis=(-1, -2)), 1, None)

    l1 = K.abs(y_pred - y_true)
    l2 = K.square(y_pred - y_true)
    bc = 0.01*K.binary_crossentropy(y_true, y_pred)
    dummy = 0. * y_pred

    return K.sum(tf.where(K.cast(idx, 'bool'), l1 + l2 + bc, dummy),
            axis=(-1, -2)) / num_joints 
Example 18
Project: deephar   Author: dluvizon   File: losses.py    License: MIT License
def pose_regression_loss(pose_loss, visibility_weight):

    def _pose_regression_loss(y_true, y_pred):
        video_clip = K.ndim(y_true) == 4
        if video_clip:
            """The model was time-distributed, so there is one additional
            dimension.
            """
            p_true = y_true[:, :, :, 0:-1]
            p_pred = y_pred[:, :, :, 0:-1]
            v_true = y_true[:, :, :, -1]
            v_pred = y_pred[:, :, :, -1]
        else:
            p_true = y_true[:, :, 0:-1]
            p_pred = y_pred[:, :, 0:-1]
            v_true = y_true[:, :, -1]
            v_pred = y_pred[:, :, -1]

        if pose_loss == 'l1l2':
            ploss = elasticnet_loss_on_valid_joints(p_true, p_pred)
        elif pose_loss == 'l1':
            ploss = l1_loss_on_valid_joints(p_true, p_pred)
        elif pose_loss == 'l2':
            ploss = l2_loss_on_valid_joints(p_true, p_pred)
        elif pose_loss == 'l1l2bincross':
            ploss = elasticnet_bincross_loss_on_valid_joints(p_true, p_pred)
        else:
            raise Exception('Invalid pose_loss option ({})'.format(pose_loss))

        vloss = binary_crossentropy(v_true, v_pred)

        if video_clip:
            """If time-distributed, average the error on video frames."""
            vloss = K.mean(vloss, axis=-1)
            ploss = K.mean(ploss, axis=-1)

        return ploss + visibility_weight*vloss

    return _pose_regression_loss 
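
pose_regression_loss is a loss factory: the returned closure is what gets passed to compile. A hedged usage sketch (the compile call is shown as a comment because the actual pose network is defined elsewhere in the project):

# Build the loss once, with the desired pose term and visibility weighting.
loss_fn = pose_regression_loss(pose_loss='l1l2', visibility_weight=0.01)
# then, on the pose model defined elsewhere:
# model.compile(optimizer='adam', loss=loss_fn)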
Example 19
Project: ImageEnhancer   Author: CongBao   File: enhancer.py    License: MIT License
def train_model(self):
        """ train the model """
        callbacks = []
        callbacks.append(TensorBoard(self.graph_path))
        callbacks.append(LearningRateScheduler(lambda e: self.learning_rate * 0.999 ** (e / 20)))
        callbacks.append(ModelCheckpoint(self.checkpoint_path + 'checkpoint.best.hdf5', save_best_only=True))
        if not self.best_cp:
            callbacks.append(ModelCheckpoint(self.checkpoint_path + 'checkpoint.{epoch:02d}-{val_loss:.2f}.hdf5'))
        callbacks.append(LambdaCallback(on_epoch_end=lambda epoch, logs: self.save_image('test.{e:02d}-{val_loss:.2f}'.format(e=epoch, **logs))))
        self.model.compile(Adam(lr=self.learning_rate), binary_crossentropy)
        self.model.fit(self.corrupted['train'], self.source['train'],
                       batch_size=self.batch_size,
                       epochs=self.epoch,
                       callbacks=callbacks,
                       validation_data=(self.corrupted['valid'], self.source['valid'])) 
Example 20
Project: ddan   Author: erlendd   File: adabn.py    License: MIT License
def _build_model(self, arch, activations, nfeatures, droprate, noise, optimizer):

        self.layers = [Input(shape=(nfeatures,))]

        for i, nunits in enumerate(arch):

            if isinstance(nunits, int):
                self.layers += [Dense(nunits, activation='linear')(self.layers[-1])]

            elif nunits == 'noise':
                self.layers += [GaussianNoise(noise)(self.layers[-1])]

            elif nunits == 'bn':
                self.layers += [BatchNormalization()(self.layers[-1])]
            
            elif nunits == 'abn':
                self.layers += [AdaBN()(self.layers[-1])]

            elif nunits == 'drop':
                self.layers += [Dropout(droprate)(self.layers[-1])]

            elif nunits == 'act':
                if activations == 'prelu':
                    self.layers += [PReLU()(self.layers[-1])]
                elif activations == 'elu':
                    self.layers += [ELU()(self.layers[-1])]
                elif activations == 'leakyrelu':
                    self.layers += [LeakyReLU()(self.layers[-1])]
                else:
                    self.layers += [Activation(activations)(self.layers[-1])]

            else:
                print('Unrecognised layer {}, type: {}'.format(nunits, type(nunits)))

        self.layers += [Dense(1, activation='sigmoid')(self.layers[-1])]

        self.model = Model(self.layers[0], self.layers[-1])
        self.model.compile(loss='binary_crossentropy', optimizer=optimizer) 
Example 21
Project: pixel-decoder   Author: Geoyi   File: loss.py    License: MIT License
def dice_logloss(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) * 0.5 + dice_coef_loss(y_true, y_pred) * 0.5 
Example 22
Project: pixel-decoder   Author: Geoyi   File: loss.py    License: MIT License
def dice_logloss2(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) * 0.75 + dice_coef_loss(y_true, y_pred) * 0.25 
Example 23
Project: pixel-decoder   Author: Geoyi   File: loss.py    License: MIT License
def dice_logloss3(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) * 0.15 + dice_coef_loss(y_true, y_pred) * 0.85

#from https://www.kaggle.com/lyakaap/weighing-boundary-pixels-loss-script-by-keras2
# weight: weighted tensor(same shape with mask image) 
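
The trailing comment refers to a weighted boundary-pixels loss whose body is not included in this excerpt. A minimal sketch of such a weighted binary crossentropy, written as an assumption rather than the linked Kaggle script verbatim:

from keras import backend as K

def weighted_bce_loss(y_true, y_pred, weight):
    # weight: weighted tensor (same shape as the mask image)
    epsilon = K.epsilon()
    y_pred = K.clip(y_pred, epsilon, 1.0 - epsilon)
    logit = K.log(y_pred / (1.0 - y_pred))
    # numerically stable crossentropy on logits, scaled per pixel by weight
    loss = weight * (logit * (1.0 - y_true) +
                     K.log(1.0 + K.exp(-K.abs(logit))) +
                     K.maximum(-logit, 0.0))
    return K.sum(loss) / K.sum(weight)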
Example 24
Project: Python-Deep-Learning-SE   Author: ivan-vasilev   File: chapter_06_001.py    License: MIT License
def build_vae(intermediate_dim=512, latent_dim=2):
    """
    Build VAE
    :param intermediate_dim: size of hidden layers of the encoder/decoder
    :param latent_dim: latent space size
    :returns tuple: the encoder, the decoder, and the full vae
    """

    # encoder first
    inputs = Input(shape=(image_size,), name='encoder_input')
    x = Dense(intermediate_dim, activation='relu')(inputs)

    # latent mean and variance
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)

    # reparametrization trick for random sampling
    # Note the use of the Lambda layer
    # At runtime, it will call the sampling function
    z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

    # full encoder model
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    encoder.summary()

    # decoder
    latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
    x = Dense(intermediate_dim, activation='relu')(latent_inputs)
    outputs = Dense(image_size, activation='sigmoid')(x)

    # full decoder model
    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()

    # VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name='vae')

    # Loss function
    # we start with the reconstruction loss
    reconstruction_loss = binary_crossentropy(inputs, outputs) * image_size

    # next is the KL divergence
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5

    # we combine them in a total loss
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)

    return encoder, decoder, vae 
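
Because the VAE loss is attached with vae.add_loss, compile takes no loss argument and fit takes no targets. A hedged driver sketch, assuming the usual flattened-MNIST setup for image_size and the data (the surrounding chapter script is not shown here):

import numpy as np
from keras.datasets import mnist

(x_train, _), (x_test, _) = mnist.load_data()
image_size = x_train.shape[1] * x_train.shape[2]   # read as a global by build_vae
x_train = x_train.reshape(-1, image_size).astype('float32') / 255.0
x_test = x_test.reshape(-1, image_size).astype('float32') / 255.0

encoder, decoder, vae = build_vae(intermediate_dim=512, latent_dim=2)
vae.compile(optimizer='adam')                       # loss already attached via add_loss
vae.fit(x_train, epochs=50, batch_size=128, validation_data=(x_test, None))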
Example 25
Project: medical_image_segmentation   Author: CVxTz   File: baseline_aug.py    License: MIT License
def get_unet(do=0, activation=ReLU):
    inputs = Input((None, None, 3))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(inputs)))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv1)))
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(pool1)))
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv2)))
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(pool2)))
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv3)))
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(pool3)))
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv4)))
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(pool4)))
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(conv5)))

    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(up6)))
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv6)))

    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(up7)))
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv7)))

    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(up8)))
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv8)))

    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(up9)))
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv9)))

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])

    model.compile(optimizer=Adam(lr=1e-3), loss=losses.binary_crossentropy, metrics=['accuracy'])


    return model 
Example 26
Project: medical_image_segmentation   Author: CVxTz   File: baseline.py    License: MIT License
def get_unet(do=0, activation=ReLU):
    inputs = Input((None, None, 3))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(inputs)))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv1)))
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(pool1)))
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv2)))
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(pool2)))
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv3)))
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(pool3)))
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv4)))
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(pool4)))
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(conv5)))

    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(up6)))
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv6)))

    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(up7)))
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv7)))

    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(up8)))
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv8)))

    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(up9)))
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv9)))

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])

    model.compile(optimizer=Adam(lr=1e-3), loss=losses.binary_crossentropy, metrics=['accuracy'])


    return model 
Example 27
Project: Python-Deep-Learning-Second-Edition   Author: PacktPublishing   File: chapter_06_001.py    License: MIT License
def build_vae(intermediate_dim=512, latent_dim=2):
    """
    Build VAE
    :param intermediate_dim: size of hidden layers of the encoder/decoder
    :param latent_dim: latent space size
    :returns tuple: the encoder, the decoder, and the full vae
    """

    # encoder first
    inputs = Input(shape=(image_size,), name='encoder_input')
    x = Dense(intermediate_dim, activation='relu')(inputs)

    # latent mean and variance
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)

    # reparametrization trick for random sampling
    # Note the use of the Lambda layer
    # At runtime, it will call the sampling function
    z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

    # full encoder model
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    encoder.summary()

    # decoder
    latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
    x = Dense(intermediate_dim, activation='relu')(latent_inputs)
    outputs = Dense(image_size, activation='sigmoid')(x)

    # full decoder model
    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()

    # VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name='vae')

    # Loss function
    # we start with the reconstruction loss
    reconstruction_loss = binary_crossentropy(inputs, outputs) * image_size

    # next is the KL divergence
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5

    # we combine them in a total loss
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)

    return encoder, decoder, vae 
Example 28
Project: costar_plan   Author: jhu-lcsr   File: pretrain_image_gan.py    License: Apache License 2.0
def _makePredictor(self, features):
        '''
        Create model to predict possible manipulation goals.
        '''
        (images, arm, gripper) = features
        img_shape, image_size, arm_size, gripper_size = self._sizes(
                images,
                arm,
                gripper)

        img_in = Input(img_shape,name="predictor_img_in")
        test_in = Input(img_shape, name="descriminator_test_in")

        encoder = self._makeImageEncoder(img_shape, perm_drop=True)
        enc = encoder([img_in])
        decoder = self._makeImageDecoder(
                self.hidden_shape,
                self.skip_shape, False, perm_drop=True)

        if self.load_pretrained_weights:
            try:
                encoder.load_weights(self.makeName(
                    "pretrain_image_encoder",
                    "image_encoder"))
                decoder.load_weights(self.makeName(
                    "pretrain_image_encoder",
                    "image_decoder"))
            except Exception as e:
                print(">> Failed to load pretrained generator weights.")

        gen_out = decoder(enc)
        image_discriminator = self._makeImageDiscriminator(img_shape)
        self.discriminator = image_discriminator

        image_discriminator.trainable = False
        o1 = image_discriminator([img_in, gen_out])

        loss = wasserstein_loss if self.use_wasserstein else "binary_crossentropy"
        weights = [1., 1.] if self.use_wasserstein else [100., 1.]
        self.model = Model([img_in], [gen_out, o1])
        self.model.compile(
                loss=["mae", loss],
                loss_weights=weights,
                optimizer=self.getOptimizer())

        self.generator = Model([img_in], [gen_out])
        self.generator.compile(
                loss=["logcosh"],
                optimizer=self.getOptimizer())

        image_discriminator.summary()

        return self.model, self.model, None, [img_in], enc 
Example 29
Project: costar_plan   Author: jhu-lcsr   File: pretrain_image_gan.py    License: Apache License 2.0
def _makeImageDiscriminator(self, img_shape):
        '''
        create image-only encoder to extract keypoints from the scene.

        Params:
        -------
        img_shape: shape of the image to encode
        '''
        img = Input(img_shape,name="img_encoder_in")
        img0 = Input(img_shape,name="img0_encoder_in")
        ins = [img, img0]
        dr = self.dropout_rate

        if self.use_wasserstein:
            loss = wasserstein_loss
            activation = "linear"
        else:
            loss = "binary_crossentropy"
            activation = "sigmoid"

        # common arguments
        kwargs = { "dropout_rate" : dr,
                   "padding" : "same",
                   "lrelu" : True,
                   "bn" : False,
                   "perm_drop" : True,
                 }

        x  = AddConv2D(img,  64, [4,4], 1, **kwargs)
        x0 = AddConv2D(img0, 64, [4,4], 1, **kwargs)
        x  = Add()([x, x0])
        x  = AddConv2D(x,    64, [4,4], 2, **kwargs)
        x  = AddConv2D(x,   128, [4,4], 2, **kwargs)
        x  = AddConv2D(x,   256, [4,4], 2, **kwargs)

        if self.use_wasserstein:
            x = Flatten()(x)
            x = AddDense(x, 1, "linear", 0., output=True, bn=False, perm_drop=True)
        else:
            x = AddConv2D(x, 1, [1,1], 1, 0., "same", activation="sigmoid",
                bn=False, perm_drop=True)
            x = GlobalAveragePooling2D()(x)

        discrim = Model(ins, x, name="image_discriminator")
        self.lr *= 2.
        discrim.compile(loss=loss, loss_weights=[1.],
                optimizer=self.getOptimizer())
        self.lr *= 0.5
        self.image_discriminator = discrim
        return discrim 
Example 30
Project: costar_plan   Author: jhu-lcsr   File: pretrain_image_jigsaws_gan.py    License: Apache License 2.0
def _makePredictor(self, images):
        '''
        Create model to predict possible manipulation goals.
        '''
        img_shape = images.shape[1:]

        img_in = Input(img_shape,name="predictor_img_in")
        test_in = Input(img_shape, name="descriminator_test_in")

        encoder = MakeJigsawsImageEncoder(self, img_shape)
        enc = encoder([img_in])
        decoder = MakeJigsawsImageDecoder(
                self,
                self.hidden_shape,
                self.skip_shape, False)

        if self.load_pretrained_weights:
            try:
                encoder.load_weights(self.makeName(
                    "pretrain_image_encoder", "image_encoder"))
                decoder.load_weights(self.makeName(
                    "pretrain_image_encoder", "image_decoder"))
            except Exception as e:
                print(">>> could not load pretrained image weights")
                print(e)

        gen_out = decoder(enc)
        image_discriminator = self._makeImageDiscriminator(img_shape)
        self.discriminator = image_discriminator

        image_discriminator.trainable = False
        o1 = image_discriminator([img_in, gen_out])

        loss = wasserstein_loss if self.use_wasserstein else "binary_crossentropy"

        self.model = Model([img_in], [gen_out, o1])
        self.model.compile(
                loss=["mae", loss],
                loss_weights=[1., 1.],
                optimizer=self.getOptimizer())

        self.generator = Model([img_in], [gen_out])
        self.generator.compile(
                loss=["logcosh"],
                optimizer=self.getOptimizer())

        image_discriminator.summary()

        return self.model, self.model, None, [img_in], enc