Python keras.objectives.binary_crossentropy() Examples

The following are 10 code examples of keras.objectives.binary_crossentropy(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out the other available functions and classes of the keras.objectives module.
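Before the examples, here is a minimal, self-contained sketch of calling the function directly on backend tensors. It assumes an older Keras release (1.x or early 2.x) where the loss still lives in keras.objectives; in later releases it moved to keras.losses. The sample values are illustrative.

import numpy as np
from keras import backend as K
from keras import objectives

y_true = K.variable(np.array([[1., 0., 1.]]))
y_pred = K.variable(np.array([[0.9, 0.1, 0.8]]))

# binary_crossentropy averages over the last axis, yielding one value per sample
loss = objectives.binary_crossentropy(y_true, y_pred)
print(K.eval(loss))  # roughly [0.1446]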
Example #1
Source File: model.py    From V-GAN with MIT License
def discriminator_dummy(img_size, n_filters, init_lr, name='d'):    # naive unet without GAN
    # set image specifics
    img_ch=3 # image channels
    out_ch=1 # output channel
    img_height, img_width = img_size[0], img_size[1]

    inputs = Input((img_height, img_width, img_ch + out_ch))

    d = Model(inputs, inputs, name=name)

    def d_loss(y_true, y_pred):
        L = objectives.binary_crossentropy(K.batch_flatten(y_true),
                                           K.batch_flatten(y_pred))
#         L = objectives.mean_squared_error(K.batch_flatten(y_true),
#                                           K.batch_flatten(y_pred))
        return L
    
    d.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=d_loss, metrics=['accuracy'])
    
    return d, d.layers[-1].output_shape[1:] 
Example #2
Source File: model.py    From keras-molecules with MIT License
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std = epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var])) 
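Note that _buildEncoder returns the loss closure alongside the sampled latent tensor, so a caller can pass the closure straight to compile() like any built-in loss. A hedged sketch of that wiring (the autoencoder model and variable names here are illustrative, not taken from the file above):

# Illustrative wiring only; `autoencoder` is a hypothetical Model wrapping
# the encoder and decoder built elsewhere in the class.
vae_loss, z = self._buildEncoder(x, latent_rep_size, max_length)
# ... build the decoder from z, wrap everything in a Model ...
autoencoder.compile(optimizer='Adam', loss=vae_loss)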
Example #3
Source File: model.py    From deepchem with MIT License
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
      z_mean_, z_log_var_ = args
      batch_size = K.shape(z_mean_)[0]
      epsilon = K.random_normal(
          shape=(batch_size, latent_rep_size), mean=0., std=epsilon_std)
      return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
      x = K.flatten(x)
      x_decoded_mean = K.flatten(x_decoded_mean)
      xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
      kl_loss = -0.5 * K.mean(
          1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
      return xent_loss + kl_loss

    return (vae_loss, Lambda(
        sampling, output_shape=(latent_rep_size,),
        name='lambda')([z_mean, z_log_var])) 
Example #4
Source File: conv_vae.py    From Projects with MIT License
def _vae_loss(self, input, output):
        '''
        loss function for variational autoencoder
        '''
        input_flat = K.flatten(input)
        output_flat = K.flatten(output)
        xent_loss = self.image_size[0] * self.image_size[1] \
                    * objectives.binary_crossentropy(input_flat, output_flat)
        kl_loss = - 0.5 * K.mean(1 + self.z_log_var - K.square(self.z_mean) 
                  - K.exp(self.z_log_var), axis=-1)
        return xent_loss + kl_loss 
Example #5
Source File: model.py    From V-GAN with MIT License
def discriminator_pixel(img_size, n_filters, init_lr, name='d'):
    """
    discriminator network (pixel GAN)
    """
    
    # set image specifics
    k=3 # kernel size
    img_ch=3 # image channels
    out_ch=1 # output channel
    img_height, img_width = img_size[0], img_size[1]
    
    inputs = Input((img_height, img_width, img_ch + out_ch))

    conv1 = Conv2D(n_filters, kernel_size=(k, k), padding="same")(inputs) 
    conv1 = LeakyReLU(0.2)(conv1)
    
    conv2 = Conv2D(2*n_filters, kernel_size=(k, k), padding="same")(conv1) 
    conv2 = LeakyReLU(0.2)(conv2)
    
    conv3 = Conv2D(4*n_filters, kernel_size=(k, k), padding="same")(conv2) 
    conv3 = LeakyReLU(0.2)(conv3)

    conv4 = Conv2D(out_ch, kernel_size=(1, 1), padding="same")(conv3)
    outputs = Activation('sigmoid')(conv4)

    d = Model(inputs, outputs, name=name)

    def d_loss(y_true, y_pred):
        L = objectives.binary_crossentropy(K.batch_flatten(y_true),
                                           K.batch_flatten(y_pred))
        return L

    d.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=d_loss, metrics=['accuracy'])
    
    return d, d.layers[-1].output_shape[1:] 
Example #6
Source File: model.py    From V-GAN with MIT License
def GAN(g, d, img_size, n_filters_g, n_filters_d, alpha_recip, init_lr, name='gan'):
    """
    GAN (that binds generator and discriminator)
    """
    img_h, img_w = img_size[0], img_size[1]

    img_ch=3
    seg_ch=1
    
    fundus = Input((img_h, img_w, img_ch))
    vessel = Input((img_h, img_w, seg_ch))
    
    fake_vessel=g(fundus)
    fake_pair=Concatenate(axis=3)([fundus, fake_vessel])
    
    gan=Model([fundus, vessel], d(fake_pair), name=name)

    def gan_loss(y_true, y_pred):
        y_true_flat = K.batch_flatten(y_true)
        y_pred_flat = K.batch_flatten(y_pred)

        L_adv = objectives.binary_crossentropy(y_true_flat, y_pred_flat)
#         L_adv = objectives.mean_squared_error(y_true_flat, y_pred_flat)

        vessel_flat = K.batch_flatten(vessel)
        fake_vessel_flat = K.batch_flatten(fake_vessel)
        L_seg = objectives.binary_crossentropy(vessel_flat, fake_vessel_flat)
#         L_seg = objectives.mean_absolute_error(vessel_flat, fake_vessel_flat)

        return alpha_recip*L_adv + L_seg
    
    
    gan.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=gan_loss, metrics=['accuracy'])
        
    return gan 
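A hedged usage sketch of this builder (the argument values below are illustrative assumptions, not V-GAN defaults; g and d would come from the generator and discriminator builders shown elsewhere in this file):

# Illustrative call only; hyperparameter values are assumptions.
gan = GAN(g, d, img_size=(640, 640), n_filters_g=32, n_filters_d=32,
          alpha_recip=0.1, init_lr=2e-4)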
Example #7
Source File: mnist_vae.py    From keras-examples with MIT License
def vae_loss(x, x_decoded_mean):
    xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss 
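This closure depends on the module-level names original_dim, z_mean, and z_log_var. A hedged sketch of the standard MNIST VAE setup those names usually come from (the layer sizes here are illustrative assumptions, not taken from the file above):

from keras.layers import Input, Dense

original_dim = 784   # 28 x 28 MNIST digits, flattened
latent_dim = 2       # illustrative choice

x = Input(shape=(original_dim,))
h = Dense(256, activation='relu')(x)   # intermediate width is an assumption
z_mean = Dense(latent_dim)(h)          # captured by vae_loss above
z_log_var = Dense(latent_dim)(h)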
Example #8
Source File: variational_autoencoder.py    From nn_playground with MIT License
def vae_loss(x, x_hat):
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    xent_loss = n * objectives.binary_crossentropy(x, x_hat)
    mse_loss = n * objectives.mse(x, x_hat) 
    if use_loss == 'xent':
        return xent_loss + kl_loss
    elif use_loss == 'mse':
        return mse_loss + kl_loss
    else:
        raise Exception('Unknown loss!')
Example #9
Source File: variational_autoencoder.py    From keras-autoencoder with GNU General Public License v3.0
def vae_loss(x, x_decoded_mean):
        xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = - 0.5 * K.mean(1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
        return xent_loss + kl_loss 
Example #10
Source File: model.py    From V-GAN with MIT License
def discriminator_patch1(img_size, n_filters, init_lr, name='d'):
    """
    discriminator network (patch GAN)
    stride 2 conv X 1
    max pooling X 2
    """
    
    # set image specifics
    k=3 # kernel size
    s=2 # stride
    img_ch=3 # image channels
    out_ch=1 # output channel
    img_height, img_width = img_size[0], img_size[1]
    padding = 'same'  # alternatively 'valid'

    inputs = Input((img_height, img_width, img_ch + out_ch))

    conv1 = Conv2D(n_filters, kernel_size=(k, k), strides=(s,s), padding=padding)(inputs)
    conv1 = BatchNormalization(scale=False, axis=3)(conv1)
    conv1 = Activation('relu')(conv1)    
    conv1 = Conv2D(n_filters, kernel_size=(k, k), padding=padding)(conv1) 
    conv1 = BatchNormalization(scale=False, axis=3)(conv1)
    conv1 = Activation('relu')(conv1)    
    pool1 = MaxPooling2D(pool_size=(s, s))(conv1)
    
    conv2 = Conv2D(2*n_filters, kernel_size=(k, k), padding=padding)(pool1) 
    conv2 = BatchNormalization(scale=False, axis=3)(conv2)
    conv2 = Activation('relu')(conv2)    
    conv2 = Conv2D(2*n_filters, kernel_size=(k, k), padding=padding)(conv2) 
    conv2 = BatchNormalization(scale=False, axis=3)(conv2)
    conv2 = Activation('relu')(conv2)    
    pool2 = MaxPooling2D(pool_size=(s, s))(conv2)
    
    conv3 = Conv2D(4*n_filters, kernel_size=(k, k), padding=padding)(pool2) 
    conv3 = BatchNormalization(scale=False, axis=3)(conv3)
    conv3 = Activation('relu')(conv3)    
    conv3 = Conv2D(4*n_filters, kernel_size=(k, k), padding=padding)(conv3) 
    conv3 = BatchNormalization(scale=False, axis=3)(conv3)
    conv3 = Activation('relu')(conv3)
    
    outputs=Conv2D(out_ch, kernel_size=(1, 1), padding=padding, activation='sigmoid')(conv3)

    d = Model(inputs, outputs, name=name)

    def d_loss(y_true, y_pred):
        L = objectives.binary_crossentropy(K.batch_flatten(y_true),
                                           K.batch_flatten(y_pred))
#         L = objectives.mean_squared_error(K.batch_flatten(y_true),
#                                           K.batch_flatten(y_pred))
        return L

    d.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=d_loss, metrics=['accuracy'])
    
    return d, d.layers[-1].output_shape[1:]