Python keras.backend.random_normal() Examples

The following are 30 code examples showing how to use keras.backend.random_normal(). They are extracted from open source projects; where available, the project, author, file, and license are noted above each example.

You may also want to check out the other available functions and classes of the keras.backend module.
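
As a quick orientation, here is a minimal, self-contained sketch of the call itself (recent Keras versions use the stddev keyword; some older releases spelled it std):

from keras import backend as K

# Draw a 3x4 tensor of samples from N(mean=0.0, stddev=1.0).
x = K.random_normal(shape=(3, 4), mean=0.0, stddev=1.0)

# Evaluate the symbolic tensor to a NumPy array for inspection.
print(K.eval(x).shape)  # (3, 4)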

Example 1
Project: Keras-GAN   Author: eriklindernoren   File: aae.py    License: MIT License
def build_encoder(self):
        # Encoder

        img = Input(shape=self.img_shape)

        h = Flatten()(img)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        mu = Dense(self.latent_dim)(h)
        log_var = Dense(self.latent_dim)(h)
        latent_repr = merge([mu, log_var],
                mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
                output_shape=lambda p: p[0])

        return Model(img, latent_repr) 
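
The merge call with a mode lambda is Keras 1 API and was removed in Keras 2. A rough sketch of the same reparameterization step using a Keras 2 Lambda layer (assuming mu and log_var from the code above, and Lambda imported from keras.layers):

latent_repr = Lambda(
    lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
    output_shape=lambda shapes: shapes[0])([mu, log_var])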
Example 2
Project: Python-Deep-Learning-SE   Author: ivan-vasilev   File: chapter_06_001.py    License: MIT License
def sampling(args: tuple):
    """
    Reparameterization trick by sampling z from unit Gaussian
    :param args: (tensor, tensor) mean and log of variance of q(z|x)
    :returns tensor: sampled latent vector z
    """

    # unpack the input tuple
    z_mean, z_log_var = args

    # mini-batch size
    mb_size = K.shape(z_mean)[0]

    # latent space size
    dim = K.int_shape(z_mean)[1]

    # random normal vector with mean=0 and std=1.0
    epsilon = K.random_normal(shape=(mb_size, dim))

    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
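
In the enclosing model (not shown here), a sampling function like this is typically wrapped in a Lambda layer so that it runs inside the computation graph. A minimal sketch, with hypothetical layer sizes:

from keras.layers import Input, Dense, Lambda

latent_dim = 2  # hypothetical latent size
inputs = Input(shape=(784,))
h = Dense(128, activation='relu')(inputs)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)

# wrap the sampling function so it becomes a graph node
z = Lambda(sampling, name='z')([z_mean, z_log_var])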
Example 3
Project: pyod   Author: yzhao062   File: vae.py    License: BSD 2-Clause "Simplified" License
def sampling(self, args):
        """Reparametrisation by sampling from Gaussian, N(0,I)
        To sample from epsilon = Norm(0,I) instead of from likelihood Q(z|X)
        with latent variables z: z = z_mean + sqrt(var) * epsilon

        Parameters
        ----------
        args : tensor
            Mean and log of variance of Q(z|X).
    
        Returns
        -------
        z : tensor
            Sampled latent variable.
        """

        z_mean, z_log = args
        batch = K.shape(z_mean)[0]  # batch size
        dim = K.int_shape(z_mean)[1]  # latent dimension
        epsilon = K.random_normal(shape=(batch, dim))  # mean=0, std=1.0

        return z_mean + K.exp(0.5 * z_log) * epsilon 
Example 4
def _compute_probabilities(self, energy, previous_attention=None):
        if self.is_monotonic:
            # add presigmoid noise to encourage discreteness
            sigmoid_noise = K.in_train_phase(1., 0.)
            noise = K.random_normal(K.shape(energy), mean=0.0, stddev=sigmoid_noise)
            # encourage discreteness in train
            energy = K.in_train_phase(energy + noise, energy)

            p = K.in_train_phase(K.sigmoid(energy),
                                 K.cast(energy > 0, energy.dtype))
            p = K.squeeze(p, -1)
            p_prev = K.squeeze(previous_attention, -1)
            # monotonic attention function from tensorflow
            at = K.in_train_phase(
                tf.contrib.seq2seq.monotonic_attention(p, p_prev, 'parallel'),
                tf.contrib.seq2seq.monotonic_attention(p, p_prev, 'hard'))
            at = K.expand_dims(at, -1)
        else:
            # softmax
            at = keras.activations.softmax(energy, axis=1)

        return at 
Example 5
Project: Python-Deep-Learning-Second-Edition   Author: PacktPublishing   File: chapter_06_001.py    License: MIT License
def sampling(args: tuple):
    """
    Reparameterization trick by sampling z from unit Gaussian
    :param args: (tensor, tensor) mean and log of variance of q(z|x)
    :returns tensor: sampled latent vector z
    """

    # unpack the input tuple
    z_mean, z_log_var = args

    # mini-batch size
    mb_size = K.shape(z_mean)[0]

    # latent space size
    dim = K.int_shape(z_mean)[1]

    # random normal vector with mean=0 and std=1.0
    epsilon = K.random_normal(shape=(mb_size, dim))

    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Example 6
Project: scgen   Author: theislab   File: _vae_keras.py    License: GNU General Public License v3.0
def _sample_z(args):
        """
            Samples from standard Normal distribution with shape [size, z_dim] and
            applies re-parametrization trick. It is actually sampling from latent
            space distributions with N(mu, var) computed in `_encoder` function.
            Parameters
            ----------
            args : tuple
                (mu, log_var) tensors computed in `_encoder`.
            Returns
            -------
            The computed Tensor of samples with shape [size, z_dim].
        """
        mu, log_var = args
        batch_size = K.shape(mu)[0]
        z_dim = K.shape(mu)[1]
        eps = K.random_normal(shape=[batch_size, z_dim])
        return mu + K.exp(log_var / 2) * eps 
Example 7
Project: keras-adversarial   Author: bstriner   File: example_aae_cifar10.py    License: MIT License
def model_encoder(latent_dim, input_shape, units=512, reg=lambda: l1l2(l1=1e-7, l2=1e-7), dropout=0.5):
    k = 5
    x = Input(input_shape)
    h = Convolution2D(units // 4, k, k, border_mode='same', W_regularizer=reg())(x)
    # h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(units // 2, k, k, border_mode='same', W_regularizer=reg())(h)
    # h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(units // 2, k, k, border_mode='same', W_regularizer=reg())(h)
    # h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(units, k, k, border_mode='same', W_regularizer=reg())(h)
    # h = SpatialDropout2D(dropout)(h)
    h = LeakyReLU(0.2)(h)
    h = Flatten()(h)
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(h)
    z = Lambda(lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
               output_shape=lambda s: s[0])([mu, log_sigma_sq])
    return Model(x, z, name="encoder") 
Example 8
Project: keras-adversarial   Author: bstriner   File: example_bigan.py    License: MIT License
def model_encoder(latent_dim, input_shape, hidden_dim=1024, reg=lambda: l1l2(1e-5, 0), batch_norm_mode=0):
    x = Input(input_shape, name="x")
    h = Flatten()(x)
    h = Dense(hidden_dim, name="encoder_h1", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim // 2, name="encoder_h2", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim // 4, name="encoder_h3", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(h)
    z = merge([mu, log_sigma_sq], mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
              output_shape=lambda x: x[0])
    return Model(x, z, name="encoder") 
Example 9
Project: keras-adversarial   Author: bstriner   File: example_bigan_unrolled.py    License: MIT License
def model_encoder(latent_dim, input_shape, hidden_dim=1024, reg=lambda: l1(1e-5), batch_norm_mode=2):
    x = Input(input_shape, name="x")
    h = Flatten()(x)
    h = Dense(hidden_dim, name="encoder_h1", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim // 2, name="encoder_h2", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim // 4, name="encoder_h3", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(h)
    z = merge([mu, log_sigma_sq], mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
              output_shape=lambda x: x[0])
    return Model(x, z, name="encoder") 
Example 10
Project: AnomalyDetectionTransformations   Author: izikgo   File: dagmm.py    License: MIT License
def call(self, inputs, training=None):
        z, gamma_k = inputs

        gamma_k_sum = K.sum(gamma_k)
        est_phi = K.mean(gamma_k, axis=0)
        est_mu = K.dot(K.transpose(gamma_k), z) / gamma_k_sum
        est_sigma = K.dot(K.transpose(z - est_mu),
                          gamma_k * (z - est_mu)) / gamma_k_sum

        est_sigma = est_sigma + (K.random_normal(shape=(K.int_shape(z)[1], 1), mean=1e-3, stddev=1e-4) * K.eye(K.int_shape(z)[1]))

        self.add_update(K.update(self.phi, est_phi), inputs)
        self.add_update(K.update(self.mu, est_mu), inputs)
        self.add_update(K.update(self.sigma, est_sigma), inputs)

        est_sigma_diag_inv = K.eye(K.int_shape(self.sigma)[0]) / est_sigma
        self.add_loss(self.lambd_diag * K.sum(est_sigma_diag_inv), inputs)

        phi = K.in_train_phase(est_phi, self.phi, training)
        mu = K.in_train_phase(est_mu, self.mu, training)
        sigma = K.in_train_phase(est_sigma, self.sigma, training)
        return GaussianMixtureComponent._calc_component_density(z, phi, mu, sigma) 
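
For reference, est_phi, est_mu, and est_sigma implement the standard responsibility-weighted GMM estimates used in DAGMM, with responsibilities $\gamma_{ik}$ and latent codes $z_i$:

$$\hat\phi_k = \frac{1}{N}\sum_{i=1}^{N}\gamma_{ik}, \qquad \hat\mu_k = \frac{\sum_i \gamma_{ik}\, z_i}{\sum_i \gamma_{ik}}, \qquad \hat\Sigma_k = \frac{\sum_i \gamma_{ik}\,(z_i-\hat\mu_k)(z_i-\hat\mu_k)^\top}{\sum_i \gamma_{ik}}$$

The K.random_normal term only adds a small random jitter to the diagonal of $\hat\Sigma_k$ for numerical stability.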
Example 11
Project: keras-molecules   Author: maxhodak   File: model.py    License: MIT License
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., stddev=epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var])) 
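
The kl_loss line is the closed-form KL divergence between the approximate posterior $\mathcal{N}(\mu, \sigma^2)$ (with z_log_var $= \log\sigma^2$) and the unit Gaussian prior:

$$D_{\mathrm{KL}}\big(\mathcal{N}(\mu,\sigma^2)\,\|\,\mathcal{N}(0,I)\big) = -\frac{1}{2}\sum_j \big(1 + \log\sigma_j^2 - \mu_j^2 - \sigma_j^2\big)$$

Note that the code takes K.mean over the latent axis rather than the sum, which rescales the term by the latent size.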
Example 12
def call(self, x, mask=None, training=None):
        m = K.not_equal(x, 0.)
        noise_x = x + K.random_normal(shape=K.shape(x),
                                      mean=0.,
                                      stddev=self.sigma)
        noise_x = noise_x * K.cast(m, K.floatx())

        return K.in_train_phase(noise_x, x, training=training) 
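
This call method belongs to a custom Gaussian-noise layer whose enclosing class is not shown; it adds noise only during training and leaves zero (padding) entries untouched. A minimal sketch of such a layer, with a hypothetical class name and assuming sigma is passed to the constructor:

from keras import backend as K
from keras.layers import Layer

class MaskedGaussianNoise(Layer):  # hypothetical name
    """Adds N(0, sigma) noise in training, keeping zero entries zero."""

    def __init__(self, sigma=0.1, **kwargs):
        super(MaskedGaussianNoise, self).__init__(**kwargs)
        self.sigma = sigma

    def call(self, x, mask=None, training=None):
        m = K.not_equal(x, 0.)
        noise_x = x + K.random_normal(shape=K.shape(x), mean=0., stddev=self.sigma)
        noise_x = noise_x * K.cast(m, K.floatx())
        return K.in_train_phase(noise_x, x, training=training)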
Example 13
Project: deepchem   Author: deepchem   File: model.py    License: MIT License
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
      z_mean_, z_log_var_ = args
      batch_size = K.shape(z_mean_)[0]
      epsilon = K.random_normal(
          shape=(batch_size, latent_rep_size), mean=0., stddev=epsilon_std)
      return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
      x = K.flatten(x)
      x_decoded_mean = K.flatten(x_decoded_mean)
      xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
      kl_loss = -0.5 * K.mean(
          1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
      return xent_loss + kl_loss

    return (vae_loss, Lambda(
        sampling, output_shape=(latent_rep_size,),
        name='lambda')([z_mean, z_log_var])) 
Example 14
Project: navbot   Author: marooncn   File: VAE2.py    License: MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], Z_DIM), mean=0., stddev=1.)
    return z_mean + K.exp(z_log_var / 2) * epsilon 
Example 15
Project: Projects   Author: iamshang1   File: conv_vae.py    License: MIT License
def _sampling(self,args):
        '''
        sampling function for embedding layer
        '''
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=K.shape(z_mean), mean=self.eps_mean,
                                  stddev=self.eps_std)
        return z_mean + K.exp(z_log_var) * epsilon 
Example 16
Project: MachineLearning   Author: mengli   File: vae_mnist.py    License: Apache License 2.0
def sampling(args):
    (z_mean, z_var) = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0],
                              LATENT_VAR_DIM), mean=0., stddev=1.)
    return z_mean + z_var * epsilon 
Example 17
Project: keras_bn_library   Author: bnsnapper   File: test_vae_lstm.py    License: MIT License
def sampling(args):
	z_mean, z_log_var = args
	epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
			                         stddev=epsilon_std)
	return z_mean + K.exp(z_log_var / 2) * epsilon 
Example 18
Project: keras_bn_library   Author: bnsnapper   File: activations.py    License: MIT License
def nrlu(x):
    # noisy ReLU: Gaussian noise whose scale adapts to the mean activation
    std = K.mean(K.sigmoid(x))
    eta = K.random_normal(shape=K.shape(x), stddev=std)
    y = K.maximum(x + eta, 0)
    return y
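
A custom activation such as nrlu can be passed wherever Keras accepts an activation; a minimal usage sketch (the layer size is hypothetical):

from keras.layers import Dense

hidden = Dense(64, activation=nrlu)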
Example 19
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: mnist_train.py    License: MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

#z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var]) 
Example 20
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: caltech101_128_train.py    License: MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon 
Example 21
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: test.py    License: MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])` 
Example 22
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: cifar10_train.py    License: MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon 
Example 23
Project: VAE-for-Image-Generation   Author: chaitanya100100   File: caltech101_92_train.py    License: MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon 
Example 24
Project: GEM-Benchmark   Author: palash1992   File: sdne_utils.py    License: BSD 3-Clause "New" or "Revised" License
def sampling(args):
    z_mean, z_std = args
    epsilon_std = 1.0
    # note: shape=(dim,) draws one noise vector that is broadcast across the
    # batch; _keras_shape is a private Keras 1 attribute (K.int_shape is the
    # public equivalent)
    epsilon = KBack.random_normal(shape=(z_mean._keras_shape[1],),
                                  mean=0.,
                                  stddev=epsilon_std)
    return z_mean + z_std * epsilon
    # return z_mean + KBack.exp(z_std / 2) * epsilon 
Example 25
Project: keras-efficientnets   Author: titu1994   File: custom_objects.py    License: MIT License
def __call__(self, shape, dtype=None):
        dtype = dtype or K.floatx()

        kernel_height, kernel_width, _, out_filters = shape
        fan_out = int(kernel_height * kernel_width * out_filters)
        return K.random_normal(
            shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype)


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py 
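
This __call__ belongs to a custom kernel initializer (He-style scaling by fan-out, as used by EfficientNet). A hedged usage sketch, assuming the enclosing class is named EfficientNetConvInitializer (treat the name as illustrative):

from keras.layers import Conv2D

# pass an instance anywhere Keras accepts a kernel_initializer
conv = Conv2D(32, (3, 3), padding='same',
              kernel_initializer=EfficientNetConvInitializer())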
Example 26
Project: vaegan-celebs-keras   Author: baudm   File: models.py    License: MIT License
def _sampling(args):
    """Reparameterization trick by sampling fr an isotropic unit Gaussian.
       Instead of sampling from Q(z|X), sample eps = N(0,I)

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    # Returns:
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Example 27
Project: world_models   Author: llSourcell   File: arch.py    License: MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], Z_DIM), mean=0., stddev=1.)
    return z_mean + K.exp(z_log_var / 2) * epsilon 
Example 28
Project: voxelmorph   Author: voxelmorph   File: models.py    License: GNU General Public License v3.0
def sample_z(self, args):
        mu, log_var = args
        # shape = (K.shape(mu)[0], self.nb_z)
        shape = K.shape(mu)
        eps = K.random_normal(shape=shape, mean=0., stddev=1.)
        return mu + K.exp(log_var / 2) * eps 
Example 29
Project: KATE   Author: hugochan   File: vae.py    License: BSD 3-Clause "New" or "Revised" License
def sampling(self, args):
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=(K.shape(z_mean)[0], self.dim[1]), mean=0.,
                                  stddev=self.epsilon_std)

        return z_mean + K.exp(z_log_var / 2) * epsilon 
Example 30
Project: bayesian_dense   Author: bstriner   File: bayesian_dense.py    License: MIT License
def call(self, x, mask=None):
        # note: dimshuffle and direct tensor .shape indexing are Theano-specific,
        # so this layer assumes the Theano backend
        e = K.random_normal((x.shape[0], self.input_dim, self.output_dim))
        w = self.W_mu.dimshuffle('x', 0, 1) + e * K.exp(self.W_log_sigma / 2).dimshuffle('x', 0, 1)
        output = K.batch_dot(x, w)
        test_output = K.dot(x, self.W_mu)
        if self.bias:
            eb = K.random_normal((x.shape[0], self.output_dim))
            b = self.b_mu.dimshuffle('x', 0) + eb * K.exp(self.b_log_sigma / 2).dimshuffle('x', 0)
            output += b
            test_output += self.b_mu

        return self.activation(K.in_train_phase(output, test_output))