Python tensorflow.contrib.distributions.Bernoulli() Examples

The following are 9 code examples of tensorflow.contrib.distributions.Bernoulli(), drawn from the open-source projects named above each example. You may also want to check out all available functions/classes of the module tensorflow.contrib.distributions, or try the search function.
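Before the project excerpts, here is a minimal, self-contained sketch of the Bernoulli API the examples rely on (TensorFlow 1.x with tf.contrib is assumed; shapes and probabilities are illustrative): construct the distribution from probs, draw samples, and evaluate log-probabilities.

import tensorflow as tf
from tensorflow.contrib import distributions as dist

probs = tf.constant([0.1, 0.5, 0.9])
bernoulli = dist.Bernoulli(probs=probs, dtype=tf.float32)   # samples come back as 0./1. floats

samples = bernoulli.sample(4)           # shape [4, 3]: four draws per probability
log_p = bernoulli.log_prob(samples)     # element-wise log-probability of those draws

with tf.Session() as sess:
    print(sess.run([samples, log_p]))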
Example #1
Source File: vae.py    From IMPLEMENTATION_Variational-Auto-Encoder with MIT License
def __init__(self, latent_dim, batch_size, encoder, decoder,
                 observation_dim=784,
                 learning_rate=1e-4,
                 optimizer=tf.train.RMSPropOptimizer,
                 observation_distribution="Bernoulli", # or Gaussian
                 observation_std=0.01):

        self._latent_dim = latent_dim
        self._batch_size = batch_size
        self._encode = encoder
        self._decode = decoder
        self._observation_dim = observation_dim
        self._learning_rate = learning_rate
        self._optimizer = optimizer
        self._observation_distribution = observation_distribution
        self._observation_std = observation_std
        self._build_graph() 
Example #2
Source File: nem.py    From Relational-NEM with MIT License
def add_noise(data, noise):
    noise_type = noise['noise_type']
    if noise_type in ['None', 'none', None]:
        return data

    with tf.name_scope('input_noise'):
        shape = tf.stack([s.value if s.value is not None else tf.shape(data)[i]
                         for i, s in enumerate(data.get_shape())])

        if noise_type == 'bitflip':
            noise_dist = dist.Bernoulli(probs=noise['prob'], dtype=data.dtype)
            n = noise_dist.sample(shape)
            corrupted = data + n - 2 * data * n  # hacky way of implementing (data XOR n)
        else:
            raise KeyError('Unknown noise_type "{}"'.format(noise_type))

        corrupted.set_shape(data.get_shape())
        return corrupted 
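To see why the comment above calls data + n - 2 * data * n an XOR: for binary inputs the expression reproduces the XOR truth table exactly. A quick NumPy check (illustrative values only):

import numpy as np

data = np.array([0., 0., 1., 1.])
n    = np.array([0., 1., 0., 1.])
print(data + n - 2 * data * n)   # [0. 1. 1. 0.]  ==  data XOR n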
Example #3
Source File: nem.py    From Neural-EM with MIT License
def add_noise(data, noise, dataset):
    noise_type = noise['noise_type']
    if noise_type in ['None', 'none', None]:
        return data
    if noise_type == 'data':
        noise_type = 'bitflip' if dataset['binary'] else 'masked_uniform'

    with tf.name_scope('input_noise'):
        shape = tf.stack([s.value if s.value is not None else tf.shape(data)[i]
                         for i, s in enumerate(data.get_shape())])

        if noise_type == 'bitflip':
            noise_dist = dist.Bernoulli(probs=noise['prob'], dtype=data.dtype)
            n = noise_dist.sample(shape)
            corrupted = data + n - 2 * data * n  # hacky way of implementing (data XOR n)
        elif noise_type == 'masked_uniform':
            noise_dist = dist.Uniform(low=0., high=1.)
            noise_uniform = noise_dist.sample(shape)

            # sample mask
            mask_dist = dist.Bernoulli(probs=noise['prob'], dtype=data.dtype)
            mask = mask_dist.sample(shape)

            # produce output
            corrupted = mask * noise_uniform + (1 - mask) * data
        else:
            raise KeyError('Unknown noise_type "{}"'.format(noise_type))

        corrupted.set_shape(data.get_shape())
        return corrupted 
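One detail worth noting in the masked_uniform branch: Bernoulli samples default to tf.int32, so passing dtype=data.dtype keeps the mask arithmetic valid for float data. A minimal standalone sketch of the same masking pattern (the shapes and the 0.2 probability are illustrative, not taken from the project):

import tensorflow as tf
from tensorflow.contrib import distributions as dist

data = tf.random_uniform([2, 4])                                    # stand-in float32 batch
mask = dist.Bernoulli(probs=0.2, dtype=data.dtype).sample(tf.shape(data))
noise = dist.Uniform(low=0., high=1.).sample(tf.shape(data))
corrupted = mask * noise + (1 - mask) * data                        # ~20% of entries replaced

with tf.Session() as sess:
    print(sess.run(corrupted))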
Example #4
Source File: prior.py    From attend_infer_repeat with GNU General Public License v3.0
def __init__(self, steps_probs):
        """

        :param steps_probs: tensor; Bernoulli success probabilities
        """
        self._steps_probs = steps_probs
        self._joint = bernoulli_to_modified_geometric(steps_probs)
        self._bernoulli = None 
Example #5
Source File: prior.py    From attend_infer_repeat with GNU General Public License v3.0
def sample(self, n=None):
        if self._bernoulli is None:
            self._bernoulli = Bernoulli(self._steps_probs)

        sample = self._bernoulli.sample(n)
        sample = tf.cumprod(sample, tf.rank(sample) - 1)
        sample = tf.reduce_sum(sample, -1)
        return sample 
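The cumprod/reduce_sum combination above converts independent Bernoulli draws into a step count: the running product stays at 1 until the first failure and is 0 afterwards, so its sum is the number of leading successes. A small NumPy illustration of the idea (values are illustrative):

import numpy as np

draws = np.array([1, 1, 0, 1])   # per-step Bernoulli outcomes
running = np.cumprod(draws)      # [1, 1, 0, 0] -- zero from the first failure onwards
print(running.sum())             # 2: the number of leading successes, i.e. steps taken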
Example #6
Source File: nem.py    From auto_yolo with MIT License
def add_noise(data, noise_prob, noise_type):
    if noise_type in ['None', 'none', None]:
        return data

    shape = tf.stack([s.value if s.value is not None else tf.shape(data)[i]
                     for i, s in enumerate(data.get_shape())])

    if noise_type == 'bitflip':
        noise_dist = dist.Bernoulli(probs=noise_prob, dtype=data.dtype)
        n = noise_dist.sample(shape)
        corrupted = data + n - 2 * data * n  # hacky way of implementing (data XOR n)
    elif noise_type == 'masked_uniform':
        noise_dist = dist.Uniform(low=0., high=1.)
        noise_uniform = noise_dist.sample(shape)

        # sample mask
        mask_dist = dist.Bernoulli(probs=noise_prob, dtype=data.dtype)
        mask = mask_dist.sample(shape)

        # produce output
        corrupted = mask * noise_uniform + (1 - mask) * data
    else:
        raise KeyError('Unknown noise_type "{}"'.format(noise_type))

    corrupted.set_shape(data.get_shape())
    return corrupted


# def set_up_optimizer(loss, optimizer, params, clip_gradients):
#     opt = {
#         'adam': tf.train.AdamOptimizer,
#         'sgd': tf.train.GradientDescentOptimizer,
#         'momentum': tf.train.MomentumOptimizer,
#         'adadelta': tf.train.AdadeltaOptimizer,
#         'adagrad': tf.train.AdagradOptimizer,
#         'rmsprop': tf.train.RMSPropOptimizer
#     }[optimizer](**params)
#
#     # optionally clip gradients by norm
#     grads_and_vars = opt.compute_gradients(loss)
#     if clip_gradients is not None:
#         grads_and_vars = [(tf.clip_by_norm(grad, clip_gradients), var)
#                           for grad, var in grads_and_vars]
#
#     return opt, opt.apply_gradients(grads_and_vars)


# -------------------------------- nem_model.py --------------------------------- 
Example #7
Source File: vae.py    From IMPLEMENTATION_Variational-Auto-Encoder with MIT License
def _build_graph(self):

        with tf.variable_scope('vae'):
            self.x = tf.placeholder(tf.float32, shape=[None, self._observation_dim])

            with tf.variable_scope('encoder'):
                encoded = self._encode(self.x, self._latent_dim)

            with tf.variable_scope('latent'):
                self.mean = encoded[:, :self._latent_dim]
                logvar = encoded[:, self._latent_dim:]
                stddev = tf.sqrt(tf.exp(logvar))
                epsilon = tf.random_normal([self._batch_size, self._latent_dim])
                self.z = self.mean + stddev * epsilon

            with tf.variable_scope('decoder'):
                decoded = self._decode(self.z, self._observation_dim)
                self.obs_mean = decoded
                if self._observation_distribution == 'Gaussian':
                    obs_epsilon = tf.random_normal([self._batch_size, self._observation_dim])
                    self.sample = self.obs_mean + self._observation_std * obs_epsilon
                else:
                    self.sample = Bernoulli(probs=self.obs_mean).sample()


            with tf.variable_scope('loss'):
                with tf.variable_scope('kl-divergence'):
                    kl = self._kl_diagnormal_stdnormal(self.mean, logvar)

                if self._observation_distribution == 'Gaussian':
                    with tf.variable_scope('gaussian'):
                        obj = self._gaussian_log_likelihood(self.x, self.obs_mean, self._observation_std)
                else:
                    with tf.variable_scope('bernoulli'):
                        obj = self._bernoulli_log_likelihood(self.x, self.obs_mean)

                self._loss = (kl + obj) / self._batch_size

            with tf.variable_scope('optimizer'):
                optimizer = tf.train.RMSPropOptimizer(learning_rate=self._learning_rate)
            with tf.variable_scope('training-step'):
                self._train = optimizer.minimize(self._loss)

            self._sesh = tf.Session()
            init = tf.global_variables_initializer()
            self._sesh.run(init) 
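Reading Examples #1 and #7 together, the constructor expects encoder and decoder callables with signatures encoder(x, latent_dim) and decoder(z, observation_dim), and the encoder must output 2 * latent_dim units because _build_graph splits its output into mean and log-variance. A hedged usage sketch, assuming the class is exposed as vae.VAE (the class name and import path are assumptions, and the dense layers are purely illustrative):

import tensorflow as tf
from vae import VAE   # assumed import path for the class shown above

def encoder(x, latent_dim):
    h = tf.layers.dense(x, 256, activation=tf.nn.relu)
    return tf.layers.dense(h, 2 * latent_dim)            # [mean, log-variance] concatenated

def decoder(z, observation_dim):
    h = tf.layers.dense(z, 256, activation=tf.nn.relu)
    return tf.layers.dense(h, observation_dim, activation=tf.nn.sigmoid)

vae = VAE(latent_dim=32, batch_size=64, encoder=encoder, decoder=decoder,
          observation_distribution="Bernoulli")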
Example #8
Source File: cell.py    From attend_infer_repeat with GNU General Public License v3.0
def __init__(self, img_size, crop_size, n_appearance,
                 transition, input_encoder, glimpse_encoder, glimpse_decoder, transform_estimator, steps_predictor,
                 discrete_steps=True, canvas_init=None, explore_eps=None, debug=False):
        """Creates the cell

        :param img_size: int tuple, size of the image
        :param crop_size: int tuple, size of the attention glimpse
        :param n_appearance: number of latent units describing the "what"
        :param transition: an RNN cell for maintaining the internal hidden state
        :param input_encoder: callable, encodes the original input image before passing it into the transition
        :param glimpse_encoder: callable, encodes the glimpse into latent representation
        :param glimpse_decoder: callable, decodes the glimpse from latent representation
        :param transform_estimator: callable, transforms the hidden state into parameters for the spatial transformer
        :param steps_predictor: callable, predicts whether to take a step
        :param discrete_steps: boolean, steps are sampled from a Bernoulli distribution if True; if False, all steps are
         taken and are weighted by the step probability
        :param canvas_init: float or None, initial value for the reconstructed image. If None, the canvas is black. If
         float, the canvas starts with a given value, which is trainable.
        :param explore_eps: float or None; if float, it has to be \in (0., .5); step probability is clipped between
         `explore_eps` and (1 - `explore_eps`)
        :param debug: boolean, adds checks for NaNs in the inputs to distributions
        """

        super(AIRCell, self).__init__(self.__class__.__name__)
        self._img_size = img_size
        self._n_pix = np.prod(self._img_size)
        self._crop_size = crop_size
        self._n_appearance = n_appearance
        self._transition = transition
        self._n_hidden = self._transition.output_size[0]

        self._sample_presence = discrete_steps
        self._explore_eps = explore_eps
        self._debug = debug

        with self._enter_variable_scope():
            self._canvas = tf.zeros(self._img_size, dtype=tf.float32)
            if canvas_init is not None:
                self._canvas_value = tf.get_variable('canvas_value', dtype=tf.float32, initializer=canvas_init)
                self._canvas += self._canvas_value

            transform_constraints = snt.AffineWarpConstraints.no_shear_2d()

            self._spatial_transformer = SpatialTransformer(img_size, crop_size, transform_constraints)
            self._inverse_transformer = SpatialTransformer(img_size, crop_size, transform_constraints, inverse=True)

            self._transform_estimator = transform_estimator(self._n_transform_param)
            self._input_encoder = input_encoder()
            self._glimpse_encoder = glimpse_encoder()
            self._glimpse_decoder = glimpse_decoder(crop_size)

            self._what_distrib = ParametrisedGaussian(n_appearance, scale_offset=0.5,
                                                      validate_args=self._debug, allow_nan_stats=not self._debug)

            self._steps_predictor = steps_predictor() 
Example #9
Source File: cell.py    From attend_infer_repeat with GNU General Public License v3.0
def _build(self, inpt, state):
        """Input is unused; it's only to force a maximum number of steps"""

        img_flat, canvas_flat, what_code, where_code, hidden_state, presence = state

        img_inpt = img_flat
        img = tf.reshape(img_inpt, (-1,) + tuple(self._img_size))


        inpt_encoding = self._input_encoder(img)
        with tf.variable_scope('rnn_inpt'):
            hidden_output, hidden_state = self._transition(inpt_encoding, hidden_state)

        where_param = self._transform_estimator(hidden_output)
        where_distrib = NormalWithSoftplusScale(*where_param,
                                                validate_args=self._debug, allow_nan_stats=not self._debug)
        where_loc, where_scale = where_distrib.loc, where_distrib.scale
        where_code = where_distrib.sample()

        cropped = self._spatial_transformer(img, where_code)

        with tf.variable_scope('presence'):
            presence_prob = self._steps_predictor(hidden_output)

            if self._explore_eps is not None:
                presence_prob = self._explore_eps / 2 + (1 - self._explore_eps) * presence_prob

            if self._sample_presence:
                presence_distrib = Bernoulli(probs=presence_prob, dtype=tf.float32,
                                             validate_args=self._debug, allow_nan_stats=not self._debug)

                new_presence = presence_distrib.sample()
                presence *= new_presence

            else:
                presence = presence_prob

        what_params = self._glimpse_encoder(cropped)
        what_distrib = self._what_distrib(what_params)
        what_loc, what_scale = what_distrib.loc, what_distrib.scale
        what_code = what_distrib.sample()

        decoded = self._glimpse_decoder(what_code)
        inversed = self._inverse_transformer(decoded, where_code)

        with tf.variable_scope('rnn_outputs'):
            inversed_flat = tf.reshape(inversed, (-1, self._n_pix))

            canvas_flat += presence * inversed_flat
            decoded_flat = tf.reshape(decoded, (-1, np.prod(self._crop_size)))

        output = [canvas_flat, decoded_flat, what_code, what_loc, what_scale, where_code, where_loc, where_scale,
                  presence_prob, presence]
        state = [img_flat, canvas_flat,
                 what_code, where_code, hidden_state, presence]
        return output, state
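The presence step above depends on dtype=tf.float32 so that the sampled 0/1 values can be multiplied straight into the running presence mask. A minimal standalone illustration of that pattern (the shapes and the 0.7 probability are illustrative):

import tensorflow as tf
from tensorflow.contrib import distributions as dist

presence = tf.ones([8, 1])                         # every object starts as "present"
presence_prob = tf.fill([8, 1], 0.7)               # stand-in for a steps predictor output

new_presence = dist.Bernoulli(probs=presence_prob, dtype=tf.float32).sample()
presence *= new_presence                           # once an object drops to 0 it stays absent

with tf.Session() as sess:
    print(sess.run(presence))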