Python tensorflow.distributions() Examples

The following are 5 code examples of tensorflow.distributions(). You can go to the original project or source file by following the link above each example.
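Before diving into the examples, here is a minimal orientation sketch, assuming TensorFlow 1.x where the distribution classes live under tf.distributions: create a distribution, sample from it, and evaluate log-probabilities.

import tensorflow as tf  # TensorFlow 1.x

# A univariate Normal: sample from it and evaluate log-densities.
normal = tf.distributions.Normal(loc=0.0, scale=1.0)
samples = normal.sample(10)         # Tensor of shape (10,)
log_p = normal.log_prob(samples)    # element-wise log-density

with tf.Session() as sess:
    print(sess.run([samples, log_p]))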
Example #1
Source File: tf_sampler.py    From batchflow with Apache License 2.0
def __init__(self, name, **kwargs):
    super().__init__(name, **kwargs)
    # Resolve the distribution name against its tf.distributions alias.
    name = _get_method_by_alias(name, 'tf', tf.distributions)
    self.name = name
    self._params = copy(kwargs)
    # Build the sampler in its own graph with a CPU-only session.
    self.graph = tf.Graph()
    with self.graph.as_default():
        config = tf.ConfigProto(device_count={'GPU': 0})
        self.sess = tf.Session(config=config)
        # Drop the sampler-specific 'dim' option before building the distribution.
        _ = kwargs.pop('dim', None)
        self.sampler = getattr(tf.distributions, self.name)(**kwargs)
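The interesting part of this constructor is the dynamic lookup of a distribution class by name via getattr. A standalone hedged sketch of the same pattern, without the batchflow helpers (make_distribution is a hypothetical name, not part of batchflow):

import tensorflow as tf  # TensorFlow 1.x

def make_distribution(name, **kwargs):
    # Look the class up by name on tf.distributions, e.g. 'Normal' or 'Gamma'.
    return getattr(tf.distributions, name)(**kwargs)

dist = make_distribution('Normal', loc=0.0, scale=2.0)
sample_op = dist.sample(5)  # draws 5 samples when run in a session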
Example #2
Source File: tf_utils.py    From GtS with MIT License
def sample_categorical(p):
    # TODO change to tf.distributions once update tf version
    dist = tf.contrib.distributions.Categorical(probs=p)
    sample = dist.sample()
    return sample

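The TODO above can be addressed on newer TensorFlow 1.x releases, where Categorical is also exposed in the core tf.distributions namespace. A hedged sketch of the updated helper, keeping the same signature:

import tensorflow as tf  # TensorFlow 1.x, recent enough to expose tf.distributions

def sample_categorical(p):
    # Core-namespace equivalent of tf.contrib.distributions.Categorical.
    dist = tf.distributions.Categorical(probs=p)
    return dist.sample()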
Example #3
Source File: losses.py    From aboleth with Apache License 2.0
def elbo(log_likelihood, KL, N):
    r"""Build the evidence lower bound (ELBO) loss for a neural net.

    Parameters
    ----------
    log_likelihood : Tensor
        the log-likelihood Tensor computed from the neural network output(s)
        and the targets. We recommend obtaining this tensor from a
        ``tf.distributions`` object's ``log_prob()`` method. The shape of this
        Tensor should be ``(n_samples, N, ...)``, where ``n_samples`` is the
        number of log-likelihood samples (defined by ``ab.InputLayer``) and
        ``N`` is the number of observations (which can be ``?`` if you are
        using a placeholder for mini-batching). These likelihoods can also be
        weighted, for example to adjust for class imbalance; this weighting is
        left up to the user.
    KL : float, Tensor
        the Kullback-Leibler divergence between the posterior and prior
        parameters of the model (:math:`\text{KL}[q\|p]`).
    N : int, Tensor
        the total size of the dataset (i.e. number of observations).

    Returns
    -------
    nelbo : Tensor
        the loss function of the Bayesian neural net (negative ELBO).

    Example
    -------
    This is how we would typically generate a likelihood for this objective,

    .. code-block:: python

        noise = ab.pos_variable(1.0)
        likelihood = tf.distributions.Normal(loc=NN, scale=noise)
        log_likelihood = likelihood.log_prob(Y)

    where ``NN`` is our neural network, and ``Y`` are our targets.

    Note
    ----
    The way ``tf.distributions.Bernoulli`` and ``tf.distributions.Categorical``
    are implemented is a little confusing: you should use a target array,
    ``Y``, of shape ``(N, 1)`` of ints with the Bernoulli likelihood, and a
    target array of shape ``(N,)`` of ints with the Categorical likelihood.

    """
    # Batch amplification factor
    B = N / tf.to_float(tf.shape(log_likelihood)[1])

    # averaging over samples
    n_samples = tf.to_float(tf.shape(log_likelihood)[0])

    # Mean over samples for the expected log-likelihood
    ELL = tf.squeeze(tf.reduce_sum(log_likelihood, axis=[0, 1])) / n_samples

    # negative ELBO is batch weighted ELL and KL
    nELBO = - B * ELL + KL

    return nELBO 
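For context, here is a hedged sketch of how this loss might be wired into a training objective, following the docstring's own example; NN, Y, KL and N are placeholders standing in for the network output, the targets, the accumulated KL term from the variational layers, and the dataset size, and are not defined here:

noise = ab.pos_variable(1.0)
likelihood = tf.distributions.Normal(loc=NN, scale=noise)
log_likelihood = likelihood.log_prob(Y)

# Negative ELBO to be minimised; N is the total number of observations.
loss = elbo(log_likelihood, KL, N)
train_op = tf.train.AdamOptimizer().minimize(loss)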
Example #4
Source File: losses.py    From aboleth with Apache License 2.0
def max_posterior(log_likelihood, regulariser):
    r"""Build maximum a-posteriori (MAP) loss for a neural net.

    Parameters
    ----------
    log_likelihood : Tensor
        the log-likelihood Tensor computed from the neural network output(s)
        and the targets. We recommend obtaining this tensor from a
        ``tf.distributions`` object's ``log_prob()`` method. The shape of this
        Tensor should be ``(n_samples, N, ...)``, where ``n_samples`` is the
        number of log-likelihood samples (defined by ``ab.InputLayer``) and
        ``N`` is the number of observations (which can be ``?`` if you are
        using a placeholder for mini-batching). These likelihoods can also be
        weighted, for example to adjust for class imbalance; this weighting is
        left up to the user.
    regulariser : float, Tensor
        the regulariser on the parameters of the model to penalise model
        complexity.

    Returns
    -------
    map : Tensor
        the loss function of the MAP neural net.

    Example
    -------
    This is how we would typically generate a likelihood for this objective,

    .. code-block:: python

        noise = ab.pos_variable(1.0)
        likelihood = tf.distributions.Normal(loc=NN, scale=noise)
        log_likelihood = likelihood.log_prob(Y)

    where ``NN`` is our neural network, and ``Y`` are our targets.

    Note
    ----
    The way ``tf.distributions.Bernoulli`` and ``tf.distributions.Categorical``
    are implemented is a little confusing: you should use a target array,
    ``Y``, of shape ``(N, 1)`` of ints with the Bernoulli likelihood, and a
    target array of shape ``(N,)`` of ints with the Categorical likelihood.

    """
    # Average log-likelihood over samples and the batch
    AVLL = tf.squeeze(tf.reduce_mean(log_likelihood, axis=[0, 1]))

    # MAP objective
    MAP = - AVLL + regulariser

    return MAP 
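A hedged usage sketch along the same lines, with log_likelihood built as in the docstring example; the L2 penalty below is only one illustrative choice of regulariser:

# Simple L2 penalty over all trainable weights (illustrative only).
regulariser = tf.add_n([tf.nn.l2_loss(w) for w in tf.trainable_variables()])

loss = max_posterior(log_likelihood, regulariser)
train_op = tf.train.AdamOptimizer().minimize(loss)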
Example #5
Source File: SCAN.py    From SCAN-tensorflow with Apache License 2.0
def train(self):
    img, sym = self.read_data_sets()

    # NOTE: `distributions` and `kl_divergence` are assumed to refer to
    # tf.distributions and tf.distributions.kl_divergence respectively.
    with tf.variable_scope("beta_VAE"):
        # Encode images into a diagonal Gaussian posterior and decode a sample.
        img_q_mu, img_q_sigma = self.img_encoder(img)
        img_z = distributions.Normal(img_q_mu, img_q_sigma)
        img_gen = self.img_decoder(img_z.sample(self.cfg.batch_size))

        img_reconstruct_error = tf.reduce_mean(img_gen)

        # Assumed standard-normal prior N(0, 1) over the latent code.
        img_z_prior = distributions.Normal(tf.zeros_like(img_q_mu),
                                           tf.ones_like(img_q_sigma))
        KL_divergence = kl_divergence(img_z, img_z_prior)
        KL_divergence = self.cfg.beta_vae * KL_divergence

        loss = img_reconstruct_error - KL_divergence

    # train beta VAE
    optimizer = tf.train.AdamOptimizer(self.cfg.learning_rate)
    train_op = optimizer.minimize(loss)

    for step in range(self.cfg.epoch):
        self.sess.run(train_op)

    with tf.variable_scope("SCAN"):
        # Encode symbols and decode a sample, mirroring the image branch.
        sym_q_mu, sym_q_sigma = self.sym_encoder(sym)
        sym_z = distributions.Normal(sym_q_mu, sym_q_sigma)
        sym_gen = self.sym_decoder(sym_z.sample(self.cfg.batch_size))

        sym_reconstruct_error = tf.reduce_mean(sym_gen)  # assumed, mirroring the image branch

        # Assumed standard-normal prior, as above.
        sym_z_prior = distributions.Normal(tf.zeros_like(sym_q_mu),
                                           tf.ones_like(sym_q_sigma))
        beta_KL_divergence = kl_divergence(sym_z, sym_z_prior)
        beta_KL_divergence = self.cfg.beta_scan * beta_KL_divergence

        # Align the symbol posterior with the image posterior (the SCAN term).
        lambda_KL_divergence = kl_divergence(img_z, sym_z)

        loss = sym_reconstruct_error - beta_KL_divergence
        loss -= self.cfg.lambda_scan * lambda_KL_divergence

    # train SCAN
    optimizer = tf.train.AdamOptimizer(self.cfg.learning_rate)
    train_op = optimizer.minimize(loss)

    for step in range(self.cfg.epoch):
        self.sess.run(train_op)
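The KL terms above rely on TensorFlow's registered analytic divergences. A minimal standalone sketch of that machinery (TensorFlow 1.x, with made-up shapes):

import tensorflow as tf  # TensorFlow 1.x

# Diagonal Gaussian posterior q(z|x) and standard-normal prior p(z).
q_mu = tf.zeros([32, 10]) + 0.5
q_sigma = tf.ones([32, 10]) * 0.8
posterior = tf.distributions.Normal(loc=q_mu, scale=q_sigma)
prior = tf.distributions.Normal(loc=tf.zeros_like(q_mu), scale=tf.ones_like(q_sigma))

# Element-wise analytic KL, summed over the latent dimension.
kl = tf.reduce_sum(tf.distributions.kl_divergence(posterior, prior), axis=-1)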