Python tensorflow.exp() Examples

The following are 30 code examples of tensorflow.exp(), collected from open-source projects. Each example lists the source file and project it was taken from, along with its license.
Example #1
Source File: distributions.py    From DOTA_models with Apache License 2.0
def diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0):
  """Log-likelihood under a Gaussian distribution with diagonal covariance.
    Returns the log-likelihood for each dimension.  One should sum the
    results for the log-likelihood under the full multidimensional model.

  Args:
    z: The value at which to compute the log-likelihood.
    mu: The mean of the Gaussian.
    logvar: The log variance of the Gaussian.

  Returns:
    The log-likelihood under the Gaussian model.
  """

  return -0.5 * (logvar + np.log(2*np.pi) + \
                 tf.square((z-mu)/tf.exp(0.5*logvar))) 
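As the docstring notes, the function returns a per-dimension log-likelihood, so the caller sums over the last axis. The following minimal NumPy sketch (illustrative only; the `_np` helper name is not part of the original project) mirrors the same formula and shows that reduction:

import numpy as np

def diag_gaussian_log_likelihood_np(z, mu=0.0, logvar=0.0):
    # Per-dimension log N(z; mu, exp(logvar)), mirroring the TF version above.
    return -0.5 * (logvar + np.log(2 * np.pi)
                   + np.square((z - mu) / np.exp(0.5 * logvar)))

z = np.array([0.5, -1.0, 2.0])
per_dim = diag_gaussian_log_likelihood_np(z, mu=np.zeros(3), logvar=np.zeros(3))
total = per_dim.sum(axis=-1)   # log-likelihood under the full diagonal-Gaussian model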
Example #2
Source File: distributions.py    From DOTA_models with Apache License 2.0
def __init__(self, batch_size, z_size, mean, logvar):
    """Create a diagonal gaussian distribution.

    Args:
      batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
      z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
      mean: The N-D mean of the distribution.
      logvar: The N-D log variance of the diagonal distribution.
    """
    size__xz = [None, z_size]
    self.mean = mean            # bxn already
    self.logvar = logvar        # bxn already
    self.noise = noise = tf.random_normal(tf.shape(logvar))
    self.sample = mean + tf.exp(0.5 * logvar) * noise
    mean.set_shape(size__xz)
    logvar.set_shape(size__xz)
    self.sample.set_shape(size__xz) 
Example #3
Source File: distributions.py    From DOTA_models with Apache License 2.0
def __init__(self, x_bxu, z_size, name, var_min=0.0):
    """Create an input dependent diagonal Gaussian distribution.

    Args:
      x_bxu: The input tensor from which the mean and variance are computed,
        via a linear transformation of x.  I.e.
          mu = Wx + b, log(var) = Mx + c
      z_size: The size of the distribution.
      name:  The name to prefix to learned variables.
      var_min (optional): Minimal variance allowed.  This is an additional
        way to control the amount of information getting through the stochastic
        layer.
    """
    size_bxn = tf.stack([tf.shape(x_bxu)[0], z_size])
    self.mean_bxn = mean_bxn = linear(x_bxu, z_size, name=(name+"/mean"))
    logvar_bxn = linear(x_bxu, z_size, name=(name+"/logvar"))
    if var_min > 0.0:
      logvar_bxn = tf.log(tf.exp(logvar_bxn) + var_min)
    self.logvar_bxn = logvar_bxn

    self.noise_bxn = noise_bxn = tf.random_normal(size_bxn)
    self.noise_bxn.set_shape([None, z_size])
    self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn 
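The var_min clamp above works because exp(log(exp(logvar) + var_min)) = exp(logvar) + var_min, so the resulting variance can never fall below var_min. A quick NumPy check of that identity (the values are arbitrary):

import numpy as np

logvar = np.array([-10.0, 0.0, 3.0])
var_min = 0.1
floored_logvar = np.log(np.exp(logvar) + var_min)
assert np.all(np.exp(floored_logvar) >= var_min)   # the variance is floored at var_min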
Example #4
Source File: utils.py    From DOTA_models with Apache License 2.0
def gaussian_kernel_matrix(x, y, sigmas):
  r"""Computes a Guassian Radial Basis Kernel between the samples of x and y.

  We create a sum of multiple gaussian kernels each having a width sigma_i.

  Args:
    x: a tensor of shape [num_samples, num_features]
    y: a tensor of shape [num_samples, num_features]
    sigmas: a tensor of floats which denote the widths of each of the
      gaussians in the kernel.
  Returns:
    A tensor of shape [num_samples{x}, num_samples{y}] with the RBF kernel.
  """
  beta = 1. / (2. * (tf.expand_dims(sigmas, 1)))

  dist = compute_pairwise_distances(x, y)

  s = tf.matmul(beta, tf.reshape(dist, (1, -1)))

  return tf.reshape(tf.reduce_sum(tf.exp(-s), 0), tf.shape(dist)) 
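For reference, here is a self-contained NumPy sketch of the same multi-sigma RBF kernel. It assumes compute_pairwise_distances returns squared Euclidean distances, which is how the result is used here; the `_np` names are illustrative only:

import numpy as np

def gaussian_kernel_matrix_np(x, y, sigmas):
    # Squared Euclidean distance between every pair of rows in x and y.
    dist = np.square(x[:, None, :] - y[None, :, :]).sum(-1)      # [n_x, n_y]
    beta = 1.0 / (2.0 * np.asarray(sigmas)[:, None])             # [n_sigmas, 1]
    s = beta * dist.reshape(1, -1)                               # [n_sigmas, n_x * n_y]
    return np.exp(-s).sum(0).reshape(dist.shape)                 # sum of RBF kernels

x = np.random.randn(4, 3)
y = np.random.randn(5, 3)
k = gaussian_kernel_matrix_np(x, y, sigmas=[1.0, 5.0, 10.0])     # shape (4, 5)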
Example #5
Source File: competition_model_class.py    From Deep_Learning_Weather_Forecasting with Apache License 2.0
def build_graph(self):
        #keras.backend.clear_session() # clear session/graph    
        self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)

        self.model = Seq2Seq_MVE_subnets_swish(id_embd=True, time_embd=True,
            lr=self.lr, decay=self.decay,
            num_input_features=self.num_input_features, num_output_features=self.num_output_features,
            num_decoder_features=self.num_decoder_features, layers=self.layers,
            loss=self.loss, regulariser=self.regulariser)

        def _mve_loss(y_true, y_pred):
            pred_u = crop(2, 0, 3)(y_pred)
            pred_sig = crop(2, 3, 6)(y_pred)
            # Alternative: exp_sig = tf.exp(pred_sig) keeps the variance strictly positive
            # (avoids pred_sig being too small or zero); then precision = 1. / exp_sig and
            # log_loss = 0.5 * tf.log(exp_sig) + 0.5 * precision * ((pred_u - y_true) ** 2).
            precision = 1. / pred_sig
            log_loss = 0.5 * tf.log(pred_sig) + 0.5 * precision * ((pred_u - y_true) ** 2)

            log_loss = tf.reduce_mean(log_loss)
            return log_loss

        print(self.model.summary())
        self.model.compile(optimizer = self.optimizer, loss=_mve_loss) 
Example #6
Source File: competition_model_class.py    From Deep_Learning_Weather_Forecasting with Apache License 2.0
def minus_plus_std_strategy(self, pred_mean, pred_var, feature_name,\
                            timestep_to_ensemble=21, alpha=0):
        '''
        This strategy shifts the prediction of a given feature by a signed multiple of its
        predicted standard deviation from a specific timestep (timestep_to_ensemble) onward:
                                    pred_mean + alpha * pred_std
        pred_mean: (10, 37, 3)
        pred_var: (10, 37, 3)
        timestep_to_ensemble: int32 (from 0 to 36)
        '''
        print('Using minus_plus_std_strategy with alpha {}'.format(alpha))
        assert 0 <= timestep_to_ensemble <= 36, 'Please ensure 0 <= timestep_to_ensemble <= 36!'
        assert -0.3 <= alpha <= 0.3, '-0.3 <= alpha <= 0.3!'
        assert pred_mean.shape == (10, 37, 3), 'Error! This function ONLY works for a \
        single data sample of shape (10, 37, 3). Any data of shape (None, 10, 37, 3) will lead to this error!'
        pred_std = np.sqrt(np.exp(pred_var))
        print('alpha:',alpha)

        pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] = \
        pred_mean[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]] + \
        alpha * pred_std[:,timestep_to_ensemble:,self.obs_and_output_feature_index_map[feature_name]]

        return pred_mean 
Example #7
Source File: train_policy.py    From cs294-112_hws with MIT License
def sample_action(self, policy_parameters):
        """
        constructs a symbolic operation for stochastically sampling from the policy
        distribution

        arguments:
            policy_parameters
                (mean, log_std) of a Gaussian distribution over actions
                    sy_mean: (batch_size, self.ac_dim)
                    sy_logstd: (batch_size, self.ac_dim)

        returns:
            sy_sampled_ac:
                (batch_size, self.ac_dim)
        """
        sy_mean, sy_logstd = policy_parameters
        sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean), 0, 1)
        return sy_sampled_ac 
Example #8
Source File: density_model.py    From cs294-112_hws with MIT License
def make_encoder(self, state, z_size, scope, n_layers, hid_size):
        """
            ### PROBLEM 3
            ### YOUR CODE HERE

            args:
                state: tf variable
                z_size: output dimension of the encoder network
                scope: scope name
                n_layers: number of layers of the encoder network
                hid_size: hidden dimension of encoder network

            TODO:
                1. z_mean: the output of a neural network that takes the state as input,
                    has output dimension z_size, n_layers layers, and hidden 
                    dimension hid_size
                2. z_logstd: a trainable variable, initialized to 0
                    shape (z_size,)

            Hint: use build_mlp
        """
        z_mean = build_mlp(state, z_size, scope, n_layers, hid_size)
        z_logstd = tf.get_variable('z_logstd', shape=z_size, trainable=True,
                                   initializer=tf.constant_initializer(value=0.))
        return tfp.distributions.MultivariateNormalDiag(loc=z_mean, scale_diag=tf.exp(z_logstd)) 
Example #9
Source File: nn.py    From cs294-112_hws with MIT License
def call(self, inputs):
        mean_and_log_std = self.model(inputs)
        mean, log_std = tf.split(mean_and_log_std, num_or_size_splits=2, axis=1)
        log_std = tf.clip_by_value(log_std, -20., 2.)
        
        distribution = tfp.distributions.MultivariateNormalDiag(
            loc=mean,
            scale_diag=tf.exp(log_std)
        )
        
        raw_actions = distribution.sample()
        if not self._reparameterize:
            ### Problem 1.3.A
            ### YOUR CODE HERE
            raw_actions = tf.stop_gradient(raw_actions)
        log_probs = distribution.log_prob(raw_actions)
        log_probs -= self._squash_correction(raw_actions)

        ### Problem 2.A
        ### YOUR CODE HERE
        self.actions = tf.tanh(raw_actions)
            
        return self.actions, log_probs 
Example #10
Source File: learning_rate.py    From fine-lm with MIT License
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None):
  """Learning rate warmup multiplier."""
  if not warmup_steps:
    return tf.constant(1.)

  tf.logging.info("Applying %s learning rate warmup for %d steps",
                  warmup_schedule, warmup_steps)

  warmup_steps = tf.to_float(warmup_steps)
  global_step = _global_step(hparams)

  if warmup_schedule == "exp":
    return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step)
  else:
    assert warmup_schedule == "linear"
    start = tf.constant(0.35)
    return ((tf.constant(1.) - start) / warmup_steps) * global_step + start 
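The "exp" branch evaluates exp(log(0.01) / warmup_steps) ** (warmup_steps - global_step), which is algebraically 0.01 ** ((warmup_steps - global_step) / warmup_steps): the multiplier ramps from 0.01 at step 0 up to 1.0 at the end of warmup. A small NumPy check (values hypothetical):

import numpy as np

warmup_steps = 1000.0
steps = np.array([0.0, 500.0, 1000.0])
multiplier = np.exp(np.log(0.01) / warmup_steps) ** (warmup_steps - steps)
# Closed form: 0.01 ** ((warmup_steps - steps) / warmup_steps) -> approx. [0.01, 0.1, 1.0]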
Example #11
Source File: autoencoders.py    From fine-lm with MIT License
def bottleneck(self, x):  # pylint: disable=arguments-differ
    hparams = self.hparams
    if hparams.unordered:
      return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
    noise = hparams.bottleneck_noise
    hparams.bottleneck_noise = 0.0  # We'll add noise below.
    x, loss = discretization.parametrized_bottleneck(x, hparams)
    hparams.bottleneck_noise = noise
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      # We want a number p such that p^bottleneck_bits = 1 - noise/2.
      # So log(p) * bottleneck_bits = log(1 - noise/2).
      log_p = tf.log(1 - float(noise) / 2) / float(hparams.bottleneck_bits)
      # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits.
      noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
      # Having the no-noise mask, we can make noise just uniformly at random.
      ordered_noise = tf.random_uniform(tf.shape(x))
      # We want our noise to be 1s at the start and random {-1, 1} bits later.
      ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
      # Now we flip the bits of x on the noisy positions (ordered and normal).
      x *= 2.0 * ordered_noise - 1
    return x, loss 
Example #12
Source File: common_layers.py    From fine-lm with MIT License
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
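The same computation is easy to reproduce in NumPy, which makes the geometric spacing of the timescales explicit (this sketch is illustrative and not part of the original module):

import math
import numpy as np

def get_timing_signal_np(length, min_timescale=1, max_timescale=1e4, num_timescales=16):
    positions = np.arange(length, dtype=np.float32)
    log_timescale_increment = (math.log(max_timescale / min_timescale)
                               / (num_timescales - 1))
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales, dtype=np.float32) * -log_timescale_increment)
    scaled_time = positions[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

signal = get_timing_signal_np(50)   # shape (50, 32)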
Example #13
Source File: discretization.py    From fine-lm with MIT License
def vae(x, name, z_size):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    name: Name for the bottleneck scope.
    z_size: Number of bits used to produce discrete code; discrete codes range
      from 1 to 2**z_size.

  Returns:
    The sampled latent z, the KL loss, mu, and log_sigma.
  """
  with tf.variable_scope(name):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma 
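Note that log_sigma here is used as a log-variance: the sample is mu + exp(log_sigma / 2) * epsilon, and the KL term is the closed-form KL divergence between N(mu, exp(log_sigma)) and a standard normal, averaged over the last axis. A minimal NumPy sketch of that KL term (the `_np` name is illustrative):

import numpy as np

def vae_kl_np(mu, log_sigma):
    # KL(N(mu, exp(log_sigma)) || N(0, 1)), averaged over the last axis as above.
    return 0.5 * np.mean(np.exp(log_sigma) + np.square(mu) - 1. - log_sigma, axis=-1)

assert np.isclose(vae_kl_np(np.zeros(8), np.zeros(8)), 0.0)   # N(0, 1) has zero KL to itself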
Example #14
Source File: train.py    From Traffic_sign_detection_YOLO with MIT License
def expit_tensor(x):
	return 1. / (1. + tf.exp(-x)) 
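expit_tensor is simply the logistic sigmoid, so it is mathematically the same as tf.sigmoid(x). A NumPy check of the identity 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2:

import numpy as np

def expit_np(x):
    return 1. / (1. + np.exp(-x))

x = np.linspace(-5, 5, 11)
assert np.allclose(expit_np(x), 0.5 * (1.0 + np.tanh(0.5 * x)))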
Example #15
Source File: tfutil.py    From disentangling_conditional_gans with MIT License
def exp2(x):
    with tf.name_scope('Exp2'):
        return tf.exp(x * np.float32(np.log(2.0))) 
Example #16
Source File: picklable_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def fprop(self, x, **kwargs):
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        mask = tf.to_float(x >= 0.)
        out = mask * x + (1. - mask) * \
            (alpha * tf.exp((1. - mask) * x) - alpha)
        return scale * out 
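With these constants, fprop implements the SELU activation: scale * x for x >= 0 and scale * alpha * (exp(x) - 1) otherwise. A NumPy sketch checking the masked formulation against that piecewise definition:

import numpy as np

alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946

def selu_np(x):
    # Same masked formulation as fprop above.
    mask = (x >= 0.).astype(np.float64)
    out = mask * x + (1. - mask) * (alpha * np.exp((1. - mask) * x) - alpha)
    return scale * out

x = np.linspace(-3., 3., 13)
assert np.allclose(selu_np(x), scale * np.where(x >= 0., x, alpha * (np.exp(x) - 1.)))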
Example #17
Source File: 19_char_rnn.py    From deep-learning-note with MIT License
def create_model(self):
        seq = tf.one_hot(self.seq, len(self.vocab))
        self.create_rnn(seq)
        self.logits = tf.layers.dense(self.output, len(self.vocab), None)
        loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits[:, :-1],
                                                       labels=seq[:, 1:])
        self.loss = tf.reduce_sum(loss)
        # sample the next character from the temperature-scaled (Boltzmann) distribution.
        # Note: tf.random.categorical expects logits, so it works equally well without tf.exp.
        self.sample = tf.random.categorical(tf.exp(self.logits[:, -1] / self.temp), 1)[:, 0]
        self.opt = tf.compat.v1.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=self.gstep) 
Example #18
Source File: actor.py    From neural-combinatorial-optimization-rl-tensorflow with MIT License
def build_optim(self):
        # Update moving_mean and moving_variance for batch normalization layers
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):

            with tf.name_scope('baseline'):
                # Update baseline
                reward_mean, reward_var = tf.nn.moments(self.reward,axes=[0])
                self.base_op = tf.assign(self.avg_baseline, self.alpha*self.avg_baseline+(1.0-self.alpha)*reward_mean)
                tf.summary.scalar('average baseline',self.avg_baseline)

            with tf.name_scope('reinforce'):
                # Actor learning rate
                self.lr1 = tf.train.exponential_decay(self.lr1_start, self.global_step, self.lr1_decay_step,self.lr1_decay_rate, staircase=False, name="learning_rate1")
                # Optimizer
                self.opt1 = tf.train.AdamOptimizer(learning_rate=self.lr1,beta1=0.9,beta2=0.99, epsilon=0.0000001)
                # Discounted reward
                self.reward_baseline = tf.stop_gradient(self.reward - self.avg_baseline - self.critic.predictions) # [Batch size, 1] 
                variable_summaries('reward_baseline',self.reward_baseline, with_max_min = True)
                # Loss
                self.loss1 = tf.reduce_mean(self.reward_baseline*self.log_softmax,0)
                tf.summary.scalar('loss1', self.loss1)
                # Minimize step
                gvs = self.opt1.compute_gradients(self.loss1)
                capped_gvs = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs if grad is not None] # L2 clip
                self.train_step1 = self.opt1.apply_gradients(capped_gvs, global_step=self.global_step)

            with tf.name_scope('state_value'):
                # Critic learning rate
                self.lr2 = tf.train.exponential_decay(self.lr2_start, self.global_step2, self.lr2_decay_step,self.lr2_decay_rate, staircase=False, name="learning_rate2")
                # Optimizer
                self.opt2 = tf.train.AdamOptimizer(learning_rate=self.lr2,beta1=0.9,beta2=0.99, epsilon=0.0000001)
                # Loss
                weights_ = 1.0 #weights_ = tf.exp(self.log_softmax-tf.reduce_max(self.log_softmax)) # probs / max_prob
                self.loss2 = tf.losses.mean_squared_error(self.reward - self.avg_baseline, self.critic.predictions, weights = weights_)
                tf.summary.scalar('loss2', self.loss2)
                # Minimize step
                gvs2 = self.opt2.compute_gradients(self.loss2)
                capped_gvs2 = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs2 if grad is not None] # L2 clip
                self.train_step2 = self.opt2.apply_gradients(capped_gvs2, global_step=self.global_step2)
Example #19
Source File: utils.py    From DOTA_models with Apache License 2.0
def logits_to_log_prob(logits):
  """Computes log probabilities using numerically stable trick.

  This uses two numerical stability tricks:
  1) softmax(x) = softmax(x - c) where c is a constant applied to all
  arguments. If we set c = max(x) then the softmax is more numerically
  stable.
  2) log softmax(x) is not numerically stable, but we can stabilize it
  by using the identity log softmax(x) = x - log sum exp(x)

  Args:
    logits: Tensor of arbitrary shape whose last dimension contains logits.

  Returns:
    A tensor of the same shape as the input, but with corresponding log
    probabilities.
  """

  with tf.variable_scope('log_probabilities'):
    reduction_indices = len(logits.shape.as_list()) - 1
    max_logits = tf.reduce_max(
        logits, reduction_indices=reduction_indices, keep_dims=True)
    safe_logits = tf.subtract(logits, max_logits)
    sum_exp = tf.reduce_sum(
        tf.exp(safe_logits),
        reduction_indices=reduction_indices,
        keep_dims=True)
    log_probs = tf.subtract(safe_logits, tf.log(sum_exp))
  return log_probs 
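The max-subtraction trick keeps tf.exp from overflowing even for very large logits. A NumPy sketch of the same computation (illustrative only), using logits that would overflow a naive softmax:

import numpy as np

def logits_to_log_prob_np(logits):
    # Same max-subtraction trick as above, applied along the last axis.
    safe_logits = logits - logits.max(axis=-1, keepdims=True)
    sum_exp = np.exp(safe_logits).sum(axis=-1, keepdims=True)
    return safe_logits - np.log(sum_exp)

logits = np.array([[1000.0, 1001.0, 1002.0]])
log_probs = logits_to_log_prob_np(logits)
assert np.allclose(np.exp(log_probs).sum(axis=-1), 1.0)   # valid probabilities, no overflow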
Example #20
Source File: model.py    From DOTA_models with Apache License 2.0
def soft_min(self, x, y):
    return tf.maximum(-1.0 * (1 / (
        self.utility.FLAGS.soft_min_value + 0.0)) * tf.log(
            tf.exp(-self.utility.FLAGS.soft_min_value * x) + tf.exp(
                -self.utility.FLAGS.soft_min_value * y)), tf.zeros_like(x)) 
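soft_min computes -1/k * log(exp(-k*x) + exp(-k*y)) with k = soft_min_value, a smooth approximation of min(x, y) that becomes tighter as k grows, and then clips the result at zero. A NumPy sketch of that behaviour (the `_np` name is illustrative):

import numpy as np

def soft_min_np(x, y, k=10.0):
    # Smooth approximation of min(x, y), clipped below at zero as in the model above.
    return np.maximum(-(1.0 / k) * np.log(np.exp(-k * x) + np.exp(-k * y)),
                      np.zeros_like(x))

x, y = np.array([2.0, 5.0]), np.array([3.0, 1.0])
print(soft_min_np(x, y, k=50.0))   # approximately [2., 1.], i.e. the elementwise minimum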
Example #21
Source File: tf_utils.py    From DOTA_models with Apache License 2.0
def inverse_sigmoid_decay(k, global_step_op):
  with tf.name_scope('inverse_sigmoid_decay'):
    k = tf.constant(k, dtype=tf.float32)
    tmp = k*tf.exp(-tf.cast(global_step_op, tf.float32)/k)
    tmp = tmp / (1. + tmp)
  return tmp 
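The decay value is k * exp(-step / k) / (1 + k * exp(-step / k)), which starts near 1.0 and falls towards 0 as the global step grows; the larger k is, the slower the decay. A NumPy sketch of the schedule:

import numpy as np

def inverse_sigmoid_decay_np(k, step):
    tmp = k * np.exp(-step / k)
    return tmp / (1. + tmp)

steps = np.array([0., 1000., 5000., 10000.])
print(inverse_sigmoid_decay_np(1000.0, steps))   # decays from ~0.999 towards 0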
Example #22
Source File: accountant.py    From DOTA_models with Apache License 2.0
def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                  num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume we use Gaussian
    noise on a randomly sampled batch so we get better composition: 1. the per-
    batch privacy is computed using the privacy amplification via sampling bound;
    2. the composition is done using the composition with Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.
    Returns:
      a TensorFlow operation for updating the privacy spending.
    """

    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
      amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                        self._total_examples)
      # Use privacy amplification via sampling bound.
      # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
      # TODO(liqzhang) Add a link to a document with formal statement
      # and proof.
      amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
          tf.exp(eps) - 1.0)), [1])
      amortize_delta = tf.reshape(amortize_ratio * delta, [1])
      return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                      tf.square(amortize_eps)),
                        tf.assign_add(self._delta_sum, amortize_delta)]) 
Example #23
Source File: accountant.py    From DOTA_models with Apache License 2.0
def _compute_log_moment(self, sigma, q, moment_order):
    """Compute high moment of privacy loss.

    Args:
      sigma: the noise sigma, in the multiples of the sensitivity.
      q: the sampling ratio.
      moment_order: the order of moment.
    Returns:
      log E[exp(moment_order * X)]
    """
    pass 
Example #24
Source File: accountant.py    From DOTA_models with Apache License 2.0
def _differential_moments(self, sigma, s, t):
    """Compute 0 to t-th differential moments for Gaussian variable.

        E[(P(x+s)/P(x+s-1)-1)^t]
      = sum_{i=0}^t (t choose i) (-1)^{t-i} E[(P(x+s)/P(x+s-1))^i]
      = sum_{i=0}^t (t choose i) (-1)^{t-i} E[exp(-i*(2*x+2*s-1)/(2*sigma^2))]
      = sum_{i=0}^t (t choose i) (-1)^{t-i} exp(i(i+1-2*s)/(2 sigma^2))
    Args:
      sigma: the noise sigma, in the multiples of the sensitivity.
      s: the shift.
      t: 0 to t-th moment.
    Returns:
      0 to t-th moment as a tensor of shape [t+1].
    """
    assert t <= self._max_moment_order, ("The order of %d is out "
                                         "of the upper bound %d."
                                         % (t, self._max_moment_order))
    binomial = tf.slice(self._binomial_table, [0, 0],
                        [t + 1, t + 1])
    signs = numpy.zeros((t + 1, t + 1), dtype=numpy.float64)
    for i in range(t + 1):
      for j in range(t + 1):
        signs[i, j] = 1.0 - 2 * ((i - j) % 2)
    exponents = tf.constant([j * (j + 1.0 - 2.0 * s) / (2.0 * sigma * sigma)
                             for j in range(t + 1)], dtype=tf.float64)
    # x[i, j] = binomial[i, j] * signs[i, j] = (i choose j) * (-1)^{i-j}
    x = tf.multiply(binomial, signs)
    # y[i, j] = x[i, j] * exp(exponents[j])
    #         = (i choose j) * (-1)^{i-j} * exp(j(j+1-2s)/(2 sigma^2))
    # Note: this computation is done by broadcasting pointwise multiplication
    # between [t+1, t+1] tensor and [t+1] tensor.
    y = tf.multiply(x, tf.exp(exponents))
    # z[i] = sum_j y[i, j]
    #      = sum_j (i choose j) * (-1)^{i-j} * exp(j(j+1-2s)/(2 sigma^2))
    z = tf.reduce_sum(y, 1)
    return z 
Example #25
Source File: accountant.py    From DOTA_models with Apache License 2.0
def _compute_log_moment(self, sigma, q, moment_order):
    """Compute high moment of privacy loss.

    Args:
      sigma: the noise sigma, in the multiples of the sensitivity.
      q: the sampling ratio.
      moment_order: the order of moment.
    Returns:
      log E[exp(moment_order * X)]
    """
    assert moment_order <= self._max_moment_order, ("The order of %d is out "
                                                    "of the upper bound %d."
                                                    % (moment_order,
                                                       self._max_moment_order))
    binomial_table = tf.slice(self._binomial_table, [moment_order, 0],
                              [1, moment_order + 1])
    # qs = [1 q q^2 ... q^L] = exp([0 1 2 ... L] * log(q))
    qs = tf.exp(tf.constant([i * 1.0 for i in range(moment_order + 1)],
                            dtype=tf.float64) * tf.cast(
                                tf.log(q), dtype=tf.float64))
    moments0 = self._differential_moments(sigma, 0.0, moment_order)
    term0 = tf.reduce_sum(binomial_table * qs * moments0)
    moments1 = self._differential_moments(sigma, 1.0, moment_order)
    term1 = tf.reduce_sum(binomial_table * qs * moments1)
    return tf.squeeze(tf.log(tf.cast(q * term0 + (1.0 - q) * term1,
                                     tf.float64))) 
Example #26
Source File: faster_rcnn_box_coder.py    From DOTA_models with Apache License 2.0
def _decode(self, rel_codes, anchors):
    """Decode relative codes to boxes.

    Args:
      rel_codes: a tensor representing N anchor-encoded boxes.
      anchors: BoxList of anchors.

    Returns:
      boxes: BoxList holding N bounding boxes.
    """
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()

    ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
    if self._scale_factors:
      ty /= self._scale_factors[0]
      tx /= self._scale_factors[1]
      th /= self._scale_factors[2]
      tw /= self._scale_factors[3]
    w = tf.exp(tw) * wa
    h = tf.exp(th) * ha
    ycenter = ty * ha + ycenter_a
    xcenter = tx * wa + xcenter_a
    ymin = ycenter - h / 2.
    xmin = xcenter - w / 2.
    ymax = ycenter + h / 2.
    xmax = xcenter + w / 2.
    return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))) 
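A worked decode of a single anchor/code pair helps make the formulas concrete. The numbers below are hypothetical and assume no scale factors:

import numpy as np

# One anchor given as (ycenter, xcenter, height, width) and one code (ty, tx, th, tw).
ycenter_a, xcenter_a, ha, wa = 0.5, 0.5, 0.2, 0.4
ty, tx, th, tw = 0.1, -0.1, np.log(1.5), np.log(0.5)

w = np.exp(tw) * wa              # 0.2  (half the anchor width)
h = np.exp(th) * ha              # 0.3  (1.5x the anchor height)
ycenter = ty * ha + ycenter_a    # 0.52
xcenter = tx * wa + xcenter_a    # 0.46
ymin, xmin = ycenter - h / 2., xcenter - w / 2.
ymax, xmax = ycenter + h / 2., xcenter + w / 2.
print([ymin, xmin, ymax, xmax])  # [0.37, 0.36, 0.67, 0.56]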
Example #27
Source File: square_box_coder.py    From DOTA_models with Apache License 2.0
def _decode(self, rel_codes, anchors):
    """Decodes relative codes to boxes.

    Args:
      rel_codes: a tensor representing N anchor-encoded boxes.
      anchors: BoxList of anchors.

    Returns:
      boxes: BoxList holding N bounding boxes.
    """
    ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
    la = tf.sqrt(ha * wa)

    ty, tx, tl = tf.unstack(tf.transpose(rel_codes))
    if self._scale_factors:
      ty /= self._scale_factors[0]
      tx /= self._scale_factors[1]
      tl /= self._scale_factors[2]
    l = tf.exp(tl) * la
    ycenter = ty * la + ycenter_a
    xcenter = tx * la + xcenter_a
    ymin = ycenter - l / 2.
    xmin = xcenter - l / 2.
    ymax = ycenter + l / 2.
    xmax = xcenter + l / 2.
    return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))) 
Example #28
Source File: distributions.py    From DOTA_models with Apache License 2.0
def logp(self, bin_counts):
    """Compute the log probability for the counts in the bin, under the model.

    Args:
      bin_counts: array-like integer counts

    Returns:
      The log-probability under the Poisson models for each element of
      bin_counts.
    """
    k = tf.to_float(bin_counts)
    # log poisson(k, r) = log(r^k * e^(-r) / k!) = k log(r) - r - log k!
    # log poisson(k, r=exp(x)) = k * x - exp(x) - lgamma(k + 1)
    return k * self.logr - tf.exp(self.logr) - tf.lgamma(k + 1) 
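Here self.logr is the log of the Poisson rate, so the expression is k * log(r) - r - log(k!). A NumPy/math sketch of the same log-pmf (the `_np` name is illustrative):

import math
import numpy as np

def poisson_logp_np(k, logr):
    # log Poisson(k; rate=exp(logr)) = k * logr - exp(logr) - log(k!)
    return k * logr - np.exp(logr) - np.array([math.lgamma(ki + 1) for ki in k])

k = np.array([0., 1., 5.])
print(poisson_logp_np(k, np.log(2.0)))   # log-pmf of k under a Poisson with rate 2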
Example #29
Source File: model.py    From DOTA_models with Apache License 2.0
def _BuildMotionKernel(self):
    image = self.images[-2]
    diff = self.diffs[-2]
    shape = image.get_shape().as_list()
    assert shape[1] == shape[2] and shape[1] == 128
    batch_size = shape[0]

    net = tf.concat(axis=3, values=[image, diff])
    with tf.variable_scope('motion_encoder'):
      with slim.arg_scope([slim.conv2d], padding='VALID'):
        net = slim.conv2d(net, 96, [5, 5], stride=1)
        net = slim.max_pool2d(net, [2, 2])
        net = slim.conv2d(net, 96, [5, 5], stride=1)
        net = slim.max_pool2d(net, [2, 2])
        net = slim.conv2d(net, 128, [5, 5], stride=1)
        net = slim.conv2d(net, 128, [5, 5], stride=1)
        net = slim.max_pool2d(net, [2, 2])
        net = slim.conv2d(net, 256, [4, 4], stride=1)
        net = slim.conv2d(net, 256, [3, 3], stride=1)

        z = tf.reshape(net, shape=[batch_size, -1])
        self.z_mean, self.z_stddev_log = tf.split(
            axis=1, num_or_size_splits=2, value=z)
        self.z_stddev = tf.exp(self.z_stddev_log)

        epsilon = tf.random_normal(
            self.z_mean.get_shape().as_list(), 0, 1, dtype=tf.float32)
        kernel = self.z_mean + tf.multiply(self.z_stddev, epsilon)

        width = int(math.sqrt(kernel.get_shape().as_list()[1] // 128))
        kernel = tf.reshape(kernel, [batch_size, width, width, 128])
    with tf.variable_scope('kernel_decoder'):
      with slim.arg_scope([slim.conv2d], padding='SAME'):
        kernel = slim.conv2d(kernel, 128, [5, 5], stride=1)
        self.kernel = slim.conv2d(kernel, 128, [5, 5], stride=1)

    sys.stderr.write('kernel shape: %s\n' % kernel.get_shape()) 
Example #30
Source File: VariationalAutoencoder.py    From DOTA_models with Apache License 2.0
def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):
        self.n_input = n_input
        self.n_hidden = n_hidden

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1'])
        self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])

        # sample from gaussian distribution
        eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
        self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

        self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])

        # cost
        reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
                                           - tf.square(self.z_mean)
                                           - tf.exp(self.z_log_sigma_sq), 1)
        self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)