Python math.exp() Examples

The following are 30 code examples of math.exp(), drawn from open-source projects. The source file and project license are listed above each example. You may also want to check out the other available functions and classes of the math module.
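Before the project code, here is a minimal, self-contained sketch (not taken from any of the projects below) of what math.exp() itself does: it returns e raised to the given power as a float, and per the standard library docs it is usually more accurate than math.e ** x.

import math

print(math.exp(0))     # 1.0
print(math.exp(1))     # 2.718281828459045 (Euler's number e)
print(math.exp(-1))    # 0.36787944117144233, i.e. 1/e

try:
    math.exp(1000)     # arguments above roughly 709.78 overflow a double
except OverflowError as err:
    print(err)         # "math range error"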
Example #1
Source File: common_layers.py    From fine-lm with MIT License
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
Example #2
Source File: train_csd.py    From CSD-SSD with MIT License
def rampweight(iteration):
    ramp_up_end = 32000
    ramp_down_start = 100000

    if iteration < ramp_up_end:
        ramp_weight = math.exp(-5 * math.pow((1 - iteration / ramp_up_end), 2))
    elif iteration > ramp_down_start:
        ramp_weight = math.exp(-12.5 * math.pow((1 - (120000 - iteration) / 20000), 2))
    else:
        ramp_weight = 1

    if iteration == 0:
        ramp_weight = 0

    return ramp_weight 
Example #3
Source File: run_cmrc2019_baseline.py    From cmrc2019 with Creative Commons Attribution Share Alike 4.0 International
def _compute_softmax(scores):
    """Compute softmax probability over raw logits."""
    if not scores:
        return []

    max_score = None
    for score in scores:
        if max_score is None or score > max_score:
            max_score = score

    exp_scores = []
    total_sum = 0.0
    for score in scores:
        x = math.exp(score - max_score)
        exp_scores.append(x)
        total_sum += x

    probs = []
    for score in exp_scores:
        probs.append(score / total_sum)
    return probs 
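The max-score subtraction above is the standard trick for keeping math.exp() from overflowing on large logits; shifting every score by the same constant leaves the resulting probabilities unchanged. A quick hypothetical check (the scores below are made up, not from the cmrc2019 code):

scores = [1000.0, 1001.0, 1002.0]    # large enough that math.exp(1000.0) alone would overflow
probs = _compute_softmax(scores)
print([round(p, 3) for p in probs])  # [0.09, 0.245, 0.665]; the probabilities sum to 1 up to rounding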
Example #4
Source File: utils.py    From AdaptiveWingLoss with Apache License 2.0
def _gaussian(
        size=3, sigma=0.25, amplitude=1, normalize=False, width=None,
        height=None, sigma_horz=None, sigma_vert=None, mean_horz=0.5,
        mean_vert=0.5):
    # handle some defaults
    if width is None:
        width = size
    if height is None:
        height = size
    if sigma_horz is None:
        sigma_horz = sigma
    if sigma_vert is None:
        sigma_vert = sigma
    center_x = mean_horz * width + 0.5
    center_y = mean_vert * height + 0.5
    gauss = np.empty((height, width), dtype=np.float32)
    # generate kernel
    for i in range(height):
        for j in range(width):
            gauss[i][j] = amplitude * math.exp(-(math.pow((j + 1 - center_x) / (
                sigma_horz * width), 2) / 2.0 + math.pow((i + 1 - center_y) / (sigma_vert * height), 2) / 2.0))
    if normalize:
        gauss = gauss / np.sum(gauss)
    return gauss 
Example #5
Source File: run_utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def evaluate(mod, data_iter, epoch, log_interval):
    """ Run evaluation on cpu. """
    start = time.time()
    total_L = 0.0
    nbatch = 0
    density = 0
    mod.set_states(value=0)
    for batch in data_iter:
        mod.forward(batch, is_train=False)
        outputs = mod.get_outputs(merge_multi_context=False)
        states = outputs[:-1]
        total_L += outputs[-1][0]
        mod.set_states(states=states)
        nbatch += 1
        # don't include padding data in the test perplexity
        density += batch.data[1].mean()
        if (nbatch + 1) % log_interval == 0:
            logging.info("Eval batch %d loss : %.7f" % (nbatch, (total_L / density).asscalar()))
    data_iter.reset()
    loss = (total_L / density).asscalar()
    ppl = math.exp(loss) if loss < 100 else 1e37
    end = time.time()
    logging.info('Iter[%d]\t\t CE loss %.7f, ppl %.7f. Eval duration = %.2f seconds ' % \
                 (epoch, loss, ppl, end - start))
    return loss 
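Perplexity here is simply math.exp() applied to the average cross-entropy loss (in nats); the loss < 100 guard substitutes a large sentinel rather than computing an astronomically large exponential. A toy illustration with a made-up loss value:

import math

loss = 4.5            # hypothetical average cross-entropy in nats
ppl = math.exp(loss)
print(round(ppl, 1))  # 90.0 -- roughly "as uncertain as picking among 90 tokens"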
Example #6
Source File: accountant.py    From DOTA_models with Apache License 2.0
def _compute_delta(self, log_moments, eps):
    """Compute delta for given log_moments and eps.

    Args:
      log_moments: the log moments of privacy loss, in the form of pairs
        of (moment_order, log_moment)
      eps: the target epsilon.
    Returns:
      delta
    """
    min_delta = 1.0
    for moment_order, log_moment in log_moments:
      if math.isinf(log_moment) or math.isnan(log_moment):
        sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
        continue
      if log_moment < moment_order * eps:
        min_delta = min(min_delta,
                        math.exp(log_moment - moment_order * eps))
    return min_delta 
Example #7
Source File: gaussian_moments.py    From DOTA_models with Apache License 2.0
def compute_a(sigma, q, lmbd, verbose=False):
  lmbd_int = int(math.ceil(lmbd))
  if lmbd_int == 0:
    return 1.0

  a_lambda_first_term_exact = 0
  a_lambda_second_term_exact = 0
  for i in range(lmbd_int + 1):
    coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
    s1, s2 = 0, 0
    for j in range(i + 1):
      coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
      s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
      s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
    a_lambda_first_term_exact += coef_i * s1
    a_lambda_second_term_exact += coef_i * s2

  a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
                    q * a_lambda_second_term_exact)
  if verbose:
    print "A: by binomial expansion    {} = {} + {}".format(
        a_lambda_exact,
        (1.0 - q) * a_lambda_first_term_exact,
        q * a_lambda_second_term_exact)
  return _to_np_float64(a_lambda_exact) 
Example #8
Source File: analysis.py    From DOTA_models with Apache License 2.0
def compute_q_noisy_max(counts, noise_eps):
  """returns ~ Pr[outcome != winner].

  Args:
    counts: a list of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # For noisy max, we only get an upper bound.
  # Pr[ j beats i* ] <= (2 + gap(j, i*)) / (4 * exp(gap(j, i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables

  winner = np.argmax(counts)
  counts_normalized = noise_eps * (counts - counts[winner])
  counts_rest = np.array(
      [counts_normalized[i] for i in range(len(counts)) if i != winner])
  q = 0.0
  for c in counts_rest:
    gap = -c
    q += (gap + 2.0) / (4.0 * math.exp(gap))
  return min(q, 1.0 - (1.0/len(counts))) 
Example #9
Source File: analysis.py    From DOTA_models with Apache License 2.0
def compute_q_noisy_max_approx(counts, noise_eps):
  """returns ~ Pr[outcome != winner].

  Args:
    counts: a list of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # For noisy max, we only get an upper bound.
  # Pr[ j beats i* ] <= (2 + gap(j, i*)) / (4 * exp(gap(j, i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables
  # This code uses an approximation that is faster and easier
  # to get local sensitivity bound on.

  winner = np.argmax(counts)
  counts_normalized = noise_eps * (counts - counts[winner])
  counts_rest = np.array(
      [counts_normalized[i] for i in range(len(counts)) if i != winner])
  gap = -max(counts_rest)
  q = (len(counts) - 1) * (gap + 2.0) / (4.0 * math.exp(gap))
  return min(q, 1.0 - (1.0/len(counts))) 
Example #10
Source File: analysis.py    From DOTA_models with Apache License 2.0
def smoothed_sens(counts, noise_eps, l, beta):
  """Compute beta-smooth sensitivity.

  Args:
    counts: array of scores
    noise_eps: noise parameter
    l: moment of interest
    beta: smoothness parameter
  Returns:
    smooth_sensitivity: a beta smooth upper bound
  """
  k = 0
  smoothed_sensitivity = sens_at_k(counts, noise_eps, l, k)
  while k < max(counts):
    k += 1
    sensitivity_at_k = sens_at_k(counts, noise_eps, l, k)
    smoothed_sensitivity = max(
        smoothed_sensitivity,
        math.exp(-beta * k) * sensitivity_at_k)
    if sensitivity_at_k == 0.0:
      break
  return smoothed_sensitivity 
Example #11
Source File: functional.py    From audio with BSD 2-Clause "Simplified" License
def mu_law_decoding(
        x_mu: Tensor,
        quantization_channels: int
) -> Tensor:
    r"""Decode mu-law encoded signal.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This expects an input with values between 0 and quantization_channels - 1
    and returns a signal scaled between -1 and 1.

    Args:
        x_mu (Tensor): Input tensor
        quantization_channels (int): Number of channels

    Returns:
        Tensor: Input after mu-law decoding
    """
    mu = quantization_channels - 1.0
    if not x_mu.is_floating_point():
        x_mu = x_mu.to(torch.float)
    mu = torch.tensor(mu, dtype=x_mu.dtype)
    x = ((x_mu) / mu) * 2 - 1.0
    x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
    return x 
Example #12
Source File: gaussian_moments.py    From DOTA_models with Apache License 2.0
def _compute_delta(log_moments, eps):
  """Compute delta for given log_moments and eps.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    eps: the target epsilon.
  Returns:
    delta
  """
  min_delta = 1.0
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    if log_moment < moment_order * eps:
      min_delta = min(min_delta,
                      math.exp(log_moment - moment_order * eps))
  return min_delta 
Example #13
Source File: dqn.py    From Pytorch-Project-Template with MIT License
def select_action(self, state):
        """
        The action selection function: it either uses the model to choose an action or samples one uniformly at random.
        :param state: current state of the model
        :return:
        """
        if self.cuda:
            state = state.cuda()
        sample = random.random()
        # epsilon anneals from eps_start down to eps_end as iterations increase
        eps_threshold = self.config.eps_end + (self.config.eps_start - self.config.eps_end) * math.exp(
            -1. * self.current_iteration / self.config.eps_decay)
        self.current_iteration += 1
        if sample > eps_threshold:
            with torch.no_grad():
                return self.policy_model(state).max(1)[1].view(1, 1)
        else:
            return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long) 
Example #14
Source File: gale_church.py    From razzy-spinner with GNU General Public License v3.0
def erfcc(x):
        """Complementary error function."""
        z = abs(x)
        t = 1 / (1 + 0.5 * z)
        r = t * math.exp(-z * z -
                         1.26551223 + t *
                         (1.00002368 + t *
                          (.37409196 + t *
                           (.09678418 + t *
                            (-.18628806 + t *
                             (.27886807 + t *
                              (-1.13520398 + t *
                               (1.48851587 + t *
                                (-.82215223 + t * .17087277)))))))))
        if x >= 0.:
            return r
        else:
            return 2. - r 
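The polynomial above is the classic Numerical Recipes rational approximation of the complementary error function. On Python 3.2+ it can be sanity-checked against math.erfc; this comparison is illustrative and not part of the original gale_church.py:

import math

for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    # the approximation is accurate to roughly 1e-7, so a 1e-6 tolerance is comfortable
    assert abs(erfcc(x) - math.erfc(x)) < 1e-6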
Example #15
Source File: jmp_score.py    From python-esppy with Apache License 2.0
def exp(x):
    try:
        return math.exp(x)
    except OverflowError:
        return float('inf') 
Example #16
Source File: node.py    From tensortrade with Apache License 2.0
def __init__(self,
                 com: float = None,
                 span: float = None,
                 halflife: float = None,
                 alpha: float = None,
                 warmup: int = 0,
                 adjust: bool = True,
                 ignore_na: bool = False,
                 name: str = None):
        super().__init__(name)
        self.com = com
        self.span = span
        self.halflife = halflife

        self.warmup = warmup
        self.adjust = adjust
        self.ignore_na = ignore_na

        if alpha:
            assert 0 < alpha <= 1
            self.alpha = alpha
        elif com:
            assert com >= 0
            self.alpha = 1 / (1 + com)
        elif span:
            assert span >= 1
            self.alpha = 2 / (1 + span)
        elif halflife:
            assert halflife > 0
            self.alpha = 1 - math.exp(math.log(0.5) / halflife)

        self.history = []
        self.weights = [] 
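The halflife branch above derives the EWMA smoothing factor from alpha = 1 - exp(ln(0.5) / halflife), which means the weight left on an observation after exactly halflife steps is one half. A quick standalone check of that identity (not part of the tensortrade node):

import math

halflife = 10.0
alpha = 1 - math.exp(math.log(0.5) / halflife)
print(round((1 - alpha) ** halflife, 12))   # 0.5 -- half the weight remains after `halflife` steps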
Example #17
Source File: distiller.py    From overhaul-distillation with MIT License
def get_margin_from_BN(bn):
    margin = []
    std = bn.weight.data
    mean = bn.bias.data
    for (s, m) in zip(std, mean):
        s = abs(s.item())
        m = m.item()
        if norm.cdf(-m / s) > 0.001:
            margin.append(- s * math.exp(- (m / s) ** 2 / 2) / math.sqrt(2 * math.pi) / norm.cdf(-m / s) + m)
        else:
            margin.append(-3 * s)

    return torch.FloatTensor(margin).to(std.device) 
Example #18
Source File: probability.py    From razzy-spinner with GNU General Public License v3.0
def prob(self, sample):
        if self._log:
            if sample not in self._prob_dict: return 0
            else: return math.exp(self._prob_dict[sample])
        else:
            return self._prob_dict.get(sample, 0) 
Example #19
Source File: functional.py    From audio with BSD 2-Clause "Simplified" License
def _dB2Linear(x: float) -> float:
    return math.exp(x * math.log(10) / 20.0) 
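Since exp(x * ln(10) / 20) is just 10 ** (x / 20), this is the usual decibel-to-amplitude conversion. A small illustrative check (not part of torchaudio's functional.py):

import math

for db in (-6.0, 0.0, 20.0):
    assert abs(_dB2Linear(db) - 10 ** (db / 20.0)) < 1e-12

print(round(_dB2Linear(20.0), 6))   # 10.0 -- a 20 dB gain corresponds to a 10x amplitude ratio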
Example #20
Source File: probability.py    From razzy-spinner with GNU General Public License v3.0
def prob(self, sample):
        # inherit documentation
        i = self._sample_dict.get(sample)
        if i != None:
            if self._logs:
                return exp(self._data[i])
            else:
                return self._data[i]
        else:
            return 0.0 
Example #21
Source File: probability.py    From razzy-spinner with GNU General Public License v3.0
def update(self, sample, prob, log=True):
        """
        Update the probability for the given sample. This may cause the object
        to stop being a valid probability distribution; the user must
        ensure that they update the sample probabilities such that all samples
        have probabilities between 0 and 1 and that all probabilities sum to
        one.

        @param sample: the sample for which to update the probability
        @type sample: C{any}
        @param prob: the new probability
        @type prob: C{float}
        @param log: is the probability already logged
        @type log: C{bool}
        """
        i = self._sample_dict.get(sample)
        assert i != None
        if self._logs:
            if log: self._data[i] = prob
            else:   self._data[i] = math.log(prob)  # math.log, since the `log` parameter shadows any imported log()
        else:
            if log: self._data[i] = exp(prob)
            else:   self._data[i] = prob

##//////////////////////////////////////////////////////
##  Probability Distribution Operations
##////////////////////////////////////////////////////// 
Example #22
Source File: probability.py    From razzy-spinner with GNU General Public License v3.0
def add_logs(logx, logy):
    """
    Given two numbers C{logx}=M{log(x)} and C{logy}=M{log(y)}, return
    M{log(x+y)}.  Conceptually, this is the same as returning
    M{log(exp(C{logx})+exp(C{logy}))}, but the actual implementation
    avoids overflow errors that could result from direct computation.
    """
    if (logx < logy + _ADD_LOGS_MAX_DIFF):
        return logy
    if (logy < logx + _ADD_LOGS_MAX_DIFF):
        return logx
    base = min(logx, logy)
    return base + math.log(math.exp(logx-base) + math.exp(logy-base)) 
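add_logs is the log-sum-exp trick: it adds two probabilities stored as logs without ever leaving log space, shifting by a common base so the intermediate exponentials stay representable (the early returns rely on the module-level _ADD_LOGS_MAX_DIFF constant referenced above). A hypothetical usage, assuming add_logs is imported from this module:

import math

logx, logy = math.log(0.2), math.log(0.3)
print(math.exp(add_logs(logx, logy)))   # ~0.5, i.e. log(0.2 + 0.3) recovered from the two log-probabilities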
Example #23
Source File: probability.py    From razzy-spinner with GNU General Public License v3.0
def set_logprob(self, logprob):
        """
        Set the log probability associated with this object to
        C{logprob}.  I.e., set the probability associated with this
        object to C{exp(logprob)}.
        @param logprob: The new log probability
        @type logprob: C{float}
        """
        self.__logprob = logprob
        self.__prob = None 
Example #24
Source File: probability.py    From razzy-spinner with GNU General Public License v3.0
def smoothedNr(self, r):
        """
        Return the number of samples with count r.

        :param r: The amount of frequency.
        :type r: int
        :rtype: float
        """

        # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
        # relationship)
        # Estimate a and b by simple linear regression on the logarithmic
        # form of the equation: log(Nr) = log(a) + b*log(r)

        return math.exp(self._intercept + self._slope * math.log(r)) 
Example #25
Source File: kaldi.py    From audio with BSD 2-Clause "Simplified" License
def inverse_mel_scale(mel_freq: Tensor) -> Tensor:
    return 700.0 * ((mel_freq / 1127.0).exp() - 1.0) 
Example #26
Source File: kaldi.py    From audio with BSD 2-Clause "Simplified" License
def inverse_mel_scale_scalar(mel_freq: float) -> float:
    return 700.0 * (math.exp(mel_freq / 1127.0) - 1.0) 
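This inverts the familiar mel mapping mel = 1127 * ln(1 + f / 700). A quick round-trip check; the forward mel_scale_scalar helper below is written out here for illustration and simply mirrors the convention assumed by the inverse:

import math

def mel_scale_scalar(freq: float) -> float:
    # forward mel mapping under the same 1127 / 700 convention
    return 1127.0 * math.log(1.0 + freq / 700.0)

print(round(inverse_mel_scale_scalar(mel_scale_scalar(440.0)), 6))   # 440.0 -- the round trip recovers the frequency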
Example #27
Source File: MRAN.py    From transferlearning with MIT License
def train(epoch, model, source_loader, target_loader):
    # The learning rate of the final fully connected layer is 10x that of the earlier layers
    LEARNING_RATE = args.lr / math.pow((1 + 10 * (epoch - 1) / args.epochs), 0.75)
    print("learning rate:", LEARNING_RATE)
    if args.diff_lr:
        optimizer = torch.optim.SGD([
        {'params': model.sharedNet.parameters()},
        {'params': model.Inception.parameters(), 'lr': LEARNING_RATE},
        ], lr=LEARNING_RATE / 10, momentum=args.momentum, weight_decay=args.l2_decay)
    else:
        optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=args.momentum,weight_decay = args.l2_decay)
    model.train()
    tgt_iter = iter(target_loader)
    for batch_idx, (source_data, source_label) in enumerate(source_loader):
        try:
            target_data, _ = next(tgt_iter)
        except StopIteration:
            # the target loader is exhausted; restart it
            tgt_iter = iter(target_loader)
            target_data, _ = next(tgt_iter)
        
        if args.cuda:
            source_data, source_label = source_data.cuda(), source_label.cuda()
            target_data = target_data.cuda()
        optimizer.zero_grad()

        s_output, mmd_loss = model(source_data, target_data, source_label)
        soft_loss = F.nll_loss(F.log_softmax(s_output, dim=1), source_label)
        # print((2 / (1 + math.exp(-10 * (epoch) / args.epochs)) - 1))
        if args.gamma == 1:
            gamma = 2 / (1 + math.exp(-10 * (epoch) / args.epochs)) - 1
        if args.gamma == 2:
            gamma = epoch /args.epochs
        loss = soft_loss + gamma * mmd_loss
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tlabel_Loss: {:.6f}\tmmd_Loss: {:.6f}'.format(
                epoch, batch_idx * len(source_data), len(source_loader.dataset),
                100. * batch_idx / len(source_loader), loss.item(), soft_loss.item(), mmd_loss.item())) 
Example #28
Source File: common_layers.py    From fine-lm with MIT License
def relu_density_logit(x, reduce_dims):
  """logit(density(x)).

  Useful for histograms.

  Args:
    x: a Tensor, typically the output of tf.relu
    reduce_dims: a list of dimensions

  Returns:
    a Tensor
  """
  frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)
  scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))
  return scaled 
Example #29
Source File: common_layers.py    From fine-lm with MIT License
def inverse_exp_decay(max_step, min_value=0.01):
  """Inverse-decay exponentially from 0.01 to 1.0 reached at max_step."""
  inv_base = tf.exp(tf.log(min_value) / float(max_step))
  step = tf.to_float(tf.train.get_global_step())
  return inv_base**tf.maximum(float(max_step) - step, 0.0) 
Example #30
Source File: jmp_score.py    From python-esppy with Apache License 2.0
def squash(x):
    return 1.0 / (1.0 + exp(x))


# Returns true if the numbers are identical using straight comparison.
# If necessary, replace with a suitable comparison using a value of EPSILON
# appropriate for your domain.