Python math.exp() Examples

The following code examples show how to use math.exp(). They are extracted from open source Python projects.
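
Before the project examples, a minimal standalone sketch of math.exp() itself, covering only standard-library behavior:

import math

# math.exp(x) returns e**x as a float.
print(math.exp(0))   # 1.0
print(math.exp(1))   # 2.718281828459045, i.e. math.e
print(math.exp(-1))  # 0.36787944117144233, i.e. 1/math.e

# math.log is its inverse for positive arguments.
x = 12.5
assert abs(math.log(math.exp(x)) - x) < 1e-12

# Arguments above ~709.78 exceed the double range and raise OverflowError.
try:
    math.exp(1000)
except OverflowError:
    print("math.exp(1000) overflows")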

Example 1
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0
def parse_mtestfile(fname):
    """Parse a file with test values

    -- starts a comment
    blank lines, or lines containing only a comment, are ignored
    other lines are expected to have the form
      id fn arg -> expected [flag]*

    """
    with open(fname) as fp:
        for line in fp:
            # strip comments, and skip blank lines
            if '--' in line:
                line = line[:line.index('--')]
            if not line.strip():
                continue

            lhs, rhs = line.split('->')
            id, fn, arg = lhs.split()
            rhs_pieces = rhs.split()
            exp = rhs_pieces[0]
            flags = rhs_pieces[1:]

            yield (id, fn, float(arg), float(exp), flags) 
Example 2
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0
def testFrexp(self):
        self.assertRaises(TypeError, math.frexp)

        def testfrexp(name, result, expected):
            (mant, exp), (emant, eexp) = result, expected
            if abs(mant-emant) > eps or exp != eexp:
                self.fail('%s returned %r, expected %r'%\
                          (name, (mant, exp), (emant,eexp)))

        testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
        testfrexp('frexp(0)', math.frexp(0), (0, 0))
        testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
        testfrexp('frexp(2)', math.frexp(2), (0.5, 2))

        self.assertEqual(math.frexp(INF)[0], INF)
        self.assertEqual(math.frexp(NINF)[0], NINF)
        self.assertTrue(math.isnan(math.frexp(NAN)[0])) 
Example 3
Project: pyblish-win   Author: pyblish   File: test_random.py    GNU Lesser General Public License v3.0
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
    # Reflection to right half of complex plane
    if z < 0.5:
        return pi / sin(pi*z) / gamma(1.0-z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z+1.0),
        771.3234287757674 / (z+2.0),
        -176.6150291498386 / (z+3.0),
        12.50734324009056 / (z+4.0),
        -0.1385710331296526 / (z+5.0),
        0.9934937113930748e-05 / (z+6.0),
        0.1659470187408462e-06 / (z+7.0),
    ]) 
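
A quick sanity check of this Lanczos approximation, assuming the star-imports the test file uses (pi, sin, exp, fsum from math) are in scope:

from math import pi, sin, exp, fsum  # names the snippet above expects in scope

print(gamma(5))    # ~24.0, since gamma(n) == (n-1)! for positive integers
print(gamma(0.5))  # ~1.7724538509055159, i.e. sqrt(pi)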
Example 4
Project: Py-Utils   Author: LonamiWebs   File: statis.py    MIT License
def dpois(lmbda):
  """Poisson Distribution
     lmbda = average number of successes per unit interval

     Used to determine the probability of an amount of
     successes occurring in a fixed interval (time, area…)

     This doesn't return a value, but rather the specified Poisson function
  """
  def p(k):
    if 0 <= k:
      return (exp(-lmbda) * lmbda**k) / factorial(k)
    else:
      return 0

  # Allow accessing the used 'lmbda' value from the function
  p.__dict__['lmbda'] = lmbda
  p.__dict__['expected'] = lmbda
  p.__dict__['variance'] = lmbda
  return p 
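
A usage sketch for dpois; the snippet assumes exp and factorial are imported from math:

from math import exp, factorial  # imports the snippet above relies on

p = dpois(3.0)  # Poisson pmf with an average of 3 successes per interval
print(p(0))     # exp(-3) ~ 0.0498
print(p(2))     # ~0.2240
print(p.lmbda, p.expected, p.variance)  # all 3.0 for a Poisson distribution
print(sum(p(k) for k in range(50)))     # ~1.0, the pmf sums to one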
Example 5
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: run_utils.py    Apache License 2.0
def evaluate(mod, data_iter, epoch, log_interval):
    """ Run evaluation on cpu. """
    start = time.time()
    total_L = 0.0
    nbatch = 0
    density = 0
    mod.set_states(value=0)
    for batch in data_iter:
        mod.forward(batch, is_train=False)
        outputs = mod.get_outputs(merge_multi_context=False)
        states = outputs[:-1]
        total_L += outputs[-1][0]
        mod.set_states(states=states)
        nbatch += 1
        # don't include padding data in the test perplexity
        density += batch.data[1].mean()
        if (nbatch + 1) % log_interval == 0:
            logging.info("Eval batch %d loss : %.7f" % (nbatch, (total_L / density).asscalar()))
    data_iter.reset()
    loss = (total_L / density).asscalar()
    ppl = math.exp(loss) if loss < 100 else 1e37
    end = time.time()
    logging.info('Iter[%d]\t\t CE loss %.7f, ppl %.7f. Eval duration = %.2f seconds ' % \
                 (epoch, loss, ppl, end - start))
    return loss 
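
The `loss < 100` guard above exists because math.exp raises OverflowError once its argument passes roughly 709.78; a standalone sketch of the same pattern:

import math

def safe_perplexity(loss, cap=1e37):
    # math.exp overflows for arguments above ~709.78, so clamp large losses
    return math.exp(loss) if loss < 100 else cap

print(safe_perplexity(4.6))    # ~99.48
print(safe_perplexity(250.0))  # 1e+37 instead of an OverflowError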
Example 6
Project: MFEprimer_linux   Author: nick-youngblut   File: GelMobility.py    MIT License
def cal_mobility(X, gel_conc=1.0, ref_mobility=50, formula='Helling'):
    '''Calculate mobility based on size'''
    import math
    gel_para_dict, a, b, k = load_gel_para_dict(gel_conc=gel_conc, formula=formula)

    X = float(X)
    gel_conc = float(gel_conc)

    # X: size (bp)
    # ref_mobility: the mobility distance of the fastest DNA segment

    if formula == 'Helling':
        Y = a - b * math.log(X + k)
    else:
        # only the 'Helling' formula is implemented; raise instead of
        # falling through with Y undefined
        raise ValueError('unsupported formula: %s' % formula)
        #Y = math.exp(a - b * math.log(X + k))

    # Y: the relative mobility = mobility distance / ref_mobility
    Y = Y * ref_mobility
    # Y: the mobility distance
    return round(Y, 1) 
Example 7
Project: MFEprimer_linux   Author: nick-youngblut   File: GelMobility.py    MIT License
def cal_size(Y, gel_conc=1.0, ref_mobility=50, formula='Helling'):
    '''Predict size based on the relative mobility'''
    import math

    gel_para_dict, a, b, k = load_gel_para_dict(gel_conc=gel_conc, formula=formula)

    # Y: the mobility distance
    Y = Y / ref_mobility
    # ref_mobility: the mobility distance of the fastest DNA segment
    if formula == 'Helling':
        #Y = a - b * math.log(X + k)
        X = math.exp((a - Y) / b) - k
    else:
        # only the 'Helling' formula is implemented; raise instead of
        # returning with X undefined
        raise ValueError('unsupported formula: %s' % formula)

    return int(round(X, 0)) 
Example 8
Project: DOTA_models   Author: ringringyi   File: accountant.py    Apache License 2.0
def _compute_delta(self, log_moments, eps):
    """Compute delta for given log_moments and eps.

    Args:
      log_moments: the log moments of privacy loss, in the form of pairs
        of (moment_order, log_moment)
      eps: the target epsilon.
    Returns:
      delta
    """
    min_delta = 1.0
    for moment_order, log_moment in log_moments:
      if math.isinf(log_moment) or math.isnan(log_moment):
        sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
        continue
      if log_moment < moment_order * eps:
        min_delta = min(min_delta,
                        math.exp(log_moment - moment_order * eps))
    return min_delta 
Example 9
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    Apache License 2.0
def compute_a(sigma, q, lmbd, verbose=False):
  lmbd_int = int(math.ceil(lmbd))
  if lmbd_int == 0:
    return 1.0

  a_lambda_first_term_exact = 0
  a_lambda_second_term_exact = 0
  for i in xrange(lmbd_int + 1):
    coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
    s1, s2 = 0, 0
    for j in xrange(i + 1):
      coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
      s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
      s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
    a_lambda_first_term_exact += coef_i * s1
    a_lambda_second_term_exact += coef_i * s2

  a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
                    q * a_lambda_second_term_exact)
  if verbose:
    print "A: by binomial expansion    {} = {} + {}".format(
        a_lambda_exact,
        (1.0 - q) * a_lambda_first_term_exact,
        q * a_lambda_second_term_exact)
  return _to_np_float64(a_lambda_exact) 
Example 10
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    Apache License 2.0
def _compute_delta(log_moments, eps):
  """Compute delta for given log_moments and eps.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    eps: the target epsilon.
  Returns:
    delta
  """
  min_delta = 1.0
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    if log_moment < moment_order * eps:
      min_delta = min(min_delta,
                      math.exp(log_moment - moment_order * eps))
  return min_delta 
Example 11
Project: DOTA_models   Author: ringringyi   File: analysis.py    Apache License 2.0
def compute_q_noisy_max(counts, noise_eps):
  """returns ~ Pr[outcome != winner].

  Args:
    counts: a list of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # For noisy max, we only get an upper bound.
  # Pr[ j beats i*] \leq (2 + gap(j,i*)) / (4 exp(gap(j,i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables

  winner = np.argmax(counts)
  counts_normalized = noise_eps * (counts - counts[winner])
  counts_rest = np.array(
      [counts_normalized[i] for i in xrange(len(counts)) if i != winner])
  q = 0.0
  for c in counts_rest:
    gap = -c
    q += (gap + 2.0) / (4.0 * math.exp(gap))
  return min(q, 1.0 - (1.0/len(counts))) 
Example 12
Project: DOTA_models   Author: ringringyi   File: analysis.py    Apache License 2.0
def compute_q_noisy_max_approx(counts, noise_eps):
  """returns ~ Pr[outcome != winner].

  Args:
    counts: a list of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # For noisy max, we only get an upper bound.
  # Pr[ j beats i*] \leq (2 + gap(j,i*)) / (4 exp(gap(j,i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables
  # This code uses an approximation that is faster and easier
  # to get local sensitivity bound on.

  winner = np.argmax(counts)
  counts_normalized = noise_eps * (counts - counts[winner])
  counts_rest = np.array(
      [counts_normalized[i] for i in xrange(len(counts)) if i != winner])
  gap = -max(counts_rest)
  q = (len(counts) - 1) * (gap + 2.0) / (4.0 * math.exp(gap))
  return min(q, 1.0 - (1.0/len(counts))) 
Example 13
Project: DOTA_models   Author: ringringyi   File: analysis.py    Apache License 2.0
def smoothed_sens(counts, noise_eps, l, beta):
  """Compute beta-smooth sensitivity.

  Args:
    counts: array of scores
    noise_eps: noise parameter
    l: moment of interest
    beta: smoothness parameter
  Returns:
    smooth_sensitivity: a beta smooth upper bound
  """
  k = 0
  smoothed_sensitivity = sens_at_k(counts, noise_eps, l, k)
  while k < max(counts):
    k += 1
    sensitivity_at_k = sens_at_k(counts, noise_eps, l, k)
    smoothed_sensitivity = max(
        smoothed_sensitivity,
        math.exp(-beta * k) * sensitivity_at_k)
    if sensitivity_at_k == 0.0:
      break
  return smoothed_sensitivity 
Example 14
Project: Pytorch-Project-Template   Author: moemen95   File: dqn.py    MIT License
def select_action(self, state):
        """
        The action selection function, it either uses the model to choose an action or samples one uniformly.
        :param state: current state of the model
        :return:
        """
        if self.cuda:
            state = state.cuda()
        sample = random.random()
        eps_threshold = self.config.eps_end + (self.config.eps_start - self.config.eps_end) * math.exp(
            -1. * self.current_iteration / self.config.eps_decay)  # anneal from eps_start down to eps_end
        self.current_iteration += 1
        if sample > eps_threshold:
            with torch.no_grad():
                return self.policy_model(state).max(1)[1].view(1, 1)
        else:
            return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long) 
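
A self-contained sketch of the exponential epsilon schedule used above (the parameter values here are illustrative, not the project's config):

import math

def eps_threshold(step, eps_start=0.9, eps_end=0.05, eps_decay=200):
    # decays from eps_start at step 0 toward eps_end as step grows
    return eps_end + (eps_start - eps_end) * math.exp(-step / eps_decay)

for step in (0, 200, 1000):
    print(step, round(eps_threshold(step), 4))  # 0.9, 0.3627, 0.0557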
Example 15
Project: pcfg-sampling   Author: wilkeraziz   File: slice_variable.py    Apache License 2.0
def get(self, sym, start, end):
        """
        Returns a slice variable if it exists, otherwise calculates one based on the condition of the
        previous derivations or if none, on a beta distribution
        """
        # slice variables are indexed by the annotated LHS symbol as shown below
        state = (sym, start, end)
        # try to retrieve an assignment of the slice variable
        u = self.slice_variables.get(state, None)
        if u is None:  # if we have never computed such an assignment
            theta = self.conditions.get(state, None)  # first we try to retrieve a condition
            if theta is None:  # if there is none
                u = math.log(numpy.random.beta(self.a, self.b))  # the option is to sample u from a beta
            else:  # otherwise
                u = math.log(numpy.random.uniform(0, math.exp(theta)))  # we must sample u uniformly in the interval [0, theta)
            self.slice_variables[state] = u  # finally we store u for next time
        return u 
Example 16
Project: fine-lm   Author: akzaidi   File: common_layers.py    MIT License
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
Example 17
Project: pyCEST   Author: pganssle   File: cjlib.py    MIT License
def t2fit(te, mm):
    """ Do a mono-exponential decay curve fit to given data """

    # Calculate some mins and max
    noise_max = 2*min(mm)
    pd_max = 2*max(mm)

    coeffs = polyfit( te, log(mm), 1 )    
    t2_guess = -1 / coeffs[0]

    # Lambda to calculate the residuals between the simulated and the measured data
    residuals = lambda x, te, mm: sum(   (  (x[0]*array([math.exp(-tt/x[1]) for tt in te])+x[2] ) - mm  )**2   )

    # Set the initial parameters: PD T2 offset
    p0 = array([mm[0], t2_guess, mm[-1]/2])

    # Call the optimization program
    plsq = fmin_l_bfgs_b(residuals, p0, args=(te, mm), bounds=[(0, pd_max), (0.1, 1200), (0, noise_max) ], approx_grad=True)
    #plsq = fmin_tnc(residuals, p0, args=(te, mm), bounds=[(0, pd_max), (t2_guess/2, t2_guess*2), (0, noise_max) ], approx_grad=True, messages=0)
    
    # Return the appropriate values
    return plsq[0] 
Example 18
Project: pyCEST   Author: pganssle   File: cjlib.py    MIT License
def nnls_fit( te, y, t2 ):
    A = exp(- outer( te,  r_[ [1/t2a for t2a in t2], 0]) )

    if False:
        H = 0.0*diag(1*ones((A.shape[1],)))

        #H = diag(1*ones((A.shape[1],)))
        #H = H + diag(-1*ones((A.shape[1],)), k=1)[:-1,:-1]
        yt = zeros(( A.shape[1] ))
        Att = concatenate( (A, H), axis=0 )
        ytt = concatenate( (y, yt), axis=0 )

        x = scipy.optimize.nnls(Att, ytt)[0]
    else:
        x = scipy.optimize.nnls(A, y)[0]

    ## Compute the fitted data
    y_fit = inner(A, x)

    ## Compute the chi2
    chi2 = sqrt( sum( ( y - y_fit)**2 ) )

    return x, y_fit, chi2
#    return x, 0,0 
Example 19
Project: simulated-annealing-tsp   Author: chncyhn   File: anneal.py    MIT License
def p_accept(self, candidate_fitness):
        """
        Probability of accepting if the candidate is worse than current.
        Depends on the current temperature and difference between candidate and current.
        """
        return math.exp(-abs(candidate_fitness - self.cur_fitness) / self.T) 
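
A standalone sketch of this Metropolis-style acceptance rule with illustrative numbers: the hotter the system, the more likely a worse candidate is accepted.

import math

def p_accept(candidate_fitness, cur_fitness, T):
    # a worse candidate is accepted with probability e**(-|delta| / T)
    return math.exp(-abs(candidate_fitness - cur_fitness) / T)

print(p_accept(105.0, 100.0, T=50.0))  # ~0.905: high temperature, usually accept
print(p_accept(105.0, 100.0, T=1.0))   # ~0.0067: low temperature, rarely accept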
Example 20
Project: kicker-module   Author: EvanTheB   File: trueskill.py    GNU General Public License v3.0
def match_quality(teams, BETA):
    # Set up multivariate gaussians
    u = np.matrix([p.mu for p in itertools.chain.from_iterable(teams)]).T
    summa = np.diagflat(
        [p.sigma ** 2 for p in itertools.chain.from_iterable(teams)])

    total_players = sum(len(x) for x in teams)
    done_players = 0
    A_T = []
    for i in range(len(teams) - 1):
        A_T.append(
            np.array(
                [0] * done_players +
                [1] * len(teams[i]) +
                [-1] * len(teams[i + 1]) +
                [0] * (total_players - done_players -
                       len(teams[i]) - len(teams[i + 1]))
            )
        )
        done_players += len(teams[i])
    A = np.matrix(A_T).T

    common = BETA ** 2 * A.T * A + A.T * summa * A
    exp_part = -0.5 * u.T * A * np.linalg.inv(common) * A.T * u
    sqrt_part = np.linalg.det(BETA ** 2 * A.T * A) / np.linalg.det(common)
    return math.sqrt(sqrt_part) * math.exp(exp_part) 
Example 21
Project: kicker-module   Author: EvanTheB   File: trueskill.py    GNU General Public License v3.0
def gaussian_at(x, mean=0.0, standard_dev=1.0):
    """
    gaussian function at x
    """
    # See http://mathworld.wolfram.com/NormalDistribution.html
    # P(x) = 1 / (stdDev * sqrt(2*pi)) * e^(-(x - mean)^2 / (2*stdDev^2))
    multiplier = 1.0 / (standard_dev * math.sqrt(2 * math.pi))
    exp_part = (-1.0 * (x - mean) ** 2 / (2 * (standard_dev ** 2)))
    result = multiplier * math.exp(exp_part)
    return result 
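
A quick check against the closed form (the snippet assumes import math): the standard normal density at x = 0 is 1/sqrt(2*pi).

import math  # the snippet above uses math.sqrt, math.exp and math.pi

print(gaussian_at(0.0))              # ~0.3989422804014327
print(1.0 / math.sqrt(2 * math.pi))  # same value
print(gaussian_at(2.0, mean=2.0, standard_dev=0.5))  # peak of a narrower curve, ~0.7979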
Example 22
Project: pyblish-win   Author: pyblish   File: random.py    GNU Lesser General Public License v3.0
def lognormvariate(self, mu, sigma):
        """Log normal distribution.

        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.

        """
        return _exp(self.normalvariate(mu, sigma))

## -------------------- exponential distribution -------------------- 
Example 23
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0
def testExp(self):
        self.assertRaises(TypeError, math.exp)
        self.ftest('exp(-1)', math.exp(-1), 1/math.e)
        self.ftest('exp(0)', math.exp(0), 1)
        self.ftest('exp(1)', math.exp(1), math.e)
        self.assertEqual(math.exp(INF), INF)
        self.assertEqual(math.exp(NINF), 0.)
        self.assertTrue(math.isnan(math.exp(NAN))) 
Example 24
Project: comet-commonsense   Author: atcbosselut   File: demo_bilinear.py    Apache License 2.0
def sigmoid(x):
    return 1 / (1 + math.exp(-x)) 
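
This one-liner raises OverflowError for large negative x, where math.exp(-x) exceeds the float range. A numerically stable variant, as a hedged sketch rather than the project's code:

import math

def sigmoid_stable(x):
    # split on sign so math.exp only ever sees a non-positive argument
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    z = math.exp(x)
    return z / (1.0 + z)

print(sigmoid_stable(0.0))      # 0.5
print(sigmoid_stable(-1000.0))  # ~0.0 with no OverflowError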
Example 25
Project: subword-qac   Author: clovaai   File: utils.py    MIT License
def print_str(self, time_avg_=False):
        loss_query, loss_token = self.average()
        time_str = f"{self.elapsed_time() * 1000. / self.cnt_add:6.2f} ms/batch" if time_avg_ else \
                   f"{self.elapsed_time():6.2f} s"
        return f"{time_str} | loss_query {loss_query:6.2f} | token_ppl {math.exp(loss_token):6.2f}" 
Example 26
Project: subword-qac   Author: clovaai   File: generate.py    MIT License
def log_sum_exp(a, b):
    return max(a, b) + np.log(1 + math.exp(-abs(a - b))) 
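
The identity behind this helper is log(e^a + e^b) = max(a, b) + log(1 + e^(-|a - b|)), which avoids overflowing either exponential. A quick check (the snippet assumes math and numpy as np are imported):

import math
import numpy as np  # the snippet above uses np.log

a, b = math.log(3.0), math.log(5.0)
print(log_sum_exp(a, b))    # ~2.0794415416798357
print(math.log(3.0 + 5.0))  # same value, log(8)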
Example 27
Project: streetview_objectmapping   Author: vlkryl   File: objectmapping.py    MIT License
def MetersToLatLon( mx, my ):
    "Converts XY point from Spherical Mercator EPSG:4326 to lat/lon in WGS84 Datum"
    originShift = 2 * pi * 6378137 / 2.0
    lon = (mx / originShift) * 180.0
    lat = (my / originShift) * 180.0
    lat = 180 / pi * (2 * atan(exp(lat * pi / 180.0)) - pi / 2.0)
    return lat, lon


# haversine distance formula between two points specified by their GPS coordinates 
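
A quick check of the inverse-Mercator math (the snippet assumes pi, atan and exp from math): the origin maps to (0, 0), and half the origin shift along x is 90 degrees of longitude.

from math import pi, atan, exp  # names the snippet above expects in scope

print(MetersToLatLon(0.0, 0.0))  # (0.0, 0.0)
origin_shift = 2 * pi * 6378137 / 2.0  # ~20037508.34 m
lat, lon = MetersToLatLon(origin_shift / 2.0, 0.0)
print(lat, lon)  # 0.0 90.0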
Example 28
Project: deep-learning-note   Author: wdxtub   File: 53_machine_translation.py    MIT License
def bleu(pred_tokens, label_tokens, k):
    len_pred, len_label = len(pred_tokens), len(label_tokens)
    score = math.exp(min(0, 1 - len_label / len_pred))
    for n in range(1, k + 1):
        num_matches, label_subs = 0, collections.defaultdict(int)
        for i in range(len_label - n + 1):
            label_subs[''.join(label_tokens[i: i + n])] += 1
        for i in range(len_pred - n + 1):
            if label_subs[''.join(pred_tokens[i: i + n])] > 0:
                num_matches += 1
                label_subs[''.join(pred_tokens[i: i + n])] -= 1
        score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))
    return score 
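
A usage sketch with toy token lists (the snippet assumes math and collections are imported). Here math.exp supplies the brevity penalty for short predictions:

import math
import collections

label = 'the cat is on the mat'.split()
pred = 'the cat is the mat'.split()
print(bleu(pred, label, k=2))   # ~0.762: perfect unigrams, 3/4 bigrams, short-length penalty
print(bleu(label, label, k=2))  # 1.0 for an exact match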
Example 29
Project: FastTextKorean   Author: skyer9   File: get_frequent_word.py    Apache License 2.0
def check_morphs(lst, corpus_fname, output_fname, log_fname):
    mcab = mecab.MeCab()

    model_fname = 'soyword.model'
    word_extractor = WordExtractor(
        min_frequency=100,
        min_cohesion_forward=0.05,
        min_right_branching_entropy=0.0
    )
    word_extractor.load(model_fname)
    scores = word_extractor.word_scores()
    scores = {key:(scores[key].cohesion_forward * math.exp(scores[key].right_branching_entropy)) for key in scores.keys()}
    soy_tokenizer = LTokenizer(scores=scores)

    with open(corpus_fname, 'r', encoding='utf-8') as f1, \
         open(output_fname, 'w', encoding='utf-8') as f2, \
         open(log_fname, 'w', encoding='utf-8') as f3:
        sentences = f1.read()

        for item in lst:
            cnt, word = item

            if cnt < 100 or len(word) == 1:
                continue

            tokens = mcab.morphs(word)
            if len(tokens) == 1:
                continue

            (cho, jung, jong) = hgtk.letter.decompose(word[-1])
            if 'ㄱ' <= jong <= 'ㅎ':
                dic_line = "{},,,,NNP,*,{},{},*,*,*,*,*".format(word, 'T', word)
            else:
                dic_line = "{},,,,NNP,*,{},{},*,*,*,*,*".format(word, 'F', word)
            f2.writelines(dic_line + '\n')
            f3.writelines("{}\t{}\t{}".format(word, ' '.join(tokens), cnt) + '\n') 
Example 30
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: train.py    Apache License 2.0
def evaluate(valid_module, data_iter, epoch, mode, bptt, batch_size):
    total_loss = 0.0
    nbatch = 0
    for batch in data_iter:
        valid_module.forward(batch, is_train=False)
        outputs = valid_module.get_loss()
        total_loss += mx.nd.sum(outputs[0]).asscalar()
        nbatch += 1
    data_iter.reset()
    loss = total_loss / bptt / batch_size / nbatch
    logging.info('Iter[%d] %s loss:\t%.7f, Perplexity: %.7f' % \
                 (epoch, mode, loss, math.exp(loss)))
    return loss 
Example 31
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py    Apache License 2.0
def get(self):
        """Returns the current evaluation result.

        Returns
        -------
        Tuple of (str, float)
            Representing name of the metric and evaluation result.
        """
        return (self.name, math.exp(self.sum_metric/self.num_inst))

####################
# REGRESSION METRICS
#################### 
Example 32
Project: DensityPeakCluster   Author: lanbing510   File: cluster.py    MIT License
def local_density(max_id, distances, dc, guass=True, cutoff=False):
	'''
	Compute all points' local density

	Args:
		max_id    : max continuous id
		distances : distance dict
		guass     : use gaussian func or not (can't be used together with cutoff)
		cutoff    : use cutoff func or not (can't be used together with guass)

	Returns:
	    local density vector whose index is the point index, starting from 1
	'''
	assert guass != cutoff, 'choose exactly one of guass or cutoff'
	logger.info("PROGRESS: compute local density")
	guass_func = lambda dij, dc : math.exp(- (dij / dc) ** 2)
	cutoff_func = lambda dij, dc: 1 if dij < dc else 0
	func = guass and guass_func or cutoff_func
	rho = [-1] + [0] * max_id
	for i in xrange(1, max_id):
		for j in xrange(i + 1, max_id + 1):
			rho[i] += func(distances[(i, j)], dc)
			rho[j] += func(distances[(i, j)], dc)
		if i % (max_id / 10) == 0:
			logger.info("PROGRESS: at index #%i" % (i))
	return np.array(rho, np.float32) 
Example 33
Project: DOTA_models   Author: ringringyi   File: accountant.py    Apache License 2.0
def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                  num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume we use Gaussian
    noise on a randomly sampled batch so we get better composition: 1. the per
    batch privacy is computed using the privacy amplification via sampling bound;
    2. the composition is done using the composition with Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.
    Returns:
      a TensorFlow operation for updating the privacy spending.
    """

    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
      amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                        self._total_examples)
      # Use privacy amplification via sampling bound.
      # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
      # TODO(liqzhang) Add a link to a document with formal statement
      # and proof.
      amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
          tf.exp(eps) - 1.0)), [1])
      amortize_delta = tf.reshape(amortize_ratio * delta, [1])
      return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                      tf.square(amortize_eps)),
                        tf.assign_add(self._delta_sum, amortize_delta)]) 
Example 34
Project: DOTA_models   Author: ringringyi   File: accountant.py    Apache License 2.0
def _compute_log_moment(self, sigma, q, moment_order):
    """Compute high moment of privacy loss.

    Args:
      sigma: the noise sigma, in the multiples of the sensitivity.
      q: the sampling ratio.
      moment_order: the order of moment.
    Returns:
      log E[exp(moment_order * X)]
    """
    pass 
Example 35
Project: DOTA_models   Author: ringringyi   File: accountant.py    Apache License 2.0
def _differential_moments(self, sigma, s, t):
    """Compute 0 to t-th differential moments for Gaussian variable.

        E[(P(x+s)/P(x+s-1)-1)^t]
      = sum_{i=0}^t (t choose i) (-1)^{t-i} E[(P(x+s)/P(x+s-1))^i]
      = sum_{i=0}^t (t choose i) (-1)^{t-i} E[exp(-i*(2*x+2*s-1)/(2*sigma^2))]
      = sum_{i=0}^t (t choose i) (-1)^{t-i} exp(i(i+1-2*s)/(2 sigma^2))
    Args:
      sigma: the noise sigma, in the multiples of the sensitivity.
      s: the shift.
      t: 0 to t-th moment.
    Returns:
      0 to t-th moment as a tensor of shape [t+1].
    """
    assert t <= self._max_moment_order, ("The order of %d is out "
                                         "of the upper bound %d."
                                         % (t, self._max_moment_order))
    binomial = tf.slice(self._binomial_table, [0, 0],
                        [t + 1, t + 1])
    signs = numpy.zeros((t + 1, t + 1), dtype=numpy.float64)
    for i in range(t + 1):
      for j in range(t + 1):
        signs[i, j] = 1.0 - 2 * ((i - j) % 2)
    exponents = tf.constant([j * (j + 1.0 - 2.0 * s) / (2.0 * sigma * sigma)
                             for j in range(t + 1)], dtype=tf.float64)
    # x[i, j] = binomial[i, j] * signs[i, j] = (i choose j) * (-1)^{i-j}
    x = tf.multiply(binomial, signs)
    # y[i, j] = x[i, j] * exp(exponents[j])
    #         = (i choose j) * (-1)^{i-j} * exp(j(j+1-2s)/(2 sigma^2))
    # Note: this computation is done by broadcasting pointwise multiplication
    # between [t+1, t+1] tensor and [t+1] tensor.
    y = tf.multiply(x, tf.exp(exponents))
    # z[i] = sum_j y[i, j]
    #      = sum_j (i choose j) * (-1)^{i-j} * exp(j(j+1-2s)/(2 sigma^2))
    z = tf.reduce_sum(y, 1)
    return z 
Example 36
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    Apache License 2.0
def pdf_gauss_mp(x, sigma, mean):
  return mp.mpf(1.) / mp.sqrt(mp.mpf("2.") * sigma ** 2 * mp.pi) * mp.exp(
      - (x - mean) ** 2 / (mp.mpf("2.") * sigma ** 2)) 
Example 37
Project: DOTA_models   Author: ringringyi   File: data_utils.py    Apache License 2.0
def safe_exp(x):
  perp = 10000
  x = float(x)
  if x < 100: perp = math.exp(x)
  if perp > 10000: return 10000
  return perp 
Example 38
Project: DOTA_models   Author: ringringyi   File: caption_generator_test.py    Apache License 2.0
def _assertExpectedCaptions(self,
                              expected_captions,
                              beam_size=3,
                              max_caption_length=20,
                              length_normalization_factor=0):
    """Tests that beam search generates the expected captions.

    Args:
      expected_captions: A sequence of pairs (sentence, probability), where
        sentence is a list of integer ids and probability is a float in [0, 1].
      beam_size: Parameter passed to beam_search().
      max_caption_length: Parameter passed to beam_search().
      length_normalization_factor: Parameter passed to beam_search().
    """
    expected_sentences = [c[0] for c in expected_captions]
    expected_probabilities = [c[1] for c in expected_captions]

    # Generate captions.
    generator = caption_generator.CaptionGenerator(
        model=FakeModel(),
        vocab=FakeVocab(),
        beam_size=beam_size,
        max_caption_length=max_caption_length,
        length_normalization_factor=length_normalization_factor)
    actual_captions = generator.beam_search(sess=None, encoded_image=None)

    actual_sentences = [c.sentence for c in actual_captions]
    actual_probabilities = [math.exp(c.logprob) for c in actual_captions]

    self.assertEqual(expected_sentences, actual_sentences)
    self.assertAllClose(expected_probabilities, actual_probabilities) 
Example 39
Project: DOTA_models   Author: ringringyi   File: run_inference.py    Apache License 2.0
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      with tf.gfile.GFile(filename, "r") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob))) 
Example 40
Project: gradient-descent   Author: codebox   File: hypothesis.py    MIT License
def calculate(self, values):
        exp_value = None
        try:
            exp_value = math.exp(-super(LogisticHypothesis, self).sum_of_products(values))
        except OverflowError:
            Log().warn('OverflowError for values: ' + str(values))
            return 1

        return 1 / (1 + exp_value) 
Example 41
Project: sic   Author: Yanixos   File: random.py    GNU General Public License v3.0
def lognormvariate(self, mu, sigma):
        """Log normal distribution.

        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.

        """
        return _exp(self.normalvariate(mu, sigma))

## -------------------- exponential distribution -------------------- 
Example 42
Project: CAFA_assessment_tool   Author: ashleyzhou972   File: Stats.py    GNU General Public License v3.0
def hypergeometric_probability(k, n, K, N):
    """
    Returns probability of k successes in n draws without replacement from
    a finite population of size N containing a maximum of K successes.
    """
    return exp(lncombination(K, k) + lncombination(N - K, n - k) - lncombination(N, n)) 
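
lncombination isn't shown in this excerpt; a plausible stand-in (an assumption, not necessarily the project's implementation) computes log C(n, k) with math.lgamma so huge factorials never overflow:

from math import exp, lgamma

def lncombination(n, k):
    # log of (n choose k) via log-gamma: lgamma(x + 1) == log(x!)
    return lgamma(n + 1) - lgamma(k + 1) - lgamma(n - k + 1)

# P(k=2 successes in n=5 draws from N=50 containing K=10 successes)
print(exp(lncombination(10, 2) + lncombination(40, 3) - lncombination(50, 5)))  # ~0.2098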
Example 43
Project: PersonalRecommendation   Author: ma-zhiyuan   File: check.py    Apache License 2.0
def sigmoid(x):
    """
    sigmoid function
    """
    return 1/(1+math.exp(-x)) 
Example 44
Project: TradzQAI   Author: kkuette   File: utils.py    Apache License 2.0
def sigmoid(x):
    try:
        exp = math.exp(-x)
    except:
        exp = float('Inf')
    return 1 / (1 + exp) 
Example 45
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py    Apache License 2.0
def __pow__(self,exp):
        a = self.__log__()
        a = self.mul(exp, a)
        return a.__exp__() 
Example 46
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py    Apache License 2.0
def __rpow__(self,exp):
        a = self.__log__()
        a = self.rmul(exp, a)
        return a.__exp__() 
Example 47
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py    Apache License 2.0
def __exp__(self):
        exp_x = math.exp(self.real)
        return Complex(exp_x * math.cos(self.imag), exp_x * math.sin(self.imag)) 
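
This is Euler's formula, e^(a+bi) = e^a * (cos b + i sin b). The standard library's cmath.exp computes the same thing, which makes for an easy cross-check:

import cmath
from math import exp, cos, sin

z = 1 + 1j
print(cmath.exp(z))  # (1.4686939399158851+2.2873552871788423j)
print(complex(exp(z.real) * cos(z.imag), exp(z.real) * sin(z.imag)))  # same value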
Example 48
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py    Apache License 2.0
def rpow(c, exp):
        return c.__rpow__(exp) 
Example 49
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py    Apache License 2.0
def pow(c, exp):
        return c.__pow__(exp)