Python math.exp() Examples

The following are code examples showing how to use math.exp(), collected from open source Python projects.

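For orientation: math.exp(x) returns e**x as a float and raises OverflowError once the result exceeds the double-precision range (roughly x > 709.78). A minimal standalone demonstration, independent of the projects below:

import math

print(math.exp(0))    # 1.0
print(math.exp(1))    # 2.718281828459045, i.e. math.e
print(math.exp(-1))   # 0.36787944117144233, i.e. 1/math.e

try:
    math.exp(1000)    # result would exceed the float range
except OverflowError as err:
    print('overflow:', err)
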
Example 1
Project: pyblish-win   Author: pyblish   File: test_random.py   License: GNU Lesser General Public License v3.0
from math import exp, fsum, pi, sin

def gamma(z, sqrt2pi=(2.0*pi)**0.5):
    # Reflection to right half of complex plane
    if z < 0.5:
        return pi / sin(pi*z) / gamma(1.0-z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z+1.0),
        771.3234287757674 / (z+2.0),
        -176.6150291498386 / (z+3.0),
        12.50734324009056 / (z+4.0),
        -0.1385710331296526 / (z+5.0),
        0.9934937113930748e-05 / (z+6.0),
        0.1659470187408462e-06 / (z+7.0),
    ]) 
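A quick sanity check of the approximation above (using the from math import shown with the excerpt): since Γ(n) = (n-1)! for positive integers, gamma(5) should be 4! = 24.

print(gamma(5))    # ~24.0 (4! = 24)
print(gamma(0.5))  # ~1.772 (sqrt(pi))
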
Example 2
Project: DOTA_models   Author: ringringyi   File: analysis.py   License: Apache License 2.0
def smoothed_sens(counts, noise_eps, l, beta):
  """Compute beta-smooth sensitivity.

  Args:
    counts: array of scores
    noise_eps: noise parameter
    l: moment of interest
    beta: smoothness parameter
  Returns:
    smooth_sensitivity: a beta smooth upper bound
  """
  k = 0
  smoothed_sensitivity = sens_at_k(counts, noise_eps, l, k)
  while k < max(counts):
    k += 1
    sensitivity_at_k = sens_at_k(counts, noise_eps, l, k)
    smoothed_sensitivity = max(
        smoothed_sensitivity,
        math.exp(-beta * k) * sensitivity_at_k)
    if sensitivity_at_k == 0.0:
      break
  return smoothed_sensitivity 
Example 3
Project: pyblish-win   Author: pyblish   File: test_math.py   License: GNU Lesser General Public License v3.0
def parse_mtestfile(fname):
    """Parse a file with test values

    -- starts a comment
    blank lines, or lines containing only a comment, are ignored
    other lines are expected to have the form
      id fn arg -> expected [flag]*

    """
    with open(fname) as fp:
        for line in fp:
            # strip comments, and skip blank lines
            if '--' in line:
                line = line[:line.index('--')]
            if not line.strip():
                continue

            lhs, rhs = line.split('->')
            id, fn, arg = lhs.split()
            rhs_pieces = rhs.split()
            exp = rhs_pieces[0]
            flags = rhs_pieces[1:]

            yield (id, fn, float(arg), float(exp), flags) 
Example 4
Project: pyblish-win   Author: pyblish   File: test_math.py   License: GNU Lesser General Public License v3.0
def testFrexp(self):
        self.assertRaises(TypeError, math.frexp)

        def testfrexp(name, result, expected):
            (mant, exp), (emant, eexp) = result, expected
            if abs(mant-emant) > eps or exp != eexp:
                self.fail('%s returned %r, expected %r'%\
                          (name, (mant, exp), (emant,eexp)))

        testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
        testfrexp('frexp(0)', math.frexp(0), (0, 0))
        testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
        testfrexp('frexp(2)', math.frexp(2), (0.5, 2))

        self.assertEqual(math.frexp(INF)[0], INF)
        self.assertEqual(math.frexp(NINF)[0], NINF)
        self.assertTrue(math.isnan(math.frexp(NAN)[0])) 
Example 5
Project: Py-Utils   Author: LonamiWebs   File: statis.py   License: MIT License
def dpois(lmbda):
  """Poisson Distribution
     lmbda = average number of successes per unit interval

     Used to determine the probability of a given number of
     successes occurring in a fixed interval (time, area…)

     This doesn't return a value, but rather the specified Poisson function p(k)
  """
  def p(k):
    if 0 <= k:
      return (exp(-lmbda) * lmbda**k) / factorial(k)
    else:
      return 0

  # Allow accessing the used 'lmbda' value from the function
  p.__dict__['lmbda'] = lmbda
  p.__dict__['expected'] = lmbda
  p.__dict__['variance'] = lmbda
  return p 
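A short usage sketch for the factory above (it relies on exp and factorial from the math module being in scope):

p = dpois(3.0)   # Poisson with an average of 3 successes per interval
print(p(2))      # exp(-3) * 3**2 / 2! ~ 0.2240
print(p(-1))     # 0 outside the support
print(p.lmbda, p.expected, p.variance)  # 3.0 3.0 3.0
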
Example 6
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: run_utils.py   License: Apache License 2.0
def evaluate(mod, data_iter, epoch, log_interval):
    """ Run evaluation on cpu. """
    start = time.time()
    total_L = 0.0
    nbatch = 0
    density = 0
    mod.set_states(value=0)
    for batch in data_iter:
        mod.forward(batch, is_train=False)
        outputs = mod.get_outputs(merge_multi_context=False)
        states = outputs[:-1]
        total_L += outputs[-1][0]
        mod.set_states(states=states)
        nbatch += 1
        # don't include padding data in the test perplexity
        density += batch.data[1].mean()
        if (nbatch + 1) % log_interval == 0:
            logging.info("Eval batch %d loss : %.7f" % (nbatch, (total_L / density).asscalar()))
    data_iter.reset()
    loss = (total_L / density).asscalar()
    ppl = math.exp(loss) if loss < 100 else 1e37
    end = time.time()
    logging.info('Iter[%d]\t\t CE loss %.7f, ppl %.7f. Eval duration = %.2f seconds ' % \
                 (epoch, loss, ppl, end - start))
    return loss 
Example 7
Project: MFEprimer_linux   Author: nick-youngblut   File: GelMobility.py   License: MIT License
def cal_mobility(X, gel_conc=1.0, ref_mobility=50, formula='Helling'):
    '''Cal mobility based on size'''
    import math
    gel_para_dict, a, b, k = load_gel_para_dict(gel_conc=gel_conc, formula=formula)

    X = float(X)
    gel_conc = float(gel_conc)

    # X: size (bp)
    # ref_mobility: the mobility distance of the fastest DNA segment

    if formula == 'Helling':
        Y = a - b * math.log(X + k)
    else:
        # only the Helling formula is implemented; raise instead of
        # falling through with Y undefined below
        #Y = math.exp(a - b * math.log(X + k))
        raise ValueError('unsupported formula: %s' % formula)

    # Y: the relative mobility = mobility distance / ref_mobility
    Y = Y * ref_mobility
    # Y: the mobility distance
    return round(Y, 1) 
Example 8
Project: MFEprimer_linux   Author: nick-youngblut   File: GelMobility.py   License: MIT License
def cal_size(Y, gel_conc=1.0, ref_mobility=50, formula='Helling'):
    '''Predict size based on the relative mobility'''
    import math

    gel_para_dict, a, b, k = load_gel_para_dict(gel_conc=gel_conc, formula=formula)

    # Y: the mobility distance
    Y = Y / ref_mobility
    # ref_mobility: the mobility distance of the fastest DNA segment
    if formula == 'Helling':
        #Y = a - b * math.log(X + k)
        X = math.exp((a - Y) / b) - k
    else:
        # only the Helling formula is implemented; raise instead of
        # falling through with X undefined below
        raise ValueError('unsupported formula: %s' % formula)

    return int(round(X, 0)) 
Example 9
Project: DOTA_models   Author: ringringyi   File: accountant.py   License: Apache License 2.0
def _compute_delta(self, log_moments, eps):
    """Compute delta for given log_moments and eps.

    Args:
      log_moments: the log moments of privacy loss, in the form of pairs
        of (moment_order, log_moment)
      eps: the target epsilon.
    Returns:
      delta
    """
    min_delta = 1.0
    for moment_order, log_moment in log_moments:
      if math.isinf(log_moment) or math.isnan(log_moment):
        sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
        continue
      if log_moment < moment_order * eps:
        min_delta = min(min_delta,
                        math.exp(log_moment - moment_order * eps))
    return min_delta 
Example 10
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py   License: Apache License 2.0
def compute_a(sigma, q, lmbd, verbose=False):
  lmbd_int = int(math.ceil(lmbd))
  if lmbd_int == 0:
    return 1.0

  a_lambda_first_term_exact = 0
  a_lambda_second_term_exact = 0
  for i in xrange(lmbd_int + 1):
    coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
    s1, s2 = 0, 0
    for j in xrange(i + 1):
      coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
      s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
      s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
    a_lambda_first_term_exact += coef_i * s1
    a_lambda_second_term_exact += coef_i * s2

  a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
                    q * a_lambda_second_term_exact)
  if verbose:
    print "A: by binomial expansion    {} = {} + {}".format(
        a_lambda_exact,
        (1.0 - q) * a_lambda_first_term_exact,
        q * a_lambda_second_term_exact)
  return _to_np_float64(a_lambda_exact) 
Example 11
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py   License: Apache License 2.0
def _compute_delta(log_moments, eps):
  """Compute delta for given log_moments and eps.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    eps: the target epsilon.
  Returns:
    delta
  """
  min_delta = 1.0
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    if log_moment < moment_order * eps:
      min_delta = min(min_delta,
                      math.exp(log_moment - moment_order * eps))
  return min_delta 
Example 12
Project: DOTA_models   Author: ringringyi   File: analysis.py   License: Apache License 2.0
def compute_q_noisy_max(counts, noise_eps):
  """returns ~ Pr[outcome != winner].

  Args:
    counts: a list of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # For noisy max, we only get an upper bound.
  # Pr[ j beats i* ] <= (2 + gap(j, i*)) / (4 * exp(gap(j, i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables

  winner = np.argmax(counts)
  counts_normalized = noise_eps * (counts - counts[winner])
  counts_rest = np.array(
      [counts_normalized[i] for i in xrange(len(counts)) if i != winner])
  q = 0.0
  for c in counts_rest:
    gap = -c
    q += (gap + 2.0) / (4.0 * math.exp(gap))
  return min(q, 1.0 - (1.0/len(counts))) 
Example 13
Project: DOTA_models   Author: ringringyi   File: analysis.py   License: Apache License 2.0
def compute_q_noisy_max_approx(counts, noise_eps):
  """returns ~ Pr[outcome != winner].

  Args:
    counts: a list of scores
    noise_eps: privacy parameter for noisy_max
  Returns:
    q: the probability that outcome is different from true winner.
  """
  # For noisy max, we only get an upper bound.
  # Pr[ j beats i* ] <= (2 + gap(j, i*)) / (4 * exp(gap(j, i*)))
  # proof at http://mathoverflow.net/questions/66763/
  # tight-bounds-on-probability-of-sum-of-laplace-random-variables
  # This code uses an approximation that is faster and easier
  # to get local sensitivity bound on.

  winner = np.argmax(counts)
  counts_normalized = noise_eps * (counts - counts[winner])
  counts_rest = np.array(
      [counts_normalized[i] for i in xrange(len(counts)) if i != winner])
  gap = -max(counts_rest)
  q = (len(counts) - 1) * (gap + 2.0) / (4.0 * math.exp(gap))
  return min(q, 1.0 - (1.0/len(counts))) 
Example 14
Project: Pytorch-Project-Template   Author: moemen95   File: dqn.py   License: MIT License
def select_action(self, state):
        """
        The action selection function, it either uses the model to choose an action or samples one uniformly.
        :param state: current state of the model
        :return:
        """
        if self.cuda:
            state = state.cuda()
        sample = random.random()
        # anneal epsilon from eps_start down to eps_end as iterations grow
        eps_threshold = self.config.eps_end + (self.config.eps_start - self.config.eps_end) * math.exp(
            -1. * self.current_iteration / self.config.eps_decay)
        self.current_iteration += 1
        if sample > eps_threshold:
            with torch.no_grad():
                return self.policy_model(state).max(1)[1].view(1, 1)
        else:
            return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long) 
Example 15
Project: pcfg-sampling   Author: wilkeraziz   File: slice_variable.py   License: Apache License 2.0
def get(self, sym, start, end):
        """
        Returns a slice variable if it exists, otherwise calculates one based on the condition of the
        previous derivations or if none, on a beta distribution
        """
        # slice variables are indexed by the annotated LHS symbol as shown below
        state = (sym, start, end)
        # try to retrieve an assignment of the slice variable
        u = self.slice_variables.get(state, None)
        if u is None:  # if we have never computed such an assignment
            theta = self.conditions.get(state, None)  # first we try to retrieve a condition
            if theta is None:  # if there is none
                u = math.log(numpy.random.beta(self.a, self.b))  # the option is to sample u from a beta
            else:  # otherwise
                u = math.log(numpy.random.uniform(0, math.exp(theta)))  # we must sample u uniformly in the interval [0, theta)
            self.slice_variables[state] = u  # finally we store u for next time
        return u 
Example 16
Project: fine-lm   Author: akzaidi   File: common_layers.py   License: MIT License
def get_timing_signal(length,
                      min_timescale=1,
                      max_timescale=1e4,
                      num_timescales=16):
  """Create Tensor of sinusoids of different frequencies.

  Args:
    length: Length of the Tensor to create, i.e. Number of steps.
    min_timescale: a float
    max_timescale: a float
    num_timescales: an int

  Returns:
    Tensor of shape (length, 2*num_timescales)
  """
  positions = tf.to_float(tf.range(length))
  log_timescale_increment = (
      math.log(max_timescale / min_timescale) / (num_timescales - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
  return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) 
Example 17
Project: pyCEST   Author: pganssle   File: cjlib.py   License: MIT License
def t2fit(te, mm):
    """ Do a mono-exponential decay curve fit to given data """

    # Calculate some mins and max
    noise_max = 2*min(mm)
    pd_max = 2*max(mm)

    coeffs = polyfit( te, log(mm), 1 )    
    t2_guess = -1 / coeffs[0]

    # Lambda to calculate the residuals between the simulated and the measured data
    residuals = lambda x, te, mm: sum(   (  (x[0]*array([math.exp(-tt/x[1]) for tt in te])+x[2] ) - mm  )**2   )

    # Set the initial parameters: PD T2 offset
    p0 = array([mm[0], t2_guess, mm[-1]/2])

    # Call the optimization program
    plsq = fmin_l_bfgs_b(residuals, p0, args=(te, mm), bounds=[(0, pd_max), (0.1, 1200), (0, noise_max) ], approx_grad=True)
    #plsq = fmin_tnc(residuals, p0, args=(te, mm), bounds=[(0, pd_max), (t2_guess/2, t2_guess*2), (0, noise_max) ], approx_grad=True, messages=0)
    
    # Return the appropriate values
    return plsq[0] 
Example 18
Project: pyCEST   Author: pganssle   File: cjlib.py   License: MIT License
def nnls_fit( te, y, t2 ):
    A = exp(- outer( te,  r_[ [1/t2a for t2a in t2], 0]) )

    if False:
        H = 0.0*diag(1*ones((A.shape[1],)))

        #H = diag(1*ones((A.shape[1],)))
        #H = H + diag(-1*ones((A.shape[1],)), k=1)[:-1,:-1]
        yt = zeros(( A.shape[1] ))
        Att = concatenate( (A, H), axis=0 )
        ytt = concatenate( (y, yt), axis=0 )

        x = scipy.optimize.nnls(Att, ytt)[0]
    else:
        x = scipy.optimize.nnls(A, y)[0]

    ## Compute the fitted data
    y_fit = inner(A, x)

    ## Compute the chi2
    chi2 = sqrt( sum( ( y - y_fit)**2 ) )

    return x, y_fit, chi2
#    return x, 0,0 
Example 19
Project: simulated-annealing-tsp   Author: chncyhn   File: anneal.py   License: MIT License
def p_accept(self, candidate_fitness):
        """
        Probability of accepting if the candidate is worse than current.
        Depends on the current temperature and difference between candidate and current.
        """
        return math.exp(-abs(candidate_fitness - self.cur_fitness) / self.T) 
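For context, such an acceptance probability is typically used in a Metropolis-style loop: improvements are always accepted, while worse candidates are accepted with probability exp(-|delta| / T), which shrinks as the temperature cools. A minimal sketch (names are illustrative, not from the project):

import math
import random

def metropolis_accept(candidate_fitness, cur_fitness, T):
    # Always accept an improvement; otherwise accept with a probability
    # that decays exponentially in the fitness gap and inverse temperature.
    if candidate_fitness < cur_fitness:
        return True
    return random.random() < math.exp(-abs(candidate_fitness - cur_fitness) / T)
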
Example 20
Project: kicker-module   Author: EvanTheB   File: trueskill.py   License: GNU General Public License v3.0
def match_quality(teams, BETA):
    # Set up multivariate gaussians
    u = np.matrix([p.mu for p in itertools.chain.from_iterable(teams)]).T
    summa = np.diagflat(
        [p.sigma ** 2 for p in itertools.chain.from_iterable(teams)])

    total_players = sum(len(x) for x in teams)
    done_players = 0
    A_T = []
    for i in range(len(teams) - 1):
        A_T.append(
            np.array(
                [0] * done_players +
                [1] * len(teams[i]) +
                [-1] * len(teams[i + 1]) +
                [0] * (total_players - done_players -
                       len(teams[i]) - len(teams[i + 1]))
            )
        )
        done_players += len(teams[i])
    A = np.matrix(A_T).T

    common = BETA ** 2 * A.T * A + A.T * summa * A
    exp_part = -0.5 * u.T * A * np.linalg.inv(common) * A.T * u
    sqrt_part = np.linalg.det(BETA ** 2 * A.T * A) / np.linalg.det(common)
    return math.sqrt(sqrt_part) * math.exp(exp_part) 
Example 21
Project: kicker-module   Author: EvanTheB   File: trueskill.py   License: GNU General Public License v3.0
def gaussian_at(x, mean=0.0, standard_dev=1.0):
    """
    gaussian function at x
    """
    # See http://mathworld.wolfram.com/NormalDistribution.html
    # P(x) = 1 / (stdDev * sqrt(2*pi)) * e**(-(x-mean)^2 / (2*stdDev^2))
    multiplier = 1.0 / (standard_dev * math.sqrt(2 * math.pi))
    exp_part = (-1.0 * (x - mean) ** 2 / (2 * (standard_dev ** 2)))
    result = multiplier * math.exp(exp_part)
    return result 
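A quick check of gaussian_at against known values of the standard normal density (assuming import math is in scope):

print(gaussian_at(0.0))  # peak of the standard normal, 1/sqrt(2*pi) ~ 0.3989
print(gaussian_at(1.0))  # one standard deviation out, ~ 0.2420
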
Example 22
Project: pyblish-win   Author: pyblish   File: random.py   License: GNU Lesser General Public License v3.0
def lognormvariate(self, mu, sigma):
        """Log normal distribution.

        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.

        """
        return _exp(self.normalvariate(mu, sigma))

## -------------------- exponential distribution -------------------- 
Example 23
Project: pyblish-win   Author: pyblish   File: test_math.py   License: GNU Lesser General Public License v3.0
def testExp(self):
        self.assertRaises(TypeError, math.exp)
        self.ftest('exp(-1)', math.exp(-1), 1/math.e)
        self.ftest('exp(0)', math.exp(0), 1)
        self.ftest('exp(1)', math.exp(1), math.e)
        self.assertEqual(math.exp(INF), INF)
        self.assertEqual(math.exp(NINF), 0.)
        self.assertTrue(math.isnan(math.exp(NAN))) 
Example 24
Project: comet-commonsense   Author: atcbosselut   File: demo_bilinear.py   License: Apache License 2.0
def sigmoid(x):
    return 1 / (1 + math.exp(-x)) 
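Note that this plain form overflows for large negative x, since math.exp(-x) exceeds the float range once -x is greater than about 709 (Examples 40 and 45 below guard against exactly this). A numerically safe variant, as a sketch:

import math

def sigmoid_stable(x):
    # Evaluate whichever branch keeps the exponent non-positive,
    # so math.exp can never overflow.
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)
    return z / (1 + z)
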
Example 25
Project: subword-qac   Author: clovaai   File: utils.py   License: MIT License
def print_str(self, time_avg_=False):
        loss_query, loss_token = self.average()
        time_str = f"{self.elapsed_time() * 1000. / self.cnt_add:6.2f} ms/batch" if time_avg_ else \
                   f"{self.elapsed_time():6.2f} s"
        return f"{time_str} | loss_query {loss_query:6.2f} | token_ppl {math.exp(loss_token):6.2f}" 
Example 26
Project: subword-qac   Author: clovaai   File: generate.py   License: MIT License
def log_sum_exp(a, b):
    return max(a, b) + np.log(1 + math.exp(-abs(a - b))) 
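This is the standard log-sum-exp identity, log(e**a + e**b) = max(a, b) + log(1 + e**(-|a - b|)): the naive form overflows for large inputs, while the rewritten form only ever exponentiates a non-positive number. With log_sum_exp above in scope (it relies on import math and import numpy as np):

print(log_sum_exp(1000.0, 999.0))  # ~1000.3133; math.exp(1000) alone would overflow
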
Example 27
Project: streetview_objectmapping   Author: vlkryl   File: objectmapping.py   License: MIT License
def MetersToLatLon( mx, my ):
    "Converts XY point from Spherical Mercator (EPSG:3857) to lat/lon in WGS84 datum"
    originShift = 2 * pi * 6378137 / 2.0
    lon = (mx / originShift) * 180.0
    lat = (my / originShift) * 180.0
    lat = 180 / pi * (2 * atan(exp(lat * pi / 180.0)) - pi / 2.0)
    return lat, lon


# haversine distance formula between two points specified by their GPS coordinates 
Example 28
Project: deep-learning-note   Author: wdxtub   File: 53_machine_translation.py   License: MIT License
def bleu(pred_tokens, label_tokens, k):
    len_pred, len_label = len(pred_tokens), len(label_tokens)
    score = math.exp(min(0, 1 - len_label / len_pred))
    for n in range(1, k + 1):
        num_matches, label_subs = 0, collections.defaultdict(int)
        for i in range(len_label - n + 1):
            label_subs[''.join(label_tokens[i: i + n])] += 1
        for i in range(len_pred - n + 1):
            if label_subs[''.join(pred_tokens[i: i + n])] > 0:
                num_matches += 1
                label_subs[''.join(pred_tokens[i: i + n])] -= 1
        score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))
    return score 
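A toy usage of the BLEU scorer above (requires import math, import collections, and the function in scope):

pred = 'the cat sat on the mat'.split()
label = 'the cat is on the mat'.split()
# 1-gram precision 5/6, 2-gram precision 3/5, equal lengths so no brevity penalty:
print(bleu(pred, label, k=2))  # (5/6)**0.5 * (3/5)**0.25 ~ 0.8034
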
Example 29
Project: FastTextKorean   Author: skyer9   File: get_frequent_word.py   License: Apache License 2.0
def check_morphs(lst, corpus_fname, output_fname, log_fname):
    mcab = mecab.MeCab()

    model_fname = 'soyword.model'
    word_extractor = WordExtractor(
        min_frequency=100,
        min_cohesion_forward=0.05,
        min_right_branching_entropy=0.0
    )
    word_extractor.load(model_fname)
    scores = word_extractor.word_scores()
    scores = {key:(scores[key].cohesion_forward * math.exp(scores[key].right_branching_entropy)) for key in scores.keys()}
    soy_tokenizer = LTokenizer(scores=scores)

    with open(corpus_fname, 'r', encoding='utf-8') as f1, \
         open(output_fname, 'w', encoding='utf-8') as f2, \
         open(log_fname, 'w', encoding='utf-8') as f3:
        sentences = f1.read()

        for item in lst:
            cnt, word = item

            if cnt < 100 or len(word) == 1:
                continue

            tokens = mcab.morphs(word)
            if len(tokens) == 1:
                continue

            (cho, jung, jong) = hgtk.letter.decompose(word[-1])
            if 'ㄱ' <= jong <= 'ㅎ':
                dic_line = "{},,,,NNP,*,{},{},*,*,*,*,*".format(word, 'T', word)
            else:
                dic_line = "{},,,,NNP,*,{},{},*,*,*,*,*".format(word, 'F', word)
            f2.writelines(dic_line + '\n')
            f3.writelines("{}\t{}\t{}".format(word, ' '.join(tokens), cnt) + '\n') 
Example 30
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: train.py   License: Apache License 2.0
def evaluate(valid_module, data_iter, epoch, mode, bptt, batch_size):
    total_loss = 0.0
    nbatch = 0
    for batch in data_iter:
        valid_module.forward(batch, is_train=False)
        outputs = valid_module.get_loss()
        total_loss += mx.nd.sum(outputs[0]).asscalar()
        nbatch += 1
    data_iter.reset()
    loss = total_loss / bptt / batch_size / nbatch
    logging.info('Iter[%d] %s loss:\t%.7f, Perplexity: %.7f' % \
                 (epoch, mode, loss, math.exp(loss)))
    return loss 
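The math.exp(loss) pattern here is the usual definition of perplexity: the exponential of the average per-token cross-entropy (in nats). A tiny standalone illustration:

import math

avg_loss = 4.5               # average cross-entropy per token
print(math.exp(avg_loss))    # perplexity ~ 90.0
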
Example 31
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py   License: Apache License 2.0
def get(self):
        """Returns the current evaluation result.

        Returns
        -------
        Tuple of (str, float)
            Representing name of the metric and evaluation result.
        """
        return (self.name, math.exp(self.sum_metric/self.num_inst))

####################
# REGRESSION METRICS
#################### 
Example 32
Project: DensityPeakCluster   Author: lanbing510   File: cluster.py   License: MIT License
def local_density(max_id, distances, dc, guass=True, cutoff=False):
	'''
	Compute all points' local density

	Args:
		max_id    : max continuous id
		distances : distance dict
		guass     : use the gaussian kernel or not (can't be used together with cutoff)
		cutoff    : use the cutoff kernel or not (can't be used together with guass)

	Returns:
		local density vector, indexed by point index starting from 1
	'''
	assert guass != cutoff, 'exactly one of guass/cutoff must be enabled'
	logger.info("PROGRESS: compute local density")
	guass_func = lambda dij, dc : math.exp(- (dij / dc) ** 2)
	cutoff_func = lambda dij, dc: 1 if dij < dc else 0
	func = guass and guass_func or cutoff_func
	rho = [-1] + [0] * max_id
	for i in xrange(1, max_id):
		for j in xrange(i + 1, max_id + 1):
			rho[i] += func(distances[(i, j)], dc)
			rho[j] += func(distances[(i, j)], dc)
		if i % (max_id / 10) == 0:
			logger.info("PROGRESS: at index #%i" % (i))
	return np.array(rho, np.float32) 
Example 33
Project: DOTA_models   Author: ringringyi   File: accountant.py   License: Apache License 2.0
def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                  num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume we use Gaussian
    noise on a randomly sampled batch so we get better composition: 1. the per
    batch privacy is computed using the privacy amplification via sampling bound;
    2. the composition is done using the composition with Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.
    Returns:
      a TensorFlow operation for updating the privacy spending.
    """

    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
      amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                        self._total_examples)
      # Use privacy amplification via sampling bound.
      # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
      # TODO(liqzhang) Add a link to a document with formal statement
      # and proof.
      amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
          tf.exp(eps) - 1.0)), [1])
      amortize_delta = tf.reshape(amortize_ratio * delta, [1])
      return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                      tf.square(amortize_eps)),
                        tf.assign_add(self._delta_sum, amortize_delta)]) 
Example 34
Project: DOTA_models   Author: ringringyi   File: accountant.py   License: Apache License 2.0
def _compute_log_moment(self, sigma, q, moment_order):
    """Compute high moment of privacy loss.

    Args:
      sigma: the noise sigma, in the multiples of the sensitivity.
      q: the sampling ratio.
      moment_order: the order of moment.
    Returns:
      log E[exp(moment_order * X)]
    """
    pass 
Example 35
Project: DOTA_models   Author: ringringyi   File: accountant.py   License: Apache License 2.0
def _differential_moments(self, sigma, s, t):
    """Compute 0 to t-th differential moments for Gaussian variable.

        E[(P(x+s)/P(x+s-1)-1)^t]
      = sum_{i=0}^t (t choose i) (-1)^{t-i} E[(P(x+s)/P(x+s-1))^i]
      = sum_{i=0}^t (t choose i) (-1)^{t-i} E[exp(-i*(2*x+2*s-1)/(2*sigma^2))]
      = sum_{i=0}^t (t choose i) (-1)^{t-i} exp(i(i+1-2*s)/(2 sigma^2))
    Args:
      sigma: the noise sigma, in the multiples of the sensitivity.
      s: the shift.
      t: 0 to t-th moment.
    Returns:
      0 to t-th moment as a tensor of shape [t+1].
    """
    assert t <= self._max_moment_order, ("The order of %d is out "
                                         "of the upper bound %d."
                                         % (t, self._max_moment_order))
    binomial = tf.slice(self._binomial_table, [0, 0],
                        [t + 1, t + 1])
    signs = numpy.zeros((t + 1, t + 1), dtype=numpy.float64)
    for i in range(t + 1):
      for j in range(t + 1):
        signs[i, j] = 1.0 - 2 * ((i - j) % 2)
    exponents = tf.constant([j * (j + 1.0 - 2.0 * s) / (2.0 * sigma * sigma)
                             for j in range(t + 1)], dtype=tf.float64)
    # x[i, j] = binomial[i, j] * signs[i, j] = (i choose j) * (-1)^{i-j}
    x = tf.multiply(binomial, signs)
    # y[i, j] = x[i, j] * exp(exponents[j])
    #         = (i choose j) * (-1)^{i-j} * exp(j(j-1)/(2 sigma^2))
    # Note: this computation is done by broadcasting pointwise multiplication
    # between [t+1, t+1] tensor and [t+1] tensor.
    y = tf.multiply(x, tf.exp(exponents))
    # z[i] = sum_j y[i, j]
    #      = sum_j (i choose j) * (-1)^{i-j} * exp(j(j-1)/(2 sigma^2))
    z = tf.reduce_sum(y, 1)
    return z 
Example 36
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py   License: Apache License 2.0
def pdf_gauss_mp(x, sigma, mean):
  return mp.mpf(1.) / mp.sqrt(mp.mpf("2.") * sigma ** 2 * mp.pi) * mp.exp(
      - (x - mean) ** 2 / (mp.mpf("2.") * sigma ** 2)) 
Example 37
Project: DOTA_models   Author: ringringyi   File: data_utils.py   License: Apache License 2.0
def safe_exp(x):
  perp = 10000
  x = float(x)
  if x < 100: perp = math.exp(x)
  if perp > 10000: return 10000
  return perp 
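The clamping above keeps perplexity finite and bounded; with safe_exp in scope:

print(safe_exp(2.0))           # math.exp(2.0) ~ 7.389
print(safe_exp(150.0))         # x >= 100, so the default cap of 10000 is returned
print(safe_exp(float('inf')))  # likewise capped at 10000
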
Example 38
Project: DOTA_models   Author: ringringyi   File: caption_generator_test.py   License: Apache License 2.0
def _assertExpectedCaptions(self,
                              expected_captions,
                              beam_size=3,
                              max_caption_length=20,
                              length_normalization_factor=0):
    """Tests that beam search generates the expected captions.

    Args:
      expected_captions: A sequence of pairs (sentence, probability), where
        sentence is a list of integer ids and probability is a float in [0, 1].
      beam_size: Parameter passed to beam_search().
      max_caption_length: Parameter passed to beam_search().
      length_normalization_factor: Parameter passed to beam_search().
    """
    expected_sentences = [c[0] for c in expected_captions]
    expected_probabilities = [c[1] for c in expected_captions]

    # Generate captions.
    generator = caption_generator.CaptionGenerator(
        model=FakeModel(),
        vocab=FakeVocab(),
        beam_size=beam_size,
        max_caption_length=max_caption_length,
        length_normalization_factor=length_normalization_factor)
    actual_captions = generator.beam_search(sess=None, encoded_image=None)

    actual_sentences = [c.sentence for c in actual_captions]
    actual_probabilities = [math.exp(c.logprob) for c in actual_captions]

    self.assertEqual(expected_sentences, actual_sentences)
    self.assertAllClose(expected_probabilities, actual_probabilities) 
Example 39
Project: DOTA_models   Author: ringringyi   File: run_inference.py   License: Apache License 2.0
def main(_):
  # Build the inference graph.
  g = tf.Graph()
  with g.as_default():
    model = inference_wrapper.InferenceWrapper()
    restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
                                               FLAGS.checkpoint_path)
  g.finalize()

  # Create the vocabulary.
  vocab = vocabulary.Vocabulary(FLAGS.vocab_file)

  filenames = []
  for file_pattern in FLAGS.input_files.split(","):
    filenames.extend(tf.gfile.Glob(file_pattern))
  tf.logging.info("Running caption generation on %d files matching %s",
                  len(filenames), FLAGS.input_files)

  with tf.Session(graph=g) as sess:
    # Load the model from checkpoint.
    restore_fn(sess)

    # Prepare the caption generator. Here we are implicitly using the default
    # beam search parameters. See caption_generator.py for a description of the
    # available beam search parameters.
    generator = caption_generator.CaptionGenerator(model, vocab)

    for filename in filenames:
      with tf.gfile.GFile(filename, "r") as f:
        image = f.read()
      captions = generator.beam_search(sess, image)
      print("Captions for image %s:" % os.path.basename(filename))
      for i, caption in enumerate(captions):
        # Ignore begin and end words.
        sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
        sentence = " ".join(sentence)
        print("  %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob))) 
Example 40
Project: gradient-descent   Author: codebox   File: hypothesis.py   License: MIT License
def calculate(self, values):
        exp_value = None
        try:
            exp_value = math.exp(-super(LogisticHypothesis, self).sum_of_products(values))
        except OverflowError:
            Log().warn('OverflowError for values: ' + str(values))
            return 1

        return 1 / (1 + exp_value) 
Example 41
Project: sic   Author: Yanixos   File: random.py   License: GNU General Public License v3.0
def lognormvariate(self, mu, sigma):
        """Log normal distribution.

        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.

        """
        return _exp(self.normalvariate(mu, sigma))

## -------------------- exponential distribution -------------------- 
Example 42
Project: CAFA_assessment_tool   Author: ashleyzhou972   File: Stats.py   License: GNU General Public License v3.0
def hypergeometric_probability(k, n, K, N):
    """
    Returns probability of k successes in n draws without replacement from
    a finite population of size N containing a maximum of K successes.
    """
    return exp(lncombination(K, k) + lncombination(N - K, n - k) - lncombination(N, n)) 
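The lncombination helper is not shown in this excerpt; a plausible version (an assumption, not the project's code) computes log C(n, k) via math.lgamma so that only the final exp leaves log space:

from math import exp, lgamma

def lncombination(n, k):
    # hypothetical helper matching the excerpt's usage: log of C(n, k)
    return lgamma(n + 1) - lgamma(k + 1) - lgamma(n - k + 1)

def hypergeometric_probability(k, n, K, N):
    return exp(lncombination(K, k) + lncombination(N - K, n - k) - lncombination(N, n))

# P(1 success in 2 draws from 5 items containing 2 successes):
print(hypergeometric_probability(1, 2, 2, 5))  # C(2,1)*C(3,1)/C(5,2) = 0.6
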
Example 43
Project: PersonalRecommendation   Author: ma-zhiyuan   File: check.py   License: Apache License 2.0
def sigmoid(x):
    """
    sigmoid function
    """
    return 1/(1+math.exp(-x)) 
Example 45
Project: TradzQAI   Author: kkuette   File: utils.py   License: Apache License 2.0
def sigmoid(x):
    try:
        exp = math.exp(-x)
    except OverflowError:
        exp = float('inf')
    return 1 / (1 + exp) 
Example 46
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py   License: Apache License 2.0
def __pow__(self,exp):
        a = self.__log__()
        a = self.mul(exp, a)
        return a.__exp__() 
Example 47
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py   License: Apache License 2.0
def __rpow__(self,exp):
        a = self.__log__()
        a = self.rmul(exp, a)
        return a.__exp__() 
Example 48
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py   License: Apache License 2.0
def __exp__(self):
        exp_x = math.exp(self.real)
        return Complex(exp_x * math.cos(self.imag), exp_x * math.sin(self.imag)) 
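With the Complex class above in scope, Euler's formula e**(i*pi) = -1 makes a quick check:

import math

z = Complex(0.0, math.pi)
w = z.__exp__()
print(w.real, w.imag)  # -1.0 and ~1.2e-16 (zero up to rounding)
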
Example 49
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py   License: Apache License 2.0
def rpow(c, exp):
        return c.__rpow__(exp) 
Example 50
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py   License: Apache License 2.0
def pow(c, exp):
        return c.__pow__(exp) 
Example 51
Project: GCN-SeA   Author: sumanbanerjee1   File: nlp.py   License: Apache License 2.0
def sentence_bleu_4(hyp, refs, weights=[0.25, 0.25, 0.25, 0.25]):
    # input : single sentence, multiple references
    count = [0, 0, 0, 0]
    clip_count = [0, 0, 0, 0]
    r = 0
    c = 0

    for i in range(4):
        hypcnts = Counter(ngrams(hyp, i + 1))
        cnt = sum(hypcnts.values())
        count[i] += cnt

        # compute clipped counts
        max_counts = {}
        for ref in refs:
            refcnts = Counter(ngrams(ref, i + 1))
            for ng in hypcnts:
                max_counts[ng] = max(max_counts.get(ng, 0), refcnts[ng])
        clipcnt = dict((ng, min(count, max_counts[ng])) \
                       for ng, count in hypcnts.items())
        clip_count[i] += sum(clipcnt.values())

    bestmatch = [1000, 1000]
    for ref in refs:
        if bestmatch[0] == 0:
            break
        diff = abs(len(ref) - len(hyp))
        if diff < bestmatch[0]:
            bestmatch[0] = diff
            bestmatch[1] = len(ref)
    r = bestmatch[1]
    c = len(hyp)

    p0 = 1e-7
    bp = math.exp(-abs(1.0 - float(r) / float(c + p0)))

    p_ns = [float(clip_count[i]) / float(count[i] + p0) + p0 for i in range(4)]
    s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)
    bleu_hyp = bp * math.exp(s)

    return bleu_hyp 
Example 52
Project: lung_nodule_classifier   Author: xairc   File: main.py   License: MIT License
def sigmoid(x):
  return 1 / (1 + math.exp(-x)) 
Example 53
Project: pcfg-sampling   Author: wilkeraziz   File: slice_variable.py   License: Apache License 2.0
def weight(self, sym, start, end, theta):
        state = (sym, start, end)
        try:
            u = self.slice_variables[state]
        except KeyError:
            raise ValueError('I do not expect to reweight a rule for an unseen state: %s' % str(state))

        if theta > u:
            return - beta.logpdf(math.exp(u), self.a, self.b)

        else:
            raise ValueError('I do not expect to reweight rules scoring less than the threshold') 
Example 54
Project: fine-lm   Author: akzaidi   File: common_layers.py   License: MIT License
def inverse_exp_decay(max_step, min_value=0.01):
  """Inverse-decay exponentially from 0.01 to 1.0 reached at max_step."""
  inv_base = tf.exp(tf.log(min_value) / float(max_step))
  step = tf.to_float(tf.train.get_global_step())
  return inv_base**tf.maximum(float(max_step) - step, 0.0) 
Example 55
Project: fine-lm   Author: akzaidi   File: common_layers.py   License: MIT License
def relu_density_logit(x, reduce_dims):
  """logit(density(x)).

  Useful for histograms.

  Args:
    x: a Tensor, typically the output of tf.relu
    reduce_dims: a list of dimensions

  Returns:
    a Tensor
  """
  frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)
  scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))
  return scaled 
Example 56
Project: transferlearning   Author: jindongwang   File: MRAN.py   License: MIT License
def train(epoch, model, source_loader, target_loader):
    # the final fully connected layer uses a learning rate 10x that of the earlier layers
    LEARNING_RATE = args.lr / math.pow((1 + 10 * (epoch - 1) / args.epochs), 0.75)
    print("learning rate:", LEARNING_RATE)
    if args.diff_lr:
        optimizer = torch.optim.SGD([
        {'params': model.sharedNet.parameters()},
        {'params': model.Inception.parameters(), 'lr': LEARNING_RATE},
        ], lr=LEARNING_RATE / 10, momentum=args.momentum, weight_decay=args.l2_decay)
    else:
        optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=args.momentum,weight_decay = args.l2_decay)
    model.train()
    tgt_iter = iter(target_loader)
    for batch_idx, (source_data, source_label) in enumerate(source_loader):
        try:
            target_data, _ = next(tgt_iter)
        except StopIteration:
            # restart the target iterator when it is exhausted
            tgt_iter = iter(target_loader)
            target_data, _ = next(tgt_iter)
        
        if args.cuda:
            source_data, source_label = source_data.cuda(), source_label.cuda()
            target_data = target_data.cuda()
        optimizer.zero_grad()

        s_output, mmd_loss = model(source_data, target_data, source_label)
        soft_loss = F.nll_loss(F.log_softmax(s_output, dim=1), source_label)
        # print((2 / (1 + math.exp(-10 * (epoch) / args.epochs)) - 1))
        if args.gamma == 1:
            gamma = 2 / (1 + math.exp(-10 * (epoch) / args.epochs)) - 1
        if args.gamma == 2:
            gamma = epoch /args.epochs
        loss = soft_loss + gamma * mmd_loss
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tlabel_Loss: {:.6f}\tmmd_Loss: {:.6f}'.format(
                epoch, batch_idx * len(source_data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item(), soft_loss.item(), mmd_loss.item())) 
Example 57
Project: dac   Author: KBNLresearch   File: models.py   License: GNU General Public License v3.0
def predict(self, example):
        '''
        Classify a new example.
        '''
        dec = self.model.decision_function([example])[0]
        prob = 1 / (1 + math.exp(dec * -1))
        return prob 
Example 58
Project: rowgenerators   Author: Metatab   File: pipeline.py   License: MIT License
def __init__(self, count=20, skip=5, est_length=10000):

        from math import log, exp
        self.skip = float(skip)
        self.skip_factor = exp(log(est_length / self.skip) / (count - 1))
        self.count = count
        self.i = 0 
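The skip_factor above is chosen so that row indices sampled as skip * skip_factor**i form a geometric progression from skip up to est_length at i = count - 1. A quick check with the defaults:

from math import exp, log

count, skip, est_length = 20, 5, 10000
skip_factor = exp(log(est_length / skip) / (count - 1))
rows = [skip * skip_factor ** i for i in range(count)]
print(round(rows[0]), round(rows[-1]))  # 5 10000
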
Example 59
Project: pyCEST   Author: pganssle   File: cjlib.py   License: MIT License
def t2fit_leastsq(te, mm):
    """ Do a mono-exponential decay curve fit to given data """

    # Calculate some mins and max
    noise_max = 2*min(mm)
    pd_max = 2*max(mm)

    coeffs = polyfit( te, log(mm), 1 )    
    t2_guess = -1 / coeffs[0]

    # Lambda to calculate the residuals between the simulated and the measured data

    #residuals = lambda x, te, mm: (x[0]*array([math.exp(-tt/x[1]) for tt in te])+x[2] ) - mm  
    def residuals( x, te, mm ): 

        if any(x<0):
            x[x<0] = 0.001 
        res = (x[0]*array([math.exp(-tt/x[1]) for tt in te])+x[2] ) - mm  
        return res

    p0 = array([exp(coeffs[1]), t2_guess, mm[-1]/2])

    # Call the optimization program
    x,ier = leastsq(residuals, p0, args=(te, mm))
    
    # Return the appropriate values
    return x,ier

##==============================================================================
##
##  Filtering
##
##============================================================================== 
Example 60
Project: pyCEST   Author: pganssle   File: cjlib.py   License: MIT License
def gauss_kern(size, sizey=None):
     """ Returns a normalized 2D gauss kernel array for convolutions """
     size = int(size)
     if not sizey:
         sizey = size
     else:
         sizey = int(sizey)
     x, y = mgrid[-size:size+1, -sizey:sizey+1]
     g = exp(-(x**2/float(size)+y**2/float(sizey)))
     return g / g.sum() 
Example 61
Project: pyCEST   Author: pganssle   File: cjlib.py   License: MIT License
def aniso(v, kappa=-1, N=1):

    if kappa == -1:
        kappa = prctile(v, 40)

    vf = v.copy()

    for ii in range(N):
        dE = -vf + roll(vf,-1,0)
        dW = vf - roll(vf,1,0)

        dN = -vf + roll(vf,-1,1)
        dS = vf - roll(vf,1,1)

        if len(v.shape) > 2:
            dU = -vf + roll(vf,-1,2)
            dD = vf - roll(vf,1,2)

        vf = (vf +
              3./28. * (((exp(- (abs(dE) / kappa)**2 ) * dE) - (exp(- (abs(dW) / kappa)**2 ) * dW)) +
                        ((exp(- (abs(dN) / kappa)**2 ) * dN) - (exp(- (abs(dS) / kappa)**2 ) * dS))))

        if len(v.shape) > 2:
            vf += 1./28. * ((exp(- (abs(dU) / kappa)**2 ) * dU) - (exp(- (abs(dD) / kappa)**2 ) * dD))

    return vf


##==============================================================================
##
##  Calculating
##
##============================================================================== 
Example 62
Project: scikit-geodesic   Author: suttond   File: exp_metric.py   License: GNU Lesser General Public License v3.0
def metric_coefficient(x):
    return exp(-np.inner(n,x)) 
Example 63
Project: scikit-geodesic   Author: suttond   File: exp_metric.py   License: GNU Lesser General Public License v3.0
def metric_coefficient_gradient(x):
    return -n * exp(-np.inner(n,x)) 
Example 64
Project: pyblish-win   Author: pyblish   File: random.py   License: GNU Lesser General Public License v3.0
def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.

        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.

        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle

        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.

        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.

        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()

        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)

        while 1:
            u1 = random()
            z = _cos(_pi * u1)

            d = z / (r + z)
            u2 = random()
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break

        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        u3 = random()
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI

        return theta

## -------------------- gamma distribution -------------------- 
Example 65
Project: pyblish-win   Author: pyblish   File: test_math.py   License: GNU Lesser General Public License v3.0
def test_mtestfile(self):
        ALLOWED_ERROR = 20  # permitted error, in ulps
        fail_fmt = "{}:{}({!r}): expected {!r}, got {!r}"

        failures = []
        for id, fn, arg, expected, flags in parse_mtestfile(math_testcases):
            func = getattr(math, fn)

            if 'invalid' in flags or 'divide-by-zero' in flags:
                expected = 'ValueError'
            elif 'overflow' in flags:
                expected = 'OverflowError'

            try:
                got = func(arg)
            except ValueError:
                got = 'ValueError'
            except OverflowError:
                got = 'OverflowError'

            accuracy_failure = None
            if isinstance(got, float) and isinstance(expected, float):
                if math.isnan(expected) and math.isnan(got):
                    continue
                if not math.isnan(expected) and not math.isnan(got):
                    if fn == 'lgamma':
                        # we use a weaker accuracy test for lgamma;
                        # lgamma only achieves an absolute error of
                        # a few multiples of the machine accuracy, in
                        # general.
                        accuracy_failure = acc_check(expected, got,
                                                  rel_err = 5e-15,
                                                  abs_err = 5e-15)
                    elif fn == 'erfc':
                        # erfc has less-than-ideal accuracy for large
                        # arguments (x ~ 25 or so), mainly due to the
                        # error involved in computing exp(-x*x).
                        #
                        # XXX Would be better to weaken this test only
                        # for large x, instead of for all x.
                        accuracy_failure = ulps_check(expected, got, 2000)

                    else:
                        accuracy_failure = ulps_check(expected, got, 20)
                    if accuracy_failure is None:
                        continue

            if isinstance(got, str) and isinstance(expected, str):
                if got == expected:
                    continue

            fail_msg = fail_fmt.format(id, fn, arg, expected, got)
            if accuracy_failure is not None:
                fail_msg += ' ({})'.format(accuracy_failure)
            failures.append(fail_msg)

        if failures:
            self.fail('Failures in test_mtestfile:\n  ' +
                      '\n  '.join(failures)) 
Example 66
Project: deep-learning-note   Author: wdxtub   File: utils.py   License: MIT License
def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                          vocab_size, device, corpus_indices, idx_to_char,
                          char_to_idx, is_random_iter, num_epochs, num_steps,
                          lr, clipping_theta, batch_size, pred_period,
                          pred_len, prefixes):
    if is_random_iter:
        data_iter_fn = data_iter_random
    else:
        data_iter_fn = data_iter_consecutive
    params = get_params()
    loss = nn.CrossEntropyLoss()

    for epoch in range(num_epochs):
        if not is_random_iter:  # with consecutive sampling, initialize the hidden state once per epoch
            state = init_rnn_state(batch_size, num_hiddens, device)
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)
        for X, Y in data_iter:
            if is_random_iter:  # with random sampling, re-initialize the hidden state before each minibatch
                state = init_rnn_state(batch_size, num_hiddens, device)
            else:
                # otherwise detach the hidden state from the computation graph, so that
                # gradients only depend on the current minibatch (keeps backprop cost bounded)
                for s in state:
                    s.detach_()

            inputs = to_onehot(X, vocab_size)
            # outputs is a list of num_steps matrices, each of shape (batch_size, vocab_size)
            (outputs, state) = rnn(inputs, state, params)
            # after concatenation the shape is (num_steps * batch_size, vocab_size)
            outputs = torch.cat(outputs, dim=0)
            # Y has shape (batch_size, num_steps); transpose and flatten it into a
            # vector of length batch_size * num_steps so it aligns with the output rows
            y = torch.transpose(Y, 0, 1).contiguous().view(-1)
            # average classification error via cross-entropy loss
            l = loss(outputs, y.long())

            # zero the gradients
            if params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            grad_clipping(params, clipping_theta, device)  # clip gradients
            sgd(params, lr, 1)  # the loss is already averaged, so gradients need no further averaging
            l_sum += l.item() * y.shape[0]
            n += y.shape[0]

        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, math.exp(l_sum / n), time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn(prefix, pred_len, rnn, params, init_rnn_state,
                                        num_hiddens, vocab_size, device, idx_to_char, char_to_idx)) 
Example 67
Project: deep-learning-note   Author: wdxtub   File: utils.py   License: MIT License
def train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,
                                  corpus_indices, idx_to_char, char_to_idx,
                                  num_epochs, num_steps, lr, clipping_theta,
                                  batch_size, pred_period, pred_len, prefixes):
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.to(device)
    state = None
    for epoch in range(num_epochs):
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_consecutive(corpus_indices, batch_size, num_steps, device)  # consecutive sampling
        for X, Y in data_iter:
            if state is not None:
                # detach the hidden state from the computation graph, so that
                # gradients only depend on the current minibatch (keeps backprop cost bounded)
                if isinstance(state, tuple):  # LSTM, state:(h, c)
                    state = (state[0].detach(), state[1].detach())
                else:
                    state = state.detach()

            (output, state) = model(X, state)  # output shape: (num_steps * batch_size, vocab_size)

            # Y has shape (batch_size, num_steps); transpose and flatten it into a
            # vector of length batch_size * num_steps so it aligns with the output rows
            y = torch.transpose(Y, 0, 1).contiguous().view(-1)
            l = loss(output, y.long())

            optimizer.zero_grad()
            l.backward()
            # clip gradients
            grad_clipping(model.parameters(), clipping_theta, device)
            optimizer.step()
            l_sum += l.item() * y.shape[0]
            n += y.shape[0]

        try:
            perplexity = math.exp(l_sum / n)
        except OverflowError:
            perplexity = float('inf')
        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, perplexity, time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn_pytorch(
                    prefix, pred_len, model, vocab_size, device, idx_to_char,
                    char_to_idx)) 
Example 68
Project: deep-learning-note   Author: wdxtub   File: 31_char_rnn_raw.py   License: MIT License
def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                          vocab_size, device, corpus_indices, idx_to_char,
                          char_to_idx, is_random_iter, num_epochs, num_steps,
                          lr, clipping_theta, batch_size, pred_period,
                          pred_len, prefixes):
    if is_random_iter:
        data_iter_fn = utils.data_iter_random
    else:
        data_iter_fn = utils.data_iter_consecutive
    params = get_params()
    loss = nn.CrossEntropyLoss()

    for epoch in range(num_epochs):
        if not is_random_iter:  # with consecutive sampling, initialize the hidden state once per epoch
            state = init_rnn_state(batch_size, num_hiddens, device)
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)
        for X, Y in data_iter:
            if is_random_iter:  # with random sampling, re-initialize the hidden state before each minibatch
                state = init_rnn_state(batch_size, num_hiddens, device)
            else:
                # otherwise detach the hidden state from the computation graph, so that
                # gradients only depend on the current minibatch (keeps backprop cost bounded)
                for s in state:
                    s.detach_()

            inputs = to_onehot(X, vocab_size)
            # outputs is a list of num_steps matrices, each of shape (batch_size, vocab_size)
            (outputs, state) = rnn(inputs, state, params)
            # after concatenation the shape is (num_steps * batch_size, vocab_size)
            outputs = torch.cat(outputs, dim=0)
            # Y has shape (batch_size, num_steps); transpose and flatten it into a
            # vector of length batch_size * num_steps so it aligns with the output rows
            y = torch.transpose(Y, 0, 1).contiguous().view(-1)
            # average classification error via cross-entropy loss
            l = loss(outputs, y.long())

            # zero the gradients
            if params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            grad_clipping(params, clipping_theta, device)  # clip gradients
            utils.sgd(params, lr, 1)  # the loss is already averaged, so gradients need no further averaging
            l_sum += l.item() * y.shape[0]
            n += y.shape[0]

        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, math.exp(l_sum / n), time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn(prefix, pred_len, rnn, params, init_rnn_state,
                                        num_hiddens, vocab_size, device, idx_to_char, char_to_idx))


# build the vocabulary from the first 20000 characters here
Example 69
Project: FastTextKorean   Author: skyer9   File: get_freq_newword.py   License: Apache License 2.0
def check_morphs(lst, corpus_fname, output_fname, log_fname):
    mcab = mecab.MeCab()

    model_fname = 'soyword.model'
    word_extractor = WordExtractor(
        min_frequency=100,
        min_cohesion_forward=0.05,
        min_right_branching_entropy=0.0
    )
    word_extractor.load(model_fname)
    scores = word_extractor.word_scores()
    scores = {key:(scores[key].cohesion_forward * math.exp(scores[key].right_branching_entropy)) for key in scores.keys()}
    soy_tokenizer = LTokenizer(scores=scores)

    with open(corpus_fname, 'r', encoding='utf-8') as f1, \
         open(output_fname, 'w', encoding='utf-8') as f2, \
         open(log_fname, 'w', encoding='utf-8') as f3:
        sentences = f1.read()

        for item in lst:
            cnt, word = item

            if cnt < 10 or len(word) == 1:
                continue

            tokens = mcab.morphs(word)
            if len(tokens) == 1:
                continue

            soy_tokens = soy_tokenizer.tokenize(word)
            if ' '.join(tokens) == ' '.join(soy_tokens):
                continue

            if is_all_nng(mcab.pos(word)):
                #print("nouns only : {}".format(word))
                #print("{}\t{}\t{}\t{}".format(word, ' '.join(tokens), ' '.join(soy_tokens), cnt))
                continue

            if len(soy_tokens) > 1:
                continue

            #print("{}\t{}\t{}\t{}".format(word, ' '.join(tokens), ' '.join(soy_tokens), cnt))

            words = re.findall(' '.join(tokens), sentences)
            if len(words) < (cnt * 0.05):
                # if the split form occurs less than 5% as often as the unsplit word, treat the morpheme split as an error
                (cho, jung, jong) = hgtk.letter.decompose(word[-1])
                if 'ㄱ' <= jong <= 'ㅎ':
                    dic_line = "{},,,1000,NNP,*,{},{},*,*,*,*,*".format(word, 'T', word)
                else:
                    dic_line = "{},,,1000,NNP,*,{},{},*,*,*,*,*".format(word, 'F', word)
                print("{}\t{}\t{}\t{}\t{}\t{}".format(word, ' '.join(tokens), ' '.join(soy_tokens), cnt, len(words), jong))
                f2.writelines(dic_line + '\n')
                f3.writelines("{}\t{}\t{}\t{}\t{}".format(word, ' '.join(tokens), ' '.join(soy_tokens), cnt, len(words)) + '\n') 
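The tokenizer scores above weight cohesion multiplicatively by exp() of the right branching entropy. A hedged sketch of the same scoring rule on plain data (the Score fields mirror soynlp's WordExtractor output; the values are invented):

import math
from collections import namedtuple

Score = namedtuple('Score', ['cohesion_forward', 'right_branching_entropy'])

def combine_scores(scores):
    # score = cohesion_forward * exp(right_branching_entropy)
    return {word: s.cohesion_forward * math.exp(s.right_branching_entropy)
            for word, s in scores.items()}

print(combine_scores({'example': Score(0.5, 1.2)}))  # ~0.5 * 3.32 = 1.66
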
Example 70
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: train.py    Apache License 2.0 4 votes vote down vote up
def train():
    best_val = float("Inf")
    for epoch in range(args.epochs):
        total_L = 0.0
        start_time = time.time()
        hidden = model.begin_state(func=mx.nd.zeros, batch_size=args.batch_size, ctx=context)
        for i, (data, target) in enumerate(train_data):
            data = data.as_in_context(context).T
            target = target.as_in_context(context).T.reshape((-1, 1))
            hidden = detach(hidden)
            with autograd.record():
                output, hidden = model(data, hidden)
                # Here L is a vector of size batch_size * bptt size
                L = loss(output, target)
                L = L / (args.bptt * args.batch_size)
                L.backward()

            grads = [p.grad(context) for p in model.collect_params().values()]
            gluon.utils.clip_global_norm(grads, args.clip)

            trainer.step(1)
            total_L += mx.nd.sum(L).asscalar()

            if i % args.log_interval == 0 and i > 0:
                cur_L = total_L / args.log_interval
                print('[Epoch %d Batch %d] loss %.2f, ppl %.2f'%(
                    epoch, i, cur_L, math.exp(cur_L)))
                total_L = 0.0

        val_L = eval(val_data)

        print('[Epoch %d] time cost %.2fs, valid loss %.2f, valid ppl %.2f'%(
            epoch, time.time()-start_time, val_L, math.exp(val_L)))

        if val_L < best_val:
            best_val = val_L
            test_L = eval(test_data)
            model.save_parameters(args.save)
            print('test loss %.2f, test ppl %.2f'%(test_L, math.exp(test_L)))
        else:
            args.lr = args.lr*0.25
            trainer.set_learning_rate(args.lr) 
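Two idioms in this loop: the reported perplexity is exp() of the per-token loss, and the learning rate is quartered whenever validation loss fails to improve. A minimal sketch of that plateau decay (names are illustrative, not from the project):

import math

def step_lr_on_plateau(lr, val_loss, best_val, factor=0.25):
    """Quarter the learning rate when validation loss stops improving."""
    if val_loss < best_val:
        return lr, val_loss           # improvement: keep lr, update best
    return lr * factor, best_val      # plateau: decay lr, keep best

lr, best = 1.0, float('Inf')
for loss in [2.0, 1.5, 1.6, 1.4]:
    lr, best = step_lr_on_plateau(lr, loss, best)
    print('loss %.2f  ppl %.2f  lr %.4f' % (loss, math.exp(loss), lr))
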
Example 71
Project: DOTA_models   Author: ringringyi   File: track_perplexity.py    Apache License 2.0 4 votes vote down vote up
def evaluate_model(sess, losses, weights, num_batches, global_step,
                   summary_writer, summary_op):
  """Computes perplexity-per-word over the evaluation dataset.

  Summaries and perplexity-per-word are written out to the eval directory.

  Args:
    sess: Session object.
    losses: A Tensor of any shape; the target cross entropy losses for the
      current batch.
    weights: A Tensor of weights corresponding to losses.
    num_batches: Integer; the number of evaluation batches.
    global_step: Integer; global step of the model checkpoint.
    summary_writer: Instance of SummaryWriter.
    summary_op: Op for generating model summaries.
  """
  # Log model summaries on a single batch.
  summary_str = sess.run(summary_op)
  summary_writer.add_summary(summary_str, global_step)

  start_time = time.time()
  sum_losses = 0.0
  sum_weights = 0.0
  for i in xrange(num_batches):
    batch_losses, batch_weights = sess.run([losses, weights])
    sum_losses += np.sum(batch_losses * batch_weights)
    sum_weights += np.sum(batch_weights)
    if not i % 100:
      tf.logging.info("Computed losses for %d of %d batches.", i + 1,
                      num_batches)
  eval_time = time.time() - start_time

  perplexity = math.exp(sum_losses / sum_weights)
  tf.logging.info("Perplexity = %f (%.2f sec)", perplexity, eval_time)

  # Log perplexity to the SummaryWriter.
  summary = tf.Summary()
  value = summary.value.add()
  value.simple_value = perplexity
  value.tag = "perplexity"
  summary_writer.add_summary(summary, global_step)

  # Write the Events file to the eval directory.
  summary_writer.flush()
  tf.logging.info("Finished processing evaluation at global step %d.",
                  global_step) 
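The perplexity here is exp() of a weighted mean: each token's loss is scaled by its weight (typically 0 for padding) and normalized by the total weight. A small numpy sketch of the same reduction (the arrays are made-up stand-ins for batch outputs):

import math
import numpy as np

losses  = np.array([2.1, 1.9, 2.5, 0.0])   # per-token cross-entropy
weights = np.array([1.0, 1.0, 1.0, 0.0])   # zero weight masks out padding

perplexity = math.exp(np.sum(losses * weights) / np.sum(weights))
print('Perplexity = %f' % perplexity)      # exp of the mean unmasked loss
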
Example 72
Project: DOTA_models   Author: ringringyi   File: evaluate.py    Apache License 2.0 4 votes vote down vote up
def evaluate_model(sess, model, global_step, summary_writer, summary_op):
  """Computes perplexity-per-word over the evaluation dataset.

  Summaries and perplexity-per-word are written out to the eval directory.

  Args:
    sess: Session object.
    model: Instance of ShowAndTellModel; the model to evaluate.
    global_step: Integer; global step of the model checkpoint.
    summary_writer: Instance of FileWriter.
    summary_op: Op for generating model summaries.
  """
  # Log model summaries on a single batch.
  summary_str = sess.run(summary_op)
  summary_writer.add_summary(summary_str, global_step)

  # Compute perplexity over the entire dataset.
  num_eval_batches = int(
      math.ceil(FLAGS.num_eval_examples / model.config.batch_size))

  start_time = time.time()
  sum_losses = 0.
  sum_weights = 0.
  for i in xrange(num_eval_batches):
    cross_entropy_losses, weights = sess.run([
        model.target_cross_entropy_losses,
        model.target_cross_entropy_loss_weights
    ])
    sum_losses += np.sum(cross_entropy_losses * weights)
    sum_weights += np.sum(weights)
    if not i % 100:
      tf.logging.info("Computed losses for %d of %d batches.", i + 1,
                      num_eval_batches)
  eval_time = time.time() - start_time

  perplexity = math.exp(sum_losses / sum_weights)
  tf.logging.info("Perplexity = %f (%.2g sec)", perplexity, eval_time)

  # Log perplexity to the FileWriter.
  summary = tf.Summary()
  value = summary.value.add()
  value.simple_value = perplexity
  value.tag = "Perplexity"
  summary_writer.add_summary(summary, global_step)

  # Write the Events file to the eval directory.
  summary_writer.flush()
  tf.logging.info("Finished processing evaluation at global step %d.",
                  global_step) 
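The only new wrinkle relative to Example 71 is the batch count: math.ceil rounds up so a partial final batch still gets evaluated. A quick sketch (the counts are made up; the explicit float() keeps the division from truncating under Python 2 semantics):

import math

num_eval_examples = 10132
batch_size = 32
num_eval_batches = int(math.ceil(num_eval_examples / float(batch_size)))
print(num_eval_batches)  # 317, not 316: the last 20 examples form a short batch
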
Example 73
Project: sic   Author: Yanixos   File: random.py    GNU General Public License v3.0 4 votes vote down vote up
def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.

        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.

        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle

        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.

        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.

        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()

        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)

        while 1:
            u1 = random()
            z = _cos(_pi * u1)

            d = z / (r + z)
            u2 = random()
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break

        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        u3 = random()
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI

        return theta

## -------------------- gamma distribution -------------------- 
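The rejection loop above (Fisher's algorithm) uses exp() only in the acceptance test u2 <= (1.0 - d) * _exp(d). A short usage sketch with the stdlib version of the same sampler:

import math
import random

# angles concentrated around pi with concentration kappa=4;
# kappa=0 would reduce to a uniform angle on [0, 2*pi)
samples = [random.vonmisesvariate(math.pi, 4.0) for _ in range(5)]
print(['%.3f' % s for s in samples])
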
Example 74
Project: iceaddr   Author: sveinbjornt   File: add_placename_data.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def isnet93_to_wgs84(xx, yy):
    x = xx
    y = yy
    a = 6378137.0
    f = 1 / 298.257222101
    lat1 = 64.25
    lat2 = 65.75
    latc = 65.00
    lonc = 19.00
    eps = 0.00000000001

    def fx(p):
        return a * math.cos(p / rho) / math.sqrt(1 - math.pow(e * math.sin(p / rho), 2))

    def f1(p):
        return math.log((1 - p) / (1 + p))

    def f2(p):
        return f1(p) - e * f1(e * p)

    def f3(p):
        return pol1 * math.exp((f2(math.sin(p / rho)) - f2sin1) * sint / 2)

    rho = 45 / math.atan2(1.0, 1.0)
    e = math.sqrt(f * (2 - f))
    dum = f2(math.sin(lat1 / rho)) - f2(math.sin(lat2 / rho))
    sint = 2 * (math.log(fx(lat1)) - math.log(fx(lat2))) / dum
    f2sin1 = f2(math.sin(lat1 / rho))
    pol1 = fx(lat1) / sint
    polc = f3(latc) + 500000.0
    peq = (
        a
        * math.cos(latc / rho)
        / (sint * math.exp(sint * math.log((45 - latc / 2) / rho)))
    )
    pol = math.sqrt(math.pow(x - 500000, 2) + math.pow(polc - y, 2))
    lat = 90 - 2 * rho * math.atan(math.exp(math.log(pol / peq) / sint))
    lon = 0
    fact = rho * math.cos(lat / rho) / sint / pol
    delta = 1.0
    while math.fabs(delta) > eps:
        delta = (f3(lat) - pol) * fact
        lat += delta
    lon = -(lonc + rho * math.atan((500000 - x) / (polc - y)) / sint)

    return {"lat": round(lat, 7), "lng": round(lon, 7)} 
Example 75
Project: GCN-SeA   Author: sumanbanerjee1   File: nlp.py    Apache License 2.0 4 votes vote down vote up
def score(self, hypothesis, corpus, n=1):
        # containers
        count = [0, 0, 0, 0]
        clip_count = [0, 0, 0, 0]
        r = 0
        c = 0
        weights = [0.25, 0.25, 0.25, 0.25]

        # accumulate ngram statistics
        for hyps, refs in zip(hypothesis, corpus):
            if type(hyps[0]) is list:
                hyps = [hyp.split() for hyp in hyps[0]]
            else:
                hyps = [hyp.split() for hyp in hyps]

            refs = [ref.split() for ref in refs]

            # Shawn's evaluation
            refs[0] = [u'GO_'] + refs[0] + [u'EOS_']
            hyps[0] = [u'GO_'] + hyps[0] + [u'EOS_']

            for idx, hyp in enumerate(hyps):
                for i in range(4):
                    # accumulate ngram counts
                    hypcnts = Counter(ngrams(hyp, i + 1))
                    cnt = sum(hypcnts.values())
                    count[i] += cnt

                    # compute clipped counts
                    max_counts = {}
                    for ref in refs:
                        refcnts = Counter(ngrams(ref, i + 1))
                        for ng in hypcnts:
                            max_counts[ng] = max(max_counts.get(ng, 0), refcnts[ng])
                    clipcnt = dict((ng, min(count, max_counts[ng])) \
                                   for ng, count in hypcnts.items())
                    clip_count[i] += sum(clipcnt.values())

                # accumulate r & c
                bestmatch = [1000, 1000]
                for ref in refs:
                    if bestmatch[0] == 0: break
                    diff = abs(len(ref) - len(hyp))
                    if diff < bestmatch[0]:
                        bestmatch[0] = diff
                        bestmatch[1] = len(ref)
                r += bestmatch[1]
                c += len(hyp)
                if n == 1:
                    break
        # computing bleu score
        p0 = 1e-7
        bp = 1 if c > r else math.exp(1 - float(r) / float(c))
        p_ns = [float(clip_count[i]) / float(count[i] + p0) + p0 \
                for i in range(4)]
        s = math.fsum(w * math.log(p_n) \
                      for w, p_n in zip(weights, p_ns) if p_n)
        bleu = bp * math.exp(s)
        return bleu 
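The final combination is standard corpus BLEU: a geometric mean of clipped n-gram precisions, taken as exp() of a weighted sum of logs, times a brevity penalty that applies when the hypotheses are shorter than their best-match references. A compact sketch of just that last step (the counts are invented):

import math

def bleu_from_stats(clip_count, count, r, c, weights=(0.25,) * 4):
    p0 = 1e-7
    bp = 1.0 if c > r else math.exp(1.0 - float(r) / float(c))
    p_ns = [clip_count[i] / (count[i] + p0) + p0 for i in range(4)]
    return bp * math.exp(math.fsum(w * math.log(p) for w, p in zip(weights, p_ns)))

# clipped/total counts for 1- to 4-grams, reference length r, candidate length c
print(bleu_from_stats([9, 7, 5, 3], [10, 9, 8, 7], r=11, c=10))
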
Example 76
Project: pcfg-sampling   Author: wilkeraziz   File: parse.py    Apache License 2.0 4 votes vote down vote up
def exact_sample(wcfg, wfsa, root='[S]', goal='[GOAL]', n=1, intersection='nederhof'):
    """
    Sample a derivation given a wcfg and a wfsa, with exact sampling, a
    form of MC-sampling
    """
    samples = []

    if intersection == 'nederhof':
        parser = Nederhof(wcfg, wfsa)
        logging.info('Using Nederhof parser')
    elif intersection == 'earley':
        parser = Earley(wcfg, wfsa)
        logging.info('Using Earley parser')
    else:
        raise NotImplementedError('I do not know this algorithm: %s' % intersection)

    logging.debug('Parsing...')
    forest = parser.do(root, goal)

    if not forest:
        print('NO PARSE FOUND')
        return False
    else:

        logging.debug('Forest: rules=%d', len(forest))

        logging.debug('Topsorting...')
        # sort the forest
        sorted_nodes = top_sort(forest)

        # calculate the inside weight of the sorted forest
        logging.debug('Inside...')
        inside_prob = inside(forest, sorted_nodes)

        gen_sampling = GeneralisedSampling(forest, inside_prob)

        logging.debug('Sampling...')
        it = 0
        while len(samples) < n:
            it += 1
            if it % 10 == 0:
                logging.info('%d/%d', it, n)

            # retrieve a random derivation, with respect to the inside weight distribution
            d = gen_sampling.sample(goal)

            samples.append(d)

        counts = Counter(tuple(d) for d in samples)
        for d, n in counts.most_common():
            score = sum(r.log_prob for r in d)
            prob = math.exp(score - inside_prob[goal])
            print('# n=%s estimate=%s prob=%s score=%s' % (n, float(n) / len(samples), prob, score))
            tree = make_nltk_tree(d)
            inline_tree = inlinetree(tree)
            print(inline_tree, "\n")
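Note how the sample probability comes out of log space: score is a sum of log rule probabilities, and dividing by the total inside weight becomes a subtraction inside exp(). A minimal sketch of that normalization pattern (the values are made up):

import math

def normalize_log_score(log_score, log_total):
    """exp(log p - log Z) == p / Z, but stays in log space until the end."""
    return math.exp(log_score - log_total)

print(normalize_log_score(-3.2, -1.1))  # exp(-2.1) ~ 0.1225
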
Example 77
Project: fine-lm   Author: akzaidi   File: common_layers.py    MIT License 4 votes vote down vote up
def sample_from_discretized_mix_logistic(pred, seed=None):
  """Sampling from a discretized mixture of logistics.

  Args:
    pred: A [batch, height, width, num_mixtures*10] tensor of floats
      comprising one unconstrained mixture probability, three means
      (one per channel), three standard deviations (one per channel),
      and three coefficients which linearly parameterize dependence across
      channels.
    seed: Random seed.

  Returns:
    A tensor of shape [batch, height, width, 3] with real intensities scaled
    between -1 and 1.
  """

  logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
      pred)

  # Sample mixture indicator given logits using the gumbel max trick.
  num_mixtures = shape_list(logits)[-1]
  gumbel_noise = -tf.log(-tf.log(
      tf.random_uniform(
          tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))
  sel = tf.one_hot(
      tf.argmax(logits + gumbel_noise, -1),
      depth=num_mixtures,
      dtype=tf.float32)

  # Select mixture component's parameters.
  sel = tf.expand_dims(sel, -1)
  locs = tf.reduce_sum(locs * sel, 3)
  log_scales = tf.reduce_sum(log_scales * sel, 3)
  coeffs = tf.reduce_sum(coeffs * sel, 3)

  # Sample from 3-D logistic & clip to interval. Note we don't round to the
  # nearest 8-bit value when sampling.
  uniform_noise = tf.random_uniform(
      tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)
  logistic_noise = tf.log(uniform_noise) - tf.log(1. - uniform_noise)
  x = locs + tf.exp(log_scales) * logistic_noise
  x0 = x[..., 0]
  x1 = x[..., 1] + coeffs[..., 0] * x0
  x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1
  x = tf.stack([x0, x1, x2], axis=-1)
  x = tf.clip_by_value(x, -1., 1.)
  return x 
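The mixture indicator is drawn with the Gumbel-max trick: adding -log(-log(U)) noise to the logits and taking the argmax samples exactly from the softmax distribution. A pure-Python sketch of the same trick (no TensorFlow; names are illustrative):

import math
import random

def gumbel_max_sample(logits):
    """Sample index i with probability softmax(logits)[i]."""
    noisy = [l - math.log(-math.log(random.uniform(1e-5, 1.0 - 1e-5)))
             for l in logits]
    return max(range(len(noisy)), key=noisy.__getitem__)

print(gumbel_max_sample([0.1, 2.0, -1.0]))  # most often 1, the largest logit
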
Example 78
Project: transferlearning   Author: jindongwang   File: finetune.py    MIT License 4 votes vote down vote up
def train_epoch(self, optimizer = None, epoch = 0, epoches = 0, rank_filters = False):
        LEARNING_RATE = 0.01 / math.pow((1 + 10 * (epoch - 1) / epoches), 0.75)  # annealed: base_lr / (1 + 10p)^0.75
        optimizer = torch.optim.SGD([
            {'params': self.model.features.parameters()},
            {'params': self.model.classifier.parameters()},
            {'params': self.model.cls_fc.parameters(), 'lr': LEARNING_RATE},
            ], lr=LEARNING_RATE / 5, momentum=0.9, weight_decay=5e-4)

        iter_source = iter(self.source_loader)
        iter_target = iter(self.target_train_loader)
        self.model.train()

        for i in range(1, self.len_source_loader):
            data_source, label_source = next(iter_source)
            data_target, _ = next(iter_target)
            if len(data_target) < BATCH:
                iter_target = iter(self.target_train_loader)
                data_target, _ = next(iter_target)
            data_source, label_source = data_source.cuda(), label_source.cuda()
            data_target = data_target.cuda()
            data_source, label_source = Variable(data_source), Variable(label_source)
            data_target = Variable(data_target)
            self.model.zero_grad()
            if rank_filters:    # prune
                # add cls_loss and mmd_loss
                pred, loss_mmd = self.prunner.forward(data_source, data_target)
                loss_cls = F.nll_loss(F.log_softmax(pred, dim=1), label_source)
                gamma = 2 / (1 + math.exp(-10 * (epoch) / epoches)) - 1
                loss = loss_cls + gamma * loss_mmd
                loss.backward()
                print('prune loss: {:.5f}  {:.5f}'.format(loss_cls.item(), loss_mmd.item()))
            else:
                label_source_pred, loss_mmd = self.model(data_source, data_target)
                loss_cls = F.nll_loss(F.log_softmax(label_source_pred, dim=1), label_source)
                gamma = 2 / (1 + math.exp(-10 * (epoch) / epoches)) - 1
                loss = loss_cls +  gamma * loss_mmd
                loss.backward()
                optimizer.step()
                if i % 50 == 0:
                    print('Train Epoch:{} [{}/{}({:.0f}%)]\tlr:{:.5f}\tLoss: {:.6f}\tsoft_Loss: {:.6f}\tmmd_Loss: {:.6f}'.format(
                    epoch, i * len(data_source), self.len_source_dataset,
                        100. * i / self.len_source_loader, LEARNING_RATE, loss.item(), loss_cls.item(), loss_mmd.item())) 
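gamma follows the usual DANN-style adaptation schedule: a scaled logistic in exp() that ramps smoothly from 0 toward 1 over training. A sketch of the schedule in isolation (the factor 10 matches the code above):

import math

def adaptation_weight(epoch, epoches, steepness=10.0):
    """2 / (1 + exp(-steepness * p)) - 1, with p the fraction of training done."""
    p = epoch / float(epoches)
    return 2.0 / (1.0 + math.exp(-steepness * p)) - 1.0

for epoch in (0, 10, 50, 100):
    print(epoch, round(adaptation_weight(epoch, 100), 4))  # 0.0 -> ~1.0
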
Example 79
Project: transferlearning   Author: jindongwang   File: DAN.py    MIT License 4 votes vote down vote up
def train(model):
    src_iter = iter(src_loader)
    tgt_iter = iter(tgt_train_loader)
    correct = 0
    for i in range(1, iteration+1):
        model.train()
        LEARNING_RATE = lr / math.pow((1 + 10 * (i - 1) / (iteration)), 0.75)
        if (i - 1) % 100 == 0:
            print('learning rate: {:.4f}'.format(LEARNING_RATE))
        optimizer = torch.optim.SGD([
        {'params': model.sharedNet.parameters()},
        {'params': model.cls_fc.parameters(), 'lr': LEARNING_RATE},
        ], lr=LEARNING_RATE / 10, momentum=momentum, weight_decay=l2_decay)
        try:
            src_data, src_label = next(src_iter)
        except StopIteration:
            src_iter = iter(src_loader)
            src_data, src_label = next(src_iter)
            
        try:
            tgt_data, _ = next(tgt_iter)
        except StopIteration:
            tgt_iter = iter(tgt_train_loader)
            tgt_data, _ = next(tgt_iter)
            
        if cuda:
            src_data, src_label = src_data.cuda(), src_label.cuda()
            tgt_data = tgt_data.cuda()

        optimizer.zero_grad()
        src_pred, mmd_loss = model(src_data, tgt_data)
        cls_loss = F.nll_loss(F.log_softmax(src_pred, dim=1), src_label)
        lambd = 2 / (1 + math.exp(-10 * (i) / iteration)) - 1
        loss = cls_loss + lambd * mmd_loss
        loss.backward()
        optimizer.step()
        if i % log_interval == 0:
            print('Train iter: {} [({:.0f}%)]\tLoss: {:.6f}\tsoft_Loss: {:.6f}\tmmd_Loss: {:.6f}'.format(
                i, 100. * i / iteration, loss.item(), cls_loss.item(), mmd_loss.item()))

        if i%(log_interval*20)==0:
            t_correct = test(model)
            if t_correct > correct:
                correct = t_correct
            print('src: {} to tgt: {} max correct: {} max accuracy{: .2f}%\n'.format(
              src_name, tgt_name, correct, 100. * correct / tgt_dataset_len )) 
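Alongside the same logistic lambd ramp, this loop anneals the learning rate as lr / (1 + 10p)^0.75 with p = (i - 1) / iteration, the decay commonly used in DANN-style training. A sketch of just the decay curve (parameter names are illustrative):

import math

def annealed_lr(base_lr, step, total_steps, alpha=10.0, beta=0.75):
    """base_lr / (1 + alpha * p) ** beta, with p the fraction of steps done."""
    p = (step - 1) / float(total_steps)
    return base_lr / math.pow(1.0 + alpha * p, beta)

for step in (1, 100, 500, 1000):
    print(step, round(annealed_lr(0.01, step, 1000), 6))
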
Example 80
Project: Old-school-processing   Author: cianfrocco-lab   File: apTiltTransform.py    MIT License 4 votes vote down vote up
def willsq(a1, a2, \
		 theta0, gamma0=0.0, phi0=0.0, scale0=1.0, shiftx0=0.0, shifty0=0.0,\
		 xscale=numpy.ones((6), dtype=numpy.float32)):
	"""
	given two sets of particles; find the tilt, and twist of them
	"""	
	#x0 initial values
	fit = {}
	initx = numpy.array((
		theta0 * math.pi/180.0,
		gamma0 * math.pi/180.0,
		phi0   * math.pi/180.0,
		scale0,
		shiftx0,
		shifty0,
	), dtype=numpy.float32)

	#x1 delta values
	x0 = numpy.zeros(6, dtype=numpy.float32)
	#xscale scaling values
	#xscale = numpy.ones(5, dtype=numpy.float32)
	#xscale = numpy.array((1,1,1,1,1), dtype=numpy.float32)

	#print "optimizing angles and shift..."
	#print "initial rmsd:",_diffParticles(x0, initx, xscale, a1, a2)
	a1f = numpy.asarray(a1, dtype=numpy.float32)
	a2f = numpy.asarray(a2, dtype=numpy.float32)
	solved = optimize.fmin(_diffParticles, x0, args=(initx, xscale, a1f, a2f), 
		xtol=1e-4, ftol=1e-4, maxiter=500, maxfun=500, disp=0, full_output=1)
	x1 = solved[0]
	fit['rmsd'] = float(solved[1]) #_diffParticles(x1, initx, xscale, a1, a2)
	fit['iter'] = int(solved[3])
	#print "final rmsd: "+str(fit['rmsd'])+" in "+str(fit['iter'])+" iterations"

	#x3 final values
	x3 = x1 * xscale + initx
	fit['theta']  = x3[0]*180.0/math.pi
	fit['gamma']  = x3[1]*180.0/math.pi % 180.0
	fit['phi']    = x3[2]*180.0/math.pi % 180.0
	if fit['gamma'] > 90:
		fit['gamma'] -= 180.0
	if fit['phi'] > 90:
		fit['phi'] -= 180.0
	fit['scale']  = x3[3]
	fit['shiftx'] = x3[4]
	fit['shifty'] = x3[5]
	fit['point1'], fit['point2'] = getPointsFromArrays(a1, a2, fit['shiftx'], fit['shifty'])
	#print "Final=",fit['point1'],"\t", fit['point2']
	fit['prob'] = math.exp(-1.0*math.sqrt(abs(fit['rmsd'])))**2
	return fit

#================================
#================================
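One identity worth noting in the fit above: squaring exp(-sqrt(rmsd)) is the same as exp(-2 * sqrt(rmsd)), since exp(x)**2 == exp(2*x), so the fit probability decays exponentially in the root of the RMSD. A one-line check:

import math

rmsd = 2.5
a = math.exp(-1.0 * math.sqrt(abs(rmsd))) ** 2
b = math.exp(-2.0 * math.sqrt(abs(rmsd)))
print(math.isclose(a, b))  # True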