Python math.log() Examples

The following code examples show how to use math.log(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: pyblish-win   Author: pyblish   File: random.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def normalvariate(self, mu, sigma):
    """Normal distribution; *mu* is the mean, *sigma* the standard deviation.

    Uses the Kinderman-Monahan ratio-of-uniforms rejection method.
    Reference: Kinderman, A.J. and Monahan, J.F., "Computer generation
    of random variables using the ratio of uniform deviates", ACM Trans
    Math Software, 3, (1977), pp257-260.
    """
    rand = self.random
    while True:
        u1 = rand()
        u2 = 1.0 - rand()
        z = NV_MAGICCONST * (u1 - 0.5) / u2
        # Accept z when z^2/4 <= -ln(u2); otherwise draw again.
        if z * z / 4.0 <= -_log(u2):
            return mu + z * sigma

## -------------------- lognormal distribution -------------------- 
Example 2
Project: pyblish-win   Author: pyblish   File: random.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def expovariate(self, lambd):
        """Exponential distribution.

        lambd is 1.0 divided by the desired mean; it must be nonzero.
        (The name avoids the reserved word "lambda".)  Results lie in
        [0, +infinity) for positive lambd and (-infinity, 0] for
        negative lambd.
        """
        # Inverse-CDF sampling: X = -ln(U)/lambd.  Drawing U as
        # 1 - random() keeps the log argument strictly positive, since
        # random() can return 0.0 but never 1.0.
        u = 1.0 - self.random()
        return -_log(u) / lambd

## -------------------- von Mises distribution -------------------- 
Example 3
Project: pyblish-win   Author: pyblish   File: test_long.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def test_logs(self):
        """Verify math.log10/math.log agreement on exact powers of ten.

        NOTE(review): Python 2 only -- relies on long literals (1L, 2L)
        and on range() returning a list concatenable with ``+``.
        """
        LOG10E = math.log10(math.e)

        # Powers of ten should give back the exponent from log10 exactly
        # (within assertAlmostEqual tolerance), even for huge longs.
        for exp in range(10) + [100, 1000, 10000]:
            value = 10 ** exp
            log10 = math.log10(value)
            self.assertAlmostEqual(log10, exp)

            # log10(value) == exp, so log(value) == log10(value)/log10(e) ==
            # exp/LOG10E
            expected = exp / LOG10E
            log = math.log(value)
            self.assertAlmostEqual(log, expected)

        # Zero and negative arguments (including a huge negative long)
        # must raise ValueError for both log and log10.
        for bad in -(1L << 10000), -2L, 0L:
            self.assertRaises(ValueError, math.log, bad)
            self.assertRaises(ValueError, math.log10, bad) 
Example 4
Project: pyblish-win   Author: pyblish   File: test_random.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def test_randbelow_logic(self, _log=log, int=int):
        """Sanity-check the bit-count estimate used by _randbelow.

        NOTE(review): Python 2 only -- uses xrange and long literals.
        """
        # check bitcount transition points:  2**i and 2**(i+1)-1
        # show that: k = int(1.001 + _log(n, 2))
        # is equal to or one greater than the number of bits in n
        for i in xrange(1, 1000):
            n = 1L << i # check an exact power of two
            numbits = i+1
            k = int(1.00001 + _log(n, 2))
            self.assertEqual(k, numbits)
            self.assertTrue(n == 2**(k-1))

            n += n - 1      # check 1 below the next power of two
            k = int(1.00001 + _log(n, 2))
            self.assertIn(k, [numbits, numbits+1])
            self.assertTrue(2**k > n > 2**(k-2))

            n -= n >> 15     # check a little farther below the next power of two
            k = int(1.00001 + _log(n, 2))
            self.assertEqual(k, numbits)        # note the stronger assertion
            self.assertTrue(2**k > n > 2**(k-1))   # note the stronger assertion 
Example 5
Project: nizza   Author: fstahlberg   File: model2.py    Apache License 2.0 6 votes vote down vote up
def compute_positional_embeddings(
        self, max_pos, params, n_channels, max_timescale=1.0e4):
    """Build sinusoidal positional embeddings which serve as input to DistNet.

    Args:
      max_pos: a scalar with the maximal position.
      params (HParams): hyper-parameters for that model (not read here,
        kept for interface compatibility).
      n_channels (int): required embedding dimensionality.
      max_timescale: a Python float with the maximum sinusoid period.

    Returns:
      A [max_pos+1, n_channels] float32 tensor with positional embeddings.
    """
    positions = tf.to_float(tf.range(max_pos + 1))
    half = n_channels // 2
    # Timescales form a geometric progression from 1 up to max_timescale.
    increment = (
        math.log(float(max_timescale)) /
        (tf.to_float(half) - 1))
    inv_timescales = tf.exp(
        tf.to_float(tf.range(half)) * -increment)
    angles = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
    embeddings = tf.concat([tf.sin(angles), tf.cos(angles)], axis=1)
    # Pad one zero column when n_channels is odd.
    embeddings = tf.pad(embeddings, [[0, 0], [0, tf.mod(n_channels, 2)]])
    return tf.reshape(embeddings, [max_pos + 1, n_channels])
Example 6
Project: text-rank   Author: ouprince   File: util.py    MIT License 6 votes vote down vote up
def get_similarity(word_list1, word_list2):
    """Default function for computing the similarity of two sentences.

    Keyword arguments:
    word_list1, word_list2  --  the two sentences, each given as a list of words

    Returns the number of shared word types divided by
    log(len(word_list1)) + log(len(word_list2)); 0.0 when there is no
    co-occurrence or the denominator degenerates to 0.
    """
    words = list(set(word_list1 + word_list2))
    vector1 = [float(word_list1.count(word)) for word in words]
    vector2 = [float(word_list2.count(word)) for word in words]

    # A word co-occurs when its count is positive in both sentences.
    # (zip replaces the original Python-2-only xrange index loop and
    # works identically on Python 2 and 3.)
    co_occur_num = sum(1 for v1, v2 in zip(vector1, vector2) if v1 * v2 > 0.)

    if abs(co_occur_num) <= 1e-12:
        return 0.

    denominator = math.log(float(len(word_list1))) + math.log(float(len(word_list2)))

    if abs(denominator) < 1e-12:
        return 0.

    return co_occur_num / denominator
Example 7
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: keys.py    MIT License 6 votes vote down vote up
def from_coords(cls, x, y):
        """
        Creates an ECPoint object from the X and Y integer coordinates of the
        point

        :param x:
            The X coordinate, as an integer

        :param y:
            The Y coordinate, as an integer

        :return:
            An ECPoint object
        """

        # Use int.bit_length() instead of int(math.ceil(math.log(v, 2) / 8.0)):
        # the float form under-counts for exact powers of 256 (e.g. v == 256
        # yields 1 byte instead of 2) and yields 0 bytes for v == 1.
        x_bytes = (x.bit_length() + 7) // 8
        y_bytes = (y.bit_length() + 7) // 8

        num_bytes = max(x_bytes, y_bytes)

        # 0x04 marks an uncompressed point, followed by fixed-width X and Y.
        byte_string = b'\x04'
        byte_string += int_to_bytes(x, width=num_bytes)
        byte_string += int_to_bytes(y, width=num_bytes)

        return cls(byte_string)
Example 8
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: keys.py    MIT License 6 votes vote down vote up
def hash_algo(self):
        """
        Returns the name of the family of hash algorithms used to generate a
        DSA key

        :raises:
            ValueError - when the key is not a DSA key

        :return:
            A unicode string of "sha1" or "sha2"
        """

        if self.algorithm != 'dsa':
            raise ValueError(unwrap(
                '''
                Only DSA keys are generated using a hash algorithm, this key is
                %s
                ''',
                self.algorithm.upper()
            ))

        q = self['private_key_algorithm']['parameters']['q'].native
        # q of up to 20 bytes (160 bits) means SHA-1; larger means SHA-2.
        if math.log(q, 2) / 8 <= 20:
            return 'sha1'
        return 'sha2'
Example 9
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: keys.py    MIT License 6 votes vote down vote up
def bit_size(self):
        """
        :return:
            The bit size of the private key, as an integer
        """

        if self._bit_size is None:
            if self.algorithm == 'rsa':
                prime = self['private_key'].parsed['modulus'].native
            elif self.algorithm == 'dsa':
                prime = self['private_key_algorithm']['parameters']['p'].native
            elif self.algorithm == 'ec':
                prime = self['private_key'].parsed['private_key'].native
            else:
                # Previously fell through and crashed below with an
                # UnboundLocalError; raise something meaningful instead.
                raise ValueError('Unsupported key algorithm: %s' % self.algorithm)
            # int.bit_length() is exact; int(math.ceil(math.log(prime, 2)))
            # is computed in floating point and can be off by one for very
            # large integers.
            self._bit_size = prime.bit_length()
            # Round up to a whole number of bytes.
            modulus = self._bit_size % 8
            if modulus != 0:
                self._bit_size += 8 - modulus
        return self._bit_size
Example 10
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: keys.py    MIT License 6 votes vote down vote up
def bit_size(self):
        """
        :return:
            The bit size of the public key, as an integer
        """

        if self._bit_size is None:
            if self.algorithm == 'ec':
                # An uncompressed EC point is one type byte followed by the
                # X and Y coordinates, so each coordinate is (len-1)/2 bytes.
                # Floor division keeps this an int on Python 3, where the
                # original "/" produced a float bit size.
                self._bit_size = ((len(self['public_key'].native) - 1) // 2) * 8
            else:
                if self.algorithm == 'rsa':
                    prime = self['public_key'].parsed['modulus'].native
                elif self.algorithm == 'dsa':
                    prime = self['algorithm']['parameters']['p'].native
                else:
                    # Previously fell through with `prime` unbound.
                    raise ValueError('Unsupported key algorithm: %s' % self.algorithm)
                # Exact integer bit length instead of the float-imprecise
                # int(math.ceil(math.log(prime, 2))).
                self._bit_size = prime.bit_length()
                # Round up to a whole number of bytes.
                modulus = self._bit_size % 8
                if modulus != 0:
                    self._bit_size += 8 - modulus

        return self._bit_size
Example 11
Project: Perspective   Author: TypesettingTools   File: perspective.py    MIT License 6 votes vote down vote up
def find_ex(f):
    """Iteratively shrink a search window around the extremum chosen by *f*.

    *f* selects one entry from a list of (unrot(coord, Point(x, y)), x, y)
    tuples; each round re-centers the window on that pick and shrinks it
    by a factor of 3.  Relies on module-level unrot/coord/Point.
    """
    center_x, center_y = 0, 0
    window = 100000.0
    rounds = int(math.log(window * 100, 4))
    span = 4
    for _ in range(rounds):
        candidates = []
        for ix in range(-span, span):
            px = center_x + window * ix / 10
            for iy in range(-span, span):
                py = center_y + window * iy / 10
                candidates.append((unrot(coord, Point(px, py)), px, py))
        best = f(candidates)
        center_x, center_y = best[1], best[2]
        window /= 3
    return Point(best[1], best[2])
Example 12
Project: synthetic-data-tutorial   Author: theodi   File: PrivBayes.py    MIT License 6 votes vote down vote up
def sensitivity(num_tuples):
    """Sensitivity function for Bayesian network construction. PrivBayes Lemma 1.

    Parameters
    ----------
    num_tuples : int
        Number of tuples in sensitive dataset.

    Return
    --------
    int
        Sensitivity value.
    """
    first_term = (2 / num_tuples) * log((num_tuples + 1) / 2)
    second_term = (1 - 1 / num_tuples) * log(1 + 2 / (num_tuples - 1))
    return first_term + second_term
Example 13
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: sampler.py    Apache License 2.0 6 votes vote down vote up
def draw(self, true_classes):
        """Draw samples from log uniform distribution and returns sampled candidates,
        expected count for true classes and sampled classes."""
        max_value = self.range_max
        n_samples = self.num_sampled
        ctx = true_classes.context
        log_range = math.log(max_value + 1)
        flat_true = true_classes.reshape((-1,))
        sampled, n_tries = self.sampler.sample_unique(n_samples)

        # Under the log-uniform distribution P(c) = log((c+2)/(c+1)) / log(range_max+1).
        true_fp64 = flat_true.as_in_context(ctx).astype('float64')
        prob_true = ((true_fp64 + 2.0) / (true_fp64 + 1.0)).log() / log_range
        count_true = self._prob_helper(n_tries, n_samples, prob_true)

        sampled_nd = ndarray.array(sampled, ctx=ctx, dtype='int64')
        sampled_fp64 = sampled_nd.astype('float64')
        prob_sampled = ((sampled_fp64 + 2.0) / (sampled_fp64 + 1.0)).log() / log_range
        count_sampled = self._prob_helper(n_tries, n_samples, prob_sampled)
        return [sampled_nd, count_true, count_sampled]
Example 14
Project: MFEprimer_linux   Author: nick-youngblut   File: GelMobility.py    MIT License 6 votes vote down vote up
def cal_mobility(X, gel_conc=1.0, ref_mobility=50, formula='Helling'):
    '''Calculate the mobility distance of a DNA fragment from its size.

    X: fragment size (bp).
    gel_conc: gel concentration.
    ref_mobility: the mobility distance of the fastest DNA segment.
    formula: only 'Helling' is currently supported.
    '''
    import math
    gel_para_dict, a, b, k = load_gel_para_dict(gel_conc=gel_conc, formula=formula)

    X = float(X)
    gel_conc = float(gel_conc)

    if formula == 'Helling':
        # Relative mobility: Y = a - b * ln(X + k)
        Y = a - b * math.log(X + k)
    else:
        # The original fell through with Y unbound and crashed below with a
        # confusing NameError; fail fast with a clear message instead.
        raise ValueError('Unsupported formula: %r' % formula)

    # Convert relative mobility (distance / ref_mobility) into an
    # absolute mobility distance.
    Y = Y * ref_mobility
    return round(Y, 1)
Example 15
Project: MFEprimer_linux   Author: nick-youngblut   File: GelMobility.py    MIT License 6 votes vote down vote up
def cal_size(Y, gel_conc=1.0, ref_mobility=50, formula='Helling'):
    '''Predict the size (bp) of a DNA fragment from its mobility distance.

    Y: the absolute mobility distance.
    gel_conc: gel concentration.
    ref_mobility: the mobility distance of the fastest DNA segment.
    formula: only 'Helling' is currently supported.
    '''
    import math

    gel_para_dict, a, b, k = load_gel_para_dict(gel_conc=gel_conc, formula=formula)

    # Convert the absolute distance to a relative mobility.
    Y = Y / ref_mobility

    if formula == 'Helling':
        # Inverse of the Helling relation Y = a - b * ln(X + k).
        X = math.exp((a - Y) / b) - k
    else:
        # The original fell through with X unbound and crashed below with a
        # confusing NameError; fail fast with a clear message instead.
        raise ValueError('Unsupported formula: %r' % formula)

    return int(round(X, 0))
Example 16
Project: MFEprimer_linux   Author: nick-youngblut   File: TmDeltaG.py    MIT License 6 votes vote down vote up
def calDeltaG(qseq, sseq, mono_conc=50, diva_conc=1.5, dntp_conc=0.25, deltaH=None, deltaS=None):
    """ Calculate the free Gibbs energy of the qseq/sseq duplex.

    mono_conc, diva_conc, dntp_conc: cation / dNTP concentrations
    (divided by 1000 below, presumably mM -> M -- confirm with caller).
    deltaH/deltaS: optional precomputed enthalpy/entropy; recomputed
    from the sequences when not supplied.
    """

    mono_conc = float(mono_conc)
    diva_conc = float(diva_conc)
    dntp_conc = float(dntp_conc)

    # The original indented the next assignment with a literal tab amid
    # spaces, which is a TabError under Python 3.  Re-indented with spaces.
    if not (deltaH and deltaS):
        deltaH, deltaS = calDeltaHS(qseq, sseq)

    # Calculate the free Gibbs energy
    tao = 273.15 + 37 # Constant temperature tao in Kelvin

    # Fold the divalent cations into an equivalent monovalent concentration
    # (fix credited to an anonymous referee), then scale by 1/1000.
    mono_conc = mono_conc + divalent2monovalent(diva_conc, dntp_conc)
    mono_conc = mono_conc / 1000

    # Salt correction of the entropy; math.log(x, math.e) == ln(x).
    deltaS_adjust = deltaS + 0.368 * (len(sseq) - 1) * math.log(mono_conc, math.e)

    deltaG = (deltaH * 1000 - tao * deltaS_adjust) / 1000
    return deltaG
Example 17
Project: MFEprimer_linux   Author: nick-youngblut   File: TmDeltaG.py    MIT License 6 votes vote down vote up
def calTm(qseq, sseq, mono_conc=50, diva_conc=1.5, oligo_conc=50, dntp_conc=0.25, deltaH=None, deltaS=None):
    """ Calculate the Tm value of an amplicon.

    mono_conc, diva_conc, dntp_conc: cation / dNTP concentrations
    (divided by 1000 below, presumably mM -> M -- confirm with caller).
    oligo_conc: oligo concentration (divided by 1e9 below).
    deltaH/deltaS: optional precomputed enthalpy/entropy; recomputed
    from the sequences when not supplied.
    """

    mono_conc = float(mono_conc)
    diva_conc = float(diva_conc)
    oligo_conc = float(oligo_conc)
    dntp_conc = float(dntp_conc)

    # The original indented the next assignment with a literal tab amid
    # spaces, which is a TabError under Python 3.  Re-indented with spaces.
    if not (deltaH and deltaS):
        deltaH, deltaS = calDeltaHS(qseq, sseq)

    deltaH = deltaH * 1000

    oligo_conc = oligo_conc / 1000000000

    # Fold the divalent cations into an equivalent monovalent concentration
    # (fix credited to an anonymous referee), then scale by 1/1000.
    mono_conc = mono_conc + divalent2monovalent(diva_conc, dntp_conc)
    mono_conc = mono_conc / 1000

    # Salt correction of the entropy; math.log(x, math.e) == ln(x).
    deltaS = deltaS + 0.368 * (len(qseq) - 1) * math.log(mono_conc, math.e)

    Tm = deltaH / (deltaS + 1.987 * math.log(oligo_conc / 4, math.e)) - 273.15

    return Tm
Example 18
Project: DOTA_models   Author: ringringyi   File: accountant.py    Apache License 2.0 6 votes vote down vote up
def _compute_delta(self, log_moments, eps):
    """Compute delta for given log_moments and eps.

    Args:
      log_moments: the log moments of privacy loss, in the form of pairs
        of (moment_order, log_moment)
      eps: the target epsilon.
    Returns:
      delta
    """
    best = 1.0
    for order, log_moment in log_moments:
      # Orders whose moment could not be computed are skipped with a warning.
      if math.isinf(log_moment) or math.isnan(log_moment):
        sys.stderr.write("The %d-th order is inf or Nan\n" % order)
        continue
      if log_moment < order * eps:
        candidate = math.exp(log_moment - order * eps)
        if candidate < best:
          best = candidate
    return best
Example 19
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    Apache License 2.0 6 votes vote down vote up
def _compute_delta(log_moments, eps):
  """Compute delta for given log_moments and eps.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    eps: the target epsilon.
  Returns:
    delta
  """
  best = 1.0
  for order, log_moment in log_moments:
    # The zeroth moment carries no information; skip it.
    if order == 0:
      continue
    # Orders whose moment could not be computed are skipped with a warning.
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % order)
      continue
    if log_moment < order * eps:
      candidate = math.exp(log_moment - order * eps)
      if candidate < best:
        best = candidate
  return best
Example 20
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    Apache License 2.0 6 votes vote down vote up
def _compute_eps(log_moments, delta):
  """Compute epsilon for given log_moments and delta.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    delta: the target delta.
  Returns:
    epsilon
  """
  best = float("inf")
  for order, log_moment in log_moments:
    # The zeroth moment carries no information (and would divide by zero).
    if order == 0:
      continue
    # Orders whose moment could not be computed are skipped with a warning.
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % order)
      continue
    candidate = (log_moment - math.log(delta)) / order
    if candidate < best:
      best = candidate
  return best
Example 21
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    Apache License 2.0 6 votes vote down vote up
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
  """Compute delta (or eps) for given eps (or delta) from log moments.

  Args:
    log_moments: array of (moment_order, log_moment) pairs.
    target_eps: if not None, the epsilon for which we would like to compute
      corresponding delta value.
    target_delta: if not None, the delta for which we would like to compute
      corresponding epsilon value. Exactly one of target_eps and target_delta
      is None.
  Returns:
    eps, delta pair
  """
  # Exactly one of the two targets must be supplied.
  assert (target_eps is None) ^ (target_delta is None)
  assert not ((target_eps is None) and (target_delta is None))
  if target_eps is None:
    return (_compute_eps(log_moments, target_delta), target_delta)
  return (target_eps, _compute_delta(log_moments, target_eps))
Example 22
Project: DOTA_models   Author: ringringyi   File: blocks_entropy_coding_test.py    Apache License 2.0 6 votes vote down vote up
def testCodeLength(self):
    shape = [2, 4]
    proba_feed = [[0.65, 0.25, 0.70, 0.10],
                  [0.28, 0.20, 0.44, 0.54]]
    symbol_feed = [[1.0, 0.0, 1.0, 0.0],
                   [0.0, 0.0, 0.0, 1.0]]
    # Probability assigned to each realized symbol: p when the symbol is 1,
    # (1 - p) when it is 0.  Mean code length is -log2 of these, averaged.
    realized = [0.65, 0.75, 0.70, 0.90, 0.72, 0.80, 0.56, 0.54]
    mean_code_length = -(sum(math.log(p) for p in realized) /
                         math.log(2.0)) / (shape[0] * shape[1])

    symbol = tf.placeholder(dtype=tf.float32, shape=shape)
    proba = tf.placeholder(dtype=tf.float32, shape=shape)
    coder = blocks_entropy_coding.CodeLength()
    code_length = coder(symbol, proba)

    with self.test_session():
      tf.global_variables_initializer().run()
      code_length_eval = code_length.eval(
          feed_dict={symbol: symbol_feed, proba: proba_feed})

    self.assertAllClose(mean_code_length, code_length_eval)
Example 23
Project: cvpr2018-hnd   Author: kibok90   File: models.py    MIT License 6 votes vote down vote up
def __init__(self, T, opts):
        super(LOOLoss, self).__init__()

        self.gpu = opts.gpu
        # The leave-one-out weight only applies when the method uses LOO.
        self.loo = opts.loo if 'LOO' in opts.method else 0.
        self.label_smooth = opts.label_smooth
        # KL-to-uniform constant: log of the number of classes.
        self.kld_u_const = math.log(len(T['wnids']))
        self.relevant = [torch.from_numpy(rel) for rel in T['relevant']]
        self.labels_relevant = torch.from_numpy(T['labels_relevant'].astype(np.uint8))
        slices = T['ch_slice']
        if opts.class_wise:
            counts = T['num_children']
            n_super = len(counts)
            # Each child class is weighted inversely to its super-class size.
            weights = torch.zeros(slices[-1])
            for idx, n_child in enumerate(counts):
                weights[slices[idx]:slices[idx + 1]] = 1. / (n_child * n_super)
            self.class_weight = weights
        else:
            # Uniform weighting over all child classes.
            self.class_weight = torch.ones(slices[-1]) / slices[-1]
Example 24
Project: aurora   Author: carnby   File: tasks.py    MIT License 6 votes vote down vote up
def select_tweets(timeline, allow_rts=True, allow_replies=False, popular_only=True):
    """Pick scored tweets from *timeline*.

    Non-retweets are scored with the natural log of their counts;
    retweets (when allowed) carry the retweeted status, scored with
    log10 of the wrapping tweet's counts.  When popular_only is set,
    only tweets with a strictly positive score are returned.
    """
    selected = []

    for tweet in timeline:
        if 'retweeted_status' not in tweet:
            if not allow_replies and tweet['in_reply_to_status_id_str']:
                continue
            tweet['tweet_score'] = log(tweet['retweet_count'] + 1.0) + log(tweet['favorite_count'] + 1.0)
            tweet['__is_rt__'] = False
            selected.append(tweet)
        elif allow_rts:
            rt = tweet['retweeted_status']
            rt['tweet_score'] = log10(tweet['retweet_count'] + 1.0) + log10(tweet['favorite_count'] + 1.0)
            rt['source_created_at'] = rt['created_at']
            rt['created_at'] = tweet['created_at']
            rt['text'] = rt['text']
            rt['__is_rt__'] = True
            selected.append(rt)

    if popular_only:
        selected = [t for t in selected if t['tweet_score'] > 0]

    return selected
Example 25
Project: aurora   Author: carnby   File: filtering.py    MIT License 6 votes vote down vote up
def __estimate_entropy__(self):
        """Normalized Shannon entropy of the timeline feature-vector counts.

        Returns a value in [0, 1]: the empirical entropy of the per-key
        probabilities divided by the maximum possible entropy (log of the
        number of non-falsy count entries), or 0.0 when that maximum is 0.
        """
        counts = self.feature_vector_counts #Counter(self.timeline_feature_vectors)
        #print counts
        #N = float(sum(counts.values()))
        # NOTE(review): N is len(timeline)+1 rather than the sum of counts --
        # presumably deliberate smoothing; confirm with the caller.
        N = float(len(self.timeline) + 1)
        max_H = np.log(float(len(list(filter(lambda x: x, counts)))))

        if np.equal(max_H, 0.0):
            return 0.0

        entropy = 0.0

        for key in counts.keys():
            if counts[key] > 0:
                key_probability = counts[key] / N
                entropy += -(key_probability * np.log(key_probability))

        # Normalize by the maximum achievable entropy.
        entropy /= max_H

        #print u'N={0}, |counts|={3}, max_H={1}, entropy={2}, counter={4}'.format(N, max_H, entropy, len(counts), counts)
        return entropy
Example 26
Project: sic   Author: Yanixos   File: random.py    GNU General Public License v3.0 6 votes vote down vote up
def normalvariate(self, mu, sigma):
        """Normal distribution.

        mu is the mean, and sigma is the standard deviation.
        Kinderman & Monahan ratio-of-uniforms method; see ACM Trans
        Math Software, 3, (1977), pp257-260.
        """
        while True:
            u1 = self.random()
            u2 = 1.0 - self.random()
            candidate = NV_MAGICCONST * (u1 - 0.5) / u2
            # Rejection test: keep the candidate when z^2/4 <= -ln(u2).
            if candidate * candidate / 4.0 <= -_log(u2):
                break
        return mu + candidate * sigma

## -------------------- lognormal distribution -------------------- 
Example 27
Project: sic   Author: Yanixos   File: random.py    GNU General Public License v3.0 6 votes vote down vote up
def expovariate(self, lambd):
        """Exponential distribution.

        lambd is 1.0 divided by the desired mean and must be nonzero
        (the name avoids the reserved word "lambda").  Values range over
        [0, +infinity) for positive lambd, (-infinity, 0] for negative.
        """
        # Inverse-transform sampling: X = -ln(U)/lambd, drawing U as
        # 1 - random() so the argument of log is never exactly zero
        # (random() can return 0.0 but never 1.0).
        return -_log(1.0 - self.random()) / lambd

## -------------------- von Mises distribution -------------------- 
Example 28
Project: pepperon.ai   Author: JonWiggins   File: utils.py    MIT License 6 votes vote down vote up
def random_unit_vector(dimensions, seed=None):
    """
    Returns a random unit vector in the given number of dimensions
    Created using Gaussian random variables (Box-Muller transform)

    :param dimensions: desired dimensions
    :param seed: nullable, random seed

    :return: random unit vector
    """
    # `if seed:` would silently ignore a seed of 0; test for None instead.
    if seed is not None:
        random.seed(seed)

    raw = []
    for _ in range(dimensions):
        uniform1 = random.uniform(0, 1)
        uniform2 = random.uniform(0, 1)
        # Box-Muller: turns two uniforms into one standard normal sample.
        # (Raises ValueError in the rare case uniform1 == 0.0, as before.)
        raw.append(math.sqrt(-2 * math.log(uniform1)) * math.cos(2 * math.pi * uniform2))

    magnitude = math.sqrt(sum(x * x for x in raw))
    return [x / magnitude for x in raw]
Example 29
Project: algorithms-in-python   Author: xiaowang1105   File: 2sat.py    MIT License 6 votes vote down vote up
def Papadimitriou(num_clauses, clauses):
    """Randomized local search for 2-SAT (Papadimitriou's algorithm).

    Performs log2(num_clauses) random restarts; within each restart,
    flips one random variable up to 2*n^2 times until the assignment
    satisfies all clauses.  Returns 1 on success, 0 otherwise.
    """
    solved = 0
    for attempt in range(int(log(num_clauses, 2))):
        print("Running %d times" % attempt)
        assignment = initialize(num_clauses)
        if solved == 1:
            break
        for _ in tqdm(range(2 * num_clauses ** 2)):
            # if(j%1000000==0):
            #     print("\tInner loop %d times"%j)
            if is_sat(assignment, clauses):
                solved = 1
                break
            flip = random.randint(0, num_clauses - 1)
            assignment[flip] = int(not assignment[flip])

    return solved
Example 30
Project: BlueLightMeter   Author: chripell   File: blm_client.py    Apache License 2.0 5 votes vote down vote up
def process_lux(self, queue):
        """Poll *queue* for the latest light measurement and refresh the UI.

        Updates the debug line, the current/max lux labels and the derived
        EV values, then recomputes the exposure goal.  Always returns True,
        so a periodic-callback scheduler keeps re-arming it.
        """
        if self.need_to_set:
            self.setter(None, None, None)
        try:
            data = queue.get_nowait()
        except Exception:
            # No message available yet.  (The original bare `except:` also
            # swallowed SystemExit/KeyboardInterrupt; narrowed here.)
            data = None
        if data:
            s = data['state']
            self.debug.set_text('ch: %d,%d mode: %d %s int: %d' %
                                (s['ch0'], s['ch1'], s['mode'], ('lo', 'hi')[s['higain']],
                                 s['int_time']))
            if self.first_data:
                # First sample: sync the UI controls to the device state.
                self.higain.set_active(s['higain'])
                self.but_choices['mode'][s['mode']].set_active(True)
                self.int_time.set_text('%d' % s['int_time'])
                self.first_data = False
            self.cur_lux.set_markup('<span size="38000">%.2f</span>' % data['med_lux'])
            self.max_lux.set_markup('<span size="38000">%.2f</span>' % data['max_lux'])
            # EV = log2(lux / 2.5); -100 is a sentinel for "no light".
            if data['med_lux'] <= 0.0:
                self.ev = -100
            else:
                self.ev = math.log(float(data['med_lux']) / 2.5, 2) 
            if data['max_lux'] <= 0.0:
                self.ev_max = -100
            else:
                self.ev_max = math.log(float(data['max_lux']) / 2.5, 2) 
            self.cur_ev.set_markup('<span size="38000">%.1f</span>' % self.ev)
            self.max_ev.set_markup('<span size="38000">%.1f</span>' % self.ev_max)
            self.calc_goal()
        return True
Example 31
Project: BlueLightMeter   Author: chripell   File: blm_client.py    Apache License 2.0 5 votes vote down vote up
def calc_ev(self, av, tv):
        """Exposure value: log2(aperture^2 / shutter time)."""
        ratio = math.pow(av, 2.0) / tv
        return math.log(ratio, 2.0)
Example 32
Project: BlueLightMeter   Author: chripell   File: blm_client.py    Apache License 2.0 5 votes vote down vote up
def calc_goal(self):
        """Recompute the recommended camera setting from the current EV.

        Depending on self.what, solves the exposure relation
        ev = log2(Av^2 / Tv) (shifted by log2(ISO/100)) for shutter time
        (Tv), aperture (Av) or ISO, snaps the result to the nearest
        standard value and updates the goal widgets.
        """
        # Flash metering uses the peak EV, ambient metering the median EV.
        if self.which == 'Flash':
            ev = self.ev_max
        else:
            ev = self.ev
        if self.what == 'Av' or self.what == 'Tv':
            # Shift EV by log2(ISO/100) to account for the selected ISO.
            delta_ev = math.log(self.ISO / 100.0, 2.0)
            ev += delta_ev
            ev2 = math.pow(2.0, ev)
            if self.what == 'Tv':
                # Tv = Av^2 / 2^ev
                tv = math.pow(self.Av, 2.0) / ev2
                tvn = self.find_nearer(tv, self.TVc)
                self.goal.set_markup('<span size="38000">%s s</span>' % tvn)
                self.goal_ev.set_text('Ev=%.1f' %
                                      (self.calc_ev(self.Av, self.make_float(tvn)) - delta_ev))
            elif self.what == 'Av':
                # Av = sqrt(2^ev * Tv)
                av = math.sqrt(ev2 * self.Tv)
                avn = self.find_nearer(av, self.AVc)
                self.goal.set_markup('<span size="38000">f/%s</span>' % avn)
                self.goal_ev.set_text('Ev=%.1f' %
                                      (self.calc_ev(float(avn), self.Tv) - delta_ev))
        elif self.what == 'ISO':
            # ISO = 2^(ev_at_current_settings - metered_ev) * 100
            evb = self.calc_ev(self.Av, self.Tv)
            isov = math.pow(2.0, evb - ev) * 100.0
            isovn = self.find_nearer(isov, self.ISOc)
            self.goal.set_markup('<span size="38000">%s ISO</span>' % isovn)
            self.goal_ev.set_text('Ev=%.1f' %
                                  (evb + math.log(float(isovn) / 100.0, 2.0))) 
Example 33
Project: malcode   Author: moonsea   File: gramfreq.py    GNU General Public License v3.0 5 votes vote down vote up
def traveseFile(path):
    """Walk *path*, accumulate 2-gram TF/DF statistics, and dump them.

    NOTE(review): Python 2 only (print statement).  The `log(...)` calls
    below are the project's logging helper, not math.log.
    """
    # Term-frequency and document-frequency accumulators.
    totaltf = dict()
    totaldf = dict()
    totalterm = 0
    maxterm = 0
    totaldocument = 0
    maxdocument = 0

    for parent, dirnames, filenames in os.walk(path):
        log('Entering', parent, subpath='classfier')

        totaldocument += len(filenames)
        for filename in filenames:

            filepath = os.path.join(parent, filename)
            print filepath

            with open(filepath) as asmfile:
                lines = asmfile.readlines()

            log('Generating', filename, subpath='classfier')
            genSingleTF(lines, filename)
            # totalterm += len(lines)
            getTotalTF(lines, totaltf, totaldf)

    # print totaltf
    desfilepath = os.path.join(BASEPATH, '2-gram-totaltf')
    maxterm = max(totaltf.values())
    maxdocument = max(totaldf.values())
    totalterm = len(totaltf)
    # NOTE(review): under Python 2 the divisions below are integer
    # divisions when the operands are ints (e.g. totaltf[key] / maxterm
    # and the IDF term) -- presumably intentional here; confirm.
    with open(desfilepath, 'w') as desfile:
        for key in totaltf.keys():
            # print key, totaltf[key]
            tmp = '----'.join([key, str(totaltf[key]), str(totalterm), str(totaltf[key] / maxterm), str(
                totaldf.get(key, 0)), str(totaldocument), str(totaldf.get(key, 0) / maxdocument), str(math.log(totaldocument / totaldf.get(key, 1)))])
            desfile.write(tmp + '\n') 
Example 34
Project: explirefit   Author: codogogo   File: simple_stats.py    Apache License 2.0 5 votes vote down vote up
def kullback_leibler(ground_prob_dist, target_prob_dist):
    """Kullback-Leibler divergence D(ground || target).

    Both arguments are equal-length sequences of probabilities; entries
    are assumed strictly positive (log/division fail otherwise).
    """
    # Note: the original shadowed the builtin `sum` with a local variable
    # and indexed via range(len(...)); zip is the idiomatic equivalent.
    total = 0.0
    for p, q in zip(ground_prob_dist, target_prob_dist):
        total += p * math.log(p / q)
    return total
Example 35
Project: pyblish-win   Author: pyblish   File: random.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
# NOTE(review): scrape artifact -- only the signature of _randbelow
# survived here; the method body is missing from this excerpt.
# Python 2 only (1L long literal).
def _randbelow(self, n, _log=_log, _int=int, _maxwidth=1L<<BPF,
                   _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType): 
Example 36
Project: pyblish-win   Author: pyblish   File: random.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def weibullvariate(self, alpha, beta):
        """Weibull distribution; alpha is the scale parameter, beta the shape.

        (Jain, pg. 499; bug fix courtesy Bill Arms.)
        """
        # Inverse-CDF sampling: X = alpha * (-ln U) ** (1/beta), with U
        # drawn as 1 - random() so the log argument is never zero.
        u = 1.0 - self.random()
        return alpha * (-_log(u)) ** (1.0 / beta)
Example 37
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def testLog1p(self):
        """Exercise math.log1p: identities, special values, huge arguments."""
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        # For huge n, log1p(n) ~= log(n); int and float args must agree.
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n))) 
Example 38
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def testLog10(self):
        """Exercise math.log10 at reference points, infinities, NaN,
        and int/long equivalence."""
        self.assertRaises(TypeError, math.log10)
        self.ftest('log10(0.1)', math.log10(0.1), -1)
        self.ftest('log10(1)', math.log10(1), 0)
        self.ftest('log10(10)', math.log10(10), 1)
        # Bug fix: this line previously asserted math.log(INF) inside the
        # log10 test; it now checks log10 itself.
        self.assertEqual(math.log10(INF), INF)
        self.assertRaises(ValueError, math.log10, NINF)
        self.assertTrue(math.isnan(math.log10(NAN)))
        # Log values should match for int and long (issue #18739).
        for n in range(1, 1000):
            self.assertEqual(math.log10(n), math.log10(long(n)))
Example 39
Project: pyblish-win   Author: pyblish   File: test_long.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_bit_length(self):
        """long.bit_length(): cross-check against bin(), the documented
        2**(k-1) <= abs(x) < 2**k invariant, a floating-point log2
        definition, and exact powers of two (Python 2 long literals)."""
        tiny = 1e-10
        for x in xrange(-65000, 65000):
            x = long(x)
            k = x.bit_length()
            # Check equivalence with Python version
            self.assertEqual(k, len(bin(x).lstrip('-0b')))
            # Behaviour as specified in the docs
            if x != 0:
                self.assertTrue(2**(k-1) <= abs(x) < 2**k)
            else:
                self.assertEqual(k, 0)
            # Alternative definition: x.bit_length() == 1 + floor(log_2(x))
            if x != 0:
                # When x is an exact power of 2, numeric errors can
                # cause floor(log(x)/log(2)) to be one too small; for
                # small x this can be fixed by adding a small quantity
                # to the quotient before taking the floor.
                self.assertEqual(k, 1 + math.floor(
                        math.log(abs(x))/math.log(2) + tiny))

        # bit_length ignores the sign: x and -x have the same bit length.
        self.assertEqual((0L).bit_length(), 0)
        self.assertEqual((1L).bit_length(), 1)
        self.assertEqual((-1L).bit_length(), 1)
        self.assertEqual((2L).bit_length(), 2)
        self.assertEqual((-2L).bit_length(), 2)
        # Exact powers of two and their neighbours across digit boundaries.
        for i in [2, 3, 15, 16, 17, 31, 32, 33, 63, 64, 234]:
            a = 2L**i
            self.assertEqual((a-1).bit_length(), i)
            self.assertEqual((1-a).bit_length(), i)
            self.assertEqual((a).bit_length(), i+1)
            self.assertEqual((-a).bit_length(), i+1)
            self.assertEqual((a+1).bit_length(), i+1)
            self.assertEqual((-a-1).bit_length(), i+1)
Example 40
Project: pyblish-win   Author: pyblish   File: test_int.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_bit_length(self):
        """int.bit_length(): same checks as the long variant — bin()
        equivalence, the documented power-of-two invariant, and a
        floating-point log2 cross-check."""
        tiny = 1e-10
        for x in xrange(-65000, 65000):
            k = x.bit_length()
            # Check equivalence with Python version
            self.assertEqual(k, len(bin(x).lstrip('-0b')))
            # Behaviour as specified in the docs
            if x != 0:
                self.assertTrue(2**(k-1) <= abs(x) < 2**k)
            else:
                self.assertEqual(k, 0)
            # Alternative definition: x.bit_length() == 1 + floor(log_2(x))
            if x != 0:
                # When x is an exact power of 2, numeric errors can
                # cause floor(log(x)/log(2)) to be one too small; for
                # small x this can be fixed by adding a small quantity
                # to the quotient before taking the floor.
                self.assertEqual(k, 1 + math.floor(
                        math.log(abs(x))/math.log(2) + tiny))

        # bit_length ignores the sign: x and -x have the same bit length.
        self.assertEqual((0).bit_length(), 0)
        self.assertEqual((1).bit_length(), 1)
        self.assertEqual((-1).bit_length(), 1)
        self.assertEqual((2).bit_length(), 2)
        self.assertEqual((-2).bit_length(), 2)
        # Exact powers of two and their neighbours across digit boundaries.
        for i in [2, 3, 15, 16, 17, 31, 32, 33, 63, 64]:
            a = 2**i
            self.assertEqual((a-1).bit_length(), i)
            self.assertEqual((1-a).bit_length(), i)
            self.assertEqual((a).bit_length(), i+1)
            self.assertEqual((-a).bit_length(), i+1)
            self.assertEqual((a+1).bit_length(), i+1)
            self.assertEqual((-a-1).bit_length(), i+1)
Example 41
Project: wikilinks   Author: trovdimi   File: normalized_entropy.py    MIT License 5 votes vote down vote up
def entropy_step(x):
    """Unnormalized Shannon entropy of one distribution (natural log).

    :param x: object exposing a ``data`` iterable of probabilities
        (e.g. a sparse-matrix row); every value must be > 0, since
        math.log is applied to each one.
    :return: -sum(p * ln(p) for p in x.data)
    """
    # Builtin sum over a generator; the original accumulated into a local
    # named `sum`, shadowing the builtin.
    return -sum(p * math.log(p) for p in x.data)
Example 42
Project: wikilinks   Author: trovdimi   File: normalized_entropy.py    MIT License 5 votes vote down vote up
def plot_entropy_distribution():
    """Plot the frequency distribution of the pickled normalized-entropy
    values and save it to output/normalized_entropy_distribution.pdf.

    Python 2 code (print statements); reads its input from disk.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)

    entropy = read_pickle('output/normalized_entropy.obj')

    # 10000 fixed-width bins over the entropy values.
    hist, bin_edges = np.histogram(entropy, bins=10000)
    print hist, bin_edges

    #ax.set_yscale('log')
    #ax.set_xscale('log')
    # Plot count per bin against the bin's left edge.
    ax.plot(bin_edges[:-1], hist, marker='o', markersize=3, markeredgecolor='none', color='#D65F5F')

    #ax.set_ylim([10**0, 10**6])
    #ax.set_xlim([10**0, 10**6])
    ax.set_xlabel('Entropy')
    ax.set_ylabel('Frequency')

    fig.tight_layout()
    fig.savefig( 'output/normalized_entropy_distribution.pdf', bbox_inches='tight')
Example 43
Project: wikilinks   Author: trovdimi   File: normalized_entropy.py    MIT License 5 votes vote down vote up
def plot_entropy_hist():
    fig = plt.figure()
    ax = fig.add_subplot(111)

    entropy = read_pickle('output/normalized_entropy.obj')
    number_of_zeros = [1 if item is 0 else 0 for item in entropy]

    print len(number_of_zeros)
    print sum(number_of_zeros)
    n, bins, patches = ax.hist(entropy, 50)
    ax.plot(bins, )
    #ax.set_ylim([-1,1])
    ax.set_xlim([0,1])
    ax.set_yscale('log')
    ax.set_xlabel('Normalized entropy')
    ax.set_ylabel('Frequency (log)')

    fig.tight_layout()
    fig.savefig( 'output/normalized_entropy_hist.pdf', bbox_inches='tight') 
Example 44
Project: wikilinks   Author: trovdimi   File: normalized_entropy.py    MIT License 5 votes vote down vote up
def plot_gini_hist(name):
    fig = plt.figure()
    ax = fig.add_subplot(111)

    gini = read_pickle('output/'+name+'.obj')
    number_of_zeros = [1 if item is 0 else 0 for item in gini]

    print len(number_of_zeros)
    print sum(number_of_zeros)
    #n, bins, patches = ax.hist(gini, 50,  color='#D65F5F', edgecolor='none')
    n, bins, patches = ax.hist(gini, 50,  edgecolor='none')
    ax.plot(bins)
    #ax.set_ylim([-1,1])
    ax.set_xlim([0,1])
    #ax.set_yscale('log')
    ax.set_xlabel('Gini coefficient')
    ax.set_ylabel('Frequency')

    fig.tight_layout()
    fig.savefig( 'output/'+name+'.pdf', bbox_inches='tight') 
Example 45
Project: MusicDownloader   Author: wwwpf   File: download_model.py    GNU General Public License v3.0 5 votes vote down vote up
def get_human_read(s):
    """Format a byte count as a human-readable string.

    :param s: size in bytes (non-negative number)
    :return: e.g. "512.00B", "1.50KB", "3.00MB"; sizes of a terabyte or
        more are still expressed in GB (the largest supported unit).
    """
    post_fix = ["B", "KB", "MB", "GB"]
    # Pick the largest unit not exceeding s by repeated division instead of
    # int(math.log(s, 1024)): the log form can land one unit low near exact
    # powers of 1024 because of floating-point rounding.
    n = 0
    scaled = float(s)
    while scaled >= 1024.0 and n < 3:
        scaled /= 1024.0
        n += 1
    # Explicit float division keeps the fractional part under Python 2 too.
    return "%.2f" % (float(s) / (1 << (10 * n))) + post_fix[n]
Example 46
Project: streetview_objectmapping   Author: vlkryl   File: objectmapping.py    MIT License 5 votes vote down vote up
def LatLonToMeters( lat, lon ):
    "Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:4326"
    # Half the circumference of the spherical-Mercator earth (radius 6378137 m):
    # one hemisphere's extent in metres.
    half_circumference = 2 * pi * 6378137 / 2.0
    # Longitude maps linearly onto x.
    mx = lon * half_circumference / 180.0
    # Latitude goes through the Mercator projection, then the same linear
    # degrees-to-metres scaling as longitude.
    projected_lat = log(tan((90 + lat) * pi / 360.0)) / (pi / 180.0)
    my = projected_lat * half_circumference / 180.0
    return mx, my

# conversion from meters to (lat,lon) 
Example 47
Project: CLRS   Author: JasonVann   File: CLRS.py    MIT License 5 votes vote down vote up
def merge_sort_insertion(A, p, r):
    """Merge sort over A[p..r] (r inclusive) that falls back to insertion
    sort on sub-arrays shorter than log2(len(A)) elements.

    :param A: list sorted in place (also returned for convenience)
    :param p: index of the first item of the range
    :param r: index of the last item of the range
    :return: A, with A[p..r] sorted
    """
    # Threshold below which insertion sort is cheaper than recursing.
    k = int(math.log(len(A), 2))
    if p < r:
        # Bug fix: floor division for the midpoint. Plain / floors for ints
        # on Python 2 but yields a float on Python 3, which would break the
        # recursive index arithmetic; // behaves identically on both.
        q = (p + r) // 2
        if r - p < k:
            # Small range: insertion-sort the slice and write it back.
            A[p: r + 1] = insertion_sort(A[p: r + 1])
        else:
            merge_sort_insertion(A, p, q)
            merge_sort_insertion(A, q + 1, r)
            merge_clrs(A, p, q, r)
    return A
Example 48
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: keys.py    MIT License 5 votes vote down vote up
def hash_algo(self):
        """
        Returns the name of the family of hash algorithms used to generate a
        DSA key

        :raises:
            ValueError - when the key is not a DSA key

        :return:
            A unicode string of "sha1" or "sha2" or None if no parameters are
            present
        """

        if self.algorithm != 'dsa':
            raise ValueError(unwrap(
                '''
                Only DSA keys are generated using a hash algorithm, this key is
                %s
                ''',
                self.algorithm.upper()
            ))

        # DSA domain parameters; absent on keys without embedded parameters.
        parameters = self['algorithm']['parameters']
        if parameters.native is None:
            return None

        # Bit length of the subgroup order q, converted to bytes. A 160-bit
        # (20-byte) q maps to SHA-1, larger q to the SHA-2 family —
        # presumably following FIPS 186 parameter pairings; confirm.
        byte_len = math.log(parameters['q'].native, 2) / 8

        return 'sha1' if byte_len <= 20 else 'sha2'
Example 49
Project: DOTA_models   Author: ringringyi   File: accountant.py    Apache License 2.0 5 votes vote down vote up
def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                  num_examples):
    """Accumulate the privacy spending.

    Currently only support approximate privacy. Here we assume we use Gaussian
    noise on randomly sampled batch so we get better composition: 1. the per
    batch privacy is computed using privacy amplication via sampling bound;
    2. the composition is done using the composition with Gaussian noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.
    Returns:
      a TensorFlow operation for updating the privacy spending.
    """

    eps, delta = eps_delta
    # Guard against delta == 0: the amplification bound below requires a
    # strictly positive delta.
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
      # Sampling probability of this batch relative to the whole dataset.
      amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                        self._total_examples)
      # Use privacy amplification via sampling bound.
      # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
      # TODO(liqzhang) Add a link to a document with formal statement
      # and proof.
      amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
          tf.exp(eps) - 1.0)), [1])
      amortize_delta = tf.reshape(amortize_ratio * delta, [1])
      # Accumulate eps^2 (for the Gaussian composition) and delta linearly.
      return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                      tf.square(amortize_eps)),
                        tf.assign_add(self._delta_sum, amortize_delta)])
Example 50
Project: DOTA_models   Author: ringringyi   File: accountant.py    Apache License 2.0 5 votes vote down vote up
def _compute_log_moment(self, sigma, q, moment_order):
    """Compute high moment of privacy loss.

    Abstract placeholder: subclasses override this with a concrete
    computation; this base implementation intentionally does nothing.

    Args:
      sigma: the noise sigma, in the multiples of the sensitivity.
      q: the sampling ratio.
      moment_order: the order of moment.
    Returns:
      log E[exp(moment_order * X)]
    """
    pass
Example 51
Project: DOTA_models   Author: ringringyi   File: accountant.py    Apache License 2.0 5 votes vote down vote up
def _compute_eps(self, log_moments, delta):
    min_eps = float("inf")
    for moment_order, log_moment in log_moments:
      if math.isinf(log_moment) or math.isnan(log_moment):
        sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
        continue
      min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
    return min_eps 
Example 52
Project: DOTA_models   Author: ringringyi   File: accountant.py    Apache License 2.0 5 votes vote down vote up
def _compute_log_moment(self, sigma, q, moment_order):
    """Compute high moment of privacy loss.

    Args:
      sigma: the noise sigma, in the multiples of the sensitivity.
      q: the sampling ratio.
      moment_order: the order of moment.
    Returns:
      log E[exp(moment_order * X)]
    """
    # The precomputed binomial table only covers orders up to
    # _max_moment_order.
    assert moment_order <= self._max_moment_order, ("The order of %d is out "
                                                    "of the upper bound %d."
                                                    % (moment_order,
                                                       self._max_moment_order))
    # Row `moment_order` of the table: binomial coefficients C(order, i).
    binomial_table = tf.slice(self._binomial_table, [moment_order, 0],
                              [1, moment_order + 1])
    # qs = [1 q q^2 ... q^L] = exp([0 1 2 ... L] * log(q))
    qs = tf.exp(tf.constant([i * 1.0 for i in range(moment_order + 1)],
                            dtype=tf.float64) * tf.cast(
                                tf.log(q), dtype=tf.float64))
    # Mixture of the two differential-moment terms, weighted q / (1 - q).
    moments0 = self._differential_moments(sigma, 0.0, moment_order)
    term0 = tf.reduce_sum(binomial_table * qs * moments0)
    moments1 = self._differential_moments(sigma, 1.0, moment_order)
    term1 = tf.reduce_sum(binomial_table * qs * moments1)
    return tf.squeeze(tf.log(tf.cast(q * term0 + (1.0 - q) * term1,
                                     tf.float64)))
Example 53
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    Apache License 2.0 5 votes vote down vote up
def compute_b_mp(sigma, q, lmbd, verbose=False):
  """High-precision (mpmath) computation of the B moment bound.

  Integrates the B-lambda moment numerically, then verifies it against
  the bound a_{lambda-1} + int1 - int2 derived from the A moment.
  Python 2 code (print statements).

  Args:
    sigma: noise sigma.
    q: sampling ratio.
    lmbd: moment order (rounded up to an int).
    verbose: if True, print intermediate values.
  Returns:
    B-lambda as np.float64.
  """
  lmbd_int = int(math.ceil(lmbd))
  # Order 0 moment is trivially 1.
  if lmbd_int == 0:
    return 1.0

  mu0, _, mu = distributions_mp(sigma, q)

  # Numerical integral of the B moment over the whole real line.
  b_lambda_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
  b_lambda = integral_inf_mp(b_lambda_fn)

  # Split point M beyond which the integrand sign is known; used to bound
  # the integral by pieces.
  m = sigma ** 2 * (mp.log((2 - q) / (1 - q)) + 1 / (2 * (sigma ** 2)))
  b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
                    (mu(-z) / mu0(z)) ** lmbd_int)
  if verbose:
    print "M =", m
    print "f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m))
    assert b_fn(-m) < 0 and b_fn(m) < 0

  # Bounded integrals over [-M, M] for the verification bound.
  b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
  b_lambda_int2_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
  b_int1 = integral_bounded_mp(b_lambda_int1_fn, -m, m)
  b_int2 = integral_bounded_mp(b_lambda_int2_fn, -m, m)

  a_lambda_m1 = compute_a_mp(sigma, q, lmbd - 1)
  b_bound = a_lambda_m1 + b_int1 - b_int2

  if verbose:
    print "B by numerical integration", b_lambda
    print "B must be no more than    ", b_bound
  # Sanity check: the numerical value must respect the analytic bound.
  assert b_lambda < b_bound + 1e-5
  return _to_np_float64(b_lambda)
Example 54
Project: DOTA_models   Author: ringringyi   File: gaussian_moments.py    Apache License 2.0 5 votes vote down vote up
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
  """Compute the log moment of Gaussian mechanism for given parameters.

  Args:
    q: the sampling ratio.
    sigma: the noise sigma.
    steps: the number of steps.
    lmbd: the moment order.
    verify: if False, only compute the symbolic version. If True, computes
      both symbolic and numerical solutions and verifies the results match.
    verbose: if True, print out debug information.
  Returns:
    the log moment with type np.float64, could be np.inf.
  """
  moment = compute_a(sigma, q, lmbd, verbose=verbose)
  if verify:
    # High-precision cross-check against the mpmath implementations.
    mp.dps = 50
    moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
    moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
    np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
    if not np.isinf(moment_a_mp):
      # The following test fails for (1, np.inf)!
      np.testing.assert_array_less(moment_b_mp, moment_a_mp)
  # log moments compose linearly over independent steps.
  if np.isinf(moment):
    return np.inf
  else:
    return np.log(moment) * steps
Example 55
Project: DOTA_models   Author: ringringyi   File: losses_test.py    Apache License 2.0 5 votes vote down vote up
def testReturnsCorrectLoss(self):
    """Weighted sigmoid loss, scalar output: logits of +/-100 saturate to
    ~0 loss; the single 0-logit entry (row 3 of batch 1) and the masked
    batch-2 rows leave a total of -2 * log(.5)."""
    prediction_tensor = tf.constant([[[-100, 100, -100],
                                      [100, -100, -100],
                                      [100, 0, -100],
                                      [-100, -100, 100]],
                                     [[-100, 0, 100],
                                      [-100, 100, -100],
                                      [100, 100, 100],
                                      [0, 0, -1]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]],
                                 [[0, 0, 1],
                                  [0, 1, 0],
                                  [1, 1, 1],
                                  [1, 0, 0]]], tf.float32)
    # Last anchor of the second batch is weighted out entirely.
    weights = tf.constant([[1, 1, 1, 1],
                           [1, 1, 1, 0]], tf.float32)
    loss_op = losses.WeightedSigmoidClassificationLoss()
    loss = loss_op(prediction_tensor, target_tensor, weights=weights)

    exp_loss = -2 * math.log(.5)
    with self.test_session() as sess:
      loss_output = sess.run(loss)
      self.assertAllClose(loss_output, exp_loss)
Example 56
Project: DOTA_models   Author: ringringyi   File: losses_test.py    Apache License 2.0 5 votes vote down vote up
def testReturnsCorrectAnchorWiseLoss(self):
    """Weighted sigmoid loss with anchorwise_output=True: same inputs as
    the scalar test, but the loss is returned per anchor instead of
    summed, so only the 0-logit entries carry -log(.5)."""
    prediction_tensor = tf.constant([[[-100, 100, -100],
                                      [100, -100, -100],
                                      [100, 0, -100],
                                      [-100, -100, 100]],
                                     [[-100, 0, 100],
                                      [-100, 100, -100],
                                      [100, 100, 100],
                                      [0, 0, -1]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]],
                                 [[0, 0, 1],
                                  [0, 1, 0],
                                  [1, 1, 1],
                                  [1, 0, 0]]], tf.float32)
    # Last anchor of the second batch is weighted out entirely.
    weights = tf.constant([[1, 1, 1, 1],
                           [1, 1, 1, 0]], tf.float32)
    loss_op = losses.WeightedSigmoidClassificationLoss(True)
    loss = loss_op(prediction_tensor, target_tensor, weights=weights)

    exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
                          [-math.log(.5), 0, 0, 0]])
    with self.test_session() as sess:
      loss_output = sess.run(loss)
      self.assertAllClose(loss_output, exp_loss)
Example 57
Project: DOTA_models   Author: ringringyi   File: losses_test.py    Apache License 2.0 5 votes vote down vote up
def testReturnsCorrectLossWithClassIndices(self):
    """Weighted sigmoid loss restricted to class_indices [0, 1, 2]: the
    fourth class column is present in the tensors but excluded from the
    loss, so the expected per-anchor values match the 3-class test."""
    prediction_tensor = tf.constant([[[-100, 100, -100, 100],
                                      [100, -100, -100, -100],
                                      [100, 0, -100, 100],
                                      [-100, -100, 100, -100]],
                                     [[-100, 0, 100, 100],
                                      [-100, 100, -100, 100],
                                      [100, 100, 100, 100],
                                      [0, 0, -1, 100]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0, 0],
                                  [1, 0, 0, 1],
                                  [1, 0, 0, 0],
                                  [0, 0, 1, 1]],
                                 [[0, 0, 1, 0],
                                  [0, 1, 0, 0],
                                  [1, 1, 1, 0],
                                  [1, 0, 0, 0]]], tf.float32)
    weights = tf.constant([[1, 1, 1, 1],
                           [1, 1, 1, 0]], tf.float32)
    # Ignores the last class.
    class_indices = tf.constant([0, 1, 2], tf.int32)
    loss_op = losses.WeightedSigmoidClassificationLoss(True)
    loss = loss_op(prediction_tensor, target_tensor, weights=weights,
                   class_indices=class_indices)

    exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
                          [-math.log(.5), 0, 0, 0]])
    with self.test_session() as sess:
      loss_output = sess.run(loss)
      self.assertAllClose(loss_output, exp_loss)
Example 58
Project: DOTA_models   Author: ringringyi   File: losses_test.py    Apache License 2.0 5 votes vote down vote up
def testReturnsCorrectLoss(self):
    """Weighted softmax loss, scalar output: saturated logits give ~0
    loss; the two ambiguous anchors contribute -log(.5) each, one of
    them down-weighted by .5, giving -1.5 * log(.5) in total."""
    prediction_tensor = tf.constant([[[-100, 100, -100],
                                      [100, -100, -100],
                                      [0, 0, -100],
                                      [-100, -100, 100]],
                                     [[-100, 0, 0],
                                      [-100, 100, -100],
                                      [-100, 100, -100],
                                      [100, -100, -100]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]],
                                 [[0, 0, 1],
                                  [0, 1, 0],
                                  [0, 1, 0],
                                  [1, 0, 0]]], tf.float32)
    # Third anchor of batch 1 is half-weighted; last anchor of batch 2
    # is masked out.
    weights = tf.constant([[1, 1, .5, 1],
                           [1, 1, 1, 0]], tf.float32)
    loss_op = losses.WeightedSoftmaxClassificationLoss()
    loss = loss_op(prediction_tensor, target_tensor, weights=weights)

    exp_loss = - 1.5 * math.log(.5)
    with self.test_session() as sess:
      loss_output = sess.run(loss)
      self.assertAllClose(loss_output, exp_loss)
Example 59
Project: DOTA_models   Author: ringringyi   File: losses_test.py    Apache License 2.0 5 votes vote down vote up
def testReturnsCorrectAnchorWiseLoss(self):
    """Weighted softmax loss with anchorwise output: same inputs as the
    scalar softmax test, returned per anchor, so the half-weighted
    ambiguous anchor shows -0.5 * log(.5) and the full one -log(.5)."""
    prediction_tensor = tf.constant([[[-100, 100, -100],
                                      [100, -100, -100],
                                      [0, 0, -100],
                                      [-100, -100, 100]],
                                     [[-100, 0, 0],
                                      [-100, 100, -100],
                                      [-100, 100, -100],
                                      [100, -100, -100]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]],
                                 [[0, 0, 1],
                                  [0, 1, 0],
                                  [0, 1, 0],
                                  [1, 0, 0]]], tf.float32)
    # Third anchor of batch 1 is half-weighted; last anchor of batch 2
    # is masked out.
    weights = tf.constant([[1, 1, .5, 1],
                           [1, 1, 1, 0]], tf.float32)
    loss_op = losses.WeightedSoftmaxClassificationLoss(True)
    loss = loss_op(prediction_tensor, target_tensor, weights=weights)

    exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0],
                          [-math.log(.5), 0, 0, 0]])
    with self.test_session() as sess:
      loss_output = sess.run(loss)
      self.assertAllClose(loss_output, exp_loss)
Example 60
Project: DOTA_models   Author: ringringyi   File: losses_test.py    Apache License 2.0 5 votes vote down vote up
def testReturnsCorrectLossHardBootstrapping(self):
    """Bootstrapped sigmoid loss with hard bootstrapping (alpha=.5),
    scalar output: with these saturated logits only one entry remains
    ambiguous, leaving a total of -log(.5)."""
    prediction_tensor = tf.constant([[[-100, 100, 0],
                                      [100, -100, -100],
                                      [100, -100, -100],
                                      [-100, -100, 100]],
                                     [[-100, -100, 100],
                                      [-100, 100, -100],
                                      [100, 100, 100],
                                      [0, 0, -1]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]],
                                 [[0, 0, 1],
                                  [0, 1, 0],
                                  [1, 1, 1],
                                  [1, 0, 0]]], tf.float32)
    # Last anchor of the second batch is weighted out entirely.
    weights = tf.constant([[1, 1, 1, 1],
                           [1, 1, 1, 0]], tf.float32)
    # alpha mixes the target with the model's own (hard) predictions.
    alpha = tf.constant(.5, tf.float32)
    loss_op = losses.BootstrappedSigmoidClassificationLoss(
        alpha, bootstrap_type='hard')
    loss = loss_op(prediction_tensor, target_tensor, weights=weights)
    exp_loss = -math.log(.5)
    with self.test_session() as sess:
      loss_output = sess.run(loss)
      self.assertAllClose(loss_output, exp_loss)
Example 61
Project: DOTA_models   Author: ringringyi   File: losses_test.py    Apache License 2.0 5 votes vote down vote up
def testReturnsCorrectAnchorWiseLoss(self):
    """Bootstrapped sigmoid loss with hard bootstrapping (alpha=.5) and
    anchorwise_output=True: the 0-logit anchors each contribute
    -log(.5); everything else saturates to ~0 or is masked."""
    prediction_tensor = tf.constant([[[-100, 100, -100],
                                      [100, -100, -100],
                                      [100, 0, -100],
                                      [-100, -100, 100]],
                                     [[-100, 0, 100],
                                      [-100, 100, -100],
                                      [100, 100, 100],
                                      [0, 0, -1]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]],
                                 [[0, 0, 1],
                                  [0, 1, 0],
                                  [1, 1, 1],
                                  [1, 0, 0]]], tf.float32)
    # Last anchor of the second batch is weighted out entirely.
    weights = tf.constant([[1, 1, 1, 1],
                           [1, 1, 1, 0]], tf.float32)
    # alpha mixes the target with the model's own (hard) predictions.
    alpha = tf.constant(.5, tf.float32)
    loss_op = losses.BootstrappedSigmoidClassificationLoss(
        alpha, bootstrap_type='hard', anchorwise_output=True)
    loss = loss_op(prediction_tensor, target_tensor, weights=weights)

    exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
                          [-math.log(.5), 0, 0, 0]])
    with self.test_session() as sess:
      loss_output = sess.run(loss)
      self.assertAllClose(loss_output, exp_loss)
Example 62
Project: DOTA_models   Author: ringringyi   File: blocks_entropy_coding.py    Apache License 2.0 5 votes vote down vote up
def _Apply(self, c, p):
    """Theoretical bound of the coded length given a probability distribution.

    Args:
      c: The binary codes. Belong to {0, 1}.
      p: The probability of: P(code==+1)

    Returns:
      The average code length.
      Note: the average code length can be greater than 1 bit (e.g. when
          encoding the least likely symbol).
    """
    # Binary cross-entropy in nats, converted to bits by dividing by
    # -log(2) (tf.log is the natural logarithm).
    entropy = ((1.0 - c) * tf.log(1.0 - p) + c * tf.log(p)) / (-math.log(2))
    entropy = tf.reduce_mean(entropy)
    return entropy
Example 63
Project: soccer-matlab   Author: utra-robosoccer   File: utility.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def diag_normal_logpdf(mean, logstd, loc):
  """Log density of a normal with diagonal covariance.

  Per dimension the Gaussian log density is
  -0.5 * log(2*pi) - logstd - 0.5 * ((loc - mean) / std) ** 2,
  summed over the last axis.
  """
  # Bug fix: the original computed -0.5 * (log(2*pi) + logstd), which only
  # subtracts half of the logstd normalization term; the correct constant
  # is -0.5 * log(2*pi) - logstd (matching the duplicate of this helper
  # that appears later in this file).
  constant = -0.5 * math.log(2 * math.pi) - logstd
  value = -0.5 * ((loc - mean) / tf.exp(logstd)) ** 2
  return tf.reduce_sum(constant + value, -1)
Example 64
Project: soccer-matlab   Author: utra-robosoccer   File: utility.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def diag_normal_entropy(mean, logstd):
  """Empirical entropy of a normal with diagonal covariance."""
  # Closed form: 0.5 * (D * log(2*pi*e) + sum_d 2*logstd_d), where D is
  # the size of the last axis of `mean`.
  num_dims = mean.shape[-1].value
  normalizer = num_dims * math.log(2 * math.pi * math.e)
  return (normalizer + tf.reduce_sum(2 * logstd, 1)) / 2
Example 65
Project: soccer-matlab   Author: utra-robosoccer   File: utility.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def diag_normal_logpdf(mean, logstd, loc):
  """Log density of a normal with diagonal covariance."""
  # Per dimension: -0.5*log(2*pi) - logstd - 0.5*z**2 with the
  # standardized residual z = (loc - mean) / std; summed over the last axis.
  z = (loc - mean) / tf.exp(logstd)
  per_dim = (-0.5 * math.log(2 * math.pi) - logstd) - 0.5 * z ** 2
  return tf.reduce_sum(per_dim, -1)
Example 66
Project: soccer-matlab   Author: utra-robosoccer   File: utility.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def diag_normal_entropy(mean, logstd):
  """Empirical entropy of a normal with diagonal covariance."""
  # The log(2*pi*e) normalizer applies once per dimension of the last axis.
  dims = mean.shape[-1].value
  total = dims * math.log(2 * math.pi * math.e) + tf.reduce_sum(2 * logstd, 1)
  return total / 2
Example 67
Project: cvpr2018-hnd   Author: kibok90   File: models.py    MIT License 5 votes vote down vote up
def __init__(self, T, opts):
        """Set up the top-down (TD) loss from taxonomy data and options.

        :param T: taxonomy dict; reads 'labels_ch', 'labels_in',
            'labels_out', 'root', 'wnids_leaf', 'num_children', 'ch_slice'
            (numpy arrays / ints — exact shapes not visible here).
        :param opts: options namespace; reads gpu, label_smooth, ex_smooth,
            method, class_wise, novel_score.
        """
        super(TDLoss, self).__init__()
        
        self.gpu = opts.gpu
        self.label_smooth = opts.label_smooth
        # Extra smoothing only applies when the method is 'TD'.
        self.ex_smooth = opts.ex_smooth if opts.method == 'TD' else 0.
        self.class_wise = opts.class_wise
        self.novel_score = opts.novel_score
        self.labels_ch = torch.from_numpy(T['labels_ch'])
        # Masks stored as uint8 (legacy torch boolean representation).
        self.labels_in = torch.from_numpy(T['labels_in'].astype(np.uint8))
        self.labels_out = torch.from_numpy(T['labels_out'].astype(np.uint8))
        # Root index re-based past the leaf nodes.
        self.root = T['root'] - len(T['wnids_leaf'])
        self.num_children = T['num_children']
        self.ch_slice = T['ch_slice']
        # log(#children) per node — presumably the KL-to-uniform
        # normalization constant (name suggests so); confirm against usage.
        self.kld_u_const = [math.log(num_ch) for num_ch in self.num_children]
Example 68
Project: gradient-descent   Author: codebox   File: cost_function.py    MIT License 5 votes vote down vote up
def cost_delta(self, predicted, actual, m):
        """Cross-entropy cost term for one logistic prediction.

        Returns -log(p), where p is the probability assigned to the true
        outcome (predicted when actual == 1, else 1 - predicted). A
        probability of exactly 0 returns the most negative machine int
        instead of raising from log(0).
        """
        prob_of_truth = predicted if actual == 1 else 1 - predicted
        if prob_of_truth == 0:
            # log(0) is undefined; fall back to the Python 2 "minimum int".
            return -sys.maxint - 1
        return -math.log(prob_of_truth)
Example 69
Project: aurora   Author: carnby   File: tasks.py    MIT License 5 votes vote down vote up
def kld(lda, lda_topics):
    """
    Builds a function to estimate Kullback-Leibler distance w.r.t. lda_topics using lda.

    Bigi, B. (2003). Using Kullback-Leibler distance for text categorization (pp. 305-319). Springer Berlin Heidelberg.

    :param lda: LDA model.
    :param lda_topics: LDA topic probabilities for a given target user.
    """
    def _smoothed_lookup(topic_probs):
        # Spread the leftover probability mass uniformly over the topics
        # absent from topic_probs, so lookups of missing topics never
        # return zero (which would break the log ratio).
        if lda.num_topics != len(topic_probs):
            eps = (1.0 - sum(pair[1] for pair in topic_probs)) / (lda.num_topics - len(topic_probs))
        else:
            eps = 0.0
        smoothed = defaultdict(lambda: eps)
        smoothed.update(topic_probs)
        return smoothed

    Q_x = _smoothed_lookup(lda_topics)

    def kullback_leibler_distance(P):
        """
        Estimates the KLD between topic distributions Q (target) and P (candidate).
        :param P: LDA topic probabilities for a given candidate user.
        """
        P_x = _smoothed_lookup(P)
        return sum((P_x[i] - Q_x[i]) * log(P_x[i] / Q_x[i])
                   for i in range(0, lda.num_topics))

    return kullback_leibler_distance
Example 70
Project: aurora   Author: carnby   File: filtering.py    MIT License 5 votes vote down vote up
def prepare_tweet(self, tweet):
        """Annotate a tweet dict in place with discretized 'buckets',
        a feature vector and a shout score.

        Counts are bucketed on a log scale so heavy-tailed quantities
        (followers, friends, tweets, popularity) compare coarsely.

        :param tweet: dict with text, user counts, characterization flags
            and a 'datetime'; mutated in place.
        """
        # Lazily characterize the text if a caller has not done so already.
        if 'char' not in tweet:
            tweet['char'] = self.characterizer.characterize_text(tweet['text'])

        tweet['buckets'] = {
            'followers': int(math.log(tweet['user__followers_count'] + 1)),
            'friends': int(math.log(tweet['user__friends_count'] + 1)),
            'n_tweets': int(math.log(tweet['user__statuses_count'] + 1)),
            'url': bool(tweet['char']['links']),
            'reply': bool(tweet['characterization__is_reply']),
            'diffusion': bool(tweet['characterization__manual_rt']),
            'popularity': int(math.log(tweet['popularity'] + 1))
        }

        if not self.skip_field('geography'):
            # NOTE(review): the trailing comma makes this a 1-tuple, not the
            # bare value — looks unintended; confirm downstream expectations.
            tweet['buckets']['geography'] = tweet['geography'],

        # Hub score: log ratio of followers to friends, 0 when either
        # bucket is empty (avoids division by zero / log of zero).
        if tweet['buckets']['friends'] == 0 or tweet['buckets']['followers'] == 0:
            tweet['buckets']['hub'] = 0
        else:
            hub_relation = float(tweet['buckets']['followers']) / tweet['buckets']['friends']
            tweet['buckets']['hub'] = int(math.log(hub_relation))

        # Time bucket: minutes since self.min_date, integer-divided into
        # fixed-size windows (Python 2 integer division).
        delta = tweet['datetime'] - self.min_date
        total_minutes = int(delta.total_seconds() / 60.0)
        time_bucket = total_minutes / self.time_bucket_size
        tweet['buckets']['time'] = time_bucket

        if not self.skip_field('topics'):
            # Only the first hashtag (if any) represents the topic.
            if tweet['char']['hashtags']:
                ht = tweet['char']['hashtags'][0]
            else:
                ht = None

            tweet['buckets']['topics'] = ht

        tweet['__feature_vector__'] = self.__feature_vector__(tweet)
        tweet['__shout_score__'] = self.shout_score(tweet['text'])
Example 71
Project: sic   Author: Yanixos   File: random.py    GNU General Public License v3.0 5 votes vote down vote up
def weibullvariate(self, alpha, beta):
        """Weibull distribution.

        alpha is the scale parameter and beta is the shape parameter.

        """
        # Inverse-CDF sampling (Jain, pg. 499; bug fix courtesy Bill
        # Arms): draw u from (0, 1] so that log(u) is always defined.
        u = 1.0 - self.random()
        exponent = 1.0 / beta
        return alpha * (-_log(u)) ** exponent

## --------------- Operating System Random Source  ------------------ 
Example 72
Project: pepperon.ai   Author: JonWiggins   File: niavebayes.py    MIT License 5 votes vote down vote up
def probe(self, example):
        """
        Probes the model with the given example

        :param example: a pd series

        :return: a predicted label
        """
        best_label = None
        best_prob = None
        attribute_names = example.to_dict().keys()
        # Score every candidate label in log2-space; return the argmax.
        for candidate in self.data[self.target_label].unique():
            rows = self.data[self.data[self.target_label] == candidate]
            # Log prior: fraction of training rows carrying this label.
            score = math.log(rows.shape[0] / self.data.shape[0], 2)

            for attribute in attribute_names:
                if attribute == self.target_label or attribute not in self.attributes:
                    continue
                if self.continuous_classes:
                    likelihood = self.gaussian_probability(attribute, candidate, example)
                else:
                    likelihood = self.bernoulli_probability(attribute, rows, example)
                score += math.log(likelihood, 2)

            if best_label is None or best_prob < score:
                best_label, best_prob = candidate, score

        return best_label
Example 73
Project: CAFA_assessment_tool   Author: ashleyzhou972   File: Stats.py    GNU General Public License v3.0 5 votes vote down vote up
def lngamma(z):
    """
    Lanczos approximation of log((z-1)!)

    Reference: http://en.wikipedia.org/wiki/Lanczos_approximation
    """
    # The formula approximates gamma(z), so evaluate the series at z-1.
    z -= 1
    # Partial-fraction series over the module-level coefficient table _p
    # (_g is the Lanczos parameter used to build _p).
    series = _p[0]
    for k in range(1, _g + 2):
        series += _p[k] / (z + k)
    t = z + _g + 0.5
    # 0.9189385332046727 == log(sqrt(2*pi))
    return 0.9189385332046727 + (z + 0.5) * log(t) - t + log(series)
Example 74
Project: face_rekognition   Author: cnidus   File: GimpGradientFile.py    GNU General Public License v3.0 5 votes vote down vote up
def curved(middle, pos):
    # Gradient "curved" interpolation: remap pos so the configured
    # midpoint maps to 0.5; EPSILON (module constant) guards log(0).
    exponent = log(0.5) / log(max(middle, EPSILON))
    return pos ** exponent
Example 75
Project: pyblish-win   Author: pyblish   File: random.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def gauss(self, mu, sigma):
        """Gaussian distribution.

        mu is the mean, and sigma is the standard deviation.  This is
        slightly faster than the normalvariate() function.

        Not thread-safe without a lock around calls.

        """
        # Box-Muller: cos/sin of one uniform angle, scaled by a common
        # radius sqrt(-2*log(1-y)), give two *independent* N(0, 1)
        # deviates.  One is returned now; the other is cached on
        # self.gauss_next so only every second call draws randomness.
        # Two unsynchronized threads racing here may observe the same
        # cached value - use an external lock if that matters.
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            uniform = self.random
            angle = uniform() * TWOPI
            radius = _sqrt(-2.0 * _log(1.0 - uniform()))
            z = _cos(angle) * radius
            # Stash the paired deviate for the next call.
            self.gauss_next = _sin(angle) * radius

        return mu + z * sigma

## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
##    def betavariate(self, alpha, beta):
##        # Discrete Event Simulation in C, pp 87-88.
##
##        y = self.expovariate(alpha)
##        z = self.expovariate(1.0/beta)
##        return z/(y+z)
##
## was dead wrong, and how it probably got that way. 
Example 76
Project: pyblish-win   Author: pyblish   File: test_cmath.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def test_cmath_matches_math(self):
        # check that corresponding cmath and math functions are equal
        # for floats in the appropriate range

        # test_values in (0, 1)
        test_values = [0.01, 0.1, 0.2, 0.5, 0.9, 0.99]

        # test_values for functions defined on [-1., 1.]
        unit_interval = test_values + [-x for x in test_values] + \
            [0., 1., -1.]

        # test_values for log, log10, sqrt
        positive = test_values + [1.] + [1./x for x in test_values]
        nonnegative = [0.] + positive

        # test_values for functions defined on the whole real line
        real_line = [0.] + positive + [-x for x in positive]

        test_functions = {
            'acos' : unit_interval,
            'asin' : unit_interval,
            'atan' : real_line,
            'cos' : real_line,
            'cosh' : real_line,
            'exp' : real_line,
            'log' : positive,
            'log10' : positive,
            'sin' : real_line,
            'sinh' : real_line,
            'sqrt' : nonnegative,
            'tan' : real_line,
            'tanh' : real_line}

        for fn, values in test_functions.items():
            float_fn = getattr(math, fn)
            complex_fn = getattr(cmath, fn)
            for v in values:
                z = complex_fn(v)
                self.rAssertAlmostEqual(float_fn(v), z.real)
                self.assertEqual(0., z.imag)

        # test two-argument version of log with various bases
        for base in [0.5, 2., 10.]:
            for v in positive:
                z = cmath.log(v, base)
                self.rAssertAlmostEqual(math.log(v, base), z.real)
                self.assertEqual(0., z.imag) 
Example 77
Project: sic   Author: Yanixos   File: random.py    GNU General Public License v3.0 4 votes vote down vote up
def sample(self, population, k):
        """Chooses k unique random elements from a population sequence or set.

        Returns a new list of k elements drawn without replacement; the
        input population is left unchanged.  The result is in selection
        order, so every prefix is itself a valid random sample (raffle
        winners can be split into grand-prize and runner-up slices).

        Population members need not be hashable or unique; repeated
        members can each be selected.  To sample from a range of
        integers, pass a range object - that is fast and space
        efficient even for a huge population: sample(range(10000000), 60)
        """

        # Two interchangeable strategies, chosen by estimated memory use:
        # track the remaining pool in a list (good when k is a large
        # fraction of n), or track previous picks in a set and re-roll
        # on collisions (good when k << n).

        if isinstance(population, _Set):
            population = tuple(population)
        if not isinstance(population, _Sequence):
            raise TypeError("Population must be a sequence or set.  For dicts, use list(d).")
        randbelow = self._randbelow
        n = len(population)
        if not 0 <= k <= n:
            raise ValueError("Sample larger than population or is negative")
        result = [None] * k
        setsize = 21        # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
        if n <= setsize:
            # Pool tracking: an n-length list is smaller than a k-length
            # set here.  Swap each pick out of the live prefix.
            pool = list(population)
            for i in range(k):
                # invariant: pool[0:n-i] holds the not-yet-selected items
                pick = randbelow(n - i)
                result[i] = pool[pick]
                pool[pick] = pool[n - i - 1]   # move last live item into the vacancy
        else:
            # Selection tracking: collisions are rare when k << n.
            seen = set()
            seen_add = seen.add
            for i in range(k):
                pick = randbelow(n)
                while pick in seen:
                    pick = randbelow(n)
                seen_add(pick)
                result[i] = population[pick]
        return result
Example 78
Project: sic   Author: Yanixos   File: random.py    GNU General Public License v3.0 4 votes vote down vote up
def gauss(self, mu, sigma):
        """Gaussian distribution.

        mu is the mean, and sigma is the standard deviation.  This is
        slightly faster than the normalvariate() function.

        Not thread-safe without a lock around calls.

        """
        # Box-Muller transform: cos/sin of one random angle, scaled by a
        # Rayleigh-distributed radius sqrt(-2*log(1-y)), produce two
        # independent unit normals.  One is returned immediately and the
        # other cached on self.gauss_next, so randomness is only drawn
        # on every second call.  Threads racing through here without a
        # lock may receive the same cached value.
        cached = self.gauss_next
        self.gauss_next = None
        if cached is None:
            theta = self.random() * TWOPI
            r = _sqrt(-2.0 * _log(1.0 - self.random()))
            cached = _cos(theta) * r
            self.gauss_next = _sin(theta) * r

        return mu + cached * sigma

## -------------------- beta --------------------
## See
## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
## for Ivan Frohne's insightful analysis of why the original implementation:
##
##    def betavariate(self, alpha, beta):
##        # Discrete Event Simulation in C, pp 87-88.
##
##        y = self.expovariate(alpha)
##        z = self.expovariate(1.0/beta)
##        return z/(y+z)
##
## was dead wrong, and how it probably got that way. 
Example 79
Project: iceaddr   Author: sveinbjornt   File: add_placename_data.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def isnet93_to_wgs84(xx, yy):
    """Convert ISN93 Lambert-conformal-conic grid coordinates (metres)
    to WGS84 latitude/longitude.

    :param xx: ISN93 easting in metres
    :param yy: ISN93 northing in metres
    :return: dict with 'lat' and 'lng' in degrees, rounded to 7 decimals
    """
    x = xx
    y = yy
    a = 6378137.0              # GRS80 semi-major axis
    f = 1 / 298.257222101      # GRS80 flattening
    lat1 = 64.25               # first standard parallel
    lat2 = 65.75               # second standard parallel
    latc = 65.00               # latitude of grid origin
    lonc = 19.00               # central meridian (west-positive here)
    eps = 0.00000000001        # convergence threshold for the latitude loop

    def fx(p):
        # Parallel-circle radius term at latitude p (degrees).
        return a * math.cos(p / rho) / math.sqrt(1 - math.pow(e * math.sin(p / rho), 2))

    def f1(p):
        return math.log((1 - p) / (1 + p))

    def f2(p):
        return f1(p) - e * f1(e * p)

    def f3(p):
        # Projected polar radius at latitude p (degrees).
        return pol1 * math.exp((f2(math.sin(p / rho)) - f2sin1) * sint / 2)

    rho = 45 / math.atan2(1.0, 1.0)     # degrees per radian
    e = math.sqrt(f * (2 - f))          # first eccentricity
    dum = f2(math.sin(lat1 / rho)) - f2(math.sin(lat2 / rho))
    sint = 2 * (math.log(fx(lat1)) - math.log(fx(lat2))) / dum
    f2sin1 = f2(math.sin(lat1 / rho))
    pol1 = fx(lat1) / sint
    polc = f3(latc) + 500000.0
    peq = (
        a
        * math.cos(latc / rho)
        / (sint * math.exp(sint * math.log((45 - latc / 2) / rho)))
    )
    pol = math.sqrt(math.pow(x - 500000, 2) + math.pow(polc - y, 2))
    # Closed-form first guess for the latitude, then fixed-point
    # refinement until the projected radius matches pol.
    # (CLEANUP: a duplicated `fact = ...` line and a dead `lon = 0`
    # assignment were removed; behavior is unchanged.)
    lat = 90 - 2 * rho * math.atan(math.exp(math.log(pol / peq) / sint))
    fact = rho * math.cos(lat / rho) / sint / pol
    delta = 1.0
    while math.fabs(delta) > eps:
        delta = (f3(lat) - pol) * fact
        lat += delta
    lon = -(lonc + rho * math.atan((500000 - x) / (polc - y)) / sint)

    return {"lat": round(lat, 7), "lng": round(lon, 7)}
Example 80
Project: face_rekognition   Author: cnidus   File: IcoImagePlugin.py    GNU General Public License v3.0 4 votes vote down vote up
def __init__(self, buf):
        """
        Parse image from file-like object containing ico file data

        Reads the 6-byte ICONDIR header and one 16-byte ICONDIRENTRY per
        contained image, appending a dict per entry to self.entry and
        sorting so larger/deeper images come first.

        :param buf: binary file-like object positioned at the start of
            the ICO data; it is read sequentially and kept on self.buf.
        :raises SyntaxError: if the magic bytes are not an ICO signature.
        """

        # check magic
        s = buf.read(6)
        if not _accept(s):
            raise SyntaxError("not an ICO file")

        self.buf = buf
        self.entry = []

        # Number of items in file
        # (i8/i16/i32 appear to be little-endian int readers from PIL's
        # binary helpers - presumed; confirm against the module imports.)
        self.nb_items = i16(s[4:])

        # Get headers for each item
        for i in range(self.nb_items):
            s = buf.read(16)

            icon_header = {
                'width': i8(s[0]),
                'height': i8(s[1]),
                'nb_color': i8(s[2]),  # No. of colors in image (0 if >=8bpp)
                'reserved': i8(s[3]),
                'planes': i16(s[4:]),
                'bpp': i16(s[6:]),
                'size': i32(s[8:]),
                'offset': i32(s[12:])
            }

            # See Wikipedia
            # A stored 0 means 256 pixels (the field is one byte wide).
            for j in ('width', 'height'):
                if not icon_header[j]:
                    icon_header[j] = 256

            # See Wikipedia notes about color depth.
            # We need this just to differ images with equal sizes
            # NOTE: the and/or chain falls back to 256 whenever the
            # computed value is falsy - i.e. when bpp is 0 and nb_color
            # is 0 or 1 (ceil(log(1, 2)) == 0).
            icon_header['color_depth'] = (icon_header['bpp'] or
                                          (icon_header['nb_color'] != 0 and
                                           ceil(log(icon_header['nb_color'],
                                                    2))) or 256)

            icon_header['dim'] = (icon_header['width'], icon_header['height'])
            icon_header['square'] = (icon_header['width'] *
                                     icon_header['height'])

            self.entry.append(icon_header)

        # Stable double sort + reverse: entries end up ordered by pixel
        # area descending, ties broken by color depth descending.
        self.entry = sorted(self.entry, key=lambda x: x['color_depth'])
        # ICO images are usually squares
        # self.entry = sorted(self.entry, key=lambda x: x['width'])
        self.entry = sorted(self.entry, key=lambda x: x['square'])
        self.entry.reverse()