Python math.log1p() Examples

The following are code examples for showing how to use math.log1p(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: BiLatticeRNN-data-processing   Author: alecokas   File: posterior.py    MIT License 6 votes vote down vote up
def logadd(x,y):
    """
    For M{x=log(a)} and M{y=log(b)}, return M{z=log(a+b)}.
    @param x: M{log(a)}
    @type x: float
    @param y: M{log(b)}
    @type y: float
    @return: M{log(a+b)}
    @rtype: float
    """
    # Order the operands so the larger log is factored out; then the
    # exponential argument is <= 0 and exp() cannot overflow.
    hi, lo = (x, y) if x >= y else (y, x)
    if lo == LOGZERO:
        # Adding probability zero leaves the other operand unchanged.
        return hi
    return hi + math.log1p(math.exp(lo - hi))
Example 2
Project: rpython-lang-scheme   Author: tomoh1r   File: rfloat.py    MIT License 6 votes vote down vote up
def acosh(x):
        "NOT_RPYTHON"
        # Inverse hyperbolic cosine, valid for x >= 1; mirrors CPython's
        # C implementation. Relies on module-level isnan/isinf/log1p and
        # the constants NAN, _2_to_p28 (presumably 2**28) and _ln2.
        if isnan(x):
            return NAN
        if x < 1.:
            raise ValueError("math domain error")
        if x >= _2_to_p28:
            # Very large x: acosh(x) ~ log(2x) = log(x) + log(2).
            if isinf(x):
                return x
            else:
                return math.log(x) + _ln2
        if x == 1.:
            return 0.
        if x >= 2.:
            # 2 <= x < 2**28: acosh(x) = log(2x - 1/(x + sqrt(x^2 - 1))).
            t = x * x
            return math.log(2. * x - 1. / (x + math.sqrt(t - 1.0)))
        # 1 < x < 2: log1p form keeps accuracy as x -> 1.
        t = x - 1.0
        return log1p(t + math.sqrt(2. * t + t * t))
Example 3
Project: ironpython2   Author: IronLanguages   File: test_math.py    Apache License 2.0 6 votes vote down vote up
def test_math_subclass(self):
        """verify subtypes of float/long work w/ math functions"""
        # Python 2 only: `long` does not exist in Python 3.
        import math
        class myfloat(float): pass
        class mylong(long): pass

        # Same numeric value through both subclass types.
        mf = myfloat(1)
        ml = mylong(1)

        # Each function must treat the float subclass and the long subclass
        # identically: either the same result or a ValueError on both.
        for x in math.log, math.log10, math.log1p, math.asinh, math.acosh, math.atanh, math.factorial, math.trunc, math.isinf:
            try:
                resf = x(mf)
            except ValueError:
                resf = None
            try:
                resl = x(ml)
            except ValueError:
                resl = None
            self.assertEqual(resf, resl)
Example 4
Project: tensor2tensor   Author: tensorflow   File: wiki_revision_utils.py    Apache License 2.0 6 votes vote down vote up
def include_revision(revision_num, skip_factor=1.1):
  """Decide whether to include a revision.

  If the number of revisions is large, we exclude some revisions to avoid
  a quadratic blowup in runtime, since the article is likely also large.

  We make the ratio between consecutive included revision numbers
  approximately equal to "factor".

  Args:
    revision_num: an integer
    skip_factor: a floating point number >= 1.0

  Returns:
    a boolean
  """
  if skip_factor <= 1.0:
    return True
  # Keep the revision iff revision_num+1 and revision_num+2 land in
  # different logarithmic buckets of base skip_factor
  # (note log1p(n) == log(n + 1)).
  log_skip = math.log(skip_factor)
  current_bucket = int(math.log1p(revision_num) / log_skip)
  next_bucket = int(math.log(revision_num + 2.0) / log_skip)
  return current_bucket != next_bucket
Example 5
Project: BERT   Author: yyht   File: wiki_revision_utils.py    Apache License 2.0 6 votes vote down vote up
def include_revision(revision_num, skip_factor=1.1):
  """Decide whether to include a revision.

  If the number of revisions is large, we exclude some revisions to avoid
  a quadratic blowup in runtime, since the article is likely also large.

  We make the ratio between consecutive included revision numbers
  approximately equal to "factor".

  Args:
    revision_num: an integer
    skip_factor: a floating point number >= 1.0

  Returns:
    a boolean
  """
  if skip_factor <= 1.0:
    return True
  # Include iff revision_num+1 and revision_num+2 fall into different
  # logarithmic buckets of base skip_factor (log1p(n) == log(n + 1)).
  return (int(math.log1p(revision_num) / math.log(skip_factor)) != int(
      math.log(revision_num + 2.0) / math.log(skip_factor)))
Example 6
Project: proverbot9001   Author: UCSD-PL   File: features.py    GNU General Public License v3.0 6 votes vote down vote up
def __call__(self, context : TacticContext) -> List[float]:
        """Two features for a proof context: log1p of the number of
        locally-bound identifiers in the goal, and the fraction of
        identifiers that are globally bound.

        NOTE(review): the binder patterns are non-raw strings containing
        regex escapes; they work but trigger DeprecationWarnings — verify
        before converting to raw strings.
        """
        identifiers = get_symbols(context.goal)
        locallyBoundInHyps = serapi_instance.get_vars_in_hyps(context.hypotheses)
        # Coq binder patterns whose bound variables should not count as
        # global references.
        binders = ["forall\s+(.*)(?::.*)?,",
                   "fun\s+(.*)(?::.*)?,",
                   "let\s+\S+\s+:="]
        punctuation = ["(", ")", ":", ",", "_", ":=", "=>", "{|", "|}"]
        # Variables bound by a binder inside the goal term itself.
        locallyBoundInTerm = [var
                              for binder_pattern in binders
                              for varString in re.findall(binder_pattern, context.goal)
                              for var in re.findall("\((\S+)\s+:", varString)
                              if var not in punctuation]
        # Globally bound = mentioned in the goal but bound neither by a
        # hypothesis nor by an in-term binder.
        globallyBoundIdentifiers = \
            [ident for ident in identifiers
             if not ident in locallyBoundInHyps + locallyBoundInTerm + punctuation]
        locallyBoundIdentifiers = [ident for ident in identifiers
                                   if not ident in globallyBoundIdentifiers + punctuation]
        # Sanity check: every binder-bound variable must have been classified
        # locally bound; remove one occurrence so it is not double-counted.
        for var in locallyBoundInTerm:
            assert var in locallyBoundIdentifiers, \
                "{}, {}".format(globallyBoundIdentifiers, locallyBoundInTerm)
            locallyBoundIdentifiers.remove(var)
        return [math.log1p(float(len(locallyBoundIdentifiers))) ,
                # math.log1p(float(len(globallyBoundIdentifiers))),
                float(len(globallyBoundIdentifiers)) /
                float(len(globallyBoundIdentifiers) + len(locallyBoundIdentifiers))]
Example 7
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def testLog1p(self):
        """math.log1p: anchor points, infinities, NaN, and huge ints."""
        # Calling with no argument must fail.
        self.assertRaises(TypeError, math.log1p)
        # log1p(x) == log(1+x): hit the exact points -1, 0, 1 of log.
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        # 1 + (-inf) is negative: domain error.
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        # A huge int must agree with its float conversion.
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Example 8
Project: RouteOptimization   Author: andre-le   File: geomath.py    MIT License 5 votes vote down vote up
def log1p(x):
    """log(1 + x) accurate for small x (missing from python 2.5.2)"""

    # On Python >= 2.6 the C library supplies log1p directly.
    if sys.version_info > (2, 6):
        return math.log1p(x)

    # Kahan-style fallback: u = 1 + x is exact up to rounding, and
    # log(u)/(u - 1) is nearly constant near u = 1, so multiplying it by
    # x recovers log(1 + x) with little extra error.
    u = 1 + x
    d = u - 1
    if d == 0:
        return x
    return x * math.log(u) / d
Example 9
Project: RouteOptimization   Author: andre-le   File: geomath.py    MIT License 5 votes vote down vote up
def atanh(x):
    """atanh(x) (missing from python 2.5.2)"""

    # On Python >= 2.6 the C library supplies atanh directly.
    if sys.version_info > (2, 6):
      return math.atanh(x)

    # Fallback: atanh(x) = log1p(2y / (1 - y)) / 2 with y = |x|, then the
    # sign of x is restored (atanh is odd).
    # NOTE(review): `Math.log1p` (capital M) presumably refers to a Math
    # class defined in this module (geographiclib style) — confirm the
    # name resolves here; it is not the stdlib `math` module.
    y = abs(x)                  # Enforce odd parity
    y = Math.log1p(2 * y/(1 - y))/2
    return -y if x < 0 else y
Example 10
Project: NiujiaoDebugger   Author: MrSrc   File: test_math.py    GNU General Public License v3.0 5 votes vote down vote up
def testLog1p(self):
        """math.log1p: arity, int/float agreement, domain edge, infinity."""
        self.assertRaises(TypeError, math.log1p)
        # Integer inputs must match their float conversions, even beyond
        # exact float precision (2**90) and for very large values (2**300).
        for n in [2, 2**90, 2**300]:
            self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
        # 1 + (-1) == 0 is outside the log domain.
        self.assertRaises(ValueError, math.log1p, -1)
        self.assertEqual(math.log1p(INF), INF)
Example 11
Project: workload-collocation-agent   Author: intel   File: numa_allocator.py    Apache License 2.0 5 votes vote down vote up
def _get_most_used_nodes(preferences: Preferences) -> Set[int]:
    """Return the NUMA node(s) with the highest log-compressed preference."""
    # Round log1p(pref * 1000) so near-equal preferences collapse to the
    # same integer score.
    scores = {node: round(math.log1p(preferences[node] * 1000))
              for node in preferences}
    ranked = sorted(scores.items(), reverse=True, key=lambda entry: entry[1])
    top_score = ranked[0][1]
    # All nodes tied at the top score are "most used".
    return {node for node, score in ranked if score == top_score}
Example 12
Project: rpython-lang-scheme   Author: tomoh1r   File: rfloat.py    MIT License 5 votes vote down vote up
def asinh(x):
        "NOT_RPYTHON"
        # Inverse hyperbolic sine; mirrors CPython's C implementation.
        # Relies on module-level isfinite/copysign/log1p and the constants
        # _2_to_m28 / _2_to_p28 (presumably 2**-28 and 2**28) and _ln2.
        absx = abs(x)
        if not isfinite(x):
            # inf and nan propagate unchanged.
            return x
        if absx < _2_to_m28:
            # Tiny |x|: asinh(x) == x to double precision.
            return x
        if absx > _2_to_p28:
            # Huge |x|: asinh(x) ~ log(2|x|) = log(|x|) + log(2).
            w = math.log(absx) + _ln2
        elif absx > 2.:
            w = math.log(2. * absx + 1. / (math.sqrt(x * x + 1.) + absx))
        else:
            # Moderate |x|: log1p form avoids cancellation near zero.
            t = x * x
            w = log1p(absx + t / (1. + math.sqrt(1. + t)))
        # asinh is odd: restore the sign of x.
        return copysign(w, x)
Example 13
Project: rpython-lang-scheme   Author: tomoh1r   File: rfloat.py    MIT License 5 votes vote down vote up
def atanh(x):
        "NOT_RPYTHON"
        # Inverse hyperbolic tangent, defined for |x| < 1; mirrors CPython's
        # C implementation. Uses module-level isnan/log1p/copysign and
        # _2_to_m28 (presumably 2**-28).
        if isnan(x):
            return x
        absx = abs(x)
        if absx >= 1.:
            raise ValueError("math domain error")
        if absx < _2_to_m28:
            # Tiny |x|: atanh(x) == x to double precision.
            return x
        if absx < .5:
            # Small |x|: atanh(x) = 0.5*log1p(2x + 2x*x/(1-x)).
            t = absx + absx
            t = .5 * log1p(t + t * absx / (1. - absx))
        else:
            # 0.5 <= |x| < 1: atanh(x) = 0.5*log1p(2x/(1-x)).
            t = .5 * log1p((absx + absx) / (1. - absx))
        # atanh is odd: restore the sign of x.
        return copysign(t, x)
Example 14
Project: rpython-lang-scheme   Author: tomoh1r   File: rfloat.py    MIT License 5 votes vote down vote up
def log1p(x):
        "NOT_RPYTHON"
        # Accurate log(1 + x), following CPython's C implementation.
        # BUG FIX: the original tested `abs(x) < DBL_EPSILON // 2.` — floor
        # division yields 0.0 (DBL_EPSILON < 2), so the fast path below was
        # unreachable. True division restores the intended half-ulp cutoff.
        if abs(x) < DBL_EPSILON / 2.:
            # |x| below half an ulp of 1.0: log(1+x) == x to full precision.
            return x
        elif -.5 <= x <= 1.:
            # Correct the rounding error of forming y = 1 + x:
            # log(1+x) ~= log(y) - ((y-1) - x)/y.
            y = 1. + x
            return math.log(y) - ((y - 1.) - x) / y
        else:
            # |x| large relative to 1: the naive formula is already accurate.
            return math.log(1. + x)
Example 15
Project: ironpython2   Author: IronLanguages   File: test_math.py    Apache License 2.0 5 votes vote down vote up
def testLog1p(self):
        """Exercise math.log1p across anchor points and special values."""
        # No-argument call must raise.
        self.assertRaises(TypeError, math.log1p)
        # Exact log anchors: log1p(x) == log(1 + x).
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        # log of a negative quantity: domain error.
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        # Huge int and its float conversion must agree.
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Example 16
Project: transducer   Author: awni   File: decoders.py    Apache License 2.0 5 votes vote down vote up
def log_sum_exp(a, b):
    """
    Stable log sum exp.
    """
    # Factor out the larger term so exp() sees a non-positive argument:
    # log(e^a + e^b) = hi + log1p(e^(lo - hi)).
    lo, hi = sorted((a, b))
    return hi + math.log1p(math.exp(lo - hi))
Example 17
Project: Blockly-rduino-communication   Author: technologiescollege   File: test_math.py    GNU General Public License v3.0 5 votes vote down vote up
def testLog1p(self):
        """math.log1p: requires an argument; huge ints agree with floats."""
        self.assertRaises(TypeError, math.log1p)
        # 2**90 exceeds exact float precision; the int path and the float
        # path must still produce (almost) the same result.
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Example 18
Project: nucleus   Author: google   File: genomics_math.py    Apache License 2.0 5 votes vote down vote up
def log10_binomial(k, n, p):
  """Calculates numerically-stable value of log10(binomial(k, n, p)).

  Returns the log10 of the binomial density for k successes in n trials where
  each success has a probability of occurring of p.

  In real-space, we would calculate:

     result = (n choose k) * (1-p)^(n-k) * p^k

  This function computes the log10 of result, which is:

     log10(result) = log10(n choose k) + (n-k) * log10(1-p) + k * log10(p)

  This is equivalent to invoking the R function:
    dbinom(x=k, size=n, prob=p, log=TRUE)

  See https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Binomial.html
  for more details on the binomial.

  Args:
    k: int >= 0. Number of successes.
    n: int >= k. Number of trials.
    p: 0.0 <= float <= 1.0. Probability of success.

  Returns:
    log10 probability of seeing k successes in n trials with p.
  """
  # log(n choose k) via lgamma: lgamma(m+1) == log(m!).
  r = math.lgamma(n + 1) - (math.lgamma(k + 1) + math.lgamma(n - k + 1))
  # Guards skip 0*log(0) terms, so k == 0 / k == n with p == 0 / 1 don't raise.
  if k > 0:
    r += k * math.log(p)
  if n > k:
    # log1p(-p) == log(1 - p), accurate for small p.
    r += (n-k) * math.log1p(-p)
  # LOG_E_OF_10: module constant, presumably ln(10) — converts nats to log10.
  return r / LOG_E_OF_10
Example 19
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_math.py    GNU General Public License v2.0 5 votes vote down vote up
def testLog1p(self):
        """Basic math.log1p coverage: arity, big ints, domain, infinity."""
        self.assertRaises(TypeError, math.log1p)
        # int arguments must track their float conversions, including
        # values beyond exact float precision.
        for n in [2, 2**90, 2**300]:
            self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
        # log1p(-1) == log(0): domain error.
        self.assertRaises(ValueError, math.log1p, -1)
        self.assertEqual(math.log1p(INF), INF)
Example 20
Project: oss-ftp   Author: aliyun   File: test_math.py    MIT License 5 votes vote down vote up
def testLog1p(self):
        """math.log1p behaviour at anchors, infinities, NaN and big ints."""
        self.assertRaises(TypeError, math.log1p)
        # log1p(x) == log(1+x): exact anchor points of the natural log.
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        # 1 + (-inf) is negative, so this must raise.
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        # 2**90 as int vs float must agree.
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Example 21
Project: ufora   Author: ufora   File: MathTestCases.py    Apache License 2.0 5 votes vote down vote up
def test_pure_python_math_module(self):
        """Run a batch of math functions under the executor and compare
        against plain CPython evaluation of the same closure."""
        vals = [1, -.5, 1.5, 0, 0.0, -2, -2.2, .2]

        # not being tested: math.asinh, math.atanh, math.lgamma, math.erfc, math.acos
        def f():
            functions = [
                math.sqrt, math.cos, math.sin, math.tan, math.asin, math.atan,
                math.acosh, math.cosh, math.sinh, math.tanh, math.ceil,
                math.erf, math.exp, math.expm1, math.factorial, math.floor,
                math.log, math.log10, math.log1p
            ]
            tr = []
            for idx1 in range(len(vals)):
                v1 = vals[idx1]
                for funIdx in range(len(functions)):
                    function = functions[funIdx]
                    try:
                        tr = tr + [function(v1)]
                    except ValueError as ex:
                        # Out-of-domain (value, function) pairs are skipped;
                        # both evaluations must skip the same ones so the
                        # result arrays stay aligned.
                        pass

            return tr

        r1 = self.evaluateWithExecutor(f)
        r2 = f()
        # Sanity: enough in-domain results to make the comparison meaningful.
        self.assertGreater(len(r1), 100)
        self.assertTrue(numpy.allclose(r1, r2, 1e-6))
Example 22
Project: tichu-tournament   Author: aragos   File: calculator.py    MIT License 5 votes vote down vote up
def _log_rps(self, rps):
        if rps > 0:
            return math.log1p(rps)
        else: 
            return -math.log1p(-rps) 
Example 23
Project: numerai-concorde   Author: oerlikon   File: concorde.py    The Unlicense 5 votes vote down vote up
def logloss(p, t):
    return -t*log(p) - (1-t)*log1p(-p)


# Entry point. 
Example 24
Project: recsys2019   Author: logicai-io   File: accumulators.py    Apache License 2.0 5 votes vote down vote up
def get_stats(self, row, item):
        """Build a JSON blob of per-user action-history features for one
        (row, item) pair.

        Emits, per stored action type and recency rank: log-compressed
        price and time-delta features plus impression-rank features.
        NOTE(review): assumes `row` carries user_id/timestamp and stored
        events carry fake_impressions/fake_prices — confirm against caller.
        """
        all_events_list = self.all_events_list[row["user_id"]]
        max_timestamp = row["timestamp"]
        obs = {}
        for action_type in all_events_list.keys():
            # Walk up to the 10 most recent events of this type, newest first.
            for event_num, new_row in enumerate(all_events_list[action_type][::-1][:10]):
                impressions = new_row["fake_impressions"]
                prices = new_row["fake_prices"].split("|")
                # import ipdb; ipdb.set_trace()
                if action_type == "clickout item" and event_num <= 1:
                    # Price of each impression slot for the two latest clickouts.
                    for rank, (item_id, price) in enumerate(zip(impressions, prices)):
                        price = int(price)
                        obs[f"co_price_{rank:02d}_{event_num:02d}"] = log1p(price)

                # Time elapsed since the event, log-compressed.
                obs[f"{action_type}_{event_num:02d}_timestamp"] = log1p(max_timestamp - new_row["timestamp"])
                if new_row["action_type"] in ACTIONS_WITH_ITEM_REFERENCE:
                    impressions = new_row["fake_impressions"]
                    if new_row["reference"] in impressions:
                        # 1-based rank of the referenced item, plus its rank
                        # relative to the candidate item being scored.
                        obs[f"{action_type}_rank_{event_num:02d}"] = impressions.index(new_row["reference"]) + 1
                        obs[f"{action_type}_rank_{event_num:02d}_rel"] = item["rank"] - impressions.index(
                            new_row["reference"]
                        )

        # Same timestamp/rank features over the combined interaction stream.
        int_events_list = self.int_events_list[row["user_id"]]
        for event_num, new_row in enumerate(int_events_list[::-1][:10]):
            obs[f"interaction_{event_num:02d}_timestamp"] = log1p(max_timestamp - new_row["timestamp"])
            impressions = new_row["fake_impressions"]
            if new_row["reference"] in impressions:
                obs[f"interaction_rank_{event_num:02d}"] = impressions.index(new_row["reference"]) + 1
                obs[f"interaction_rank_{event_num:02d}_rel"] = item["rank"] - impressions.index(new_row["reference"])

        return {"actions_tracker": json.dumps(obs)}
Example 25
Project: sceneKit-wrapper-for-Pythonista   Author: pulbrich   File: 14_physicsDemo-2.py    MIT License 5 votes vote down vote up
def didEndContact(self, aWorld, aContact):
    """Merge the lighter of two colliding bodies into the heavier one
    (perfectly inelastic collision), then remove the lighter node."""
    big, small = (aContact.nodeA, aContact.nodeB) if aContact.nodeA.physicsBody.mass > aContact.nodeB.physicsBody.mass else (aContact.nodeB, aContact.nodeA)
    m1, m2 = big.physicsBody.mass, small.physicsBody.mass
    v1, v2 = big.physicsBody.velocity, small.physicsBody.velocity
    # Conserve mass and charge on the surviving body.
    m = m1 + m2
    big.physicsBody.mass = m
    big.physicsBody.charge += small.physicsBody.charge
    # Grow the survivor sublinearly (log1p) so repeated merges don't
    # blow up its radius.
    big.geometry.radius += 0.4*math.log1p(small.geometry.radius)
    big.physicsBody.physicsShape = scn.PhysicsShape.shapeWithGeometry(big.geometry)
    big.physicsBody.continuousCollisionDetectionThreshold = 1*big.geometry.radius
    # Momentum conservation per component: v = (m1*v1 + m2*v2) / (m1 + m2).
    big.physicsBody.velocity = ((m1*v1.x+m2*v2.x)/(m), (m1*v1.y+m2*v2.y)/(m), (m1*v1.z+m2*v2.z)/(m))
    small.removeFromParentNode()
    self.particle_number -= 1
    self.counter_scene.counter = self.particle_number
Example 26
Project: differential-privacy   Author: LaRiffle   File: rdp_accountant.py    MIT License 5 votes vote down vote up
def _log_add(logx, logy):
  """Add two numbers in the log space."""
  a, b = min(logx, logy), max(logx, logy)
  if a == -np.inf:  # adding 0
    return b
  # Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
  return math.log1p(math.exp(a - b)) + b  # log1p(x) = log(x + 1) 
Example 27
Project: differential-privacy   Author: LaRiffle   File: core.py    MIT License 5 votes vote down vote up
def _log1mexp(x):
  """Numerically stable computation of log(1-exp(x))."""
  if x < -1:
    return math.log1p(-math.exp(x))
  elif x < 0:
    return math.log(-math.expm1(x))
  elif x == 0:
    return -np.inf
  else:
    raise ValueError("Argument must be non-positive.") 
Example 28
Project: differential-privacy   Author: LaRiffle   File: core.py    MIT License 5 votes vote down vote up
def compute_logq_gaussian(counts, sigma):
  """Returns an upper bound on ln Pr[outcome != argmax] for GNMax.

  Implementation of Proposition 7.

  Args:
    counts: A numpy array of scores.
    sigma: The standard deviation of the Gaussian noise in the GNMax mechanism.

  Returns:
    logq: Natural log of the probability that outcome is different from argmax.
  """
  n = len(counts)
  variance = sigma**2
  idx_max = np.argmax(counts)
  # Gap between the winning count and every count (all entries >= 0).
  counts_normalized = counts[idx_max] - counts
  counts_rest = counts_normalized[np.arange(n) != idx_max]  # exclude one index
  # Upper bound q via a union bound rather than a more precise calculation.
  # Per coordinate: P[argmax flips] = P[N(0, 2*variance) > gap] (survival
  # function); _logaddexp (project helper) sums the per-coordinate log-probs.
  logq = _logaddexp(
      scipy.stats.norm.logsf(counts_rest, scale=math.sqrt(2 * variance)))

  # A sketch of a more accurate estimate, which is currently disabled for two
  # reasons:
  # 1. Numerical instability;
  # 2. Not covered by smooth sensitivity analysis.
  # covariance = variance * (np.ones((n - 1, n - 1)) + np.identity(n - 1))
  # logq = np.log1p(-statsmodels.sandbox.distributions.extras.mvnormcdf(
  #     counts_rest, np.zeros(n - 1), covariance, maxpts=1e4))

  # q can never exceed 1 - 1/n: the argmax is the most likely outcome.
  return min(logq, math.log(1 - (1 / n)))
Example 29
Project: differential-privacy   Author: LaRiffle   File: core.py    MIT License 5 votes vote down vote up
def rdp_pure_eps(logq, pure_eps, orders):
  """Computes the RDP value given logq and pure privacy eps.

  Implementation of https://arxiv.org/abs/1610.05755, Theorem 3.

  The bound used is the min of three terms. The first term is from
  https://arxiv.org/pdf/1605.02065.pdf.
  The second term is based on the fact that when event has probability (1-q) for
  q close to zero, q can only change by exp(eps), which corresponds to a
  much smaller multiplicative change in (1-q)
  The third term comes directly from the privacy guarantee.

  Args:
    logq: Natural logarithm of the probability of a non-optimal outcome.
    pure_eps: eps parameter for DP
    orders: array_like list of moments to compute.

  Returns:
    Array of upper bounds on rdp (a scalar if orders is a scalar).
  """
  orders_vec = np.atleast_1d(orders)
  q = math.exp(logq)
  # +inf disables the data-dependent term unless q is small enough below.
  log_t = np.full_like(orders_vec, np.inf)
  if q <= 1 / (math.exp(pure_eps) + 1):
    # Data-dependent term computed in log space; log1p(-q) == log(1-q) and
    # _log1mexp (module helper) gives a stable log(1 - exp(.)).
    logt_one = math.log1p(-q) + (
        math.log1p(-q) - _log1mexp(pure_eps + logq)) * (
            orders_vec - 1)
    logt_two = logq + pure_eps * (orders_vec - 1)
    log_t = np.logaddexp(logt_one, logt_two)

  # Minimum of the three candidate bounds described in the docstring.
  ret = np.minimum(
      np.minimum(0.5 * pure_eps * pure_eps * orders_vec,
                 log_t / (orders_vec - 1)), pure_eps)
  if np.isscalar(orders):
    # NOTE(review): np.asscalar is deprecated/removed in modern NumPy;
    # ret.item() is the drop-in replacement.
    return np.asscalar(ret)
  else:
    return ret
Example 30
Project: proverbot9001   Author: UCSD-PL   File: features.py    GNU General Public License v3.0 5 votes vote down vote up
def __call__(self, context : TacticContext) -> List[float]:
        """Single feature: log1p of the number of hypotheses in the context."""
        hypothesis_count = len(context.hypotheses)
        return [math.log1p(float(hypothesis_count))]
Example 31
Project: ancile   Author: ancile-project   File: rdp_accountant.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def _log_add(logx, logy):
  """Add two numbers in the log space."""
  # Factor out the larger exponent so exp() never overflows.
  a, b = min(logx, logy), max(logx, logy)
  if a == -np.inf:  # adding 0
    return b
  # Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
  return math.log1p(math.exp(a - b)) + b  # log1p(x) = log(x + 1)
Example 32
Project: robot_path_planning   Author: y4h2   File: khepera3.py    MIT License 5 votes vote down vote up
def get_ir_distances(self):
        """Converts the IR distance readings into a distance in meters"""
        
        # Invert the sensor's logarithmic response: reading 3960 maps to
        # rmin, smaller readings map further out, then clamp into
        # [rmin, rmax]. NOTE(review): 3960 and the /30 slope are presumably
        # Khepera3 IR calibration constants — confirm against the robot spec.
        ir_distances = [ \
            max( min( (log1p(3960) - log1p(reading))/30 + 
                       self.robot.ir_sensors.rmin,
                      self.robot.ir_sensors.rmax),
                 self.robot.ir_sensors.rmin)
            for reading in self.robot.ir_sensors.readings ]

        return ir_distances
Example 33
Project: lopocs   Author: Oslandia   File: utils.py    GNU Lesser General Public License v2.1 5 votes vote down vote up
def compute_scale_for_cesium(coordmin, coordmax):
    '''
    Cesium quantized positions need to be in uint16
    This function computes the best scale to apply to coordinates
    to fit the range [0, 65535]
    '''
    max_int = np.iinfo(np.uint16).max
    delta = abs(coordmax - coordmin)
    # NOTE(review): log1p(a)/log1p(10) is log base 11 of (1 + a), not
    # log10(a). If a plain order-of-magnitude estimate was intended this
    # should be math.log10(max_int / delta) — confirm before changing,
    # since existing tilesets depend on the current behavior.
    scale = 10 ** -(math.floor(math.log1p(max_int / delta) / math.log1p(10)))
    return scale
Example 34
Project: ml-privacy-csf18   Author: samuel-yeom   File: logprob.py    MIT License 5 votes vote down vote up
def __add__(self, other):
        """Probability-space addition of two Logprob values (log-sum-exp)."""
        if self < other:
            # Delegate so that self always holds the larger log-probability.
            return other + self
        else:
            if self.logprob == Logprob.ninf: #self.value == other.value == 0
                return Logprob(0)
            else: #formula on Wikipedia page for "Log probability" (as of 9 Oct 2016)
                # self.logprob >= other.logprob here, so the exp argument
                # is <= 0 and cannot overflow.
                logprob = self.logprob
                logprob += math.log1p(math.exp(other.logprob - self.logprob))
                if logprob > 0:
                    # Probabilities cannot exceed 1: absorb tiny float
                    # overshoot, otherwise the operands were invalid.
                    if logprob <= Logprob.tolerance: #float arithmetic rounding error
                        logprob = 0
                    else:
                        raise ValueError('Logprob addition resulted in probability {:.20f}'.format(math.exp(logprob)))
                return Logprob(logprob, True)
Example 35
Project: osmo.sessdsa   Author: chbpku   File: F-Alpha.py    GNU General Public License v3.0 5 votes vote down vote up
def effect_eject_time(self, a, b):
        """Return candidate eject counts for cell `a` chasing cell `b`.

        Each eject shrinks `a` by a factor of (1 - EJECT_MASS_RATIO); the
        count is capped so `a` stays larger than `b` and the chase remains
        profitable. Returns 0 when `a` is not strictly larger than `b`.
        """
        ra = a.radius
        rb = b.radius
        if a.radius <= b.radius:
            return 0
        # Still larger than b after ejecting (log1p(rb/ra - 1) == log(rb/ra)).
        n1 = math.floor(2 * math.log1p(rb / ra - 1) / math.log1p(-Consts['EJECT_MASS_RATIO']))
        # Ejecting must still leave a net gain after absorbing b.
        n2 = math.floor(math.log1p(- rb ** 2 / ra ** 2) / math.log1p(-Consts['EJECT_MASS_RATIO']))
        n = min(n1, n2, 20)  # hard cap (code caps at 20; original comment said 15)
        nlist = list(range(0, n + 1))
        return nlist

    # Give a somewhat reasonable pursuit range, since searching all of
    # 0..2*pi is too hard: use the angle between the velocity direction
    # and the tangent line.
Example 36
Project: bitcoin-turing-machine   Author: veqtrus   File: math.py    GNU General Public License v3.0 5 votes vote down vote up
def geometric(p):
    """Sample from the geometric distribution with success probability p
    (support {1, 2, ...})."""
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        # Success is certain: the first trial always succeeds.
        return 1
    # Inverse-CDF sampling: floor(log(1-U) / log(1-p)) + 1, with log1p used
    # for accuracy when p (or the draw) is small.
    draw = random.random()
    return 1 + int(math.log1p(-draw) / math.log1p(-p))
Example 37
Project: titus2   Author: animator   File: dist.py    Apache License 2.0 5 votes vote down vote up
def PDF(self,x):
        """Beta density at x, evaluated in log space for stability.

        Reads self.alpha, self.beta and self.Z (the log normalizer, i.e.
        log of the Beta function). Returns 0.0 outside the open
        interval (0, 1).
        """
        if (x <= 0.0) or (x >= 1.0):
            return 0.0
        # BUG FIX: removed the dead branch `if (x < 0.0) and (x > 0.0)`,
        # which can never be true (x cannot be both negative and positive)
        # and was already covered by the boundary guard above.
        logX = math.log(x)
        # log1p(-x) == log(1 - x), accurate when x is near zero.
        log1mX = math.log1p(-x)
        ret = math.exp((self.alpha - 1.0) * logX + (self.beta - 1.0) \
              * log1mX - self.Z)
        return ret
Example 38
Project: Note-refinement-   Author: nkundiushuti   File: lazy_math.py    GNU General Public License v3.0 5 votes vote down vote up
def log1p(x):
  """log(1 + x) extended to complex inputs and to reals below -1
  (where the result is complex)."""
  if x == -1:
    # log(0): module-level negative infinity sentinel.
    return -inf
  # Complex inputs, or reals whose 1+x is negative, need cmath.
  needs_cmath = isinstance(x, complex) or x < -1
  return cmath.log(1 + x) if needs_cmath else math.log1p(x)
Example 39
Project: PyFlow   Author: wonderworks-software   File: MathLib.py    Apache License 2.0 5 votes vote down vote up
def log1p(x=('FloatPin', 1.0), result=(REF, ('BoolPin', False))):
        '''Return the natural logarithm of `1+x` (base e). The result is calculated in a way which is accurate for `x` near zero.'''
        # PyFlow node wrapper: `result` is an output pin invoked with the
        # success flag; the node returns -1 on any failure.
        try:
            result(True)
            return math.log1p(x)
        except:
            # NOTE(review): bare except maps every failure (domain error,
            # bad pin type) to result(False)/-1 — consider narrowing to
            # (TypeError, ValueError).
            result(False)
            return -1
Example 40
Project: m-stp   Author: MukeunKim   File: core.py    MIT License 5 votes vote down vote up
def log1p(x):
    """log1p lifted to LineValue wrappers; NAN inputs map to LineValue(NAN)."""
    # Unwrap a LineValue argument; plain numbers pass through.
    if isinstance(x, LineValue):
        lx = x.get_value()
    else:
        lx = x
    # NOTE(review): if NAN is a float NaN this equality can never hold
    # (nan != nan); it only works if NAN is a sentinel object — confirm.
    if lx == NAN: return LineValue(NAN)
    return LineValue(math.log1p(lx))
Example 41
Project: privacy   Author: tensorflow   File: core.py    Apache License 2.0 5 votes vote down vote up
def _log1mexp(x):
  """Numerically stable computation of log(1-exp(x))."""
  # Two-regime split at x = -1: for very negative x, exp(x) is tiny and
  # log1p(-exp(x)) is accurate; closer to zero, expm1 avoids the
  # cancellation in 1 - exp(x).
  if x < -1:
    return math.log1p(-math.exp(x))
  elif x < 0:
    return math.log(-math.expm1(x))
  elif x == 0:
    # log(1 - 1) == log(0) == -inf.
    return -np.inf
  else:
    raise ValueError("Argument must be non-positive.")
Example 42
Project: privacy   Author: tensorflow   File: core.py    Apache License 2.0 5 votes vote down vote up
def compute_logq_gaussian(counts, sigma):
  """Returns an upper bound on ln Pr[outcome != argmax] for GNMax.

  Implementation of Proposition 7.

  Args:
    counts: A numpy array of scores.
    sigma: The standard deviation of the Gaussian noise in the GNMax mechanism.

  Returns:
    logq: Natural log of the probability that outcome is different from argmax.
  """
  n = len(counts)
  variance = sigma**2
  idx_max = np.argmax(counts)
  # Non-negative gaps between the winning count and each count.
  counts_normalized = counts[idx_max] - counts
  counts_rest = counts_normalized[np.arange(n) != idx_max]  # exclude one index
  # Upper bound q via a union bound rather than a more precise calculation.
  # logsf gives per-coordinate log P[N(0, 2*variance) > gap]; _logaddexp
  # (module helper) accumulates them in log space.
  logq = _logaddexp(
      scipy.stats.norm.logsf(counts_rest, scale=math.sqrt(2 * variance)))

  # A sketch of a more accurate estimate, which is currently disabled for two
  # reasons:
  # 1. Numerical instability;
  # 2. Not covered by smooth sensitivity analysis.
  # covariance = variance * (np.ones((n - 1, n - 1)) + np.identity(n - 1))
  # logq = np.log1p(-statsmodels.sandbox.distributions.extras.mvnormcdf(
  #     counts_rest, np.zeros(n - 1), covariance, maxpts=1e4))

  # Cap at log(1 - 1/n): the argmax is always the most likely outcome.
  return min(logq, math.log(1 - (1 / n)))
Example 43
Project: privacy   Author: tensorflow   File: core.py    Apache License 2.0 5 votes vote down vote up
def rdp_pure_eps(logq, pure_eps, orders):
  """Computes the RDP value given logq and pure privacy eps.

  Implementation of https://arxiv.org/abs/1610.05755, Theorem 3.

  The bound used is the min of three terms. The first term is from
  https://arxiv.org/pdf/1605.02065.pdf.
  The second term is based on the fact that when event has probability (1-q) for
  q close to zero, q can only change by exp(eps), which corresponds to a
  much smaller multiplicative change in (1-q)
  The third term comes directly from the privacy guarantee.

  Args:
    logq: Natural logarithm of the probability of a non-optimal outcome.
    pure_eps: eps parameter for DP
    orders: array_like list of moments to compute.

  Returns:
    Array of upper bounds on rdp (a scalar if orders is a scalar).
  """
  orders_vec = np.atleast_1d(orders)
  q = math.exp(logq)
  # Initialize to +inf so the data-dependent term drops out of the min
  # unless q is small enough for the branch below.
  log_t = np.full_like(orders_vec, np.inf)
  if q <= 1 / (math.exp(pure_eps) + 1):
    # Data-dependent bound, in log space: log1p(-q) == log(1 - q), and
    # _log1mexp (module helper) is a stable log(1 - exp(.)).
    logt_one = math.log1p(-q) + (
        math.log1p(-q) - _log1mexp(pure_eps + logq)) * (
            orders_vec - 1)
    logt_two = logq + pure_eps * (orders_vec - 1)
    log_t = np.logaddexp(logt_one, logt_two)

  # Take the minimum of the three candidate bounds from the docstring.
  ret = np.minimum(
      np.minimum(0.5 * pure_eps * pure_eps * orders_vec,
                 log_t / (orders_vec - 1)), pure_eps)
  if np.isscalar(orders):
    # NOTE(review): np.asscalar is deprecated/removed in modern NumPy —
    # use ret.item() when upgrading.
    return np.asscalar(ret)
  else:
    return ret
Example 44
Project: privacy   Author: tensorflow   File: rdp_accountant.py    Apache License 2.0 5 votes vote down vote up
def _log_add(logx, logy):
  """Add two numbers in the log space."""
  # Order so the larger exponent is factored out and exp() cannot overflow.
  a, b = min(logx, logy), max(logx, logy)
  if a == -np.inf:  # adding 0
    return b
  # Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
  return math.log1p(math.exp(a - b)) + b  # log1p(x) = log(x + 1)
Example 45
Project: fava   Author: beancount   File: ranking.py    MIT License 5 votes vote down vote up
def update(self, item, date):
        """Add 'like' for item.

        Args:
            item: An item in the list that is being ranked.
            date: The date on which the item has been liked.
        """
        score = self.get(item)
        time = date.toordinal()
        # New score = log(exp(old score) + exp(time * rate)), i.e. a
        # log-sum-exp that favors recent likes; computed stably by
        # factoring out the larger of the two terms.
        higher = max(score, time * self.rate)
        lower = min(score, time * self.rate)
        self.scores[item] = higher + math.log1p(math.exp(lower - higher))
Example 46
Project: mici   Author: matt-graham   File: utils.py    MIT License 5 votes vote down vote up
def log1p_exp(val):
    """Numerically stable implementation of `log(1 + exp(val))`."""
    # Softplus trick: for positive val, factor it out so exp() sees a
    # non-positive argument and cannot overflow.
    if val <= 0.:
        return log1p(exp(val))
    return val + log1p(exp(-val))
Example 47
Project: mici   Author: matt-graham   File: utils.py    MIT License 5 votes vote down vote up
def log1m_exp(val):
    """Numerically stable implementation of `log(1 - exp(val))`.

    Defined only for val < 0 (returns nan for val >= 0). Splits at
    val = -log(2): nearer zero, expm1 avoids the 1 - exp(val)
    cancellation; further out, log1p(-exp(val)) is accurate.
    """
    if val >= 0.:
        # 1 - exp(val) <= 0: logarithm undefined.
        return nan
    elif val > -LOG_2:
        # BUG FIX: the threshold was `val > LOG_2`, unreachable for
        # val < 0; the standard crossover is at -log(2).
        return log(-expm1(val))
    else:
        # BUG FIX: the original referenced an undefined name `a` here
        # (NameError whenever this branch was taken); it must be `val`.
        return log1p(-exp(val))
Example 48
Project: tensorflow-rl   Author: steveKapturowski   File: cts.py    Apache License 2.0 5 votes vote down vote up
def log_add(log_x, log_y):
    """Given log x and log y, returns log(x + y)."""
    # Order the operands so log_y holds the larger value.
    if log_y < log_x:
        log_x, log_y = log_y, log_x

    delta = log_y - log_x
    # Beyond ~50 nats the smaller term is below double precision, so the
    # sum is effectively just the larger term.
    if delta <= 50.0:
        # log(x + y) = log(1 + e^(log_y - log_x)) + log_x.
        return math.log1p(math.exp(delta)) + log_x
    return log_y
Example 49
Project: Project-New-Reign---Nemesis-Main   Author: ShikyoKira   File: test_math.py    GNU General Public License v3.0 5 votes vote down vote up
def testLog1p(self):
        """math.log1p must require an argument and accept huge ints."""
        self.assertRaises(TypeError, math.log1p)
        # 2**90 exceeds exact float precision; the int and float paths
        # must still (almost) agree.
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
Example 50
Project: cells   Author: AlesTsurko   File: test_math.py    MIT License 5 votes vote down vote up
def testLog1p(self):
        """Exercise math.log1p on large ints, the domain edge, and infinity."""
        # A no-argument call must fail with TypeError.
        self.assertRaises(TypeError, math.log1p)
        for value in (2, 2**90, 2**300):
            # Integer and float arguments must agree to within rounding.
            self.assertAlmostEqual(math.log1p(value), math.log1p(float(value)))
        # log1p(-1) = log(0) is a domain error.
        self.assertRaises(ValueError, math.log1p, -1)
        # log1p(+inf) is +inf.
        self.assertEqual(math.log1p(INF), INF)
Example 51
Project: qgis-shapetools-plugin   Author: NationalSecurityAgency   File: geomath.py    GNU General Public License v2.0 5 votes vote down vote up
def log1p(x):
    """log(1 + x) accurate for small x (missing from python 2.5.2)"""
    # Python 2.6+ ships math.log1p; delegate to it directly.
    if sys.version_info > (2, 6):
        return math.log1p(x)

    # Fallback: let y = 1 + x (rounded) and z = y - 1 (exact). Since
    # log(y)/z is nearly constant near z == 0, x * (log(y)/z) closely
    # approximates log(1 + x) while the multiplication by x adds only a
    # little extra error.
    y = 1 + x
    z = y - 1
    if z == 0:
        return x
    return x * math.log(y) / z
Example 52
Project: qgis-shapetools-plugin   Author: NationalSecurityAgency   File: geomath.py    GNU General Public License v2.0 5 votes vote down vote up
def atanh(x):
    """atanh(x) (missing from python 2.5.2)

    Returns the inverse hyperbolic tangent of x, delegating to math.atanh
    on Python >= 2.6 and using a log1p-based identity otherwise.
    """
    if sys.version_info > (2, 6):
        return math.atanh(x)

    y = abs(x)                  # Enforce odd parity: atanh(-x) == -atanh(x)
    # atanh(y) = log1p(2y / (1 - y)) / 2 for 0 <= y < 1.
    # Bug fix: was `Math.log1p(...)` -- `Math` is undefined here (NameError);
    # use the module's log1p compatibility shim (math.log1p itself does not
    # exist on the pre-2.6 interpreters this branch targets).
    y = log1p(2 * y/(1 - y))/2
    return -y if x < 0 else y
Example 53
Project: data_algebra   Author: WinVector   File: SQLite.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def prepare_connection(self, conn):
        """Register the scalar helper functions this adapter needs on *conn*.

        https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.create_function
        """
        conn.create_function("is_bad", 1, _check_scalar_bad)
        # math fns: each registered under its own name, unary then binary.
        one_arg_fns = [
            math.acos, math.acosh, math.asin, math.asinh, math.atan,
            math.atanh, math.ceil, math.cos, math.cosh, math.degrees,
            math.erf, math.erfc, math.exp, math.expm1, math.fabs,
            math.factorial, math.floor, math.frexp, math.gamma,
            math.isfinite, math.isinf, math.isnan, math.lgamma, math.log,
            math.log10, math.log1p, math.log2, math.modf, math.radians,
            math.sin, math.sinh, math.sqrt, math.tan, math.tanh, math.trunc,
        ]
        for fn in one_arg_fns:
            conn.create_function(fn.__name__, 1, fn)
        two_arg_fns = [
            math.atan2, math.copysign, math.fmod, math.gcd, math.hypot,
            math.isclose, math.ldexp, math.pow,
        ]
        for fn in two_arg_fns:
            conn.create_function(fn.__name__, 2, fn)
Example 54
Project: data_algebra   Author: WinVector   File: SQLite.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def prepare_connection(self, conn):
        """Attach the scalar math helpers to the SQLite connection *conn*.

        https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.create_function
        """
        conn.create_function("is_bad", 1, _check_scalar_bad)
        # math fns, grouped by arity; each is registered under its math name.
        math_fn_names = {
            1: ("acos", "acosh", "asin", "asinh", "atan", "atanh", "ceil",
                "cos", "cosh", "degrees", "erf", "erfc", "exp", "expm1",
                "fabs", "factorial", "floor", "frexp", "gamma", "isfinite",
                "isinf", "isnan", "lgamma", "log", "log10", "log1p", "log2",
                "modf", "radians", "sin", "sinh", "sqrt", "tan", "tanh",
                "trunc"),
            2: ("atan2", "copysign", "fmod", "gcd", "hypot", "isclose",
                "ldexp", "pow"),
        }
        for arity, names in math_fn_names.items():
            for fn_name in names:
                conn.create_function(fn_name, arity, getattr(math, fn_name))
Example 55
Project: ssHMM   Author: eldariont   File: seqstructhmm.py    GNU General Public License v3.0 4 votes vote down vote up
def calculate_information_contents_1000_sequences(self):
        """Calculate per-position information contents from the 1000 best sequences.

        Returns:
            A tuple (sequence, structure, combined) of lists with one entry per
            motif position, each the small-sample-corrected information content
            (in bits) of that position.
        """
        best1000 = self.get_best_n_sequences(1000)

        sequenceCounters = [Counter() for _ in range(self.motif_length)]
        structureCounters = [Counter() for _ in range(self.motif_length)]
        combinedCounters = [Counter() for _ in range(self.motif_length)]

        # Tally symbol frequencies per position over the 1000 best sequences.
        for (sequence, structure) in best1000:
            for position in range(self.motif_length):
                sequenceCounters[position][sequence[position]] += 1
                structureCounters[position][structure[position]] += 1
                combinedCounters[position][sequence[position] + str(structure[position])] += 1

        def _shannon_entropy(counter):
            # Entropy (bits) of the empirical distribution held by `counter`.
            total = float(sum(counter.values()))
            return -sum((v / total) * math.log(v / total, 2) for v in counter.values())

        def _correction(num_symbols):
            # Schneider-Stephens small-sample correction e(n) = (s-1)/(2*ln2*n)
            # with n = 1000 observations and s symbols (4 bases, 5 structure
            # contexts, 20 combined symbols).
            # Bug fix: the denominator previously used math.log1p(2) == ln(3);
            # the correction formula requires ln(2), i.e. math.log(2).
            return (num_symbols - 1.0) / (math.log(2) * 2 * 1000)

        informationContentSequence = []
        informationContentStructure = []
        informationContentCombined = []
        for position in range(self.motif_length):
            informationContentSequence.append(
                2 - (_shannon_entropy(sequenceCounters[position]) + _correction(4)))
            informationContentStructure.append(
                math.log(5, 2) - (_shannon_entropy(structureCounters[position]) + _correction(5)))
            informationContentCombined.append(
                math.log(20, 2) - (_shannon_entropy(combinedCounters[position]) + _correction(20)))

        return (informationContentSequence, informationContentStructure, informationContentCombined)
Example 56
Project: SoCDep2   Author: siavooshpayandehazad   File: SimulatedAnnealing.py    GNU General Public License v2.0 4 votes vote down vote up
def next_temp(initial_temp, iteration, max_iteration, current_temp, slope=None, standard_deviation=None):
    """Compute the next annealing temperature for the configured schedule.

    The schedule is chosen by Config.SA_AnnealingSchedule; every branch
    prints the new temperature when it actually cools.

    Args:
        initial_temp: Temperature at the start of annealing.
        iteration: Index of the current iteration.
        max_iteration: Total iteration budget (used by 'Linear').
        current_temp: Temperature used for the current iteration.
        slope: Recent cost-trend slope for the 'Adaptive' schedule; may be None.
        standard_deviation: Recent cost standard deviation for the 'Aart' and
            'Huang' schedules; may be None.

    Returns:
        The temperature to use for the next iteration.

    Raises:
        ValueError: If Config.SA_AnnealingSchedule names no known schedule.
    """
    if Config.SA_AnnealingSchedule == 'Linear':
        # Straight-line decay from initial_temp down to 0 at max_iteration.
        temp = (float(max_iteration-iteration)/max_iteration)*initial_temp
        print("\033[36m* COOLING::\033[0m CURRENT TEMP: "+str(temp))
#   ----------------------------------------------------------------
    elif Config.SA_AnnealingSchedule == 'Exponential':
        # Geometric decay by the constant factor SA_Alpha each call.
        temp = current_temp * Config.SA_Alpha
        print("\033[36m* COOLING::\033[0m CURRENT TEMP: "+str(temp))
#   ----------------------------------------------------------------
    elif Config.SA_AnnealingSchedule == 'Logarithmic':
        # this is based on "A comparison of simulated annealing cooling strategies"
        # by Yaghout Nourani and Bjarne Andresen
        temp = Config.LogCoolingConstant * (1.0/log10(1+(iteration+1)))     # iteration should be > 1 so I added 1
        print("\033[36m* COOLING::\033[0m CURRENT TEMP: "+str(temp))
#   ----------------------------------------------------------------
    elif Config.SA_AnnealingSchedule == 'Adaptive':
        # Cool only after the cost monitor has enough samples and the cost
        # slope shows the search has flattened into the configured range.
        temp = current_temp
        if iteration > Config.CostMonitorQueSize:
            if 0 < slope < Config.SlopeRangeForCooling:
                temp = current_temp * Config.SA_Alpha
                print("\033[36m* COOLING::\033[0m CURRENT TEMP: "+str(temp))
#   ----------------------------------------------------------------
    elif Config.SA_AnnealingSchedule == 'Markov':
        # Step the temperature down once per Markov chain of length MarkovNum.
        temp = initial_temp - (iteration/Config.MarkovNum)*Config.MarkovTempStep
        if temp < current_temp:
            print("\033[36m* COOLING::\033[0m CURRENT TEMP: "+str(temp))
        if temp <= 0:
            # Never let the temperature go non-positive; keep the old value.
            temp = current_temp
#   ----------------------------------------------------------------
    elif Config.SA_AnnealingSchedule == 'Aart':
        # This is coming from the following paper:
        # Job Shop Scheduling by Simulated Annealing Author(s): Peter J. M. van Laarhoven,
        # Emile H. L. Aarts, Jan Karel Lenstra
        # NOTE(review): when standard_deviation is None both guarded branches
        # are skipped and the final else keeps the temperature unchanged.
        if iteration % Config.CostMonitorQueSize == 0 and standard_deviation is not None and standard_deviation != 0:
            temp = float(current_temp)/(1+(current_temp*(log1p(Config.Delta)/standard_deviation)))
            print("\033[36m* COOLING::\033[0m CURRENT TEMP: "+str(temp))
        elif standard_deviation == 0:
            temp = float(current_temp)*Config.SA_Alpha
            print("\033[36m* COOLING::\033[0m CURRENT TEMP: "+str(temp))
        else:
            temp = current_temp
#   ----------------------------------------------------------------
    elif Config.SA_AnnealingSchedule == 'Huang':
        # Same update rule as 'Aart', but applied on every iteration rather
        # than once per cost-monitor window.
        if standard_deviation is not None and standard_deviation != 0:
            temp = float(current_temp)/(1+(current_temp*(log1p(Config.Delta)/standard_deviation)))
            print("\033[36m* COOLING::\033[0m CURRENT TEMP: "+str(temp))
        elif standard_deviation == 0:
            temp = float(current_temp)*Config.SA_Alpha
            print("\033[36m* COOLING::\033[0m CURRENT TEMP: "+str(temp))
        else:
            temp = current_temp
#   ----------------------------------------------------------------
    else:
        raise ValueError('Invalid Cooling Method for SA...')
    return temp
Example 57
Project: CovertMark   Author: icydoge   File: strategy.py    MIT License 4 votes vote down vote up
def _score_performance_stats(self):
        """
        Based on the execution time, TPR, and FPR of strategy runs, score the
        effectiveness of this strategy in identifying the input PT.

        Each acceptable run is penalised (log-scaled) for its distance from
        the best observed TPR, the FPR target, the best scaled runtime, and
        its false-block rate; the penalties are combined with the weights in
        constants.PENALTY_WEIGHTS and converted into a 0-100 score.

        :returns: a floating point score between 0 and 100 for this strategy,
            and the config under which this was achieved.
        """

        # Filter out records yielding unacceptable TPR or FPR values.
        acceptables = list(filter(lambda x: x[1]['TPR'] >= constants.TPR_BOUNDARY \
         and x[1]['FPR'] <= constants.FPR_BOUNDARY and all(x[1]), self._time_statistics.items()))
        acceptable_runs = [i[1] for i in acceptables]
        acceptable_configs = [i[0] for i in acceptables]

        # If invalid values or no acceptable runs, this strategy scores zero.
        if len(acceptable_runs) < 1:
            return 0, None

        # Sanity check: the rates must be valid probabilities.
        for i in acceptable_runs:
            if not (0 <= i['TPR'] <= 1) or not (0 <= i['FPR'] <= 1):
                return 0, None

        # Penalise runs for their differences from best TPR/FPR and time values.
        best_tpr = max([i['TPR'] for i in acceptable_runs])
        worst_time = max([i['time'] for i in acceptable_runs])
        scaled_times = [i['time'] / worst_time for i in acceptable_runs]
        best_scaled_time = min(scaled_times)

        # log1p damps large gaps; *100 rescales [0, 1] gaps before the log.
        tpr_penalties = [log1p((best_tpr - i['TPR'])*100) for i in acceptable_runs]
        fpr_penalties = [log1p((max(0, i['FPR'] - constants.FPR_TARGET))*100) for i in acceptable_runs] # Hard target for FPR.
        time_penalties = [log1p((i - best_scaled_time)*100) for i in scaled_times]

        # For IP falsely blocked rate, penalise from zero.
        block_rate_penalties = [log1p(i['block_rate']*100) for i in acceptable_runs]

        # Calculate weighted penalties across all metrics.
        overall_penalties = []
        for i in range(len(tpr_penalties)):
            overall_penalties.append(tpr_penalties[i] * constants.PENALTY_WEIGHTS[0] + \
                                     fpr_penalties[i] * constants.PENALTY_WEIGHTS[1] + \
                                     time_penalties[i] * constants.PENALTY_WEIGHTS[2] + \
                                     block_rate_penalties[i] * constants.PENALTY_WEIGHTS[3])

        # Now find out the minimum penalty required to reach the acceptable
        # TPR and FPR performance, and calculate the scores accordingly.
        # log1p(100) is the largest possible single penalty, so each score
        # lands in [0, 100].
        scores = [(log1p(100) - i) / log1p(100) * 100 for i in overall_penalties]

        # Apply strategy-specific penalisation.
        strategy_penalised_scores = []
        for i, score in enumerate(scores):
            # Clip the penalty proportion to between 0 and 1.
            strategy_penalty = sorted([0, self.config_specific_penalisation(acceptable_configs[i]), 1])[1]
            strategy_penalised_scores.append(score * (1-strategy_penalty))

        best_score = max(strategy_penalised_scores)
        best_config = acceptable_configs[strategy_penalised_scores.index(max(strategy_penalised_scores))]

        return best_score, best_config
Example 58
Project: razer   Author: rossgoodwin   File: razer_rgb.py    MIT License 4 votes vote down vote up
def r_wipe(count=5, r_color=(255,0,0), bg_color=(0,255,0), twinkle=True, line=False):
    """Sweep an 'R' glyph (or a plain column if `line`) across the keyboard.

    Args:
        count: Number of full left-to-right sweeps to perform.
        r_color: Dominant RGB tuple for the moving shape.
        bg_color: Dominant RGB tuple for the background cells.
        twinkle: If True, re-randomize background cell colors every frame.
        line: If True, sweep a bare column instead of the 'R' glyph.
    """

    def make_random_color(dominant_ix, variety):
        # Random dimmed RGB with the dominant channel forced near full
        # brightness. NOTE: `range(variety)*3` is Python 2 sequence
        # repetition; this module targets Python 2.
        init_rgb = sample(range(variety)*3, 3)
        init_rgb[dominant_ix] = choice(range(256-variety,256))
        return tuple(init_rgb)

    if line:
        starting_pt = 0
        line_variance = 64
    else:
        # Start off-screen so the glyph slides in from the left edge.
        starting_pt = -3
        line_variance = 1

    for interval in range(count):
        for i in range(starting_pt, COL_COUNT):
            if twinkle:
                dominant_bg_ix = bg_color.index(max(bg_color))
                rgb_list = [
                    make_random_color(dominant_bg_ix, 100) for y in range(ROW_COUNT*COL_COUNT)
                ]
            else:
                rgb_list = [bg_color]*ROW_COUNT*COL_COUNT

            # One cell index per row for the current column i.
            rl = [i+x for x in range(0,ROW_COUNT*COL_COUNT,16)]

            if not line:
                # Extra cells fleshing out the 'R' glyph around the column.
                # Bug fix: to_add was left undefined when none of the
                # branches below matched (i > 14), crashing at extend().
                to_add = []
                # NOTE(review): these overlapping ranges are order-dependent;
                # the first match wins, so the `-2 <= i <= -1` branch is
                # unreachable. Preserved as written -- confirm the intended
                # glyph shape before reordering.
                if -3 <= i <= -1:
                    to_add = [ rl[0]+3, rl[1]+3, rl[4]+3, rl[5]+3 ]
                elif -2 <= i <= -1:
                    to_add = [ rl[0]+2, rl[0]+3, rl[1]+3, rl[2]+2, rl[3]+2, rl[4]+2, rl[4]+3, rl[5]+3 ]
                elif -1 <= i <= 12:
                    to_add = [ rl[0]+1, rl[0]+2, rl[0]+3, rl[1]+3, rl[2]+1, rl[2]+2, rl[3]+2, rl[4]+2, rl[4]+3, rl[5]+3 ]
                elif -1 <= i <= 13:
                    to_add = [ rl[0]+1, rl[0]+2, rl[2]+1, rl[2]+2, rl[3]+2, rl[4]+2 ]
                elif -1 <= i <= 14:
                    to_add = [ rl[0]+1, rl[2]+1 ]

                # Bug fix: was `if rl < 0:` -- a list-vs-int comparison
                # (TypeError on Python 3, always False on Python 2). The
                # intent is: while the column is off-screen, draw only the
                # visible glyph cells instead of the whole column.
                if i < 0:
                    rl = to_add
                else:
                    rl.extend(to_add)

            dominant_r_ix = r_color.index(max(r_color))
            for ix in rl:
                rgb_list[ix] = make_random_color(dominant_r_ix, line_variance)

            set_keyboard_rgb(rgb_list)

            # Ease the frame delay near the glyph's focal column (i == 6).
            sleep( log1p( 0.3 / ( abs(6-i)+1 ) ) )
Example 59
Project: topical_word_embeddings   Author: thunlp   File: tfidfmodel.py    MIT License 4 votes vote down vote up
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """Build a tf-idf model.

        The unnormalized weight of term `i` in document `j` over a corpus of
        D documents is::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        `wlocal` defaults to identity and `wglobal` to `log_2(D / doc_freq)`,
        i.e. plain tf-idf; plug in your own callables for other weightings
        (e.g. math.sqrt or math.log1p for `wlocal`).

        `normalize` controls post-processing of each transformed vector:
        True (the default) scales it to unit length, False leaves it
        untouched, and a callable is applied to the sparse vector directly.

        If `dictionary` is given it must be a `corpora.Dictionary` object;
        its statistics are used to build the inverse-document-frequency
        mapping directly and any `corpus` argument is ignored. Otherwise, a
        supplied `corpus` is scanned to initialize the model; with neither,
        everything is left uninitialized for later setup.
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal = wlocal
        self.wglobal = wglobal
        self.num_docs = None
        self.num_nnz = None
        self.idfs = None
        if dictionary is not None:
            # The Dictionary already carries every statistic the idf mapping
            # needs, so the pass over the corpus can be skipped entirely.
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs = dictionary.num_docs
            self.num_nnz = dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        # With neither dictionary nor corpus, the model stays uninitialized;
        # presumably it will be initialized in some other way.
Example 60
Project: topical_word_embeddings   Author: thunlp   File: tfidfmodel.py    MIT License 4 votes vote down vote up
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """Build a tf-idf model.

        The unnormalized weight of term `i` in document `j` over a corpus of
        D documents is::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        `wlocal` defaults to identity and `wglobal` to `log_2(D / doc_freq)`,
        i.e. plain tf-idf; plug in your own callables for other weightings
        (e.g. math.sqrt or math.log1p for `wlocal`).

        `normalize` controls post-processing of each transformed vector:
        True (the default) scales it to unit length, False leaves it
        untouched, and a callable is applied to the sparse vector directly.

        If `dictionary` is given it must be a `corpora.Dictionary` object;
        its statistics are used to build the inverse-document-frequency
        mapping directly and any `corpus` argument is ignored. Otherwise, a
        supplied `corpus` is scanned to initialize the model; with neither,
        everything is left uninitialized for later setup.
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal = wlocal
        self.wglobal = wglobal
        self.num_docs = None
        self.num_nnz = None
        self.idfs = None
        if dictionary is not None:
            # The Dictionary already carries every statistic the idf mapping
            # needs, so the pass over the corpus can be skipped entirely.
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs = dictionary.num_docs
            self.num_nnz = dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        # With neither dictionary nor corpus, the model stays uninitialized;
        # presumably it will be initialized in some other way.
Example 61
Project: topical_word_embeddings   Author: thunlp   File: tfidfmodel.py    MIT License 4 votes vote down vote up
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """Build a tf-idf model.

        The unnormalized weight of term `i` in document `j` over a corpus of
        D documents is::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        `wlocal` defaults to identity and `wglobal` to `log_2(D / doc_freq)`,
        i.e. plain tf-idf; plug in your own callables for other weightings
        (e.g. math.sqrt or math.log1p for `wlocal`).

        `normalize` controls post-processing of each transformed vector:
        True (the default) scales it to unit length, False leaves it
        untouched, and a callable is applied to the sparse vector directly.

        If `dictionary` is given it must be a `corpora.Dictionary` object;
        its statistics are used to build the inverse-document-frequency
        mapping directly and any `corpus` argument is ignored. Otherwise, a
        supplied `corpus` is scanned to initialize the model; with neither,
        everything is left uninitialized for later setup.
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal = wlocal
        self.wglobal = wglobal
        self.num_docs = None
        self.num_nnz = None
        self.idfs = None
        if dictionary is not None:
            # The Dictionary already carries every statistic the idf mapping
            # needs, so the pass over the corpus can be skipped entirely.
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs = dictionary.num_docs
            self.num_nnz = dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        # With neither dictionary nor corpus, the model stays uninitialized;
        # presumably it will be initialized in some other way.