Python math.log10() Examples

The following are code examples showing how to use math.log10(). They are drawn from open source Python projects.
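
For quick reference, math.log10(x) returns the base-10 logarithm of x and raises ValueError for zero or negative input. A minimal standalone sketch:

import math

print(math.log10(1000))      # 3.0
print(math.log10(0.01))      # -2.0
print(math.log10(math.e))    # 0.43429448..., i.e. 1 / math.log(10)
try:
    math.log10(0)            # zero and negative values are outside the domain
except ValueError as err:
    print(err)               # "math domain error"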

Example 1
Project: pyblish-win   Author: pyblish   File: test_long.py   License: GNU Lesser General Public License v3.0
def test_logs(self):
        LOG10E = math.log10(math.e)

        for exp in range(10) + [100, 1000, 10000]:  # Python 2: range() returns a list here; Example 10 is the Python 3 version
            value = 10 ** exp
            log10 = math.log10(value)
            self.assertAlmostEqual(log10, exp)

            # log10(value) == exp, so log(value) == log10(value)/log10(e) ==
            # exp/LOG10E
            expected = exp / LOG10E
            log = math.log(value)
            self.assertAlmostEqual(log, expected)

        for bad in -(1L << 10000), -2L, 0L:
            self.assertRaises(ValueError, math.log, bad)
            self.assertRaises(ValueError, math.log10, bad) 
Example 2
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: super_resolution.py   License: Apache License 2.0
def test(ctx):
    val_data.reset()
    avg_psnr = 0
    batches = 0
    for batch in val_data:
        batches += 1
        data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        outputs = []
        for x in data:
            outputs.append(net(x))
        metric.update(label, outputs)
        avg_psnr += 10 * math.log10(1/metric.get()[1])
        metric.reset()
    avg_psnr /= batches
    print('validation avg psnr: %f'%avg_psnr) 
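
The accumulation above is the standard PSNR definition, PSNR = 10 * log10(MAX**2 / MSE); with pixel values scaled to [0, 1], MAX is 1 and the expression reduces to 10 * log10(1 / MSE). A framework-free sketch of the same formula (the helper name and the 1.0 peak value are illustrative assumptions, not part of the project code):

import math

def psnr(mse, max_value=1.0):
    """Peak signal-to-noise ratio in dB for a given mean squared error."""
    return 10.0 * math.log10(max_value ** 2 / mse)

print(psnr(0.01))   # 20.0 dB for MSE = 0.01 on a [0, 1] signal range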
Example 3
Project: aurora   Author: carnby   File: tasks.py   License: MIT License
def select_tweets(timeline, allow_rts=True, allow_replies=False, popular_only=True):
    texts = []

    for t in timeline:
        if not 'retweeted_status' in t:
            if not allow_replies and t['in_reply_to_status_id_str']:
                continue
            t['tweet_score'] = log(t['retweet_count'] + 1.0) + log(t['favorite_count'] + 1.0)
            t['__is_rt__'] = False
            texts.append(t)
        else:
            if allow_rts:
                t['retweeted_status']['tweet_score'] = log10(t['retweet_count'] + 1.0) + log10(t['favorite_count'] + 1.0)
                t['retweeted_status']['source_created_at'] = t['retweeted_status']['created_at']
                t['retweeted_status']['created_at'] = t['created_at']
                t['retweeted_status']['text'] = t['retweeted_status']['text']
                t['retweeted_status']['__is_rt__'] = True
                texts.append(t['retweeted_status'])

    #texts = sorted(texts, key=lambda x: x['tweet_score'], reverse=True)[0:100]
    if popular_only:
        texts = list(filter(lambda x: x['tweet_score'] > 0, texts))

    return texts 
Example 4
Project: xia2   Author: xia2   File: DialsScaler.py   License: BSD 3-Clause "New" or "Revised" License
def split_experiments(self, experiment, reflection, sweep_handler):
        """Split a multi-experiment dataset into individual datasets and set in the
        sweep handler."""
        splitter = SplitExperiments()
        splitter.add_experiments(experiment)
        splitter.add_reflections(reflection)
        splitter.set_working_directory(self.get_working_directory())
        auto_logfiler(splitter)
        splitter.run()

        nn = len(sweep_handler.get_epochs())
        fmt = "%%0%dd" % (math.log10(nn) + 1)

        for i, epoch in enumerate(sweep_handler.get_epochs()):
            si = sweep_handler.get_sweep_information(epoch)
            nums = fmt % i
            si.set_reflections(
                os.path.join(self.get_working_directory(), "split_%s.refl" % nums)
            )
            si.set_experiments(
                os.path.join(self.get_working_directory(), "split_%s.expt" % nums)
            )
        return sweep_handler 
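
The "%%0%dd" construction above derives a zero-padded field width from the sweep count: for n >= 1, int(math.log10(n)) + 1 is the number of decimal digits in n. A small sketch of the same idea (the helper name is illustrative):

import math

def zero_pad_format(count):
    """Build a printf-style format wide enough to number `count` items."""
    width = int(math.log10(count)) + 1 if count > 0 else 1
    return "%%0%dd" % width

fmt = zero_pad_format(250)   # "%03d"
print(fmt % 7)               # "007"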
Example 5
Project: pymoku   Author: liquidinstruments   File: _specan.py   License: MIT License
def _compensation_singen(self):
        # Increase sinewave amplitude as span is reduced. Don't consider
        # spans less than 1 kHz
        span = max(self.f2 - self.f1, 1.0e3)
        self.demod_sinegen_bitshift = max(
            min(17 - math.floor(2.4 * math.log10(span)), 7), 0)
        self.demod_sinegen_enable = (span < 2.0e6) and (
            self.demod_sinegen_bitshift != 0)

        # Phase dither to broaden sinewave peak to ~512 FFT points
        self.demod_phase_bitshift = round(
            14.0 - 0.58 * math.log(1.0e6 / span, 2))

        # Sinewave frequency. Place at 1.9 screens from DC. Need to correct
        # for phase dither offset
        fbin_resolution = _SA_ADC_SMPS / 2.0 / _SA_FFT_LENGTH / (
            self._total_decimation)
        desired_frequency = fbin_resolution * _SA_FFT_LENGTH * 0.475
        phase_step = min(round(desired_frequency / _SA_DAC_SMPS * 2 ** 32),
                         2 ** 32)
        self.demod_sinegen_freq = phase_step - round(
            31.0 / 32.0 * 2 ** (self.demod_phase_bitshift + 4)) 
Example 6
Project: ns3   Author: KyleBenson   File: hud.py   License: GNU General Public License v2.0
def _compute_divisions(self, xi, xf):
        assert xf > xi
        dx = xf - xi
        size = dx
        ndiv = 5
        text_width = dx/ndiv/2

        def rint(x):
            return math.floor(x+0.5)
        
        dx_over_ndiv = dx / ndiv
        for n in range(5): # iterate 5 times to find optimum division size
            #/* div: length of each division */
            tbe = math.log10(dx_over_ndiv)#;   /* looking for approx. 'ndiv' divisions in a length 'dx' */
            div = pow(10, rint(tbe))#;	/* div: power of 10 closest to dx/ndiv */
            if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv): #/* test if div/2 is closer to dx/ndiv */
                div /= 2
            elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
                div *= 2 #			/* test if div*2 is closer to dx/ndiv */
            x0 = div*math.ceil(xi / div) - div
            if n > 1:
                ndiv = rint(size / text_width)
        return x0, div 
Example 7
Project: geophys_utils   Author: GeoscienceAustralia   File: aseg_gdf_utils.py   License: Apache License 2.0
def dfrexp(f):
    '''
    Decimal version of frexp or np.frexp function to return mantissa & exponent
    @param f: Floating point scalar or array
    @return fman: Scalar or array decimal mantissa between 0.0 and 1.0 
    @return fexp: Scalar or array decimal exponent
    '''
    # Compute decimal exponent
    if type(f) == np.ndarray:
        fexp = np.zeros(shape=f.shape, dtype='int32')
        fexp[f != 0] = np.ceil(np.log10(np.abs(f[f != 0]))).astype('int32')
    else: # Scalar
        fexp = int(ceil(log10(abs(f)))) if f != 0 else 0
            
    # Compute decimal mantissa between 0.0 and 1.0
    fman = f/10.0**fexp
    
    logger.debug('fman: {}'.format(fman))
    logger.debug('fexp: {}'.format(fexp))
    
    return fman, fexp


# Approximate maximum number of significant decimal figures for each signed datatype 
Example 8
Project: LaserTOF   Author: kyleuckert   File: ticker.py   License: MIT License
def _set_orderOfMagnitude(self, range):
        # if scientific notation is to be used, find the appropriate exponent
        # if using an numerical offset, find the exponent after applying the
        # offset
        if not self._scientific:
            self.orderOfMagnitude = 0
            return
        locs = np.absolute(self.locs)
        if self.offset:
            oom = math.floor(math.log10(range))
        else:
            if locs[0] > locs[-1]:
                val = locs[0]
            else:
                val = locs[-1]
            if val == 0:
                oom = 0
            else:
                oom = math.floor(math.log10(val))
        if oom <= self._powerlimits[0]:
            self.orderOfMagnitude = oom
        elif oom >= self._powerlimits[1]:
            self.orderOfMagnitude = oom
        else:
            self.orderOfMagnitude = 0 
Example 9
Project: OpenBench   Author: AndyGrant   File: utils.py   License: GNU General Public License v3.0
def ELO(wins, losses, draws):

    def _elo(x):
        if x <= 0 or x >= 1: return 0.0
        return -400*math.log10(1/x-1)

    # win/loss/draw ratio
    N = wins + losses + draws;
    if N == 0: return (0, 0, 0)
    w = float(wins)  / N
    l = float(losses)/ N
    d = float(draws) / N

    # mu is the empirical mean of the variables (Xi), assumed i.i.d.
    mu = w + d/2

    # stdev is the empirical standard deviation of the random variable (X1+...+X_N)/N
    stdev = math.sqrt(w*(1-mu)**2 + l*(0-mu)**2 + d*(0.5-mu)**2) / math.sqrt(N)

    # 95% confidence interval for mu
    mu_min = mu + phi_inv(0.025) * stdev
    mu_max = mu + phi_inv(0.975) * stdev

    return (_elo(mu_min), _elo(mu), _elo(mu_max)) 
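
The inner _elo helper inverts the logistic Elo expectation: a score fraction x maps to -400 * log10(1/x - 1), so an even 50% score is 0 Elo and the mapping is symmetric around it. A quick check of that transform on its own (phi_inv, used above for the confidence bounds, is defined elsewhere in OpenBench's utils):

import math

def elo_from_score(x):
    # Same transform as _elo above, without the clamping of degenerate scores
    return -400.0 * math.log10(1.0 / x - 1.0)

print(elo_from_score(0.75))   # ~190.85: a 75% score is roughly +191 Elo
print(elo_from_score(0.25))   # ~-190.85: symmetric for the weaker side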
Example 10
Project: NiujiaoDebugger   Author: MrSrc   File: test_long.py   License: GNU General Public License v3.0
def test_logs(self):
        LOG10E = math.log10(math.e)

        for exp in list(range(10)) + [100, 1000, 10000]:
            value = 10 ** exp
            log10 = math.log10(value)
            self.assertAlmostEqual(log10, exp)

            # log10(value) == exp, so log(value) == log10(value)/log10(e) ==
            # exp/LOG10E
            expected = exp / LOG10E
            log = math.log(value)
            self.assertAlmostEqual(log, expected)

        for bad in -(1 << 10000), -2, 0:
            self.assertRaises(ValueError, math.log, bad)
            self.assertRaises(ValueError, math.log10, bad) 
Example 11
Project: kucher   Author: Zubax   File: textual.py   License: GNU General Public License v3.0
def _get_numpy_formatter(dtype: numpy.dtype) -> dict:
    """Formatter construction can be very slow, we optimize it by caching the results"""
    try:
        if dtype == numpy.bool_:
            return {
                'bool': '{:d}'.format       # Formatting as integer to conserve space
            }
        else:
            info = numpy.iinfo(dtype)
            item_length = max(len(str(info.max)), len(str(info.min)))
            return {
                'int_kind': ('{' + f':{item_length}' + '}').format
            }
    except ValueError:
        decimals = int(abs(math.log10(numpy.finfo(dtype).resolution)) + 0.5)
        return {
            # '@' placeholder is substituted with the precision computed above
            'float_kind': '{:#.@g}'.replace('@', str(decimals)).format
        } 
Example 12
Project: gymwipe   Author: bjoluc   File: physical.py   License: GNU General Public License v3.0
def calculateEbToN0Ratio(signalPower: float, noisePower: float, bitRate: float,
                            returnDb: bool = False) -> float:
    """
    Computes :math:`E_b/N_0 = \\frac{S}{N_0 R}` (the "ratio of signal energy per
    bit to noise power density per Hertz" :cite:`stallings2005data`) given the
    signal power :math:`S_{dBm}`, the noise power :math:`N_{0_{dBm}}`, and the
    bit rate :math:`R`, according to p. 95 of :cite:`stallings2005data`.

    Args:
        signalPower: The signal power :math:`S` in dBm
        noisePower: The noise power :math:`N_0` in dBm
        bitRate: The bit rate :math:`R` in bps
        returnDb: If set to ``True``, the ratio will be returned in dB.
    """
    ratio_db = signalPower - noisePower - 10*log10(bitRate)
    if returnDb:
        return ratio_db
    return 10**(ratio_db/10) 
Example 13
Project: mathematics_dataset   Author: deepmind   File: arithmetic.py   License: Apache License 2.0
def simplify_surd(value, sample_args, context=None):
  """E.g., "Simplify (2 + 5*sqrt(3))**2."."""
  del value  # unused
  if context is None:
    context = composition.Context()

  entropy, sample_args = sample_args.peel()

  while True:
    base = random.randint(2, 20)
    if sympy.Integer(base).is_prime:
      break
  num_primes_less_than_20 = 8
  entropy -= math.log10(num_primes_less_than_20)
  exp = _sample_surd(base, entropy, max_power=2, multiples_only=False)
  simplified = sympy.expand(sympy.simplify(exp))

  template = random.choice([
      'Simplify {exp}.',
  ])
  return example.Problem(
      question=example.question(context, template, exp=exp),
      answer=simplified) 
Example 14
Project: mathematics_dataset   Author: deepmind   File: numbers.py   License: Apache License 2.0
def _semi_prime(entropy):
  """Generates a semi-prime with the given entropy."""
  # Add on extra entropy to account for the sparsity of the primes; we don't
  # actually use the integers sampled, but rather a random prime close to them;
  # thus some entropy is lost, which we must account for
  entropy += math.log10(max(1, entropy * math.log(10)))

  # We intentionally uniformly sample the "entropy" (i.e., approx number of digits)
  # of the two factors.
  entropy_1, entropy_2 = entropy * np.random.dirichlet([1, 1])

  # Need >= 2 for randprime to always work (Bertrand's postulate).
  approx_1 = number.integer(entropy_1, signed=False, min_abs=2)
  approx_2 = number.integer(entropy_2, signed=False, min_abs=2)

  factor_1 = sympy.ntheory.generate.randprime(approx_1 / 2, approx_1 * 2)
  factor_2 = sympy.ntheory.generate.randprime(approx_2 / 2, approx_2 * 2)

  return factor_1 * factor_2 
Example 15
Project: ns3   Author: bvamanan   File: hud.py   License: GNU General Public License v2.0
def _compute_divisions(self, xi, xf):
        assert xf > xi
        dx = xf - xi
        size = dx
        ndiv = 5
        text_width = dx/ndiv/2

        def rint(x):
            return math.floor(x+0.5)
        
        dx_over_ndiv = dx / ndiv
        for n in range(5): # iterate 5 times to find optimum division size
            #/* div: length of each division */
            tbe = math.log10(dx_over_ndiv)#;   /* looking for approx. 'ndiv' divisions in a length 'dx' */
            div = pow(10, rint(tbe))#;	/* div: power of 10 closest to dx/ndiv */
            if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv): #/* test if div/2 is closer to dx/ndiv */
                div /= 2
            elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
                div *= 2 #			/* test if div*2 is closer to dx/ndiv */
            x0 = div*math.ceil(xi / div) - div
            if n > 1:
                ndiv = rint(size / text_width)
        return x0, div 
Example 16
Project: mptcp_with_machine_learning   Author: JamesRaynor67   File: hud.py   License: GNU General Public License v2.0
def _compute_divisions(self, xi, xf):
        assert xf > xi
        dx = xf - xi
        size = dx
        ndiv = 5
        text_width = dx/ndiv/2

        def rint(x):
            return math.floor(x+0.5)
        
        dx_over_ndiv = dx / ndiv
        for n in range(5): # iterate 5 times to find optimum division size
            #/* div: length of each division */
            tbe = math.log10(dx_over_ndiv)#;   /* looking for approx. 'ndiv' divisions in a length 'dx' */
            div = pow(10, rint(tbe))#;	/* div: power of 10 closest to dx/ndiv */
            if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv): #/* test if div/2 is closer to dx/ndiv */
                div /= 2
            elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
                div *= 2 #			/* test if div*2 is closer to dx/ndiv */
            x0 = div*math.ceil(xi / div) - div
            if n > 1:
                ndiv = rint(size / text_width)
        return x0, div 
Example 17
Project: Gurux.DLMS.Python   Author: Gurux   File: GXDLMSExtendedRegister.py   License: GNU General Public License v2.0
def setValue(self, settings, e):
        #pylint: disable=broad-except
        if e.index == 1:
            self.logicalName = _GXCommon.toLogicalName(e.value)
        elif e.index == 2:
            if self.scaler != 0 and e.value:
                try:
                    if settings.isServer:
                        self.value = e.value
                    else:
                        self.value = e.value * math.log10(self.scaler)
                except Exception:
                    #  Sometimes scaler is set for wrong Object type.
                    self.value = e.value
            else:
                self.value = e.value
        elif e.index == 3:
            #  Set default values.
            if not e.value:
                self.scaler = 0
                self.unit = 0
            else:
                if not e.value:
                    self.scaler = 0
                    self.unit = 0
                else:
                    self.scaler = e.value[0]
                    self.unit = e.value[1]
        elif e.index == 4:
            self.status = e.value
        elif e.index == 5:
            if e.value is None:
                self.captureTime = GXDateTime()
            else:
                if isinstance(e.value, bytearray):
                    self.captureTime = _GXCommon.changeType(e.value, DataType.DATETIME)
                else:
                    self.captureTime = e.value
        else:
            e.error = ErrorCode.READ_WRITE_DENIED 
Example 18
Project: Gurux.DLMS.Python   Author: Gurux   File: GXDLMSRegister.py   License: GNU General Public License v2.0
def setValue(self, settings, e):
        if e.index == 1:
            self.logicalName = _GXCommon.toLogicalName(e.value)
        elif e.index == 2:
            if self.scaler != 0 and e.value:
                try:
                    if settings.isServer:
                        self.value = e.value
                    else:
                        self.value = e.value * math.log10(self.scaler)
                except Exception:
                    #  Sometimes scaler is set for wrong Object type.
                    self.value = e.value
            else:
                self.value = e.value
        elif e.index == 3:
            #  Set default values.
            if not e.value:
                self.scaler = 0
                self.unit = 0
            else:
                if not e.value:
                    self.scaler = 0
                    self.unit = 0
                else:
                    self.scaler = e.value[0]
                    self.unit = e.value[1]
        else:
            e.error = ErrorCode.READ_WRITE_DENIED 
Example 19
Project: AboveTustin   Author: kevinabrandon   File: flightdata.py   License: MIT License
def _parse_aircraft_data(self, a, time):
        alt = a.get('Alt', 0)
        dist = -1
        az = 0
        el = 0
        if 'Lat' in a and 'Long' in a:
            rec_pos = (receiver_latitude, receiver_longitude)
            ac_pos = (a['Lat'], a['Long'])
            dist = geomath.distance(rec_pos, ac_pos)
            az = geomath.bearing(rec_pos, ac_pos)
            el = math.degrees(math.atan(alt / (dist * 5280)))
        speed = 0
        if 'Spd' in a:
            speed = geomath.knot2mph(a['Spd'])
        if 'PosTime' in a:
            last_seen_time = datetime.fromtimestamp(a['PosTime'] / 1000.0)
            seen = (time - last_seen_time).total_seconds()
        else:
            seen = 0
        ac_data = AirCraftData(
            a.get('Icao', None).upper(),
            a.get('Sqk', None),
            a.get('Call', None),
            a.get('Reg', None),
            a.get('Lat', None),
            a.get('Long', None),
            alt,
            a.get('Vsi', 0),
            a.get('Trak', None),
            speed,
            a.get('CMsgs', None),
            seen,
            a.get('Mlat', False),
            None,  # NUCP
            None,  # Seen pos
            10.0 * math.log10(a.get('Sig', 0) / 255.0 + 1e-5),
            dist,
            az,
            el,
            time)
        return ac_data 
Example 20
Project: phrydy   Author: Josef-Friedrich   File: mediafile.py   License: MIT License
def _sc_decode(soundcheck):
    """Convert a Sound Check bytestring value to a (gain, peak) tuple as
    used by ReplayGain.
    """
    # We decode binary data. If one of the formats gives us a text
    # string, interpret it as UTF-8.
    if isinstance(soundcheck, six.text_type):
        soundcheck = soundcheck.encode('utf-8')

    # SoundCheck tags consist of 10 numbers, each represented by 8
    # characters of ASCII hex preceded by a space.
    try:
        soundcheck = codecs.decode(soundcheck.replace(b' ', b''), 'hex')
        soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
    except (struct.error, TypeError, binascii.Error):
        # SoundCheck isn't in the format we expect, so return default
        # values.
        return 0.0, 0.0

    # SoundCheck stores absolute calculated/measured RMS value in an
    # unknown unit. We need to find the ratio of this measurement
    # compared to a reference value of 1000 to get our gain in dB. We
    # play it safe by using the larger of the two values (i.e., the most
    # attenuation).
    maxgain = max(soundcheck[:2])
    if maxgain > 0:
        gain = math.log10(maxgain / 1000.0) * -10
    else:
        # Invalid gain value found.
        gain = 0.0

    # SoundCheck stores peak values as the actual value of the sample,
    # and again separately for the left and right channels. We need to
    # convert this to a percentage of full scale, which is 32768 for a
    # 16 bit sample. Once again, we play it safe by using the larger of
    # the two values.
    peak = max(soundcheck[6:8]) / 32768.0

    return round(gain, 2), round(peak, 6) 
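
The gain line above expresses the SoundCheck RMS figure in dB relative to a reference of 1000, with the sign flipped so that louder material gets more attenuation. A few spot values of that expression:

import math

for maxgain in (2000, 4000, 10000):
    print(maxgain, round(math.log10(maxgain / 1000.0) * -10, 2))
# 2000 -> -3.01 dB, 4000 -> -6.02 dB, 10000 -> -10.0 dB (1000 itself maps to 0 dB)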
Example 21
Project: pyblish-win   Author: pyblish   File: test_math.py   License: GNU Lesser General Public License v3.0
def testLog10(self):
        self.assertRaises(TypeError, math.log10)
        self.ftest('log10(0.1)', math.log10(0.1), -1)
        self.ftest('log10(1)', math.log10(1), 0)
        self.ftest('log10(10)', math.log10(10), 1)
        self.assertEqual(math.log(INF), INF)
        self.assertRaises(ValueError, math.log10, NINF)
        self.assertTrue(math.isnan(math.log10(NAN)))
        # Log values should match for int and long (issue #18739).
        for n in range(1, 1000):
            self.assertEqual(math.log10(n), math.log10(long(n))) 
Example 22
Project: PersonalRecommendation   Author: ma-zhiyuan   File: item_cf.py   License: Apache License 2.0
def update_one_contribute_score(user_total_click_num):
    """
    item cf update sim contribution score by user
    """
    return 1/math.log10(1+user_total_click_num) 
Example 23
Project: PersonalRecommendation   Author: ma-zhiyuan   File: user_cf.py   License: Apache License 2.0
def update_contribution_score(item_user_click_count):
    """
    usercf user contribution score update v1
    Args:
        item_user_click_count: how many users have clicked this item
    Return:
        contribution score
    """
    return 1/math.log10(1 + item_user_click_count) 
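
Both PersonalRecommendation helpers apply the same 1 / log10(1 + n) penalty so that very active users (and very popular items) contribute less to the similarity scores. A quick illustration of how the weight decays:

import math

for clicks in (9, 99, 999):
    print(clicks, 1 / math.log10(1 + clicks))
# 9 -> 1.0, 99 -> 0.5, 999 -> ~0.333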
Example 24
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py   License: Apache License 2.0
def __log10__(self):

        rpart = math.sqrt((self.real * self.real) + (self.imag * self.imag))
        ipart = math.atan2(self.imag,self.real)
        if ipart > math.pi:
            ipart = ipart - (2.0 * math.pi)
        
        return Complex(math.log10(rpart), (1 /math.log(10)) * ipart) 
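
The real part above is log10 of the modulus and the imaginary part is the argument divided by ln(10), which is the same value the standard library's cmath.log10 returns. A quick cross-check, independent of the pymep Complex class:

import cmath
import math

z = 3 + 4j
rpart = math.log10(abs(z))                         # log10 of the modulus
ipart = math.atan2(z.imag, z.real) / math.log(10)  # argument scaled by 1/ln(10)
print(complex(rpart, ipart))                       # ~(0.69897+0.402719j)
print(cmath.log10(z))                              # same value from the stdlib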
Example 25
Project: audio   Author: pytorch   File: transforms.py   License: BSD 2-Clause "Simplified" License
def __init__(self, stype='power', top_db=None):
        super(AmplitudeToDB, self).__init__()
        self.stype = stype
        if top_db is not None and top_db < 0:
            raise ValueError('top_db must be positive value')
        self.top_db = torch.jit.Attribute(top_db, Optional[float])
        self.multiplier = 10.0 if stype == 'power' else 20.0
        self.amin = 1e-10
        self.ref_value = 1.0
        self.db_multiplier = math.log10(max(self.amin, self.ref_value)) 
Example 26
Project: audio   Author: pytorch   File: functional.py   License: BSD 2-Clause "Simplified" License
def amplitude_to_DB(x, multiplier, amin, db_multiplier, top_db=None):
    # type: (Tensor, float, float, float, Optional[float]) -> Tensor
    r"""Turn a tensor from the power/amplitude scale to the decibel scale.

    This output depends on the maximum value in the input tensor, and so
    may return different values for an audio clip split into snippets vs. a
    full clip.

    Args:
        x (torch.Tensor): Input tensor before being converted to decibel scale
        multiplier (float): Use 10. for power and 20. for amplitude
        amin (float): Number to clamp ``x``
        db_multiplier (float): Log10(max(reference value and amin))
        top_db (Optional[float]): Minimum negative cut-off in decibels. A reasonable number
            is 80. (Default: ``None``)

    Returns:
        torch.Tensor: Output tensor in decibel scale
    """
    x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
    x_db -= multiplier * db_multiplier

    if top_db is not None:
        x_db = x_db.clamp(min=x_db.max().item() - top_db)

    return x_db 
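
Away from the top_db clamp, the tensor math above is the familiar decibel conversion multiplier * log10(x / ref), with x floored at amin and db_multiplier holding the precomputed log10(max(amin, ref)). A scalar sketch of the same arithmetic in plain Python (no torch; the function name is illustrative):

import math

def amplitude_to_db_scalar(x, multiplier=10.0, amin=1e-10, ref=1.0):
    # multiplier * log10(clamp(x, amin)) - multiplier * log10(max(amin, ref))
    return multiplier * math.log10(max(x, amin)) - multiplier * math.log10(max(amin, ref))

print(amplitude_to_db_scalar(1.0))              # 0.0 dB relative to the reference
print(round(amplitude_to_db_scalar(0.001), 3))  # -30.0 dB for a power ratio of 1e-3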
Example 27
Project: audio   Author: pytorch   File: functional.py   License: BSD 2-Clause "Simplified" License
def create_fb_matrix(n_freqs, f_min, f_max, n_mels, sample_rate):
    # type: (int, float, float, int, int) -> Tensor
    r"""Create a frequency bin conversion matrix.

    Args:
        n_freqs (int): Number of frequencies to highlight/apply
        f_min (float): Minimum frequency (Hz)
        f_max (float): Maximum frequency (Hz)
        n_mels (int): Number of mel filterbanks
        sample_rate (int): Sample rate of the audio waveform

    Returns:
        torch.Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
        meaning number of frequencies to highlight/apply to x the number of filterbanks.
        Each column is a filterbank so that assuming there is a matrix A of
        size (..., ``n_freqs``), the applied result would be
        ``A * create_fb_matrix(A.size(-1), ...)``.
    """
    # freq bins
    # Equivalent filterbank construction by Librosa
    all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
    i_freqs = all_freqs.ge(f_min) & all_freqs.le(f_max)
    freqs = all_freqs[i_freqs]

    # calculate mel freq bins
    # hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
    m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
    m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
    m_pts = torch.linspace(m_min, m_max, n_mels + 2)
    # mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
    f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
    # calculate the difference between each mel point and each stft freq point in hertz
    f_diff = f_pts[1:] - f_pts[:-1]  # (n_mels + 1)
    slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1)  # (n_freqs, n_mels + 2)
    # create overlapping triangles
    zero = torch.zeros(1)
    down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1]  # (n_freqs, n_mels)
    up_slopes = slopes[:, 2:] / f_diff[1:]  # (n_freqs, n_mels)
    fb = torch.max(zero, torch.min(down_slopes, up_slopes))
    return fb 
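
The m_min/m_max lines use the HTK-style mel formula mel(f) = 2595 * log10(1 + f / 700), and the f_pts line inverts it. A standalone pair of helpers showing that round trip (helper names are illustrative):

import math

def hz_to_mel(f):
    return 2595.0 * math.log10(1.0 + f / 700.0)

def mel_to_hz(m):
    return 700.0 * (10.0 ** (m / 2595.0) - 1.0)

print(round(hz_to_mel(1000.0), 2))             # ~1000 mel at 1 kHz
print(round(mel_to_hz(hz_to_mel(4000.0)), 6))  # 4000.0, the mapping round-trips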
Example 28
Project: pymoku   Author: liquidinstruments   File: _frequency_response_analyzer_data.py   License: MIT License
def __init__(self, input_signal, gain_correction,
                 front_end_scale, output_amp):

        # Extract the length of the signal (this varies with number of
        # sweep points)
        sig_len = len(gain_correction)

        # De-interleave IQ values
        self.i_sig, self.q_sig = zip(*zip(*[iter(input_signal)] * 2))
        self.i_sig = self.i_sig[:sig_len]
        self.q_sig = self.q_sig[:sig_len]

        # Calculates magnitude of a sample given I,Q and gain correction
        # factors
        def calculate_magnitude(I, Q, G, frontend_scale):
            if I is None or Q is None:
                return None
            else:
                return 2.0 * math.sqrt(
                    (I or 0)**2 + (Q or 0)**2) * front_end_scale / (G or 1)

        self.magnitude = [calculate_magnitude(I, Q, G, front_end_scale)
                          for I, Q, G in zip(self.i_sig,
                                             self.q_sig, gain_correction)]

        # Sometimes there's a transient condition at startup where we don't
        # have a valid output_amp. Return Nones in that case in preference to
        # exploding.
        self.magnitude_dB = [None if not x else
                             20.0 * math.log10(x / output_amp)
                             if output_amp else None for x in self.magnitude]

        self.phase = [None if (I is None or Q is None)
                      else (math.atan2(Q or 0, I or 0)) / (2.0 * math.pi)
                      for I, Q in zip(self.i_sig, self.q_sig)] 
Example 29
Project: draco   Author: radiocosmology   File: task.py   License: MIT License
def __init__(self):

        from mpi4py import MPI
        import math

        logging.captureWarnings(True)

        rank_length = int(math.log10(MPI.COMM_WORLD.size)) + 1

        mpi_fmt = "[MPI %%(mpi_rank)%id/%%(mpi_size)%id]" % (rank_length, rank_length)
        filt = MPILogFilter(level_all=self.level_all, level_rank0=self.level_rank0)

        # This uses the fact that caput.pipeline.Manager has already
        # attempted to set up the logging. We just override the level, and
        # insert our custom filter
        root_logger = logging.getLogger()
        root_logger.setLevel(logging.DEBUG)
        ch = root_logger.handlers[0]
        ch.setLevel(logging.DEBUG)
        ch.addFilter(filt)

        formatter = logging.Formatter(
            "%(elapsedTime)8.1fs "
            + mpi_fmt
            + " - %(levelname)-8s %(name)s: %(message)s"
        )

        ch.setFormatter(formatter) 
Example 30
Project: razzy-spinner   Author: rafasashi   File: plot.py   License: GNU General Public License v3.0
def config_axes(self, xlog, ylog):
        if hasattr(self, '_rng'):
            (i1, j1, i2, j2) = self.visible_area()
            zoomed=1
        else:
            zoomed=0
            
        self._xlog = xlog
        self._ylog = ylog
        if xlog: self._rng = [log10(x) for x in self._original_rng]
        else: self._rng = self._original_rng
        if ylog: self._vals = [log10(x) for x in self._original_vals]
        else: self._vals = self._original_vals
            
        self._imin = min(self._rng)
        self._imax = max(self._rng)
        if self._imax == self._imin:
            self._imin -= 1
            self._imax += 1
        self._jmin = min(self._vals)
        self._jmax = max(self._vals)
        if self._jmax == self._jmin:
            self._jmin -= 1
            self._jmax += 1

        if zoomed:
            self.zoom(i1, j1, i2, j2)
        else:
            self.zoom(self._imin, self._jmin, self._imax, self._jmax) 
Example 31
Project: metk   Author: PatWalters   File: metk_report.py   License: MIT License
def metk_report(df_kcal):
    """
    Generate a report
    :param df_kcal: input dataframe, activity should be in kcal/mol
    :param outfile: output file for the report
    :return: the report as a list of strings
    """
    N = df_kcal.shape[0]
    pred = df_kcal['Pred']
    expr = df_kcal['Exp']
    rms_val = rmse(pred, expr)
    mae_val = mean_absolute_error(pred, expr)
    pearson_r, pearson_p = pearsonr(pred, expr)
    pearson_vals = [x ** 2 for x in [pearson_r] + list(pearson_confidence(pearson_r, N))]
    spearman_r, spearman_p = spearmanr(pred, expr)
    kendall_t, kendall_p = kendalltau(pred, expr)
    max_correlation = max_possible_correlation([log10(kcal_to_ki(x, "M")) for x in df_kcal['Exp']])
    report = []
    report.append("N = %d" % N)
    report.append("RMSE = %.2f kcal/mol" % rms_val)
    report.append("MAE  = %.2f kcal/mol" % mae_val)
    report.append("Max possible correlation = %.2f" % max_correlation)
    report.append("Pearson R^2 = %0.2f  95%%CI = %.2f %.2f" % tuple(pearson_vals))
    report.append("Spearman rho = %0.2f" % spearman_r)
    report.append("Kendall tau = %0.2f" % kendall_t)
    return report 
Example 32
Project: metk   Author: PatWalters   File: metk_plots.py   License: MIT License
def add_ic50_error(df, bins=None):
    """
    Add columns to a dataframe showing absolute and binned err
    :param df: input dataframe
    :param bins: bins to use (currently <5 kcal, 5-10 kcal, >10 kcal)
    :return:
    """
    if bins is None:
        bins = [5, 10]
    pt_color = ['green', 'yellow', 'red']
    df['Error'] = [10 ** x for x in np.abs(np.log10(df['Exp']) - np.log10(df['Pred']))]
    df['Error_Bin'] = [pt_color[x] for x in np.digitize(df['Error'], bins)] 
Example 33
Project: metk   Author: PatWalters   File: metk_plots.py   License: MIT License
def ic50_plot(df, ax, axis_range=None, units="uM"):
    """
    Draw a scatterplot of experimental vs predicted IC50
    :param df: input dataframe
    :param ax: matplotlib axis
    :param axis_range: range for axes [minX, maxX, minY, maxY]
    :param units: units for IC50 plot (currently uM or nM)
    :return: None
    """
    if axis_range is None:
        axis_range = np.array([0.001, 100, 0.0001, 100])
    if units == "nM":
        axis_range *= 1000
    min_x, max_x, min_y, max_y = axis_range
    add_ic50_error(df)

    ax.set(xscale="log", yscale="log")
    ax.axis(axis_range)
    ax.xaxis.set_major_formatter(
        ticker.FuncFormatter(lambda y, pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y), 0)))).format(y)))
    ax.yaxis.set_major_formatter(
        ticker.FuncFormatter(lambda y, pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y), 0)))).format(y)))
    ax.set_xlabel("Experimental IC50 (%s)" % units)
    ax.set_ylabel("Predicted IC50 (%s)" % units)
    ax.scatter(df['Exp'], df['Pred'], s=100, c=df['Error_Bin'], alpha=0.5, edgecolors="black")

    ax.plot([0, max_x], [0, max_y], linewidth=2, color='black')
    # 5 fold
    ax.plot([0, max_x], [0, max_y * 5], linewidth=1, color="blue", linestyle='--')
    ax.plot([0, max_x], [0, max_y / 5], linewidth=1, color="blue", linestyle='--')
    # 10 fold
    ax.plot([0, max_x], [0, max_y * 10], linewidth=1, color="black")
    ax.plot([0, max_x], [0, max_y / 10], linewidth=1, color="black") 
Example 34
Project: metk   Author: PatWalters   File: metk_plots.py   License: MIT License
def draw_plots(df_kcal, pdf_file_name, units='uM'):
    """
    Draw scatter plots and histograms showing agreement between experimental and predicted activity
    :param df_kcal: input dataframe, data is in kcal/mol
    :param pdf_file_name: output file for plot
    :param units: units to use for the plots (currently uM or nM)
    :return:
    """
    add_kcal_error(df_kcal)
    f_kcal, ax_kcal = plt.subplots(2, figsize=(7, 7))
    ax_kcal[0].set_title("N = %d" % df_kcal.shape[0])
    
    minx = int( min(df_kcal["Exp"] ) - 1 )
    maxx = int( max(df_kcal["Exp"] ) + 1 )
    miny = int( min(df_kcal["Pred"]) - 1 )
    maxy = int( max(df_kcal["Pred"]) + 1 )
    
    kcal_plot(df_kcal, ax_kcal[0], axis_range=[minx, maxx, miny, maxy])
    kcal_histogram(df_kcal, ax_kcal[1])
    pdf_pages = PdfPages(pdf_file_name)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        plt.tight_layout()
    pdf_pages.savefig(f_kcal.get_figure())

    df_ic50 = kcal_to_ki_df(df_kcal, units)
    add_ic50_error(df_ic50)
    f_ic50, ax_ic50 = plt.subplots(2, figsize=(7, 7))
    
    minx = 10**( math.log10(min(df_ic50["Exp"] )) - 1 )
    maxx = 10**( math.log10(max(df_ic50["Exp"] )) + 1 )
    miny = 10**( math.log10(min(df_ic50["Pred"])) - 1 )
    maxy = 10**( math.log10(max(df_ic50["Pred"])) + 1 )
    
    ic50_plot(df_ic50, ax_ic50[0], axis_range=[minx, maxx, miny, maxy], units=units)
    ic50_histogram(df_ic50, ax_ic50[1])
    pdf_pages.savefig(f_ic50.get_figure())

    pdf_pages.close() 
Example 35
Project: kivy-smoothie-host   Author: wolfmanjm   File: __init__.py   License: GNU General Public License v3.0
def to_data(self, x, y):
        '''Convert widget coords to data coords. Use
        `x, y = self.to_widget(x, y, relative=True)` to first convert into
        widget coordinates if it's in window coordinates because it's assumed
        to be given in local widget coordinates, relative to the graph's pos.

        :Parameters:
            `x, y`:
                The coordinates to convert.

        If the graph has multiple axes, use :class:`Plot.unproject` instead.
        '''
        adj_x = float(x - self._plot_area.pos[0])
        adj_y = float(y - self._plot_area.pos[1])
        norm_x = adj_x / self._plot_area.size[0]
        norm_y = adj_y / self._plot_area.size[1]
        if self.xlog:
            xmin, xmax = log10(self.xmin), log10(self.xmax)
            conv_x = 10.**(norm_x * (xmax - xmin) + xmin)
        else:
            conv_x = norm_x * (self.xmax - self.xmin) + self.xmin
        if self.ylog:
            ymin, ymax = log10(self.ymin), log10(self.ymax)
            conv_y = 10.**(norm_y * (ymax - ymin) + ymin)
        else:
            conv_y = norm_y * (self.ymax - self.ymin) + self.ymin
        return [conv_x, conv_y] 
Example 36
Project: kivy-smoothie-host   Author: wolfmanjm   File: __init__.py   License: GNU General Public License v3.0
def funcx(self):
        """Return a function that convert or not the X value according to plot
        prameters"""
        return log10 if self.params["xlog"] else lambda x: x 
Example 37
Project: kivy-smoothie-host   Author: wolfmanjm   File: __init__.py   License: GNU General Public License v3.0
def funcy(self):
        """Return a function that convert or not the Y value according to plot
        prameters"""
        return log10 if self.params["ylog"] else lambda y: y 
Example 38
Project: scality-sproxyd-client   Author: scality   File: afd.py   License: Apache License 2.0
def phi(self):
        # if we don't have enough values to make a decision,
        # assume the node is dead
        if self._mean is None:
            return self.threshold + 1
        ts = time.time()
        diff = ts - self._timestamp
        prob = self._probability(diff)
        if decimal.Decimal(str(prob)).is_zero():
            prob = 1E-128  # a very small number, avoiding ValueError: math domain error
        return -1 * math.log10(prob) 
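
The returned value is the phi of the phi-accrual failure detector: phi = -log10(P), where P is the probability that the observed heartbeat gap is nothing unusual, so higher phi means stronger suspicion (phi = 1 corresponds roughly to a 10% chance the suspicion is wrong, phi = 3 to 0.1%). For example:

import math

for prob in (0.1, 0.01, 0.001):
    print(prob, round(-1 * math.log10(prob), 3))
# 0.1 -> 1.0, 0.01 -> 2.0, 0.001 -> 3.0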
Example 39
Project: robot-navigation   Author: ronaldahmed   File: plot.py   License: MIT License
def config_axes(self, xlog, ylog):
        if hasattr(self, '_rng'):
            (i1, j1, i2, j2) = self.visible_area()
            zoomed=1
        else:
            zoomed=0
            
        self._xlog = xlog
        self._ylog = ylog
        if xlog: self._rng = [log10(x) for x in self._original_rng]
        else: self._rng = self._original_rng
        if ylog: self._vals = [log10(x) for x in self._original_vals]
        else: self._vals = self._original_vals
            
        self._imin = min(self._rng)
        self._imax = max(self._rng)
        if self._imax == self._imin:
            self._imin -= 1
            self._imax += 1
        self._jmin = min(self._vals)
        self._jmax = max(self._vals)
        if self._jmax == self._jmin:
            self._jmin -= 1
            self._jmax += 1

        if zoomed:
            self.zoom(i1, j1, i2, j2)
        else:
            self.zoom(self._imin, self._jmin, self._imax, self._jmax) 
Example 40
Project: tfidf   Author: tdstein   File: tfidf.py   License: MIT License
def __idf__(self, term):
        """
        Inverse Document Frequency

        The inverse document frequency is a measure of how much information a term provides in relationship to a set of
        documents. The value is logarithmically scaled to give exponentially less weight to a term that is exponentially
        more informative.
        :param term: the term to calculate the inverse document frequency of.
        :return: the inverse document frequency of the term
        """

        # First check to see if we have already computed the IDF for this term
        if term in self.__idf_by_term:
            return self.__idf_by_term[term]

        # Count the frequency of each term
        freq_by_term = {}
        for document in self.documents:
            for term in set(document.text.split()):
                if term not in freq_by_term:
                    freq_by_term[term] = 1
                else:
                    freq_by_term[term] += 1

        # Calculate the Inverse Document Frequency of each term
        for term, freq in freq_by_term.iteritems():
            self.__idf_by_term[term] = 1 + math.log10(len(self.documents) / freq)

        return self.__idf_by_term[term] 
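
The IDF formula here is 1 + log10(N / df): a term appearing in every one of 1000 documents scores 1.0, while a term appearing in only one scores 4.0. A quick numeric check (standalone helper, not the class method above):

import math

def idf(total_documents, documents_containing_term):
    return 1 + math.log10(total_documents / documents_containing_term)

print(idf(1000, 1000))          # 1.0  (a ubiquitous term carries little information)
print(idf(1000, 10))            # 3.0
print(round(idf(1000, 1), 3))   # 4.0  (a rare term carries the most weight)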
Example 41
Project: Jtyoui   Author: jtyoui   File: tfidf.py   License: MIT License
def get_tf_idf(self, word):
        """传入一个词语,获得重要性"""
        self.word = word
        c, t = 0, 0
        for line in self.ls:
            num = line.count(self.word)
            if num:
                t += num / len(line)
                c += 1
        return t * math.log10(self.length / c) 
Example 42
Project: LaserTOF   Author: kyleuckert   File: ticker.py   License: MIT License
def _compute_offset(self):
        locs = self.locs
        if locs is None or not len(locs):
            self.offset = 0
            return
        # Restrict to visible ticks.
        vmin, vmax = sorted(self.axis.get_view_interval())
        locs = np.asarray(locs)
        locs = locs[(vmin <= locs) & (locs <= vmax)]
        if not len(locs):
            self.offset = 0
            return
        lmin, lmax = locs.min(), locs.max()
        # Only use offset if there are at least two ticks and every tick has
        # the same sign.
        if lmin == lmax or lmin <= 0 <= lmax:
            self.offset = 0
            return
        # min, max comparing absolute values (we want division to round towards
        # zero so we work on absolute values).
        abs_min, abs_max = sorted([abs(float(lmin)), abs(float(lmax))])
        sign = math.copysign(1, lmin)
        # What is the smallest power of ten such that abs_min and abs_max are
        # equal up to that precision?
        # Note: Internally using oom instead of 10 ** oom avoids some numerical
        # accuracy issues.
        oom_max = np.ceil(math.log10(abs_max))
        oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
                       if abs_min // 10 ** oom != abs_max // 10 ** oom)
        if (abs_max - abs_min) / 10 ** oom <= 1e-2:
            # Handle the case of straddling a multiple of a large power of ten
            # (relative to the span).
            # What is the smallest power of ten such that abs_min and abs_max
            # are no more than 1 apart at that precision?
            oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
                           if abs_max // 10 ** oom - abs_min // 10 ** oom > 1)
        # Only use offset if it saves at least _offset_threshold digits.
        n = self._offset_threshold - 1
        self.offset = (sign * (abs_max // 10 ** oom) * 10 ** oom
                       if abs_max // 10 ** oom >= 10**n
                       else 0) 
Example 43
Project: LaserTOF   Author: kyleuckert   File: ticker.py   License: MIT License
def _set_format(self, vmin, vmax):
        # set the format string to format all the ticklabels
        if len(self.locs) < 2:
            # Temporarily augment the locations with the axis end points.
            _locs = list(self.locs) + [vmin, vmax]
        else:
            _locs = self.locs
        locs = (np.asarray(_locs) - self.offset) / 10. ** self.orderOfMagnitude
        loc_range = np.ptp(locs)
        # Curvilinear coordinates can yield two identical points.
        if loc_range == 0:
            loc_range = np.max(np.abs(locs))
        # Both points might be zero.
        if loc_range == 0:
            loc_range = 1
        if len(self.locs) < 2:
            # We needed the end points only for the loc_range calculation.
            locs = locs[:-2]
        loc_range_oom = int(math.floor(math.log10(loc_range)))
        # first estimate:
        sigfigs = max(0, 3 - loc_range_oom)
        # refined estimate:
        thresh = 1e-3 * 10 ** loc_range_oom
        while sigfigs >= 0:
            if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
                sigfigs -= 1
            else:
                break
        sigfigs += 1
        self.format = '%1.' + str(sigfigs) + 'f'
        if self._usetex:
            self.format = '$%s$' % self.format
        elif self._useMathText:
            self.format = '$%s$' % _mathdefault(self.format) 
Example 44
Project: LaserTOF   Author: kyleuckert   File: ticker.py   License: MIT License
def __call__(self, x, pos=None):
        s = ''
        if 0.01 <= x <= 0.99:
            s = '{:.2f}'.format(x)
        elif x < 0.01:
            if is_decade(x):
                s = '$10^{{{:.0f}}}$'.format(np.log10(x))
            else:
                s = '${:.5f}$'.format(x)
        else:  # x > 0.99
            if is_decade(1-x):
                s = '$1-10^{{{:.0f}}}$'.format(np.log10(1-x))
            else:
                s = '$1-{:.5f}$'.format(1-x)
        return s 
Example 45
Project: LaserTOF   Author: kyleuckert   File: ticker.py   License: MIT License
def scale_range(vmin, vmax, n=1, threshold=100):
    dv = abs(vmax - vmin)  # > 0 as nonsingular is called before.
    meanv = (vmax + vmin) / 2
    if abs(meanv) / dv < threshold:
        offset = 0
    else:
        offset = math.copysign(10 ** (math.log10(abs(meanv)) // 1), meanv)
    scale = 10 ** (math.log10(dv / n) // 1)
    return scale, offset 
Example 46
Project: LaserTOF   Author: kyleuckert   File: ticker.py   License: MIT License
def tick_values(self, vmin, vmax):
        # dummy axis has no axes attribute
        if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
            raise NotImplementedError('Polar axis cannot be logit scaled yet')

        vmin, vmax = self.nonsingular(vmin, vmax)
        vmin = np.log10(vmin / (1 - vmin))
        vmax = np.log10(vmax / (1 - vmax))

        decade_min = np.floor(vmin)
        decade_max = np.ceil(vmax)

        # major ticks
        if not self.minor:
            ticklocs = []
            if (decade_min <= -1):
                expo = np.arange(decade_min, min(0, decade_max + 1))
                ticklocs.extend(list(10**expo))
            if (decade_min <= 0) and (decade_max >= 0):
                ticklocs.append(0.5)
            if (decade_max >= 1):
                expo = -np.arange(max(1, decade_min), decade_max + 1)
                ticklocs.extend(list(1 - 10**expo))

        # minor ticks
        else:
            ticklocs = []
            if (decade_min <= -2):
                expo = np.arange(decade_min, min(-1, decade_max))
                newticks = np.outer(np.arange(2, 10), 10**expo).ravel()
                ticklocs.extend(list(newticks))
            if (decade_min <= 0) and (decade_max >= 0):
                ticklocs.extend([0.2, 0.3, 0.4, 0.6, 0.7, 0.8])
            if (decade_max >= 2):
                expo = -np.arange(max(2, decade_min), decade_max + 1)
                newticks = 1 - np.outer(np.arange(2, 10), 10**expo).ravel()
                ticklocs.extend(list(newticks))

        return self.raise_if_exceeds(np.array(ticklocs)) 
Example 47
Project: LaserTOF   Author: kyleuckert   File: ticker.py   License: MIT License
def get_locator(self, d):
        'pick the best locator based on a distance'
        d = abs(d)
        if d <= 0:
            locator = MultipleLocator(0.2)
        else:

            try:
                ld = math.log10(d)
            except OverflowError:
                raise RuntimeError('AutoLocator illegal data interval range')

            fld = math.floor(ld)
            base = 10 ** fld

            #if ld==fld:  base = 10**(fld-1)
            #else:        base = 10**fld

            if d >= 5 * base:
                ticksize = base
            elif d >= 2 * base:
                ticksize = base / 2.0
            else:
                ticksize = base / 5.0
            locator = MultipleLocator(ticksize)

        return locator 
Example 48
Project: LaserTOF   Author: kyleuckert   File: _base.py   License: MIT License
def get_data_ratio_log(self):
        """
        Returns the aspect ratio of the raw data in log scale.
        Will be used when both axis scales are in log.
        """
        xmin, xmax = self.get_xbound()
        ymin, ymax = self.get_ybound()

        xsize = max(math.fabs(math.log10(xmax) - math.log10(xmin)), 1e-30)
        ysize = max(math.fabs(math.log10(ymax) - math.log10(ymin)), 1e-30)

        return ysize / xsize 
Example 49
Project: mapreduce_python   Author: laertispappas   File: tfidf-serial.py   License: Apache License 2.0
def tfidf(word_counts, total_number_of_documents):
    for word_and_doc in word_counts:
        n_terms_appear = int(word_counts[word_and_doc])
        terms_in_doc = int(number_of_terms_in_document[word_and_doc.split('#')[1]])
        words_tfidf[word_and_doc] = [float(n_terms_appear) / float(terms_in_doc)]
        word = word_and_doc.split('#')[0]
        words_tfidf[word_and_doc].append(log10(float(total_number_of_documents) / float(number_of_docs_with_term[word])))
        words_tfidf[word_and_doc].append(words_tfidf[word_and_doc][0] * words_tfidf[word_and_doc][1]) 
Example 50
Project: OpenBench   Author: AndyGrant   File: utils.py   License: GNU General Public License v3.0
def proba_to_bayeselo(pwin, pdraw, ploss):
    elo     = 200 * math.log10(pwin/ploss * (1-ploss)/(1-pwin))
    drawelo = 200 * math.log10((1-ploss)/ploss * (1-pwin)/pwin)
    return elo, drawelo
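
As a quick sanity check of the formula above (the probabilities are illustrative, not from any OpenBench run), a 50% win, 20% draw, 30% loss split gives elo = 200 * log10((0.5/0.3) * (0.7/0.5)) ≈ 73.6:

import math

pwin, pdraw, ploss = 0.5, 0.2, 0.3
elo = 200 * math.log10(pwin / ploss * (1 - ploss) / (1 - pwin))
print(round(elo, 1))   # 73.6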