Python math.log10() Examples

The following are code examples showing how to use math.log10(), collected from open source Python projects.

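Before the project examples, here is a minimal standalone sketch of math.log10() itself (illustrative only, not taken from any of the projects below): it returns the base-10 logarithm of its argument, accepts arbitrarily large integers, and raises ValueError for zero or negative inputs. The commented outputs are what CPython prints on a typical platform.

import math

print(math.log10(1000))       # 3.0
print(math.log10(0.1))        # -1.0
print(math.log10(10 ** 100))  # ~100.0; arbitrarily large integers are accepted

try:
    math.log10(0)
except ValueError:
    print('math.log10() is undefined for zero and negative values')
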
Example 1
Project: pyblish-win   Author: pyblish   File: test_long.py    GNU Lesser General Public License v3.0
def test_logs(self):
        LOG10E = math.log10(math.e)

        for exp in range(10) + [100, 1000, 10000]:
            value = 10 ** exp
            log10 = math.log10(value)
            self.assertAlmostEqual(log10, exp)

            # log10(value) == exp, so log(value) == log10(value)/log10(e) ==
            # exp/LOG10E
            expected = exp / LOG10E
            log = math.log(value)
            self.assertAlmostEqual(log, expected)

        for bad in -(1L << 10000), -2L, 0L:
            self.assertRaises(ValueError, math.log, bad)
            self.assertRaises(ValueError, math.log10, bad) 
Example 2
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: super_resolution.py    Apache License 2.0
def test(ctx):
    val_data.reset()
    avg_psnr = 0
    batches = 0
    for batch in val_data:
        batches += 1
        data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        outputs = []
        for x in data:
            outputs.append(net(x))
        metric.update(label, outputs)
        avg_psnr += 10 * math.log10(1/metric.get()[1])
        metric.reset()
    avg_psnr /= batches
    print('validation avg psnr: %f'%avg_psnr) 
Example 3
Project: aurora   Author: carnby   File: tasks.py    MIT License
def select_tweets(timeline, allow_rts=True, allow_replies=False, popular_only=True):
    texts = []

    for t in timeline:
        if not 'retweeted_status' in t:
            if not allow_replies and t['in_reply_to_status_id_str']:
                continue
            t['tweet_score'] = log(t['retweet_count'] + 1.0) + log(t['favorite_count'] + 1.0)
            t['__is_rt__'] = False
            texts.append(t)
        else:
            if allow_rts:
                t['retweeted_status']['tweet_score'] = log10(t['retweet_count'] + 1.0) + log10(t['favorite_count'] + 1.0)
                t['retweeted_status']['source_created_at'] = t['retweeted_status']['created_at']
                t['retweeted_status']['created_at'] = t['created_at']
                t['retweeted_status']['text'] = t['retweeted_status']['text']
                t['retweeted_status']['__is_rt__'] = True
                texts.append(t['retweeted_status'])

    #texts = sorted(texts, key=lambda x: x['tweet_score'], reverse=True)[0:100]
    if popular_only:
        texts = list(filter(lambda x: x['tweet_score'] > 0, texts))

    return texts 
Example 4
Project: xia2   Author: xia2   File: DialsScaler.py    BSD 3-Clause "New" or "Revised" License
def split_experiments(self, experiment, reflection, sweep_handler):
        """Split a multi-experiment dataset into individual datasets and set in the
        sweep handler."""
        splitter = SplitExperiments()
        splitter.add_experiments(experiment)
        splitter.add_reflections(reflection)
        splitter.set_working_directory(self.get_working_directory())
        auto_logfiler(splitter)
        splitter.run()

        nn = len(sweep_handler.get_epochs())
        fmt = "%%0%dd" % (math.log10(nn) + 1)

        for i, epoch in enumerate(sweep_handler.get_epochs()):
            si = sweep_handler.get_sweep_information(epoch)
            nums = fmt % i
            si.set_reflections(
                os.path.join(self.get_working_directory(), "split_%s.refl" % nums)
            )
            si.set_experiments(
                os.path.join(self.get_working_directory(), "split_%s.expt" % nums)
            )
        return sweep_handler 
Example 5
Project: pymoku   Author: liquidinstruments   File: _specan.py    MIT License
def _compensation_singen(self):
        # Increase sinewave amplitude as span is reduced. Don't consider
        # spans less than 1 kHz
        span = max(self.f2 - self.f1, 1.0e3)
        self.demod_sinegen_bitshift = max(
            min(17 - math.floor(2.4 * math.log10(span)), 7), 0)
        self.demod_sinegen_enable = (span < 2.0e6) and (
            self.demod_sinegen_bitshift != 0)

        # Phase dither to broaden sinewave peak to ~512 FFT points
        self.demod_phase_bitshift = round(
            14.0 - 0.58 * math.log(1.0e6 / span, 2))

        # Sinewave frequency. Place at 1.9 screens from DC. Need to correct
        # for phase dither offset
        fbin_resolution = _SA_ADC_SMPS / 2.0 / _SA_FFT_LENGTH / (
            self._total_decimation)
        desired_frequency = fbin_resolution * _SA_FFT_LENGTH * 0.475
        phase_step = min(round(desired_frequency / _SA_DAC_SMPS * 2 ** 32),
                         2 ** 32)
        self.demod_sinegen_freq = phase_step - round(
            31.0 / 32.0 * 2 ** (self.demod_phase_bitshift + 4)) 
Example 6
Project: ns3   Author: KyleBenson   File: hud.py    GNU General Public License v2.0
def _compute_divisions(self, xi, xf):
        assert xf > xi
        dx = xf - xi
        size = dx
        ndiv = 5
        text_width = dx/ndiv/2

        def rint(x):
            return math.floor(x+0.5)
        
        dx_over_ndiv = dx / ndiv
        for n in range(5): # iterate 5 times to find optimum division size
            #/* div: length of each division */
            tbe = math.log10(dx_over_ndiv)#;   /* looking for approx. 'ndiv' divisions in a length 'dx' */
            div = pow(10, rint(tbe))#;	/* div: power of 10 closest to dx/ndiv */
            if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv): #/* test if div/2 is closer to dx/ndiv */
                div /= 2
            elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
                div *= 2 #			/* test if div*2 is closer to dx/ndiv */
            x0 = div*math.ceil(xi / div) - div
            if n > 1:
                ndiv = rint(size / text_width)
        return x0, div 
Example 7
Project: geophys_utils   Author: GeoscienceAustralia   File: aseg_gdf_utils.py    Apache License 2.0
def dfrexp(f):
    '''
    Decimal version of frexp or np.frexp function to return mantissa & exponent
    @param f: Floating point scalar or array
    @return fman: Scalar or array decimal mantissa between 0.0 and 1.0 
    @return fexp: Scalar or array decimal exponent
    '''
    # Compute decimal exponent
    if type(f) == np.ndarray:
        fexp = np.zeros(shape=f.shape, dtype='int32')
        fexp[f != 0] = np.ceil(np.log10(np.abs(f[f != 0]))).astype('int32')
    else: # Scalar
        fexp = int(ceil(log10(abs(f)))) if f != 0 else 0
            
    # Compute decimal mantissa between 0.0 and 1.0
    fman = f/10.0**fexp
    
    logger.debug('fman: {}'.format(fman))
    logger.debug('fexp: {}'.format(fexp))
    
    return fman, fexp


# Approximate maximum number of significant decimal figures for each signed datatype 
Example 8
Project: LaserTOF   Author: kyleuckert   File: ticker.py    MIT License
def _set_orderOfMagnitude(self, range):
        # if scientific notation is to be used, find the appropriate exponent
        # if using an numerical offset, find the exponent after applying the
        # offset
        if not self._scientific:
            self.orderOfMagnitude = 0
            return
        locs = np.absolute(self.locs)
        if self.offset:
            oom = math.floor(math.log10(range))
        else:
            if locs[0] > locs[-1]:
                val = locs[0]
            else:
                val = locs[-1]
            if val == 0:
                oom = 0
            else:
                oom = math.floor(math.log10(val))
        if oom <= self._powerlimits[0]:
            self.orderOfMagnitude = oom
        elif oom >= self._powerlimits[1]:
            self.orderOfMagnitude = oom
        else:
            self.orderOfMagnitude = 0 
Example 9
Project: OpenBench   Author: AndyGrant   File: utils.py    GNU General Public License v3.0
def ELO(wins, losses, draws):

    def _elo(x):
        if x <= 0 or x >= 1: return 0.0
        return -400*math.log10(1/x-1)

    # win/loss/draw ratio
    N = wins + losses + draws;
    if N == 0: return (0, 0, 0)
    w = float(wins)  / N
    l = float(losses)/ N
    d = float(draws) / N

    # mu is the empirical mean of the variables (Xi), assumed i.i.d.
    mu = w + d/2

    # stdev is the empirical standard deviation of the random variable (X1+...+X_N)/N
    stdev = math.sqrt(w*(1-mu)**2 + l*(0-mu)**2 + d*(0.5-mu)**2) / math.sqrt(N)

    # 95% confidence interval for mu
    mu_min = mu + phi_inv(0.025) * stdev
    mu_max = mu + phi_inv(0.975) * stdev

    return (_elo(mu_min), _elo(mu), _elo(mu_max)) 
Example 10
Project: NiujiaoDebugger   Author: MrSrc   File: test_long.py    GNU General Public License v3.0
def test_logs(self):
        LOG10E = math.log10(math.e)

        for exp in list(range(10)) + [100, 1000, 10000]:
            value = 10 ** exp
            log10 = math.log10(value)
            self.assertAlmostEqual(log10, exp)

            # log10(value) == exp, so log(value) == log10(value)/log10(e) ==
            # exp/LOG10E
            expected = exp / LOG10E
            log = math.log(value)
            self.assertAlmostEqual(log, expected)

        for bad in -(1 << 10000), -2, 0:
            self.assertRaises(ValueError, math.log, bad)
            self.assertRaises(ValueError, math.log10, bad) 
Example 11
Project: kucher   Author: Zubax   File: textual.py    GNU General Public License v3.0
def _get_numpy_formatter(dtype: numpy.dtype) -> dict:
    """Formatter construction can be very slow, we optimize it by caching the results"""
    try:
        if dtype == numpy.bool_:
            return {
                'bool': '{:d}'.format       # Formatting as integer to conserve space
            }
        else:
            info = numpy.iinfo(dtype)
            item_length = max(len(str(info.max)), len(str(info.min)))
            return {
                'int_kind': ('{' + f':{item_length}' + '}').format
            }
    except ValueError:
        decimals = int(abs(math.log10(numpy.finfo(dtype).resolution)) + 0.5)
        return {
            'float_kind': '{:#[email protected]}'.replace('@', str(decimals)).format
        } 
Example 12
Project: gymwipe   Author: bjoluc   File: physical.py    GNU General Public License v3.0
def calculateEbToN0Ratio(signalPower: float, noisePower: float, bitRate: float,
                            returnDb: bool = False) -> float:
    """
    Computes :math:`E_b/N_0 = \\frac{S}{N_0 R}` (the "ratio of signal energy per
    bit to noise power density per Hertz" :cite:`stallings2005data`) given the
    signal power :math:`S_{dBm}`, the noise power :math:`N_{0_{dBm}}`, and the
    bit rate :math:`R`, according to p. 95 of :cite:`stallings2005data`.

    Args:
        signalPower: The signal power :math:`S` in dBm
        noisePower: The noise power :math:`N_0` in dBm
        bitRate: The bit rate :math:`R` in bps
        returnDb: If set to ``True``, the ratio will be returned in dB.
    """
    ratio_db = signalPower - noisePower - 10*log10(bitRate)
    if returnDb:
        return ratio_db
    return 10**(ratio_db/10) 
Example 13
Project: mathematics_dataset   Author: deepmind   File: arithmetic.py    Apache License 2.0
def simplify_surd(value, sample_args, context=None):
  """E.g., "Simplify (2 + 5*sqrt(3))**2."."""
  del value  # unused
  if context is None:
    context = composition.Context()

  entropy, sample_args = sample_args.peel()

  while True:
    base = random.randint(2, 20)
    if sympy.Integer(base).is_prime:
      break
  num_primes_less_than_20 = 8
  entropy -= math.log10(num_primes_less_than_20)
  exp = _sample_surd(base, entropy, max_power=2, multiples_only=False)
  simplified = sympy.expand(sympy.simplify(exp))

  template = random.choice([
      'Simplify {exp}.',
  ])
  return example.Problem(
      question=example.question(context, template, exp=exp),
      answer=simplified) 
Example 14
Project: mathematics_dataset   Author: deepmind   File: numbers.py    Apache License 2.0
def _semi_prime(entropy):
  """Generates a semi-prime with the given entropy."""
  # Add on extra entropy to account for the sparsity of the primes; we don't
  # actually use the integers sampled, but rather a random prime close to them;
  # thus some entropy is lost, which we must account for
  entropy += math.log10(max(1, entropy * math.log(10)))

  # We intentionally uniformly sample the "entropy" (i.e., approx. number of digits)
  # of the two factors.
  entropy_1, entropy_2 = entropy * np.random.dirichlet([1, 1])

  # Need >= 2 for randprime to always work (Bertrand's postulate).
  approx_1 = number.integer(entropy_1, signed=False, min_abs=2)
  approx_2 = number.integer(entropy_2, signed=False, min_abs=2)

  factor_1 = sympy.ntheory.generate.randprime(approx_1 / 2, approx_1 * 2)
  factor_2 = sympy.ntheory.generate.randprime(approx_2 / 2, approx_2 * 2)

  return factor_1 * factor_2 
Example 15
Project: ns3   Author: bvamanan   File: hud.py    GNU General Public License v2.0
def _compute_divisions(self, xi, xf):
        assert xf > xi
        dx = xf - xi
        size = dx
        ndiv = 5
        text_width = dx/ndiv/2

        def rint(x):
            return math.floor(x+0.5)
        
        dx_over_ndiv = dx / ndiv
        for n in range(5): # iterate 5 times to find optimum division size
            #/* div: length of each division */
            tbe = math.log10(dx_over_ndiv)#;   /* looking for approx. 'ndiv' divisions in a length 'dx' */
            div = pow(10, rint(tbe))#;	/* div: power of 10 closest to dx/ndiv */
            if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv): #/* test if div/2 is closer to dx/ndiv */
                div /= 2
            elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
                div *= 2 #			/* test if div*2 is closer to dx/ndiv */
            x0 = div*math.ceil(xi / div) - div
            if n > 1:
                ndiv = rint(size / text_width)
        return x0, div 
Example 16
Project: mptcp_with_machine_learning   Author: JamesRaynor67   File: hud.py    GNU General Public License v2.0
def _compute_divisions(self, xi, xf):
        assert xf > xi
        dx = xf - xi
        size = dx
        ndiv = 5
        text_width = dx/ndiv/2

        def rint(x):
            return math.floor(x+0.5)
        
        dx_over_ndiv = dx / ndiv
        for n in range(5): # iterate 5 times to find optimum division size
            #/* div: length of each division */
            tbe = math.log10(dx_over_ndiv)#;   /* looking for approx. 'ndiv' divisions in a length 'dx' */
            div = pow(10, rint(tbe))#;	/* div: power of 10 closest to dx/ndiv */
            if math.fabs(div/2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv): #/* test if div/2 is closer to dx/ndiv */
                div /= 2
            elif math.fabs(div*2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
                div *= 2 #			/* test if div*2 is closer to dx/ndiv */
            x0 = div*math.ceil(xi / div) - div
            if n > 1:
                ndiv = rint(size / text_width)
        return x0, div 
Example 17
Project: Gurux.DLMS.Python   Author: Gurux   File: GXDLMSExtendedRegister.py    GNU General Public License v2.0
def setValue(self, settings, e):
        #pylint: disable=broad-except
        if e.index == 1:
            self.logicalName = _GXCommon.toLogicalName(e.value)
        elif e.index == 2:
            if self.scaler != 0 and e.value:
                try:
                    if settings.isServer:
                        self.value = e.value
                    else:
                        self.value = e.value * math.log10(self.scaler)
                except Exception:
                    #  Sometimes scaler is set for wrong Object type.
                    self.value = e.value
            else:
                self.value = e.value
        elif e.index == 3:
            #  Set default values.
            if not e.value:
                self.scaler = 0
                self.unit = 0
            else:
                self.scaler = e.value[0]
                self.unit = e.value[1]
        elif e.index == 4:
            self.status = e.value
        elif e.index == 5:
            if e.value is None:
                self.captureTime = GXDateTime()
            else:
                if isinstance(e.value, bytearray):
                    self.captureTime = _GXCommon.changeType(e.value, DataType.DATETIME)
                else:
                    self.captureTime = e.value
        else:
            e.error = ErrorCode.READ_WRITE_DENIED 
Example 18
Project: Gurux.DLMS.Python   Author: Gurux   File: GXDLMSRegister.py    GNU General Public License v2.0
def setValue(self, settings, e):
        if e.index == 1:
            self.logicalName = _GXCommon.toLogicalName(e.value)
        elif e.index == 2:
            if self.scaler != 0 and e.value:
                try:
                    if settings.isServer:
                        self.value = e.value
                    else:
                        self.value = e.value * math.log10(self.scaler)
                except Exception:
                    #  Sometimes scaler is set for wrong Object type.
                    self.value = e.value
            else:
                self.value = e.value
        elif e.index == 3:
            #  Set default values.
            if not e.value:
                self.scaler = 0
                self.unit = 0
            else:
                self.scaler = e.value[0]
                self.unit = e.value[1]
        else:
            e.error = ErrorCode.READ_WRITE_DENIED 
Example 19
Project: AboveTustin   Author: kevinabrandon   File: flightdata.py    MIT License
def _parse_aircraft_data(self, a, time):
        alt = a.get('Alt', 0)
        dist = -1
        az = 0
        el = 0
        if 'Lat' in a and 'Long' in a:
            rec_pos = (receiver_latitude, receiver_longitude)
            ac_pos = (a['Lat'], a['Long'])
            dist = geomath.distance(rec_pos, ac_pos)
            az = geomath.bearing(rec_pos, ac_pos)
            el = math.degrees(math.atan(alt / (dist * 5280)))
        speed = 0
        if 'Spd' in a:
            speed = geomath.knot2mph(a['Spd'])
        if 'PosTime' in a:
            last_seen_time = datetime.fromtimestamp(a['PosTime'] / 1000.0)
            seen = (time - last_seen_time).total_seconds()
        else:
            seen = 0
        ac_data = AirCraftData(
            a.get('Icao', None).upper(),
            a.get('Sqk', None),
            a.get('Call', None),
            a.get('Reg', None),
            a.get('Lat', None),
            a.get('Long', None),
            alt,
            a.get('Vsi', 0),
            a.get('Trak', None),
            speed,
            a.get('CMsgs', None),
            seen,
            a.get('Mlat', False),
            None,  # NUCP
            None,  # Seen pos
            10.0 * math.log10(a.get('Sig', 0) / 255.0 + 1e-5),
            dist,
            az,
            el,
            time)
        return ac_data 
Example 20
Project: phrydy   Author: Josef-Friedrich   File: mediafile.py    MIT License
def _sc_decode(soundcheck):
    """Convert a Sound Check bytestring value to a (gain, peak) tuple as
    used by ReplayGain.
    """
    # We decode binary data. If one of the formats gives us a text
    # string, interpret it as UTF-8.
    if isinstance(soundcheck, six.text_type):
        soundcheck = soundcheck.encode('utf-8')

    # SoundCheck tags consist of 10 numbers, each represented by 8
    # characters of ASCII hex preceded by a space.
    try:
        soundcheck = codecs.decode(soundcheck.replace(b' ', b''), 'hex')
        soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
    except (struct.error, TypeError, binascii.Error):
        # SoundCheck isn't in the format we expect, so return default
        # values.
        return 0.0, 0.0

    # SoundCheck stores absolute calculated/measured RMS value in an
    # unknown unit. We need to find the ratio of this measurement
    # compared to a reference value of 1000 to get our gain in dB. We
    # play it safe by using the larger of the two values (i.e., the most
    # attenuation).
    maxgain = max(soundcheck[:2])
    if maxgain > 0:
        gain = math.log10(maxgain / 1000.0) * -10
    else:
        # Invalid gain value found.
        gain = 0.0

    # SoundCheck stores peak values as the actual value of the sample,
    # and again separately for the left and right channels. We need to
    # convert this to a percentage of full scale, which is 32768 for a
    # 16 bit sample. Once again, we play it safe by using the larger of
    # the two values.
    peak = max(soundcheck[6:8]) / 32768.0

    return round(gain, 2), round(peak, 6) 
Example 21
Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0
def testLog10(self):
        self.assertRaises(TypeError, math.log10)
        self.ftest('log10(0.1)', math.log10(0.1), -1)
        self.ftest('log10(1)', math.log10(1), 0)
        self.ftest('log10(10)', math.log10(10), 1)
        self.assertEqual(math.log(INF), INF)
        self.assertRaises(ValueError, math.log10, NINF)
        self.assertTrue(math.isnan(math.log10(NAN)))
        # Log values should match for int and long (issue #18739).
        for n in range(1, 1000):
            self.assertEqual(math.log10(n), math.log10(long(n))) 
Example 22
Project: PersonalRecommendation   Author: ma-zhiyuan   File: item_cf.py    Apache License 2.0
def update_one_contribute_score(user_total_click_num):
    """
    item cf update sim contribution score by user
    """
    return 1/math.log10(1+user_total_click_num) 
Example 23
Project: PersonalRecommendation   Author: ma-zhiyuan   File: user_cf.py    Apache License 2.0
def update_contribution_score(item_user_click_count):
    """
    usercf user contribution score update v1
    Args:
        item_user_click_count: how many user have clicked this item
    Return:
        contribution score
    """
    return 1/math.log10(1 + item_user_click_count) 
Example 24
Project: python.math.expression.parser.pymep   Author: sbesada   File: complex.py    Apache License 2.0
def __log10__(self):

        rpart = math.sqrt((self.real * self.real) + (self.imag * self.imag))
        ipart = math.atan2(self.imag,self.real)
        if ipart > math.pi:
            ipart = ipart - (2.0 * math.pi)
        
        return Complex(math.log10(rpart), (1 /math.log(10)) * ipart) 
Example 25
Project: audio   Author: pytorch   File: transforms.py    BSD 2-Clause "Simplified" License
def __init__(self, stype='power', top_db=None):
        super(AmplitudeToDB, self).__init__()
        self.stype = stype
        if top_db is not None and top_db < 0:
            raise ValueError('top_db must be positive value')
        self.top_db = torch.jit.Attribute(top_db, Optional[float])
        self.multiplier = 10.0 if stype == 'power' else 20.0
        self.amin = 1e-10
        self.ref_value = 1.0
        self.db_multiplier = math.log10(max(self.amin, self.ref_value)) 
Example 26
Project: audio   Author: pytorch   File: functional.py    BSD 2-Clause "Simplified" License
def amplitude_to_DB(x, multiplier, amin, db_multiplier, top_db=None):
    # type: (Tensor, float, float, float, Optional[float]) -> Tensor
    r"""Turn a tensor from the power/amplitude scale to the decibel scale.

    This output depends on the maximum value in the input tensor, and so
    may return different values for an audio clip split into snippets vs. a
    full clip.

    Args:
        x (torch.Tensor): Input tensor before being converted to decibel scale
        multiplier (float): Use 10. for power and 20. for amplitude
        amin (float): Number to clamp ``x``
        db_multiplier (float): Log10(max(reference value and amin))
        top_db (Optional[float]): Minimum negative cut-off in decibels. A reasonable number
            is 80. (Default: ``None``)

    Returns:
        torch.Tensor: Output tensor in decibel scale
    """
    x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
    x_db -= multiplier * db_multiplier

    if top_db is not None:
        x_db = x_db.clamp(min=x_db.max().item() - top_db)

    return x_db 
Example 27
Project: audio   Author: pytorch   File: functional.py    BSD 2-Clause "Simplified" License
def create_fb_matrix(n_freqs, f_min, f_max, n_mels, sample_rate):
    # type: (int, float, float, int, int) -> Tensor
    r"""Create a frequency bin conversion matrix.

    Args:
        n_freqs (int): Number of frequencies to highlight/apply
        f_min (float): Minimum frequency (Hz)
        f_max (float): Maximum frequency (Hz)
        n_mels (int): Number of mel filterbanks
        sample_rate (int): Sample rate of the audio waveform

    Returns:
        torch.Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
        meaning number of frequencies to highlight/apply to x the number of filterbanks.
        Each column is a filterbank so that assuming there is a matrix A of
        size (..., ``n_freqs``), the applied result would be
        ``A * create_fb_matrix(A.size(-1), ...)``.
    """
    # freq bins
    # Equivalent filterbank construction by Librosa
    all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
    i_freqs = all_freqs.ge(f_min) & all_freqs.le(f_max)
    freqs = all_freqs[i_freqs]

    # calculate mel freq bins
    # hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
    m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
    m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
    m_pts = torch.linspace(m_min, m_max, n_mels + 2)
    # mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
    f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
    # calculate the difference between each mel point and each stft freq point in hertz
    f_diff = f_pts[1:] - f_pts[:-1]  # (n_mels + 1)
    slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1)  # (n_freqs, n_mels + 2)
    # create overlapping triangles
    zero = torch.zeros(1)
    down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1]  # (n_freqs, n_mels)
    up_slopes = slopes[:, 2:] / f_diff[1:]  # (n_freqs, n_mels)
    fb = torch.max(zero, torch.min(down_slopes, up_slopes))
    return fb 
Example 28
Project: pymoku   Author: liquidinstruments   File: _frequency_response_analyzer_data.py    MIT License
def __init__(self, input_signal, gain_correction,
                 front_end_scale, output_amp):

        # Extract the length of the signal (this varies with number of
        # sweep points)
        sig_len = len(gain_correction)

        # De-interleave IQ values
        self.i_sig, self.q_sig = zip(*zip(*[iter(input_signal)] * 2))
        self.i_sig = self.i_sig[:sig_len]
        self.q_sig = self.q_sig[:sig_len]

        # Calculates magnitude of a sample given I,Q and gain correction
        # factors
        def calculate_magnitude(I, Q, G, frontend_scale):
            if I is None or Q is None:
                return None
            else:
                return 2.0 * math.sqrt(
                    (I or 0)**2 + (Q or 0)**2) * front_end_scale / (G or 1)

        self.magnitude = [calculate_magnitude(I, Q, G, front_end_scale)
                          for I, Q, G in zip(self.i_sig,
                                             self.q_sig, gain_correction)]

        # Sometimes there's a transient condition at startup where we don't
        # have a valid output_amp. Return Nones in that case in preference to
        # exploding.
        self.magnitude_dB = [None if not x else
                             20.0 * math.log10(x / output_amp)
                             if output_amp else None for x in self.magnitude]

        self.phase = [None if (I is None or Q is None)
                      else (math.atan2(Q or 0, I or 0)) / (2.0 * math.pi)
                      for I, Q in zip(self.i_sig, self.q_sig)] 
Example 29
Project: draco   Author: radiocosmology   File: task.py    MIT License
def __init__(self):

        from mpi4py import MPI
        import math

        logging.captureWarnings(True)

        rank_length = int(math.log10(MPI.COMM_WORLD.size)) + 1

        mpi_fmt = "[MPI %%(mpi_rank)%id/%%(mpi_size)%id]" % (rank_length, rank_length)
        filt = MPILogFilter(level_all=self.level_all, level_rank0=self.level_rank0)

        # This uses the fact that caput.pipeline.Manager has already
        # attempted to set up the logging. We just override the level, and
        # insert our custom filter
        root_logger = logging.getLogger()
        root_logger.setLevel(logging.DEBUG)
        ch = root_logger.handlers[0]
        ch.setLevel(logging.DEBUG)
        ch.addFilter(filt)

        formatter = logging.Formatter(
            "%(elapsedTime)8.1fs "
            + mpi_fmt
            + " - %(levelname)-8s %(name)s: %(message)s"
        )

        ch.setFormatter(formatter) 
Example 30
Project: razzy-spinner   Author: rafasashi   File: plot.py    GNU General Public License v3.0
def config_axes(self, xlog, ylog):
        if hasattr(self, '_rng'):
            (i1, j1, i2, j2) = self.visible_area()
            zoomed=1
        else:
            zoomed=0
            
        self._xlog = xlog
        self._ylog = ylog
        if xlog: self._rng = [log10(x) for x in self._original_rng]
        else: self._rng = self._original_rng
        if ylog: self._vals = [log10(x) for x in self._original_vals]
        else: self._vals = self._original_vals
            
        self._imin = min(self._rng)
        self._imax = max(self._rng)
        if self._imax == self._imin:
            self._imin -= 1
            self._imax += 1
        self._jmin = min(self._vals)
        self._jmax = max(self._vals)
        if self._jmax == self._jmin:
            self._jmin -= 1
            self._jmax += 1

        if zoomed:
            self.zoom(i1, j1, i2, j2)
        else:
            self.zoom(self._imin, self._jmin, self._imax, self._jmax) 
Example 31
Project: metk   Author: PatWalters   File: metk_report.py    MIT License
def metk_report(df_kcal):
    """
    Generate a report
    :param df_kcal: input dataframe, activity should be in kcal/mol
    :param outfile: output file for the report
    :return: the report as a list of strings
    """
    N = df_kcal.shape[0]
    pred = df_kcal['Pred']
    expr = df_kcal['Exp']
    rms_val = rmse(pred, expr)
    mae_val = mean_absolute_error(pred, expr)
    pearson_r, pearson_p = pearsonr(pred, expr)
    pearson_vals = [x ** 2 for x in [pearson_r] + list(pearson_confidence(pearson_r, N))]
    spearman_r, spearman_p = spearmanr(pred, expr)
    kendall_t, kendall_p = kendalltau(pred, expr)
    max_correlation = max_possible_correlation([log10(kcal_to_ki(x, "M")) for x in df_kcal['Exp']])
    report = []
    report.append("N = %d" % N)
    report.append("RMSE = %.2f kcal/mol" % rms_val)
    report.append("MAE  = %.2f kcal/mol" % mae_val)
    report.append("Max possible correlation = %.2f" % max_correlation)
    report.append("Pearson R^2 = %0.2f  95%%CI = %.2f %.2f" % tuple(pearson_vals))
    report.append("Spearman rho = %0.2f" % spearman_r)
    report.append("Kendall tau = %0.2f" % kendall_t)
    return report 
Example 32
Project: metk   Author: PatWalters   File: metk_plots.py    MIT License
def add_ic50_error(df, bins=None):
    """
    Add columns to a dataframe showing absolute and binned err
    :param df: input dataframe
    :param bins: bins to use (currently <5 kcal, 5-10 kcal, >10 kcal)
    :return:
    """
    if bins is None:
        bins = [5, 10]
    pt_color = ['green', 'yellow', 'red']
    df['Error'] = [10 ** x for x in np.abs(np.log10(df['Exp']) - np.log10(df['Pred']))]
    df['Error_Bin'] = [pt_color[x] for x in np.digitize(df['Error'], bins)] 
Example 33
Project: metk   Author: PatWalters   File: metk_plots.py    MIT License
def ic50_plot(df, ax, axis_range=None, units="uM"):
    """
    Draw a scatterplot of experimental vs predicted IC50
    :param df: input dataframe
    :param ax: matplotlib axis
    :param axis_range: range for axes [minX, maxX, minY, maxY]
    :param units: units for IC50 plot (currently uM or nM)
    :return: None
    """
    if axis_range is None:
        axis_range = np.array([0.001, 100, 0.0001, 100])
    if units == "nM":
        axis_range *= 1000
    min_x, max_x, min_y, max_y = axis_range
    add_ic50_error(df)

    ax.set(xscale="log", yscale="log")
    ax.axis(axis_range)
    ax.xaxis.set_major_formatter(
        ticker.FuncFormatter(lambda y, pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y), 0)))).format(y)))
    ax.yaxis.set_major_formatter(
        ticker.FuncFormatter(lambda y, pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y), 0)))).format(y)))
    ax.set_xlabel("Experimental IC50 (%s)" % units)
    ax.set_ylabel("Predicted IC50 (%s)" % units)
    ax.scatter(df['Exp'], df['Pred'], s=100, c=df['Error_Bin'], alpha=0.5, edgecolors="black")

    ax.plot([0, max_x], [0, max_y], linewidth=2, color='black')
    # 5 fold
    ax.plot([0, max_x], [0, max_y * 5], linewidth=1, color="blue", linestyle='--')
    ax.plot([0, max_x], [0, max_y / 5], linewidth=1, color="blue", linestyle='--')
    # 10 fold
    ax.plot([0, max_x], [0, max_y * 10], linewidth=1, color="black")
    ax.plot([0, max_x], [0, max_y / 10], linewidth=1, color="black") 
Example 34
Project: metk   Author: PatWalters   File: metk_plots.py    MIT License
def draw_plots(df_kcal, pdf_file_name, units='uM'):
    """
    Draw scatter plots and histograms showing agreement between experimental and predicted activity
    :param df_kcal: input dataframe, data is in kcal/mol
    :param pdf_file_name: output file for plot
    :param units: units to use for the plots (currently uM or nM)
    :return:
    """
    add_kcal_error(df_kcal)
    f_kcal, ax_kcal = plt.subplots(2, figsize=(7, 7))
    ax_kcal[0].set_title("N = %d" % df_kcal.shape[0])
    
    minx = int( min(df_kcal["Exp"] ) - 1 )
    maxx = int( max(df_kcal["Exp"] ) + 1 )
    miny = int( min(df_kcal["Pred"]) - 1 )
    maxy = int( max(df_kcal["Pred"]) + 1 )
    
    kcal_plot(df_kcal, ax_kcal[0], axis_range=[minx, maxx, miny, maxy])
    kcal_histogram(df_kcal, ax_kcal[1])
    pdf_pages = PdfPages(pdf_file_name)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        plt.tight_layout()
    pdf_pages.savefig(f_kcal.get_figure())

    df_ic50 = kcal_to_ki_df(df_kcal, units)
    add_ic50_error(df_ic50)
    f_ic50, ax_ic50 = plt.subplots(2, figsize=(7, 7))
    
    minx = 10**( math.log10(min(df_ic50["Exp"] )) - 1 )
    maxx = 10**( math.log10(max(df_ic50["Exp"] )) + 1 )
    miny = 10**( math.log10(min(df_ic50["Pred"])) - 1 )
    maxy = 10**( math.log10(max(df_ic50["Pred"])) + 1 )
    
    ic50_plot(df_ic50, ax_ic50[0], axis_range=[minx, maxx, miny, maxy], units=units)
    ic50_histogram(df_ic50, ax_ic50[1])
    pdf_pages.savefig(f_ic50.get_figure())

    pdf_pages.close() 
Example 35
Project: kivy-smoothie-host   Author: wolfmanjm   File: __init__.py    GNU General Public License v3.0
def to_data(self, x, y):
        '''Convert widget coords to data coords. Use
        `x, y = self.to_widget(x, y, relative=True)` to first convert into
        widget coordinates if it's in window coordinates because it's assumed
        to be given in local widget coordinates, relative to the graph's pos.

        :Parameters:
            `x, y`:
                The coordinates to convert.

        If the graph has multiple axes, use :class:`Plot.unproject` instead.
        '''
        adj_x = float(x - self._plot_area.pos[0])
        adj_y = float(y - self._plot_area.pos[1])
        norm_x = adj_x / self._plot_area.size[0]
        norm_y = adj_y / self._plot_area.size[1]
        if self.xlog:
            xmin, xmax = log10(self.xmin), log10(self.xmax)
            conv_x = 10.**(norm_x * (xmax - xmin) + xmin)
        else:
            conv_x = norm_x * (self.xmax - self.xmin) + self.xmin
        if self.ylog:
            ymin, ymax = log10(self.ymin), log10(self.ymax)
            conv_y = 10.**(norm_y * (ymax - ymin) + ymin)
        else:
            conv_y = norm_y * (self.ymax - self.ymin) + self.ymin
        return [conv_x, conv_y] 
Example 36
Project: kivy-smoothie-host   Author: wolfmanjm   File: __init__.py    GNU General Public License v3.0
def funcx(self):
        """Return a function that convert or not the X value according to plot
        prameters"""
        return log10 if self.params["xlog"] else lambda x: x 
Example 37
Project: kivy-smoothie-host   Author: wolfmanjm   File: __init__.py    GNU General Public License v3.0
def funcy(self):
        """Return a function that convert or not the Y value according to plot
        prameters"""
        return log10 if self.params["ylog"] else lambda y: y 
Example 38
Project: scality-sproxyd-client   Author: scality   File: afd.py    Apache License 2.0
def phi(self):
        # if we don't have enough value to take a decision
        # assume the node is dead
        if self._mean is None:
            return self.threshold + 1
        ts = time.time()
        diff = ts - self._timestamp
        prob = self._probability(diff)
        if decimal.Decimal(str(prob)).is_zero():
            prob = 1E-128  # a very small number, avoiding ValueError: math domain error
        return -1 * math.log10(prob) 
Example 39
Project: robot-navigation   Author: ronaldahmed   File: plot.py    MIT License
def config_axes(self, xlog, ylog):
        if hasattr(self, '_rng'):
            (i1, j1, i2, j2) = self.visible_area()
            zoomed=1
        else:
            zoomed=0
            
        self._xlog = xlog
        self._ylog = ylog
        if xlog: self._rng = [log10(x) for x in self._original_rng]
        else: self._rng = self._original_rng
        if ylog: self._vals = [log10(x) for x in self._original_vals]
        else: self._vals = self._original_vals
            
        self._imin = min(self._rng)
        self._imax = max(self._rng)
        if self._imax == self._imin:
            self._imin -= 1
            self._imax += 1
        self._jmin = min(self._vals)
        self._jmax = max(self._vals)
        if self._jmax == self._jmin:
            self._jmin -= 1
            self._jmax += 1

        if zoomed:
            self.zoom(i1, j1, i2, j2)
        else:
            self.zoom(self._imin, self._jmin, self._imax, self._jmax) 
Example 40
Project: tfidf   Author: tdstein   File: tfidf.py    MIT License
def __idf__(self, term):
        """
        Inverse Document Frequency

        The inverse document frequency is a measure of how much information a term provides in relationship to a set of
        documents. The value is logarithmically scaled to give exponentially less weight to a term that is exponentially
        more informative.
        :param term: the term to calculate the inverse document frequency of.
        :return: the inverse document frequency of the term
        """

        # First check to see if we have already computed the IDF for this term
        if term in self.__idf_by_term:
            return self.__idf_by_term[term]

        # Count the frequency of each term
        freq_by_term = {}
        for document in self.documents:
            for term in set(document.text.split()):
                if term not in freq_by_term:
                    freq_by_term[term] = 1
                else:
                    freq_by_term[term] += 1

        # Calculate the Inverse Document Frequency of each term
        for term, freq in freq_by_term.iteritems():
            self.__idf_by_term[term] = 1 + math.log10(len(self.documents) / freq)

        return self.__idf_by_term[term] 
Example 41
Project: Jtyoui   Author: jtyoui   File: tfidf.py    MIT License
def get_tf_idf(self, word):
        """传入一个词语,获得重要性"""
        self.word = word
        c, t = 0, 0
        for line in self.ls:
            num = line.count(self.word)
            if num:
                t += num / len(line)
                c += 1
        return t * math.log10(self.length / c) 
Example 42
Project: LaserTOF   Author: kyleuckert   File: ticker.py    MIT License
def _compute_offset(self):
        locs = self.locs
        if locs is None or not len(locs):
            self.offset = 0
            return
        # Restrict to visible ticks.
        vmin, vmax = sorted(self.axis.get_view_interval())
        locs = np.asarray(locs)
        locs = locs[(vmin <= locs) & (locs <= vmax)]
        if not len(locs):
            self.offset = 0
            return
        lmin, lmax = locs.min(), locs.max()
        # Only use offset if there are at least two ticks and every tick has
        # the same sign.
        if lmin == lmax or lmin <= 0 <= lmax:
            self.offset = 0
            return
        # min, max comparing absolute values (we want division to round towards
        # zero so we work on absolute values).
        abs_min, abs_max = sorted([abs(float(lmin)), abs(float(lmax))])
        sign = math.copysign(1, lmin)
        # What is the smallest power of ten such that abs_min and abs_max are
        # equal up to that precision?
        # Note: Internally using oom instead of 10 ** oom avoids some numerical
        # accuracy issues.
        oom_max = np.ceil(math.log10(abs_max))
        oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
                       if abs_min // 10 ** oom != abs_max // 10 ** oom)
        if (abs_max - abs_min) / 10 ** oom <= 1e-2:
            # Handle the case of straddling a multiple of a large power of ten
            # (relative to the span).
            # What is the smallest power of ten such that abs_min and abs_max
            # are no more than 1 apart at that precision?
            oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
                           if abs_max // 10 ** oom - abs_min // 10 ** oom > 1)
        # Only use offset if it saves at least _offset_threshold digits.
        n = self._offset_threshold - 1
        self.offset = (sign * (abs_max // 10 ** oom) * 10 ** oom
                       if abs_max // 10 ** oom >= 10**n
                       else 0) 
Example 43
Project: LaserTOF   Author: kyleuckert   File: ticker.py    MIT License
def _set_format(self, vmin, vmax):
        # set the format string to format all the ticklabels
        if len(self.locs) < 2:
            # Temporarily augment the locations with the axis end points.
            _locs = list(self.locs) + [vmin, vmax]
        else:
            _locs = self.locs
        locs = (np.asarray(_locs) - self.offset) / 10. ** self.orderOfMagnitude
        loc_range = np.ptp(locs)
        # Curvilinear coordinates can yield two identical points.
        if loc_range == 0:
            loc_range = np.max(np.abs(locs))
        # Both points might be zero.
        if loc_range == 0:
            loc_range = 1
        if len(self.locs) < 2:
            # We needed the end points only for the loc_range calculation.
            locs = locs[:-2]
        loc_range_oom = int(math.floor(math.log10(loc_range)))
        # first estimate:
        sigfigs = max(0, 3 - loc_range_oom)
        # refined estimate:
        thresh = 1e-3 * 10 ** loc_range_oom
        while sigfigs >= 0:
            if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
                sigfigs -= 1
            else:
                break
        sigfigs += 1
        self.format = '%1.' + str(sigfigs) + 'f'
        if self._usetex:
            self.format = '$%s$' % self.format
        elif self._useMathText:
            self.format = '$%s$' % _mathdefault(self.format) 
Example 44
Project: LaserTOF   Author: kyleuckert   File: ticker.py    MIT License
def __call__(self, x, pos=None):
        s = ''
        if 0.01 <= x <= 0.99:
            s = '{:.2f}'.format(x)
        elif x < 0.01:
            if is_decade(x):
                s = '$10^{{{:.0f}}}$'.format(np.log10(x))
            else:
                s = '${:.5f}$'.format(x)
        else:  # x > 0.99
            if is_decade(1-x):
                s = '$1-10^{{{:.0f}}}$'.format(np.log10(1-x))
            else:
                s = '$1-{:.5f}$'.format(1-x)
        return s 
Example 45
Project: LaserTOF   Author: kyleuckert   File: ticker.py    MIT License
def scale_range(vmin, vmax, n=1, threshold=100):
    dv = abs(vmax - vmin)  # > 0 as nonsingular is called before.
    meanv = (vmax + vmin) / 2
    if abs(meanv) / dv < threshold:
        offset = 0
    else:
        offset = math.copysign(10 ** (math.log10(abs(meanv)) // 1), meanv)
    scale = 10 ** (math.log10(dv / n) // 1)
    return scale, offset 
Example 46
Project: LaserTOF   Author: kyleuckert   File: ticker.py    MIT License
def tick_values(self, vmin, vmax):
        # dummy axis has no axes attribute
        if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
            raise NotImplementedError('Polar axis cannot be logit scaled yet')

        vmin, vmax = self.nonsingular(vmin, vmax)
        vmin = np.log10(vmin / (1 - vmin))
        vmax = np.log10(vmax / (1 - vmax))

        decade_min = np.floor(vmin)
        decade_max = np.ceil(vmax)

        # major ticks
        if not self.minor:
            ticklocs = []
            if (decade_min <= -1):
                expo = np.arange(decade_min, min(0, decade_max + 1))
                ticklocs.extend(list(10**expo))
            if (decade_min <= 0) and (decade_max >= 0):
                ticklocs.append(0.5)
            if (decade_max >= 1):
                expo = -np.arange(max(1, decade_min), decade_max + 1)
                ticklocs.extend(list(1 - 10**expo))

        # minor ticks
        else:
            ticklocs = []
            if (decade_min <= -2):
                expo = np.arange(decade_min, min(-1, decade_max))
                newticks = np.outer(np.arange(2, 10), 10**expo).ravel()
                ticklocs.extend(list(newticks))
            if (decade_min <= 0) and (decade_max >= 0):
                ticklocs.extend([0.2, 0.3, 0.4, 0.6, 0.7, 0.8])
            if (decade_max >= 2):
                expo = -np.arange(max(2, decade_min), decade_max + 1)
                newticks = 1 - np.outer(np.arange(2, 10), 10**expo).ravel()
                ticklocs.extend(list(newticks))

        return self.raise_if_exceeds(np.array(ticklocs)) 
Example 47
Project: LaserTOF   Author: kyleuckert   File: ticker.py    MIT License
def get_locator(self, d):
        'pick the best locator based on a distance'
        d = abs(d)
        if d <= 0:
            locator = MultipleLocator(0.2)
        else:

            try:
                ld = math.log10(d)
            except OverflowError:
                raise RuntimeError('AutoLocator illegal data interval range')

            fld = math.floor(ld)
            base = 10 ** fld

            #if ld==fld:  base = 10**(fld-1)
            #else:        base = 10**fld

            if d >= 5 * base:
                ticksize = base
            elif d >= 2 * base:
                ticksize = base / 2.0
            else:
                ticksize = base / 5.0
            locator = MultipleLocator(ticksize)

        return locator 
Example 48
Project: LaserTOF   Author: kyleuckert   File: _base.py    MIT License
def get_data_ratio_log(self):
        """
        Returns the aspect ratio of the raw data in log scale.
        Will be used when both axis scales are in log.
        """
        xmin, xmax = self.get_xbound()
        ymin, ymax = self.get_ybound()

        xsize = max(math.fabs(math.log10(xmax) - math.log10(xmin)), 1e-30)
        ysize = max(math.fabs(math.log10(ymax) - math.log10(ymin)), 1e-30)

        return ysize / xsize 
Example 49
Project: mapreduce_python   Author: laertispappas   File: tfidf-serial.py    Apache License 2.0
def tfidf(word_counts, total_number_of_documents):
    for word_and_doc in word_counts:
        n_terms_appear = int(word_counts[word_and_doc])
        terms_in_doc = int(number_of_terms_in_document[word_and_doc.split('#')[1]])
        words_tfidf[word_and_doc] = [float(n_terms_appear) / float(terms_in_doc)]
        word = word_and_doc.split('#')[0]
        words_tfidf[word_and_doc].append(log10(float(total_number_of_documents) / float(number_of_docs_with_term[word])))
        words_tfidf[word_and_doc].append(words_tfidf[word_and_doc][0] * words_tfidf[word_and_doc][1]) 
Example 50
Project: OpenBench   Author: AndyGrant   File: utils.py    GNU General Public License v3.0
def proba_to_bayeselo(pwin, pdraw, ploss):
    elo     = 200 * math.log10(pwin/ploss * (1-ploss)/(1-pwin))
    drawelo = 200 * math.log10((1-ploss)/ploss * (1-pwin)/pwin)
    return elo, drawelo 
Example 51
Project: NiujiaoDebugger   Author: MrSrc   File: test_math.py    GNU General Public License v3.0
def testLog10(self):
        self.assertRaises(TypeError, math.log10)
        self.ftest('log10(0.1)', math.log10(0.1), -1)
        self.ftest('log10(1)', math.log10(1), 0)
        self.ftest('log10(10)', math.log10(10), 1)
        self.ftest('log10(10**1000)', math.log10(10**1000), 1000.0)
        self.assertRaises(ValueError, math.log10, -1.5)
        self.assertRaises(ValueError, math.log10, -10**1000)
        self.assertRaises(ValueError, math.log10, NINF)
        self.assertEqual(math.log(INF), INF)
        self.assertTrue(math.isnan(math.log10(NAN))) 
Example 52
Project: 0xbtc-discord-price-bot   Author: 0x1d00ffff   File: formatting_helpers.py    MIT License
def round_to_n_decimals(x, n=1):
    from math import log10, floor
    assert n >= 1
    return round(x, -int(floor(log10(abs(x))))+n-1) 
Example 53
Project: gymwipe   Author: bjoluc   File: physical.py    GNU General Public License v3.0
def wattsToDbm(watts: float):
    """
    Converts a watt value to a dBm value.

    Args:
        watts: The watt value to be converted
    """
    return 10 * log10(watts) + 30 
Example 54
Project: gymwipe   Author: bjoluc   File: physical.py    GNU General Public License v3.0
def milliwattsToDbm(milliwatts: float):
    """
    Converts a milliwatt value to a dBm value.

    Args:
        milliwatts: The milliwatt value to be converted
    """
    return 10 * log10(milliwatts) 
Example 55
Project: gymwipe   Author: bjoluc   File: attenuation_models.py    GNU General Public License v3.0
def _update(self):
        # https://en.wikipedia.org/wiki/Free-space_path_loss#Free-space_path_loss_in_decibels
        a = self.devices[0].position
        b = self.devices[1].position
        if a == b:
            logger.warning("%s: Source and destination position are equivalent.", self)
            return 0
        attenuation = 20*log10(a.distanceTo(b)) + 20*log10(self.frequencyBandSpec.frequency) - 147.55
        self._setAttenuation(attenuation) 
Example 56
Project: geoscience   Author: rolandhill   File: Utils.py    GNU General Public License v3.0
def gridInterval(request):
    intervals = [1, 2, 2.5, 4, 5]
    logIntervals = []
    for f in intervals:
        logIntervals.append(math.log10(f))
    
    logReq = math.log10(request)
    iremain = math.floor(logReq)
    remain = logReq - iremain
    
    for i in range(len(intervals) - 1, -1, -1):
        if logIntervals[i] < remain:
            res = 10.0 ** (float(iremain) + logIntervals[i])
            return res 
Example 57
Project: MetroloPy   Author: nrc-cnrc   File: gummy.py    GNU General Public License v3.0
def _lg10(x):
    if mp is not None and isinstance(x,mp.mpf):
        return mp.log10(x)
    try:
        return x.log10() # in case x is a decimal.Decimal
    except:
        try:
            return log10(x)
        except:
            return log10(float(x)) # in case x is a fraction.Fraction 
Example 58
Project: mathematics_dataset   Author: deepmind   File: numbers.py    Apache License 2.0
def base_conversion(min_entropy, max_entropy):
  """E.g., "What is 17 base 8 in base 10?"."""
  context = composition.Context()

  from_base = random.randint(2, 16)
  while True:
    to_base = random.randint(2, 16)
    if to_base != from_base:
      break

  # Entropy used up in selecting bases.
  entropy_used = math.log10(16 * 15)
  entropy = random.uniform(
      min_entropy - entropy_used, max_entropy - entropy_used)

  value = number.integer(entropy, signed=True)
  template = random.choice([
      '{from_str} (base {from_base}) to base {to_base}',
      'Convert {from_str} (base {from_base}) to base {to_base}.',
      'What is {from_str} (base {from_base}) in base {to_base}?',
  ])
  return example.Problem(
      question=example.question(
          context, template,
          from_str=display.NumberInBase(value, from_base),
          from_base=from_base,
          to_base=to_base),
      answer=display.NumberInBase(value, to_base)) 
Example 59
Project: mathematics_dataset   Author: deepmind   File: arithmetic.py    Apache License 2.0
def _entropy_of_factor_split(integer):
  """Returns entropy (log base 10) of decomposing: integer = a * b."""
  assert integer.is_Integer
  if integer == 0:
    return 0
  # Gives dict of form {factor: multiplicity}
  factors = sympy.factorint(integer)
  return sum(math.log10(mult + 1) for mult in six.itervalues(factors)) 
Example 60
Project: mathematics_dataset   Author: deepmind   File: arithmetic.py    Apache License 2.0
def arithmetic(value, entropy, length=None, add_sub=True, mul_div=True):
  """Generates an arithmetic expression with a given value.

  Args:
    value: Target value (integer or rational).
    entropy: Amount of randomness to use in generating expression.
    length: Number of ops to use. If `None` then suitable length will be picked
        based on entropy by sampling within the range
        `length_range_for_entropy`.
    add_sub: Whether to include addition and subtraction operations.
    mul_div: Whether to include multiplication and division operations.

  Returns:
    Instance of `ops.Op` containing expression.
  """
  assert isinstance(entropy, float)
  if length is None:
    min_length, max_length = length_range_for_entropy(entropy)
    length = random.randint(min_length, max_length)
    # Some entropy used up in sampling the length.
    entropy -= math.log10(max_length - min_length + 1)
  else:
    assert isinstance(length, int)

  # Entropy adjustment, because different binary trees (from sampling ops) can
  # lead to the same expression. This is the correct value when we use just
  # addition as the op, and is otherwise an upper bound.
  entropy += combinatorics.log_number_binary_trees(length) / math.log(10)

  value = sympy.sympify(value)
  sample_args = _SampleArgs(length, entropy)
  return _arithmetic(value, sample_args, add_sub, mul_div) 
Example 61
Project: mathematics_dataset   Author: deepmind   File: number.py    Apache License 2.0
def entropy_of_value(value):
  """Returns "min entropy" that would give probability of getting this value."""
  if isinstance(value, display.Decimal):
    return entropy_of_value(sympy.numer(value))

  if is_non_integer_rational(value):
    numer = sympy.numer(value)
    denom = sympy.denom(value)
    return entropy_of_value(numer) + entropy_of_value(denom)
  elif not is_integer(value):
    raise ValueError('Unhandled value: {}'.format(value))

  # Note: we sample integers in a range of size approx 10**entropy about zero,
  # so assume that `abs(value)` is about half of the upper range.
  return math.log10(5 * abs(value) + 1) 
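A quick numeric check of the last line: a value such as 200, which is the kind of integer produced by sampling with roughly three units of base-10 entropy, maps back to about 3:

import math
print(math.log10(5 * abs(200) + 1))   # log10(1001), about 3.0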
Example 62
Project: mathematics_dataset   Author: deepmind   File: polynomials.py    Apache License 2.0 5 votes vote down vote up
def sample_with_small_evaluation(variable, degree, max_abs_input, entropy):
  """Generates a (canonically ordered) polynomial, with bounded evaluation.

  The coefficients are chosen to make use of the entropy, with the scaling
  adjusted so that all give roughly the same contribution to the output of the
  polynomial when the input is bounded in magnitude by `max_abs_input`.

  Args:
    variable: Variable to use in polynomial.
    degree: Degree of polynomial.
    max_abs_input: Number >= 1; max absolute value of input.
    entropy: Float; randomness for generating polynomial.

  Returns:
    Instance of `ops.Add`.
  """
  assert max_abs_input >= 1
  entropies = entropy * np.random.dirichlet(np.ones(degree + 1))
  coeffs = []

  for power in range(degree + 1):
    # This scaling guarantees that the terms give roughly equal contribution
    # to the typical magnitude of the polynomial when |input| <= max_abs_input.
    delta = 0.5 * (degree - 2 * power) * math.log10(max_abs_input)
    power_entropy = entropies[power] + delta
    min_abs = 1 if power == degree else 0
    coeff = number.integer(power_entropy, signed=True, min_abs=min_abs)
    coeffs.append(coeff)

  terms = [monomial(coeff, variable, power)
           for power, coeff in enumerate(coeffs)]
  return ops.Add(*terms) 
Example 63
Project: Lexpage   Author: AlexandreDecan   File: models.py    GNU General Public License v3.0 5 votes vote down vote up
def get_tags_list(self, by_number=False, relative=True):
        """
        Return a list of every tag that occurred in at least one post.
        This list is composed of pairs (tag_name, number_of_posts).

        :param by_number: If True, the list is sorted by the number of posts instead of by name
        :param relative: If True, the number of posts is expressed relative to the
        biggest value occurring in the list.
        :return: A list of (tag_name, number_of_posts)
        """

        if by_number:
            sort_func = lambda x: -x[1]
        else:
            sort_func = lambda x: x[0]

        posts = self.all()
        count = {}

        for post in posts:
            for tag in post.tags_list():
                count[tag] = count.setdefault(tag, 0) + 1

        if relative:
            max_value = max(count.values())
            count = {k: math.log10(100 * v / max_value) for k, v in count.items()}

        return sorted(count.items(), key=sort_func) 
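The log10 scaling above maps the counts onto a roughly 0-2 scale, with the most frequent tag landing exactly on log10(100) = 2. A standalone illustration with made-up counts:

import math

count = {'python': 50, 'django': 5, 'misc': 1}
max_value = max(count.values())
print({k: math.log10(100 * v / max_value) for k, v in count.items()})
# {'python': 2.0, 'django': 1.0, 'misc': ~0.301}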
Example 64
Project: seamseg   Author: mapillary   File: logging.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _current_total_formatter(current, total):
    width = int(log10(total)) + 1
    return ("[{:" + str(width) + "}/{:" + str(width) + "}]").format(current, total) 
Example 65
Project: mkipp   Author: orlox   File: mkipp.py    GNU General Public License v2.0 5 votes vote down vote up
def default_extractor(identifier, log10_on_data, prof, return_data_columns = False):
    if return_data_columns:
        return [identifier]
    if log10_on_data:
        return np.log10(abs(prof.get(identifier))+1e-99)
    else:
        return prof.get(identifier)

#properties of the plotter 
Example 66
Project: vo_single_camera_sos   Author: ubuntuslave   File: pose_est_tools.py    GNU General Public License v3.0 5 votes vote down vote up
def pose_relative_ransac_2D_to_2D(bearing_vectors1, bearing_vectors2, model_error_threshold = 0.001, rel_pose_est_algorithm = "STEWENIUS", outlier_fraction_known = 0.50):
    '''
    @param model_error_threshold: Error threshold for the pose-fitting model. In this context it is roughly twice the angular projection error, because the triangulated 3D points are reprojected into each view: the relative RANSAC pose estimation model computes the angular distance between the bearing (back-projection) and forward-projection angles for each view, and these distances are added up as the total measure of fitness quality.
    '''
    threshold = model_error_threshold
    n_points_for_model = -1
    w = 1.0 - outlier_fraction_known  # w = number of inliers in data / number of points in data
    # w ** n is the probability that, in a single iteration, RANSAC selects only inliers from the input data set
    # when it chooses the n points from which the model parameters are estimated.
    # The number of iterations, N, is chosen high enough that, with the desired probability
    # (usually set to 0.99), at least one of the sets of random samples does not include an outlier.
    desired_prob_only_inlier_selection = 0.99
    if rel_pose_est_algorithm == "NISTER" or rel_pose_est_algorithm == "STEWENIUS":
        n_points_for_model = 5
    elif rel_pose_est_algorithm == "SEVENPT":
        n_points_for_model = 7
    elif rel_pose_est_algorithm == "EIGHTPT":
        n_points_for_model = 8
    num_of_iters = log10(1.0 - desired_prob_only_inlier_selection) / log10(1.0 - w ** n_points_for_model)
    std_of_k = sqrt(1.0 - w ** n_points_for_model) / (w ** n_points_for_model)
    # Add the std. dev in order to gain additional confidence
#     max_iterations = 1000
    max_iterations = int(num_of_iters + 3 * std_of_k)

    ransac_transformation, indices_inliers = pyopengv.relative_pose_ransac(bearing_vectors1[..., :3], bearing_vectors2[..., :3], rel_pose_est_algorithm, threshold, max_iterations)
    # The following is no longer necessary because I modified "pyopengv" to also return the inlier indices
    #===========================================================================
    # ransac_transformation = pyopengv.relative_pose_ransac(bearing_vectors1[..., :3], bearing_vectors2[..., :3], rel_pose_est_algorithm, threshold, max_iterations)
    # indices_all = np.arange(len(bearing_vectors1))
    # # A final selection of inlier correspondences would be:
    # indices_inliers, indices_outliers = select_inliers_within_distance(model_coefficients=ransac_transformation, indices_all=indices_all, threshold=threshold, bearing_vectors1=bearing_vectors1[..., :3], bearing_vectors2=bearing_vectors2[..., :3], is_relative_2D_to_2D_case=True)
    #===========================================================================

    ransac_transformation_homo = np.identity(4)
    ransac_transformation_homo[:3] = ransac_transformation

    return ransac_transformation_homo, indices_inliers 
Example 67
Project: vo_single_camera_sos   Author: ubuntuslave   File: pose_est_tools.py    GNU General Public License v3.0 5 votes vote down vote up
def pose_absolute_ransac_3D_to_2D(bearing_vectors, points3D, model_error_threshold = 0.001, pose_est_algorithm = "EPNP", outlier_fraction_known = 0.50, max_iterations = -1):
    '''
    @param model_error_threshold: Error threshold for the pose-fitting model. In this context it is roughly twice the angular projection error, because the triangulated 3D points are reprojected into each view: the RANSAC pose estimation model computes the angular distance between the bearing (back-projection) and forward-projection angles for each view, and these distances are added up as the total measure of fitness quality.
    @param pose_est_algorithm: The implemented absolute pose estimation algorithms are: "TWOPT", "KNEIP", "GAO", "EPNP" and "GP3P"
    @param max_iterations: When set to -1, the number of iterations is computed online
    '''
    threshold = model_error_threshold
    if max_iterations < 0:
        n_points_for_model = -1
        w = 1.0 - outlier_fraction_known  # w = number of inliers in data / number of points in data
        # w ** n is the probability that, in a single iteration, RANSAC selects only inliers from the input data set
        # when it chooses the n points from which the model parameters are estimated.
        # The number of iterations, N, is chosen high enough that, with the desired probability
        # (usually set to 0.99), at least one of the sets of random samples does not include an outlier.
        desired_prob_only_inlier_selection = 0.99
        if pose_est_algorithm == "TWOPT":
            n_points_for_model = 2
        else:
            n_points_for_model = 3
        num_of_iters = log10(1.0 - desired_prob_only_inlier_selection) / log10(1.0 - w ** n_points_for_model)
        std_of_k = sqrt(1.0 - w ** n_points_for_model) / (w ** n_points_for_model)
        # Add the std. dev in order to gain additional confidence
        max_iterations = int(num_of_iters + 3 * std_of_k)

    ransac_transformation, indices_inliers = absolute_pose_ransac(bearing_vectors[..., :3], points3D[..., :3], pose_est_algorithm, threshold, max_iterations)
    # The following is no longer necessary because I modified "pyopengv" to also return the inlier indices
    #===========================================================================
    # ransac_transformation = absolute_pose_ransac(bearing_vectors[..., :3], points3D[..., :3], pose_est_algorithm, threshold, max_iterations)
    # indices_all = np.arange(len(bearing_vectors))
    # # A final selection of inlier correspondences would be:
    # indices_inliers_test, indices_outliers = select_inliers_within_distance(model_coefficients=ransac_transformation, indices_all=indices_all, threshold=threshold, bearing_vectors1=points3D[..., :3], bearing_vectors2=bearing_vectors[..., :3], is_relative_2D_to_2D_case=False)
    #===========================================================================

    ransac_transformation_homo = np.identity(4)
    ransac_transformation_homo[:3] = ransac_transformation

    return ransac_transformation_homo, indices_inliers

# Get the set of inliers that correspond to the best model found so far 
Example 68
Project: vo_single_camera_sos   Author: ubuntuslave   File: pose_est_tools.py    GNU General Public License v3.0 5 votes vote down vote up
def compute_num_of_iterations_RANSAC(self, n_points_for_model, correspondences_outliers_fraction):
        w = 1.0 - correspondences_outliers_fraction  # w = number of inliers in data / number of points in data
        # w ** n is the probability that, in a single iteration, RANSAC selects only inliers from the input data set
        # when it chooses the n points from which the model parameters are estimated.
        # The number of iterations, N, is chosen high enough that, with the desired probability
        # (usually set to 0.998 here), at least one of the sets of random samples does not include an outlier.
        desired_prob_only_inlier_selection = 0.998
        num_of_iters = log10(1.0 - desired_prob_only_inlier_selection) / log10(1.0 - w ** n_points_for_model)
        std_of_k = sqrt(1.0 - w ** n_points_for_model) / (w ** n_points_for_model)
        # Add the std. dev in order to gain additional confidence
        max_ransac_iterations = int(num_of_iters + 3 * std_of_k)
        return max_ransac_iterations 
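All three RANSAC helpers above use the standard trial-count formula N = log(1 - p) / log(1 - w^n), where p is the desired probability of drawing at least one all-inlier minimal sample, w the inlier ratio and n the minimal sample size, and then pad N by three standard deviations of the geometric trial count. A standalone numerical check with illustrative values close to the defaults used here (p = 0.998, w = 0.5, n = 5):

from math import log10, sqrt

p = 0.998   # desired probability that at least one sample set is outlier-free
w = 0.5     # inlier ratio (1 - outlier fraction)
n = 5       # minimal sample size, e.g. a five-point relative-pose solver

num_of_iters = log10(1.0 - p) / log10(1.0 - w ** n)   # about 196
std_of_k = sqrt(1.0 - w ** n) / (w ** n)              # about 31.5
print(int(num_of_iters + 3 * std_of_k))               # about 290 iterations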
Example 69
Project: FX-RER-Value-Extraction   Author: tsKenneth   File: ticker.py    MIT License 5 votes vote down vote up
def _compute_offset(self):
        locs = self.locs
        # Restrict to visible ticks.
        vmin, vmax = sorted(self.axis.get_view_interval())
        locs = np.asarray(locs)
        locs = locs[(vmin <= locs) & (locs <= vmax)]
        if not len(locs):
            self.offset = 0
            return
        lmin, lmax = locs.min(), locs.max()
        # Only use offset if there are at least two ticks and every tick has
        # the same sign.
        if lmin == lmax or lmin <= 0 <= lmax:
            self.offset = 0
            return
        # min, max comparing absolute values (we want division to round towards
        # zero so we work on absolute values).
        abs_min, abs_max = sorted([abs(float(lmin)), abs(float(lmax))])
        sign = math.copysign(1, lmin)
        # What is the smallest power of ten such that abs_min and abs_max are
        # equal up to that precision?
        # Note: Internally using oom instead of 10 ** oom avoids some numerical
        # accuracy issues.
        oom_max = np.ceil(math.log10(abs_max))
        oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
                       if abs_min // 10 ** oom != abs_max // 10 ** oom)
        if (abs_max - abs_min) / 10 ** oom <= 1e-2:
            # Handle the case of straddling a multiple of a large power of ten
            # (relative to the span).
            # What is the smallest power of ten such that abs_min and abs_max
            # are no more than 1 apart at that precision?
            oom = 1 + next(oom for oom in itertools.count(oom_max, -1)
                           if abs_max // 10 ** oom - abs_min // 10 ** oom > 1)
        # Only use offset if it saves at least _offset_threshold digits.
        n = self._offset_threshold - 1
        self.offset = (sign * (abs_max // 10 ** oom) * 10 ** oom
                       if abs_max // 10 ** oom >= 10**n
                       else 0) 
Example 70
Project: FX-RER-Value-Extraction   Author: tsKenneth   File: ticker.py    MIT License 5 votes vote down vote up
def _set_order_of_magnitude(self):
        # if scientific notation is to be used, find the appropriate exponent
        # if using a numerical offset, find the exponent after applying the
        # offset. When lower power limit = upper <> 0, use provided exponent.
        if not self._scientific:
            self.orderOfMagnitude = 0
            return
        if self._powerlimits[0] == self._powerlimits[1] != 0:
            # fixed scaling when lower power limit = upper <> 0.
            self.orderOfMagnitude = self._powerlimits[0]
            return
        # restrict to visible ticks
        vmin, vmax = sorted(self.axis.get_view_interval())
        locs = np.asarray(self.locs)
        locs = locs[(vmin <= locs) & (locs <= vmax)]
        locs = np.abs(locs)
        if not len(locs):
            self.orderOfMagnitude = 0
            return
        if self.offset:
            oom = math.floor(math.log10(vmax - vmin))
        else:
            if locs[0] > locs[-1]:
                val = locs[0]
            else:
                val = locs[-1]
            if val == 0:
                oom = 0
            else:
                oom = math.floor(math.log10(val))
        if oom <= self._powerlimits[0]:
            self.orderOfMagnitude = oom
        elif oom >= self._powerlimits[1]:
            self.orderOfMagnitude = oom
        else:
            self.orderOfMagnitude = 0 
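For orientation, math.floor(math.log10(val)) in the branch above is simply the decimal order of magnitude of the largest visible tick; a quick standalone check:

import math

for val in (0.042, 7.0, 53000.0):
    print(val, math.floor(math.log10(val)))
# 0.042 -2
# 7.0 0
# 53000.0 4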
Example 71
Project: rate.sx   Author: chubin   File: to_precision.py    MIT License 4 votes vote down vote up
def to_precision(x,p):
    """
    returns a string representation of x formatted with a precision of p

    Based on the webkit javascript implementation taken from here:
    https://code.google.com/p/webkit-mirror/source/browse/JavaScriptCore/kjs/number_object.cpp
    """

    x = float(x)

    if x == 0.:
        return "0." + "0"*(p-1)

    out = []

    if x < 0:
        out.append("-")
        x = -x

    e = int(math.log10(x))
    tens = math.pow(10, e - p + 1)
    n = math.floor(x/tens)

    if n < math.pow(10, p - 1):
        e = e - 1
        tens = math.pow(10, e - p + 1)
        n = math.floor(x / tens)

    if abs((n + 1.) * tens - x) <= abs(n * tens - x):
        n = n + 1

    if n >= math.pow(10, p):
        n = n / 10.
        e = e + 1

    m = "%.*g" % (p, n)

    if e < -2 or e >= p:
        out.append(m[0])
        if p > 1:
            out.append(".")
            out.extend(m[1:p])
        out.append('e')
        if e > 0:
            out.append("+")
        out.append(str(e))
    elif e == (p - 1):
        out.append(m)
    elif e >= 0:
        out.append(m[:e+1])
        if e+1 < len(m):
            out.append(".")
            out.extend(m[e+1:])
    else:
        out.append("0.")
        out.extend(["0"]*-(e+1))
        out.append(m)

    return "".join(out) 
Example 72
Project: GroundedTranslation   Author: elliottd   File: sweep.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def random_sweep(self):
        '''
        Start randomly sweeping through hyperparameter ranges.

        This current only supports sweeping through the L2 regularisation
        strength, the learning rate, and the dropout probability.
        '''

        model = GroundedTranslation(self.args, datagen=self.data_generator)

        handle = open("../logs/sweeper-%s.log" % self.args.run_string, "w")
        handle.write("{:3} | {:10} | {:10} | {:10} | {:10} | {:10} \n".format("Run",
            "loss", "val_loss", "lr", "reg", "dropin"))
        handle.close()
        for sweep in xrange(self.args.num_sweeps):
            # randomly sample a learning rate and an L2 regularisation
            handle = open("../logs/sweeper-%s.log" % self.args.run_string, "a")
            if self.args.min_lr == ceil(self.args.min_lr):
                # you provided an exponent, we'll search in log-space
                lr = 10**uniform(self.args.min_lr, self.args.max_lr)
            else:
                # you provided a specific number
                lr = 10**uniform(log10(self.args.min_lr),
                                 log10(self.args.max_lr))

            if self.args.min_l2 == ceil(self.args.min_l2):
                # you provided an exponent, we'll search in log-space
                l2 = 10**uniform(self.args.min_l2, self.args.max_l2)
            else:
                # you provided a specific number
                l2 = 10**uniform(log10(self.args.min_l2),
                                 log10(self.args.max_l2))
            drop_in = uniform(self.args.min_dropin, self.args.max_dropin)

            # modify the arguments that will be used to create the graph
            model.args.lr = lr
            model.args.l2reg = l2
            model.args.dropin = drop_in

            logger.info("Setting learning rate to: %.5e", lr)
            logger.info("Setting l2reg to: %.5e", l2)
            logger.info("Setting dropout to: %f", drop_in)

            # initialise and compile a new model
            losses = model.train_model()
            handle.write("{:3d} | {:5.5f} | {:5.5f} | {:5e} | {:5e} | {:5.4f} \n".format(sweep,
                         losses.history['loss'][-1],
                         losses.history['val_loss'][-1], lr, l2, drop_in))
            handle.close() 
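The 10 ** uniform(log10(min), log10(max)) pattern in the branches above is a log-uniform draw: the exponent is sampled uniformly, so every decade between the bounds is equally likely, which is the usual choice for learning-rate and L2 sweeps. A standalone sketch with illustrative bounds:

import random
from math import log10

min_lr, max_lr = 1e-5, 1e-1
lr = 10 ** random.uniform(log10(min_lr), log10(max_lr))
print(lr)   # e.g. 3.2e-04; each decade in [1e-5, 1e-1] is equally probable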
Example 73
Project: LaserTOF   Author: kyleuckert   File: ticker.py    MIT License 4 votes vote down vote up
def format_eng(self, num):
        """ Formats a number in engineering notation, appending a letter
        representing the power of 1000 of the original number. Some examples:

        >>> format_eng(0)       # for self.places = 0
        '0'

        >>> format_eng(1000000) # for self.places = 1
        '1.0 M'

        >>> format_eng("-1e-6") # for self.places = 2
        u'-1.00 \u03bc'

        @param num: the value to represent
        @type num: either a numeric value or a string that can be converted to
                   a numeric value (as per decimal.Decimal constructor)

        @return: engineering formatted string
        """

        dnum = decimal.Decimal(str(num))

        sign = 1

        if dnum < 0:
            sign = -1
            dnum = -dnum

        if dnum != 0:
            pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
        else:
            pow10 = decimal.Decimal(0)

        pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
        pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))

        prefix = self.ENG_PREFIXES[int(pow10)]

        mant = sign * dnum / (10 ** pow10)

        if self.places is None:
            format_str = "%g %s"
        elif self.places == 0:
            format_str = "%i %s"
        elif self.places > 0:
            format_str = ("%%.%if %%s" % self.places)

        formatted = format_str % (mant, prefix)

        formatted = formatted.strip()
        if (self.unit != "") and (prefix == self.ENG_PREFIXES[0]):
            formatted = formatted + " "

        return formatted 
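The key log10 step above is floor(log10(x) / 3) * 3, which snaps the exponent down to the nearest multiple of three so it can be mapped to an SI prefix. A standalone check outside the formatter class:

import decimal
import math

num = decimal.Decimal("1230000")
pow10 = int(math.floor(num.log10() / 3) * 3)
print(pow10, num / (10 ** pow10))   # 6 1.23  (i.e. 1.23 with the 'M' prefix)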
Example 74
Project: LaserTOF   Author: kyleuckert   File: ticker.py    MIT License 4 votes vote down vote up
def __call__(self):
        'Return the locations of the ticks'
        majorlocs = self.axis.get_majorticklocs()
        try:
            majorstep = majorlocs[1] - majorlocs[0]
        except IndexError:
            # Need at least two major ticks to find minor tick locations
            # TODO: Figure out a way to still be able to display minor
            # ticks without two major ticks visible. For now, just display
            # no ticks at all.
            majorstep = 0

        if self.ndivs is None:
            if majorstep == 0:
                # TODO: Need a better way to figure out ndivs
                ndivs = 1
            else:
                x = int(np.round(10 ** (np.log10(majorstep) % 1)))
                if x in [1, 5, 10]:
                    ndivs = 5
                else:
                    ndivs = 4
        else:
            ndivs = self.ndivs

        minorstep = majorstep / ndivs

        vmin, vmax = self.axis.get_view_interval()
        if vmin > vmax:
            vmin, vmax = vmax, vmin

        if len(majorlocs) > 0:
            t0 = majorlocs[0]
            tmin = ((vmin - t0) // minorstep + 1) * minorstep
            tmax = ((vmax - t0) // minorstep + 1) * minorstep
            locs = np.arange(tmin, tmax, minorstep) + t0
            cond = np.abs((locs - t0) % majorstep) > minorstep / 10.0
            locs = locs.compress(cond)
        else:
            locs = []

        return self.raise_if_exceeds(np.array(locs)) 
Example 75
Project: LaserTOF   Author: kyleuckert   File: filter_design.py    MIT License 4 votes vote down vote up
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
    """
    Band Stop Objective Function for order minimization.

    Returns the non-integer order for an analog band stop filter.

    Parameters
    ----------
    wp : scalar
        Edge of passband `passb`.
    ind : int, {0, 1}
        Index specifying which `passb` edge to vary (0 or 1).
    passb : ndarray
        Two element sequence of fixed passband edges.
    stopb : ndarray
        Two element sequence of fixed stopband edges.
    gstop : float
        Amount of attenuation in stopband in dB.
    gpass : float
        Amount of ripple in the passband in dB.
    type : {'butter', 'cheby', 'ellip'}
        Type of filter.

    Returns
    -------
    n : scalar
        Filter order (possibly non-integer).

    """
    passbC = passb.copy()
    passbC[ind] = wp
    nat = (stopb * (passbC[0] - passbC[1]) /
           (stopb ** 2 - passbC[0] * passbC[1]))
    nat = min(abs(nat))

    if type == 'butter':
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
    elif type == 'cheby':
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
    elif type == 'ellip':
        GSTOP = 10 ** (0.1 * gstop)
        GPASS = 10 ** (0.1 * gpass)
        arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
        arg0 = 1.0 / nat
        d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
        d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
        n = (d0[0] * d1[1] / (d0[1] * d1[0]))
    else:
        raise ValueError("Incorrect type: %s" % type)
    return n 
Example 76
Project: DOSMA   Author: ad12   File: dicom_io.py    GNU General Public License v3.0 4 votes vote down vote up
def save(self, im, filepath):
        """
        Save a medical volume in DICOM format.
        :param im: a MedicalVolume
        :param filepath: path to a directory in which to store the DICOM files

        :raises ValueError: if im (MedicalVolume) does not have initialized headers
        :raises ValueError: if im was flipped across any axis (flipping changes the scanner origin, which is currently not handled)
        """
        # Get orientation indicated by headers
        headers = im.headers
        if headers is None:
            raise ValueError('MedicalVolume headers must be initialized to save as a dicom')

        affine = LPSplus_to_RASplus(headers)
        orientation = stdo.orientation_nib_to_standard(nib.aff2axcodes(affine))

        # Currently do not support mismatch in scanner_origin
        if tuple(affine[:3, 3]) != im.scanner_origin:
            raise ValueError(
                'Scanner origin mismatch. Currently we do not handle mismatch in scanner origin (i.e. cannot flip across axis)')

        # reformat medical volume to expected orientation specified by dicom headers
        # store original orientation so we can undo the dicom-specific reformatting
        original_orientation = im.orientation

        im.reformat(orientation)
        volume = im.volume
        assert volume.shape[2] == len(headers), "Dimension mismatch - %d slices but %d headers" % (
            volume.shape[-1], len(headers))

        # check if filepath exists
        filepath = io_utils.check_dir(filepath)

        num_slices = len(headers)
        filename_format = 'I%0' + str(max(4, ceil(log10(num_slices)))) + 'd.dcm'

        for s in range(num_slices):
            s_filepath = os.path.join(filepath, filename_format % (s + 1))
            self.__write_dicom_file__(volume[..., s], headers[s], s_filepath)

        # reformat image to original orientation (before saving)
        # we do this, because saving should not affect the existing state of any variable
        im.reformat(original_orientation) 
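For reference, the zero-padded filename pattern built above (at least four digits, growing with the slice count) expands as in this standalone illustration:

from math import ceil, log10

for num_slices in (120, 25000):
    filename_format = 'I%0' + str(max(4, ceil(log10(num_slices)))) + 'd.dcm'
    print(filename_format % 1, filename_format % num_slices)
# I0001.dcm I0120.dcm
# I00001.dcm I25000.dcm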
Example 77
Project: genomeview   Author: nspies   File: axis.py    MIT License 4 votes vote down vote up
def get_ticks(start, end, target_n_labels=10):
    """
    Tries to put an appropriate number of ticks at nice round coordinates between
    the genomic positions `start` and `end`. It aims for, but does not guarantee,
    `target_n_labels` ticks / labels.

    Returns:
        a list of tuples (coordinate, label), where "label" is a nicely formatted 
        string describing the coordinate
    """
    ticks = []
    start = int(start)
    end = int(end)
    width = end - start

    res = (10 ** round(math.log10(end - start))) / (10**math.floor(math.log10(target_n_labels)))

    if width / res > target_n_labels*2:
        res *= 5
    elif width / res > target_n_labels*1.5:
        res *= 2.5
    elif width / res < target_n_labels*0.15:
        res /= 10.0
    elif width / res < target_n_labels*0.25:
        res /= 8.0
    elif width / res < target_n_labels*0.5:
        res /= 4.0
    elif width / res < target_n_labels*0.8:
        res /= 2.0

    roundStart = start - (start%res)
    res = max(1, int(res))
    
    for i in range(int(roundStart), end, res):
        res_digits = math.log10(res)
        if res_digits >= 6:
            label = "{}mb".format(i / 1e6)
        elif res_digits >= 3:
            label = "{:,}kb".format(i / 1e3)
        else:
            label = "{:,}".format(i)

        ticks.append((i, label))

    return ticks 
Example 78
Project: kucher   Author: Zubax   File: editor_delegate.py    GNU General Public License v3.0 4 votes vote down vote up
def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex) -> QWidget:
        """
        The set of editors that we have defined here is only good for small-dimensioned registers with a few values.
        They are not suitable for unstructured data or large arrays. For that, should the need arise, we'll need to
        define dedicated, more complex widgets. Luckily, that is easy to do, just not really necessary at the moment.
        The framework doesn't care what kind of widget we're displaying when editing; it's just a basic pop-up that
        appears on top of the view.
        """
        register = self._get_register_from_index(index)
        _logger.info('Constructing editor for %r', register)

        if self._can_use_bool_switch(register):
            editor = QComboBox(parent)
            editor.setEditable(False)
            editor.addItem(get_icon('cancel'), 'False (0)')
            editor.addItem(get_icon('ok'), 'True (1)')
        elif self._can_use_spinbox(register):
            minimum, maximum = register.min_value[0], register.max_value[0]

            try:
                dtype = Register.get_numpy_type(register.type_id)
                float_decimals = int(abs(math.log10(numpy.finfo(dtype).resolution)) + 0.5) + 1
            except ValueError:
                float_decimals = None

            if float_decimals is not None:
                step = (maximum - minimum) / _MIN_PREFERRED_NUMBER_OF_STEPS_IN_FULL_RANGE
                try:
                    step = 10 ** round(math.log10(step))
                except ValueError:
                    step = 1        # Math domain error corner case

                step = min(1.0, step)                       # Step can't be greater than one for UX reasons
                _logger.info('Constructing QDoubleSpinBox with single step set to %r', step)
                editor = QDoubleSpinBox(parent)
                editor.setSingleStep(step)
                editor.setDecimals(float_decimals)
            else:
                editor = QSpinBox(parent)

            editor.setMinimum(minimum)
            editor.setMaximum(maximum)
        else:
            editor = QPlainTextEdit(parent)
            editor.setFont(get_monospace_font())
            editor.setMinimumWidth(QFontMetrics(editor.font()).width('9' * (MAX_LINE_LENGTH + 5)))

        editor.setFont(Model.get_font())

        self._message_display_callback('Press Esc to cancel editing')

        return editor 
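The float_decimals expression above turns a dtype's resolution into a sensible number of spin-box decimals; a standalone check, relying on NumPy's documented resolutions of 1e-06 for float32 and 1e-15 for float64:

import math
import numpy

for dtype in (numpy.float32, numpy.float64):
    resolution = numpy.finfo(dtype).resolution
    print(dtype.__name__, int(abs(math.log10(resolution)) + 0.5) + 1)
# float32 7
# float64 16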
Example 79
Project: mathematics_dataset   Author: deepmind   File: calculus.py    Apache License 2.0 4 votes vote down vote up
def _differentiate_polynomial(value, sample_args, context, num_variables):
  """Generates a question for differentiating a polynomial."""
  is_question = context is None
  if context is None:
    context = composition.Context()

  if value is not None:
    num_variables = value.coefficients.ndim

  entropy, sample_args = sample_args.peel()
  max_derivative_order = 3
  derivative_order = random.randint(1, max_derivative_order)
  entropy = max(0, entropy - math.log10(max_derivative_order))

  derivative_axis = random.randint(0, num_variables - 1)
  if value is None:
    coefficients = _generate_polynomial(
        num_variables, entropy, derivative_order, derivative_axis)
  else:
    coefficients = _sample_integrand(
        value.coefficients, derivative_order, derivative_axis, entropy)

  (entity,) = context.sample(
      sample_args, [composition.Polynomial(coefficients)])

  value = coefficients
  for _ in range(derivative_order):
    value = polynomials.differentiate(value, axis=derivative_axis)
  nth = display.StringOrdinal(derivative_order)

  if entity.has_expression():
    polynomial = entity.expression
    variables = entity.polynomial_variables
  else:
    variables = [sympy.Symbol(context.pop()) for _ in range(num_variables)]
    polynomial = entity.handle.apply(*variables)
  variable = variables[derivative_axis]

  if is_question:
    template = _template(context.module_count, derivative_order, len(variables))
    answer = polynomials.coefficients_to_polynomial(value, variables).sympy()
    return example.Problem(
        question=example.question(
            context, template, eq=polynomial, var=variable, nth=nth),
        answer=answer)
  else:
    fn_symbol = context.pop()
    variables_string = ', '.join(str(variable) for variable in variables)
    assert len(variables) == 1  # since the description below does not specify which variable we differentiate with respect to
    return composition.Entity(
        context=context,
        value=composition.Polynomial(value),
        description='Let {fn}({variables}) be the {nth} derivative of {eq}.',
        handle=composition.FunctionHandle(fn_symbol),
        fn=fn_symbol, variables=variables_string, nth=nth, eq=polynomial) 
Example 80
Project: mathematics_dataset   Author: deepmind   File: polynomials.py    Apache License 2.0 4 votes vote down vote up
def add(value, sample_args, context=None):
  """E.g., "Let f(x)=2x+1, g(x)=3x+2. What is 5*f(x) - 7*g(x)?"."""
  is_question = context is None
  if context is None:
    context = composition.Context()

  entropy, sample_args = sample_args.peel()

  if value is None:
    max_degree = 3
    degree = random.randint(1, max_degree)
    entropy -= math.log10(max_degree)
    entropy_value = entropy / 2
    entropy -= entropy_value
    value = polynomials.sample_coefficients(
        degree, entropy=entropy_value, min_non_zero=random.randint(1, 3))
    value = composition.Polynomial(value)

  c1, c2, coeffs1, coeffs2 = polynomials.coefficients_linear_split(
      value.coefficients, entropy)
  coeffs1 = polynomials.trim(coeffs1)
  coeffs2 = polynomials.trim(coeffs2)

  c1, c2, fn1, fn2 = context.sample(
      sample_args,
      [c1, c2, composition.Polynomial(coeffs1), composition.Polynomial(coeffs2)]
  )

  var = sympy.var(context.pop())

  expression = (
      c1.handle * fn1.handle.apply(var) + c2.handle * fn2.handle.apply(var))

  if is_question:
    answer = polynomials.coefficients_to_polynomial(value.coefficients, var)
    answer = answer.sympy()
    template = random.choice(_TEMPLATES)
    return example.Problem(
        question=example.question(context, template, composed=expression),
        answer=answer)
  else:
    intermediate_symbol = context.pop()
    intermediate = sympy.Function(intermediate_symbol)(var)
    return composition.Entity(
        context=context,
        value=value,
        description='Let {intermediate} = {composed}.',
        handle=composition.FunctionHandle(intermediate_symbol),
        intermediate=intermediate,
        composed=expression)