Python math.log1p() Examples

The following code examples show how to use math.log1p(). They are extracted from open source Python projects.
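
math.log1p(x) computes log(1 + x) in a way that stays accurate when x is close to zero, where evaluating math.log(1 + x) directly loses most of x to rounding when forming 1 + x. A minimal illustration:

import math

x = 1e-10
print(math.log(1 + x))  # trailing digits are rounding noise from forming 1 + x
print(math.log1p(x))    # accurate: log(1 + x) = x - x**2/2 + ... ~= 1e-10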

Example 1
Project: q2-coordinates   Author: nbokulich   File: _utilities.py    (license)
def plot_basemap(latitude, longitude, image, color_palette=None):
    # define basemap, color palette
    cmap, tiler = get_map_params(image, color_palette)

    # Find min/max coordinates to set extent
    lat_0, lat_1, lon_0, lon_1 = get_max_extent(latitude, longitude)

    # Automatically choose tile resolution from the coordinate span
    max_span = max((abs(lat_1) - abs(lat_0)), (abs(lon_1) - abs(lon_0)))
    res = int(-1.4 * math.log1p(max_span) + 13)

    # initiate tiler projection
    ax = plt.axes(projection=tiler.crs)
    # Define extents of any plotted data
    ax.set_extent((lon_0, lon_1, lat_0, lat_1), ccrs.Geodetic())
    # add terrain background
    ax.add_image(tiler, res)

    return ax, cmap 
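
As a rough feel for the resolution formula above: -1.4 * math.log1p(max_span) + 13 maps wider coordinate spans to lower tile zoom levels. A small sketch (the spans here are illustrative, not from the project):

import math

for span in (0.1, 1, 10, 100):
    print(span, int(-1.4 * math.log1p(span) + 13))
# spans of 0.1 and 1 degree give zoom 12, 10 degrees gives 9, 100 gives 6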
Example 2
Project: sparks   Author: ImpactHorizon   File: utils.py    (license)
def detect_peaks(hist, count=2):
    hist_copy = hist  # alias, not a copy; the loop below rebinds it to new smoothed arrays
    peaks = len(argrelextrema(hist_copy, np.greater, mode="wrap")[0])
    sigma = log1p(peaks)
    print(peaks, sigma)
    while (peaks > count):
        new_hist = gaussian_filter(hist_copy, sigma=sigma)        
        peaks = len(argrelextrema(new_hist, np.greater, mode="wrap")[0])
        if peaks < count:
            peaks = count + 1
            sigma = sigma * 0.5
            continue
        hist_copy = new_hist
        sigma = log1p(peaks)
    print(peaks, sigma)
    return argrelextrema(hist_copy, np.greater, mode="wrap")[0] 
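
A usage sketch for the smoothing loop above, assuming the imports the snippet relies on (numpy, scipy, math.log1p); the bimodal test data is illustrative, not from the project:

import numpy as np
from math import log1p
from scipy.signal import argrelextrema
from scipy.ndimage import gaussian_filter

# Two well-separated Gaussian bumps: the loop should smooth until ~2 peaks remain.
rng = np.random.default_rng(0)
data = np.concatenate([rng.normal(-2, 0.5, 5000), rng.normal(2, 0.5, 5000)])
hist, _ = np.histogram(data, bins=128)
print(detect_peaks(hist.astype(float), count=2))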
Example 3
Project: qgis-shapetools-plugin   Author: NationalSecurityAgency   File: geomath.py    (GNU General Public License v2.0)
def log1p(x):
    """log(1 + x) accurate for small x (missing from python 2.5.2)"""

    if sys.version_info > (2, 6):
      return math.log1p(x)

    y = 1 + x
    z = y - 1
    # Here's the explanation for this magic: y = 1 + z, exactly, and z
    # approx x, thus log(y)/z (which is nearly constant near z = 0) returns
    # a good approximation to the true log(1 + x)/x.  The multiplication x *
    # (log(y)/z) introduces little additional error.
    return x if z == 0 else x * math.log(y) / z 
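
A quick check of the fallback against math.log1p itself (assuming the function above is in scope as log1p):

import math

for x in (1e-15, 1e-8, 0.5):
    print(x, log1p(x), math.log1p(x))  # the two results should agree closely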
Example 4
Project: qgis-shapetools-plugin   Author: NationalSecurityAgency   File: geomath.py    (GNU General Public License v2.0)
def atanh(x):
    """atanh(x) (missing from python 2.5.2)"""

    if sys.version_info > (2, 6):
      return math.atanh(x)

    y = abs(x)                  # Enforce odd parity
    y = log1p(2 * y/(1 - y))/2   # the log1p fallback defined above (Example 3)
    return -y if x < 0 else y 
Example 5
Project: lopocs   Author: Oslandia   File: utils.py    (GNU Lesser General Public License v2.1)
def compute_scale_for_cesium(coordmin, coordmax):
    '''
    Cesium quantized positions need to be in uint16
    This function computes the best scale to apply to coordinates
    to fit the range [0, 65535]
    '''
    max_int = np.iinfo(np.uint16).max
    delta = abs(coordmax - coordmin)
    scale = 10 ** -(math.floor(math.log1p(max_int / delta) / math.log1p(10)))
    return scale 
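
Note that math.log1p(a) / math.log1p(10) only approximates log10(a), and only for large a. A hypothetical variant (not the project's code) that states the intent directly with math.log10:

import math
import numpy as np

def compute_scale_for_cesium_log10(coordmin, coordmax):
    # Smallest power-of-ten scale for which delta / scale still fits in uint16.
    max_int = np.iinfo(np.uint16).max  # 65535
    delta = abs(coordmax - coordmin)
    return 10 ** -math.floor(math.log10(max_int / delta))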
Example 6
Project: p2pool-bch   Author: amarian12   File: math.py    (license)
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1 
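
This is inverse-transform sampling of the geometric distribution: with U = 1 - random.random() uniform on (0, 1], floor(log(U) / log(1 - p)) + 1 is geometric with success probability p, and log1p(-x) computes log(1 - x) accurately when p is small. A sanity check, assuming geometric above is in scope (the parameters are illustrative):

import random

random.seed(0)
p = 0.25
n = 100_000
print(sum(geometric(p) for _ in range(n)) / n)  # should be close to 1/p == 4.0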
Example 7
Project: p2pool-unitus   Author: amarian12   File: math.py    (license)
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1 
Example 8
Project: zippy   Author: securesystemslab   File: test_math.py    (license)
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n))) 
Example 9
Project: p2pool-dgb-sha256   Author: ilsawa   File: math.py    (license)
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1 
Example 10
Project: oil   Author: oilshell   File: test_math.py    (license)
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n))) 
Example 11
Project: python2-tracer   Author: extremecoders-re   File: test_math.py    (license)
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n))) 
Example 12
Project: p2pool-ltc   Author: ilsawa   File: math.py    (license)
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1 
Example 13
Project: p2pool-bsty   Author: amarian12   File: math.py    (license)
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1 
Example 14
Project: web_ctp   Author: molebot   File: test_math.py    (license)
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n))) 
Example 15
Project: mandos   Author: carolinetomo   File: branch_support.py    (license)
def logsum(l):
    # Running log-sum-exp: log(sum(exp(v) for v in l)).
    x = l[0]
    for i in l[1:]:
        try:
            x += math.log1p(math.exp(i - x))
        except OverflowError:
            # exp(i - x) overflowed because i >> x; as in the original bare
            # except, the term is skipped, which undershoots in that case.
            pass
    return x
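
Each step applies the identity log(exp(x) + exp(i)) = x + log1p(exp(i - x)). When i is much larger than x, exp(i - x) overflows and the original leaves x unchanged, which undershoots. A hypothetical, more defensive variant adds the smaller term onto the larger, so exp(i - x) <= 1 and can never overflow:

import math

def logsum_stable(ls):
    # log(sum(exp(v) for v in ls)) without risk of overflow.
    x = ls[0]
    for i in ls[1:]:
        if i > x:
            x, i = i, x  # keep the running value as the larger term
        x += math.log1p(math.exp(i - x))
    return x

print(logsum_stable([math.log(2), math.log(3)]))  # ~= math.log(5)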
Example 16
Project: p2pool-cann   Author: ilsawa   File: math.py    (license)
def geometric(p):
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    return int(math.log1p(-random.random()) / math.log1p(-p)) + 1 
Example 17
Project: tensorflow-rl   Author: steveKapturowski   File: cts.py    (license)
def log_add(log_x, log_y):
    """Given log x and log y, returns log(x + y)."""
    # Swap variables so log_y is larger.
    if log_x > log_y:
        log_x, log_y = log_y, log_x

    # Use the log(1 + e^p) trick to compute this efficiently
    # If the difference is large enough, this is effectively log y.
    delta = log_y - log_x
    return math.log1p(math.exp(delta)) + log_x if delta <= 50.0 else log_y 
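
For example, log_add(math.log(2), math.log(3)) returns math.log(5) up to rounding; the delta > 50.0 shortcut is safe because exp(-50) is about 2e-22, negligible at double precision next to any log_y of ordinary magnitude. A quick check, assuming log_add above is in scope:

import math

print(log_add(math.log(2), math.log(3)))  # ~1.6094 == math.log(5)
print(log_add(math.log(1e-300), 0.0))     # delta > 50, so the result is just 0.0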
Example 18
Project: tichu-tournament   Author: aragos   File: calculator.py    (license)
def _log_rps(self, rps):
        if rps > 0:
            return math.log1p(rps)
        else: 
            return -math.log1p(-rps) 
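
This is a sign-symmetric log compression, f(x) = sign(x) * log1p(|x|): odd, monotonic, and well-behaved at zero. A standalone sketch under a hypothetical name (the original is a method, hence self):

import math

def signed_log1p(x):
    # Equivalent (up to the sign of zero) to: log1p(x) if x > 0 else -log1p(-x)
    return math.copysign(math.log1p(abs(x)), x)

print(signed_log1p(10.0), signed_log1p(-10.0))  # ~2.3979 and ~-2.3979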
Example 19
Project: hyper-engine   Author: maxim5   File: sugar.py    (license)
def log1p(node): return merge([node], math.log1p) 
Example 20
Project: pefile.pypy   Author: cloudtracer   File: test_math.py    (license)
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n))) 
Example 21
Project: ouroboros   Author: pybee   File: cmath.py    (license)
def atanh(x):
    _atanh_special = [
        [complex(-0.0, -pi/2), complex(-0.0, -pi/2), complex(-0.0, -pi/2),
            complex(-0.0, pi/2), complex(-0.0, pi/2), complex(-0.0, pi/2),
            complex(-0.0, float("nan"))],
        [complex(-0.0, -pi/2), None, None, None, None, complex(-0.0, pi/2),
            nan+nanj],
        [complex(-0.0, -pi/2), None, None, None, None, complex(-0.0, pi/2),
            complex(-0.0, float("nan"))],
        [-1j*pi/2, None, None, None, None, 1j*pi/2, nanj],
        [-1j*pi/2, None, None, None, None, 1j*pi/2, nan+nanj],
        [-1j*pi/2, -1j*pi/2, -1j*pi/2, 1j*pi/2, 1j*pi/2, 1j*pi/2,  nanj],
        [-1j*pi/2, nan+nanj, nan+nanj, nan+nanj, nan+nanj, 1j*pi/2, nan+nanj]
    ]

    z = _make_complex(x)

    if not isfinite(z):
        return _atanh_special[_special_type(z.real)][_special_type(z.imag)]

    if z.real < 0:
        return -atanh(-z)

    ay = abs(z.imag)
    if z.real > _SQRT_LARGE_DOUBLE or ay > _SQRT_LARGE_DOUBLE:
        hypot = math.hypot(z.real/2, z.imag/2)
        return complex(z.real/4/hypot/hypot, -math.copysign(pi/2, -z.imag))
    if z.real == 1 and ay < _SQRT_DBL_MIN:
        if ay == 0:
            raise ValueError
        return complex(-math.log(math.sqrt(ay)/math.sqrt(math.hypot(ay, 2))),
                       math.copysign(math.atan2(2, -ay)/2, z.imag))
    return complex(math.log1p(4*z.real/((1-z.real)*(1-z.real) + ay*ay))/4,
                   -math.atan2(-2*z.imag, (1-z.real)*(1+z.real) - ay*ay)/2) 
Example 22
Project: ouroboros   Author: pybee   File: test_math.py    (license)
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n))) 
Example 23
Project: ndk-python   Author: gittor   File: test_math.py    (license)
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        self.ftest('log1p(1/e -1)', math.log1p(1/math.e-1), -1)
        self.ftest('log1p(0)', math.log1p(0), 0)
        self.ftest('log1p(e-1)', math.log1p(math.e-1), 1)
        self.ftest('log1p(1)', math.log1p(1), math.log(2))
        self.assertEqual(math.log1p(INF), INF)
        self.assertRaises(ValueError, math.log1p, NINF)
        self.assertTrue(math.isnan(math.log1p(NAN)))
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), 62.383246250395075)
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n))) 
Example 24
Project: kbe_server   Author: xiaohaoppy   File: test_math.py    (license)
def testLog1p(self):
        self.assertRaises(TypeError, math.log1p)
        n= 2**90
        self.assertAlmostEqual(math.log1p(n), math.log1p(float(n))) 
Example 25
Project: paragraph2vec   Author: thunlp   File: tfidfmodel.py    (license)
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """
        Compute tf-idf by multiplying a local component (term frequency) with a
        global component (inverse document frequency), and normalizing
        the resulting documents to unit length. Formula for unnormalized weight
        of term `i` in document `j` in a corpus of D documents::

          weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i})

        or, more generally::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        so you can plug in your own custom `wlocal` and `wglobal` functions.

        Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...)
        and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the
        formula above.

        `normalize` dictates how the final transformed vectors will be normalized.
        `normalize=True` means set to unit length (default); `False` means don't
        normalize. You can also set `normalize` to your own function that accepts
        and returns a sparse vector.

        If `dictionary` is specified, it must be a `corpora.Dictionary` object
        and it will be used to directly construct the inverse document frequency
        mapping (then `corpus`, if specified, is ignored).
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal, self.wglobal = wlocal, wglobal
        self.num_docs, self.num_nnz, self.idfs = None, None, None
        if dictionary is not None:
            # user supplied a Dictionary object, which already contains all the
            # statistics we need to construct the IDF mapping. we can skip the
            # step that goes through the corpus (= an optimization).
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        else:
            # NOTE: everything is left uninitialized; presumably the model will
            # be initialized in some other way
            pass 
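
As a concrete instance of the docstring's formula, here is a hypothetical standalone sketch of the default wglobal (not the df2idf helper the constructor actually receives) combined with math.log1p as wlocal:

import math

def df2idf_sketch(docfreq, totaldocs):
    # log_2(totaldocs / docfreq), the default global weight described above
    return math.log(totaldocs / docfreq, 2)

# term with raw frequency 3 appearing in 10 of 1000 documents:
print(math.log1p(3) * df2idf_sketch(10, 1000))  # ~1.386 * 6.644 ~= 9.21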
Example 26
Project: topical_word_embeddings   Author: thunlp   File: tfidfmodel.py    (license)
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """
        Compute tf-idf by multiplying a local component (term frequency) with a
        global component (inverse document frequency), and normalizing
        the resulting documents to unit length. Formula for unnormalized weight
        of term `i` in document `j` in a corpus of D documents::

          weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i})

        or, more generally::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        so you can plug in your own custom `wlocal` and `wglobal` functions.

        Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...)
        and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the
        formula above.

        `normalize` dictates how the final transformed vectors will be normalized.
        `normalize=True` means set to unit length (default); `False` means don't
        normalize. You can also set `normalize` to your own function that accepts
        and returns a sparse vector.

        If `dictionary` is specified, it must be a `corpora.Dictionary` object
        and it will be used to directly construct the inverse document frequency
        mapping (then `corpus`, if specified, is ignored).
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal, self.wglobal = wlocal, wglobal
        self.num_docs, self.num_nnz, self.idfs = None, None, None
        if dictionary is not None:
            # user supplied a Dictionary object, which already contains all the
            # statistics we need to construct the IDF mapping. we can skip the
            # step that goes through the corpus (= an optimization).
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        else:
            # NOTE: everything is left uninitialized; presumably the model will
            # be initialized in some other way
            pass 
Example 27
Project: nonce2vec   Author: minimalparts   File: tfidfmodel.py    (license)
def __init__(self, corpus=None, id2word=None, dictionary=None,
                 wlocal=utils.identity, wglobal=df2idf, normalize=True):
        """
        Compute tf-idf by multiplying a local component (term frequency) with a
        global component (inverse document frequency), and normalizing
        the resulting documents to unit length. Formula for unnormalized weight
        of term `i` in document `j` in a corpus of D documents::

          weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i})

        or, more generally::

          weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D)

        so you can plug in your own custom `wlocal` and `wglobal` functions.

        Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...)
        and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the
        formula above.

        `normalize` dictates how the final transformed vectors will be normalized.
        `normalize=True` means set to unit length (default); `False` means don't
        normalize. You can also set `normalize` to your own function that accepts
        and returns a sparse vector.

        If `dictionary` is specified, it must be a `corpora.Dictionary` object
        and it will be used to directly construct the inverse document frequency
        mapping (then `corpus`, if specified, is ignored).
        """
        self.normalize = normalize
        self.id2word = id2word
        self.wlocal, self.wglobal = wlocal, wglobal
        self.num_docs, self.num_nnz, self.idfs = None, None, None
        if dictionary is not None:
            # user supplied a Dictionary object, which already contains all the
            # statistics we need to construct the IDF mapping. we can skip the
            # step that goes through the corpus (= an optimization).
            if corpus is not None:
                logger.warning("constructor received both corpus and explicit "
                               "inverse document frequencies; ignoring the corpus")
            self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
            self.dfs = dictionary.dfs.copy()
            self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
        elif corpus is not None:
            self.initialize(corpus)
        else:
            # NOTE: everything is left uninitialized; presumably the model will
            # be initialized in some other way
            pass 
Example 28
Project: ESPSS   Author: rcalinjageman   File: EPAIRED.py    (license)
def nct(tval, delta, df):
  #This is the non-central t-distruction function
  #This is a direct translation from visual-basic to Python of the nct implementation by Geoff Cumming for ESCI

  #Noncentral t function, after Excel worksheet calculation by Geoff Robinson
  #The three parameters are:
  #   tval -- our t value
  #   delta -- noncentrality parameter
  #   df -- degrees of freedom

  #Dim dfmo As Integer     'for df-1
  #Dim tosdf As Double     'for tval over square root of df
  #Dim sep As Double       'for separation of points
  #Dim sepa As Double      'for temp calc of points
  #Dim consta As Double    'for constant
  #Dim ctr As Integer      'loop counter

  dfmo = df - 1.0
  tosdf = tval / math.sqrt(df)
  sep = (math.sqrt(df) + 7) / 100
  consta = math.exp( (2 - df) * 0.5 * math.log1p(1) - math.lgamma(df/2) ) * sep /3   # math.log1p(1) == log(2)

  #now do first term in cross product summation, with df=0 special
  if dfmo > 0:
    nctvalue = stdnormdist(0 * tosdf - delta) * 0 ** dfmo * math.exp(-0.5* 0 * 0)
  else:  
    nctvalue = stdnormdist(0 * tosdf - delta) * math.exp(-0.5 * 0 * 0)

  #now add in second term, with multiplier 4
  nctvalue = nctvalue + 4 * stdnormdist(sep*tosdf - delta) * sep ** dfmo * math.exp(-0.5*sep*sep)    

  #now loop 49 times to add 98 terms
  for ctr in range(1, 50):    # VB's "For ctr = 1 To 49" is inclusive, so 49 iterations
      sepa = 2 * ctr * sep
      nctvalue = nctvalue + 2 * stdnormdist(sepa * tosdf - delta) * sepa ** dfmo * math.exp(-0.5 * sepa * sepa)
      sepa = sepa + sep
      nctvalue = nctvalue + 4 * stdnormdist(sepa * tosdf - delta) * sepa ** dfmo * math.exp(-0.5 * sepa * sepa)

  #add in last term
  sepa = sepa + sep
  nctvalue = nctvalue + stdnormdist(sepa * tosdf - delta) * sepa ** dfmo * math.exp(-0.5 * sepa * sepa)

  #multiply by the constant and we've finished
  nctvalue = nctvalue * consta
  
  return nctvalue 
Example 29
Project: SparseArray   Author: INGEOTEC   File: test_sparse_array.py    (license)
def test_one():
    from math import sin, cos, tan, asin, acos, atan
    from math import sinh, cosh, tanh, asinh, acosh, atanh
    from math import exp, expm1, log, log10, log1p, sqrt, lgamma
    from math import fabs, ceil, floor, trunc, erf, erfc
    from math import isfinite  # needed by compare() below
    try:
        from math import log2
    except ImportError:
        def log2(x):
            return log(x) / log(2)

    def wrapper(f, v):
        try:
            return f(v)
        except ValueError:
            if f == sqrt:
                return float('nan')
            if v >= 0:
                return float('inf')
            else:
                return -float('inf')

    def compare(a, b):
        if isfinite(a) and isfinite(b):
            return assert_almost_equals(a, b)
        return str(a) == str(b)

    for f in [sin, cos, tan, asin, acos, atan,
              sinh, cosh, tanh, asinh, acosh, atanh,
              exp, expm1, log, log2, log10, log1p, sqrt,
              lgamma,
              fabs, ceil, floor, trunc,
              erf, erfc]:
        for p in [0.5, 1]:
            a = random_lst(p=p)
            b = SparseArray.fromlist(a)
            c = getattr(b, f.__name__)()
            res = [wrapper(f, x) for x in a]
            index = [k for k, v in enumerate(res) if v != 0]
            res = [x for x in res if x != 0]
            print(f, p, c.non_zero, len(res))
            assert c.non_zero == len(res)
            [assert_almost_equals(v, w) for v, w in zip(index,
                                                        c.index)]
            [compare(v, w) for v, w in zip(res,
                                           c.data)]