Python scipy.special.betaln() Examples

The following are 29 code examples of scipy.special.betaln(), drawn from open-source projects; the originating project and source file are noted above each example. You may also want to check out all available functions/classes of the module scipy.special.
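betaln(a, b) computes the natural logarithm of the absolute value of the Beta function, ln|B(a, b)|, which stays finite where beta(a, b) itself overflows or underflows. A minimal sketch of why the log form is the one used throughout the examples below:

import numpy as np
from scipy.special import beta, betaln

# For moderate arguments the two agree: betaln(a, b) == log(|beta(a, b)|).
a, b = 3.0, 4.0
print(np.isclose(betaln(a, b), np.log(abs(beta(a, b)))))   # True

# For large arguments beta() underflows to exactly 0.0, while betaln()
# returns a finite value that can still be used in log-space arithmetic.
print(beta(1000, 1000))     # 0.0
print(betaln(1000, 1000))   # approximately -1388.5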
Example #1
Source File: combined_test.py    From WASP with Apache License 2.0
def AS_betabinom_loglike(logps, sigma, AS1, AS2, hetp, error):
    """Given parameter, returns log likelihood of allele-specific
    part of test. Note that some parts of equation have been
    canceled out"""
    a = math.exp(logps[0] + math.log(1/sigma**2 - 1))
    b = math.exp(logps[1] + math.log(1/sigma**2 - 1))

    part1 = 0
    part1 += betaln(AS1 + a, AS2 + b)
    part1 -= betaln(a, b)

    if hetp == 1:
        return part1

    e1 = math.log(error) * AS1 + math.log(1 - error) * AS2
    e2 = math.log(error) * AS2 + math.log(1 - error) * AS1
    if hetp == 0:
        return addlogs(e1, e2)

    return addlogs(math.log(hetp)+part1, math.log(1-hetp) + addlogs(e1, e2)) 
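The addlogs helper used above is not part of the excerpt; from how it is called, it is a two-argument log-sum-exp, addlogs(x, y) = log(exp(x) + exp(y)), evaluated stably. A minimal sketch consistent with that usage (the actual WASP helper may differ in detail):

import math

def addlogs(loga, logb):
    # Stable log(exp(loga) + exp(logb)): factor out the larger term so
    # the remaining exponential is <= 1 and cannot overflow.
    m = max(loga, logb)
    return m + math.log(math.exp(loga - m) + math.exp(logb - m))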
Example #2
Source File: fit_as_coefficients.py    From WASP with Apache License 2.0
def AS_betabinom_loglike(logps, sigma, AS1, AS2, hetp, error):
    # sigma outside (0, 1) is invalid; return a very large negative
    # log likelihood so the optimizer steers away from it.
    if sigma >= 1.0 or sigma <= 0.0:
        return -99999999999.0

    a = math.exp(logps[0] + math.log(1/sigma**2 - 1))
    b = math.exp(logps[1] + math.log(1/sigma**2 - 1))

    part1 = 0
    part1 += betaln(AS1 + a, AS2 + b)
    part1 -= betaln(a, b)

    if hetp == 1:
        return part1

    e1 = math.log(error) * AS1 + math.log(1 - error) * AS2
    e2 = math.log(error) * AS2 + math.log(1 - error) * AS1
    if hetp == 0:
        return addlogs(e1, e2)

    return addlogs(math.log(hetp)+part1, math.log(1-hetp) + addlogs(e1, e2)) 
Example #3
Source File: bernoulli.py    From cgpm with Apache License 2.0
def calc_logpdf_marginal(N, x_sum, alpha, beta):
        # Beta-Bernoulli marginal log likelihood: N trials, x_sum successes,
        # Beta(alpha, beta) prior on the success probability.
        return betaln(x_sum + alpha, N - x_sum + beta) - betaln(alpha, beta)
Example #4
Source File: _discrete_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _logpmf(self, k, M, n, N):
        # Hypergeometric log-pmf written entirely via betaln, using
        # log C(n, k) = -betaln(k + 1, n - k + 1) - log(n + 1) and
        # betaln(x + 1, 1) == -log(x + 1).
        tot, good = M, n
        bad = tot - good
        return betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1)\
            - betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1)\
            - betaln(tot+1, 1)
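This keeps every factorial in log space. As a sanity check, the expression can be compared against scipy.stats.hypergeom.logpmf; the parameter values below are illustrative:

import numpy as np
from scipy.special import betaln
from scipy.stats import hypergeom

M, n, N = 20, 7, 12                                  # population, marked, drawn
k = np.arange(max(0, N - (M - n)), min(n, N) + 1)    # support of the pmf

tot, good = M, n
bad = tot - good
logpmf = (betaln(good + 1, 1) + betaln(bad + 1, 1) + betaln(tot - N + 1, N + 1)
          - betaln(k + 1, good - k + 1) - betaln(N - k + 1, bad - N + k + 1)
          - betaln(tot + 1, 1))

print(np.allclose(logpmf, hypergeom.logpmf(k, M, n, N)))   # True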
Example #5
Source File: _continuous_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _logpdf(self, x, dfn, dfd):
        n = 1.0 * dfn
        m = 1.0 * dfd
        lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x)
        lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
        return lPx 
Example #6
Source File: _continuous_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _logpdf(self, x, a, b):
        return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b) 
Example #7
Source File: _continuous_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _logpdf(self, x, a, b):
        lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
        lPx -= sc.betaln(a, b)
        return lPx 
Example #8
Source File: stats.py    From trials with MIT License
def dominance(variations, control=None, sample_size=SAMPLE_SIZE):
    """Calculate P(A > B).

    Uses a modified Evan Miller closed formula if prior parameters are integers
    http://www.evanmiller.org/bayesian-ab-testing.html

    Falls back to plain Monte Carlo sampling from the scipy posteriors otherwise

    TODO: The modified formula for informative prior has to be proved correct
    """
    values = OrderedDict()
    a, others = _split(variations, control)

    def is_integer(x):
        try:
            return x.is_integer()
        except AttributeError:
            return int(x) == x

    for label, b in others.items():

        # If prior parameters are integers, use modified Evan Miller formula:
        if is_integer(a.prior_alpha) and is_integer(a.prior_beta) \
           and is_integer(b.prior_alpha) and is_integer(b.prior_beta):

            total = 0
            for i in range(b.alpha - b.prior_alpha):
                total += np.exp(spc.betaln(a.alpha + i, b.beta + a.beta)
                                - np.log(b.beta + i)
                                - spc.betaln(b.prior_alpha + i, b.beta)
                                - spc.betaln(a.alpha, a.beta))
            values[label] = total

        # Use MCMC otherwise
        else:
            a_samples = a.posterior.rvs(sample_size)
            b_samples = b.posterior.rvs(sample_size)
            values[label] = np.mean(b_samples > a_samples)

    return values 
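For reference, the unmodified Evan Miller closed formula for Beta posteriors is P(p_B > p_A) = sum over i from 0 to alpha_B - 1 of B(alpha_A + i, beta_A + beta_B) / ((beta_B + i) * B(1 + i, beta_B) * B(alpha_A, beta_A)), which the integer branch above adapts to informative priors. A sketch of the standard formula evaluated in log space via betaln, cross-checked by posterior sampling (the parameter values are illustrative):

import numpy as np
from scipy.special import betaln
from scipy.stats import beta as beta_dist

def prob_b_beats_a(alpha_a, beta_a, alpha_b, beta_b):
    # Closed-form P(p_B > p_A) for Beta posteriors with integer alpha_b,
    # with every Beta-function term kept in log space.
    i = np.arange(alpha_b)
    return np.sum(np.exp(betaln(alpha_a + i, beta_a + beta_b)
                         - np.log(beta_b + i)
                         - betaln(1 + i, beta_b)
                         - betaln(alpha_a, beta_a)))

closed_form = prob_b_beats_a(36, 114, 50, 100)
a_samples = beta_dist.rvs(36, 114, size=200000)
b_samples = beta_dist.rvs(50, 100, size=200000)
print(closed_form, np.mean(b_samples > a_samples))   # agree to ~2-3 decimals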
Example #9
Source File: combined_test.py    From WASP with Apache License 2.0
def betaln_asym(a, b):
    if b > a:
        a, b = b, a

    # Below this size scipy's betaln is accurate; use it directly.
    if a < 1e6:
        return betaln(a, b)

    # Asymptotic expansion for a >> b, from Stirling's series:
    # ln B(a, b) ~ ln Gamma(b) - b ln a + b(1-b)/(2a) + ...
    l = gammaln(b)

    l -= b*math.log(a)
    l += b*(1-b)/(2*a)
    l += b*(1-b)*(1-2*b)/(12*a*a)
    l += -((b*(1-b))**2)/(12*a**3)

    return l 
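The expansion follows from ln B(a, b) = ln Gamma(b) + ln Gamma(a) - ln Gamma(a + b) together with Stirling's series for ln Gamma(a) - ln Gamma(a + b) when a >> b. A quick check just below the 1e6 cutoff, where scipy's betaln is still the reference (values are illustrative):

import math
from scipy.special import betaln, gammaln

a, b = 9.9e5, 2.5
series = (gammaln(b) - b * math.log(a)
          + b * (1 - b) / (2 * a)
          + b * (1 - b) * (1 - 2 * b) / (12 * a * a)
          - (b * (1 - b)) ** 2 / (12 * a ** 3))
print(series, betaln(a, b))   # agree to many significant digits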
Example #10
Source File: fit_bnb_coefficients.py    From WASP with Apache License 2.0
def BNB_loglike(k, mean, n, sigma):
    # n = min(n, 10000)
    # Put variables in beta-negative-binomial form (n, a, b).
    mean = max(mean, 0.00001)
    p = np.float64(n) / (n + mean)
    logps = [math.log(n) - math.log(n + mean),
             math.log(mean) - math.log(n + mean)]

    # Negligible overdispersion: reduce to an ordinary negative binomial.
    if sigma < 0.00001:
        loglike = -betaln(n, k+1) - math.log(n+k) + n*logps[0] + k*logps[1]
        return loglike

    sigma = (1/sigma)**2

    a = p * sigma + 1
    b = (1-p) * sigma

    # Rising Pochhammer (n)_k = gamma(k+n)/gamma(n); equivalently
    # log C(k+n-1, k) = -log B(n, k) - log(k), computed with the
    # asymptotic helper to stay stable for large arguments.
    if k > 0:
        loglike = -lbeta_asymp(n, k) - math.log(k)
    else:
        loglike = 0

    # Add log B(a+n, b+k) and subtract log B(a, b).
    loglike += lbeta_asymp(a+n, b+k)
    loglike -= lbeta_asymp(a, b)

    return loglike 
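In log space the pieces assembled above form the beta-negative-binomial pmf, log C(k + n - 1, k) + log B(a + n, b + k) - log B(a, b), with every Beta term routed through the asymptotic lbeta_asymp helper so that very large n or k cannot overflow.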
Example #11
Source File: fit_bnb_coefficients.py    From WASP with Apache License 2.0
def lbeta_asymp(a, b):
    if b > a:
        a, b = b, a

    # Below this size scipy's betaln is accurate; use it directly.
    if a < 1e6:
        return betaln(a, b)

    # Asymptotic expansion for a >> b, from Stirling's series:
    # ln B(a, b) ~ ln Gamma(b) - b ln a + b(1-b)/(2a) + ...
    l = gammaln(b)

    l -= b*math.log(a)
    l += b*(1-b)/(2*a)
    l += b*(1-b)*(1-2*b)/(12*a*a)
    l += -((b*(1-b))**2)/(12*a**3)

    return l 
Example #12
Source File: beta_test.py    From deep_image_model with Apache License 2.0
def testBetaBetaKL(self):
    with self.test_session() as sess:
      for shape in [(10,), (4, 5)]:
        a1 = 6.0 * np.random.random(size=shape) + 1e-4
        b1 = 6.0 * np.random.random(size=shape) + 1e-4
        a2 = 6.0 * np.random.random(size=shape) + 1e-4
        b2 = 6.0 * np.random.random(size=shape) + 1e-4
        # Take inverse softplus of values to test BetaWithSoftplusAB
        a1_sp = np.log(np.exp(a1) - 1.0)
        b1_sp = np.log(np.exp(b1) - 1.0)
        a2_sp = np.log(np.exp(a2) - 1.0)
        b2_sp = np.log(np.exp(b2) - 1.0)

        d1 = tf.contrib.distributions.Beta(a=a1, b=b1)
        d2 = tf.contrib.distributions.Beta(a=a2, b=b2)
        d1_sp = tf.contrib.distributions.BetaWithSoftplusAB(a=a1_sp, b=b1_sp)
        d2_sp = tf.contrib.distributions.BetaWithSoftplusAB(a=a2_sp, b=b2_sp)

        kl_expected = (special.betaln(a2, b2) - special.betaln(a1, b1)
                     + (a1 - a2)*special.digamma(a1)
                     + (b1 - b2)*special.digamma(b1)
                     + (a2 - a1 + b2 - b1)*special.digamma(a1 + b1))

        for dist1 in [d1, d1_sp]:
          for dist2 in [d2, d2_sp]:
            kl = tf.contrib.distributions.kl(dist1, dist2)
            kl_val = sess.run(kl)
            self.assertEqual(kl.get_shape(), shape)
            self.assertAllClose(kl_val, kl_expected)
        
        # Make sure KL(d1||d1) is 0
        kl_same = sess.run(tf.contrib.distributions.kl(d1, d1))
        self.assertAllClose(kl_same, np.zeros_like(kl_expected)) 
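The kl_expected expression is the standard closed-form KL divergence between two Beta distributions: KL(Beta(a1, b1) || Beta(a2, b2)) = ln B(a2, b2) - ln B(a1, b1) + (a1 - a2) psi(a1) + (b1 - b2) psi(b1) + (a2 - a1 + b2 - b1) psi(a1 + b1), where psi is the digamma function. It can be sanity-checked against direct numerical integration; a sketch with illustrative parameters:

import numpy as np
from scipy import special
from scipy.integrate import quad
from scipy.stats import beta

def beta_kl(a1, b1, a2, b2):
    # Closed-form KL(Beta(a1, b1) || Beta(a2, b2)).
    return (special.betaln(a2, b2) - special.betaln(a1, b1)
            + (a1 - a2) * special.digamma(a1)
            + (b1 - b2) * special.digamma(b1)
            + (a2 - a1 + b2 - b1) * special.digamma(a1 + b1))

a1, b1, a2, b2 = 2.0, 5.0, 3.0, 3.0
integrand = lambda x: beta.pdf(x, a1, b1) * (beta.logpdf(x, a1, b1)
                                             - beta.logpdf(x, a2, b2))
numeric, _ = quad(integrand, 0.0, 1.0)
print(np.isclose(beta_kl(a1, b1, a2, b2), numeric))   # True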
Example #13
Source File: test_mpmath.py    From GraphicDesignPatternByPython with MIT License
def test_beta():
    np.random.seed(1234)

    b = np.r_[np.logspace(-200, 200, 4),
              np.logspace(-10, 10, 4),
              np.logspace(-1, 1, 4),
              np.arange(-10, 11, 1),
              np.arange(-10, 11, 1) + 0.5,
              -1, -2.3, -3, -100.3, -10003.4]
    a = b

    ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T

    old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
    try:
        mpmath.mp.dps = 400

        assert_func_equal(sc.beta,
                          lambda a, b: float(mpmath.beta(a, b)),
                          ab,
                          vectorized=False,
                          rtol=1e-10,
                          ignore_inf_sign=True)

        assert_func_equal(
            sc.betaln,
            lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))),
            ab,
            vectorized=False,
            rtol=1e-10)
    finally:
        mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec


# ------------------------------------------------------------------------------
# loggamma
# ------------------------------------------------------------------------------ 
Example #14
Source File: test_basic.py    From GraphicDesignPatternByPython with MIT License
def test_betaln(self):
        assert_equal(cephes.betaln(1, 1), 0.0)
        assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
        assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
                        rtol=1e-14, atol=0)
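Both special values follow directly from B(a, b) = Gamma(a) * Gamma(b) / Gamma(a + b): B(1, 1) = 1, so its log is 0, and as b -> 0 with a fixed away from the poles of Gamma, Gamma(a) / Gamma(a + b) -> 1, so betaln(a, b) approaches gammaln(b).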
Example #15
Source File: _discrete_distns.py    From GraphicDesignPatternByPython with MIT License
def _logpmf(self, k, M, n, N):
        tot, good = M, n
        bad = tot - good
        return betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1)\
            - betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1)\
            - betaln(tot+1, 1)
Example #16
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License
def _logpdf(self, x, dfn, dfd):
        n = 1.0 * dfn
        m = 1.0 * dfd
        lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x)
        lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
        return lPx 
Example #17
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License
def _logpdf(self, x, a, b):
        return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b) 
Example #18
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License
def _logpdf(self, x, a, b):
        lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
        lPx -= sc.betaln(a, b)
        return lPx 
Example #19
Source File: test_mpmath.py    From Computable with MIT License
def test_beta():
    np.random.seed(1234)

    b = np.r_[np.logspace(-200, 200, 4),
              np.logspace(-10, 10, 4),
              np.logspace(-1, 1, 4),
              np.arange(-10, 11, 1),
              np.arange(-10, 11, 1) + 0.5,
              -1, -2.3, -3, -100.3, -10003.4]
    a = b

    ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T

    old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
    try:
        mpmath.mp.dps = 400

        assert_func_equal(sc.beta,
                          lambda a, b: float(mpmath.beta(a, b)),
                          ab,
                          vectorized=False,
                          rtol=1e-10,
                          ignore_inf_sign=True)

        assert_func_equal(
            sc.betaln,
            lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))),
            ab,
            vectorized=False,
            rtol=1e-10)
    finally:
        mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec


#------------------------------------------------------------------------------
# Machinery for systematic tests
#------------------------------------------------------------------------------ 
Example #20
Source File: test_basic.py    From Computable with MIT License
def test_betaln(self):
        betln = special.betaln(2, 4)
        bet = log(abs(special.beta(2, 4)))
        assert_almost_equal(betln, bet, 8)
Example #21
Source File: test_basic.py    From Computable with MIT License
def test_betaln(self):
        assert_equal(cephes.betaln(1, 1), 0.0)
Example #22
Source File: _discrete_distns.py    From lambda-packs with MIT License
def _logpmf(self, k, M, n, N):
        tot, good = M, n
        bad = tot - good
        return betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1)\
            - betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1)\
            - betaln(tot+1, 1)
Example #23
Source File: _continuous_distns.py    From lambda-packs with MIT License
def _logpdf(self, x, dfn, dfd):
        n = 1.0 * dfn
        m = 1.0 * dfd
        lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x)
        lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
        return lPx 
Example #24
Source File: _continuous_distns.py    From lambda-packs with MIT License
def _logpdf(self, x, a, b):
        return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b) 
Example #25
Source File: _continuous_distns.py    From lambda-packs with MIT License
def _logpdf(self, x, a, b):
        lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
        lPx -= sc.betaln(a, b)
        return lPx 
Example #26
Source File: geometric.py    From cgpm with Apache License 2.0
def calc_log_Z(a, b):
        # Log normalizing constant of a Beta(a, b) distribution.
        return betaln(a, b)
Example #27
Source File: bayesian_mixture.py    From Mastering-Elasticsearch-7.0 with MIT License
def _compute_lower_bound(self, log_resp, log_prob_norm):
        """Estimate the lower bound of the model.

        The lower bound on the likelihood (of the training data with respect
        to the model) is used to detect the convergence and has to increase
        at each iteration.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        log_resp : array, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities)
            of each sample in X.

        log_prob_norm : float
            Logarithm of the probability of each sample in X.

        Returns
        -------
        lower_bound : float
        """
        # Contrary to the original formula, we have done some simplification
        # and removed all the constant terms.
        n_features, = self.mean_prior_.shape

        # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)`
        # because the precision matrix is normalized.
        log_det_precisions_chol = (_compute_log_det_cholesky(
            self.precisions_cholesky_, self.covariance_type, n_features) -
            .5 * n_features * np.log(self.degrees_of_freedom_))

        if self.covariance_type == 'tied':
            log_wishart = self.n_components * np.float64(_log_wishart_norm(
                self.degrees_of_freedom_, log_det_precisions_chol, n_features))
        else:
            log_wishart = np.sum(_log_wishart_norm(
                self.degrees_of_freedom_, log_det_precisions_chol, n_features))

        if self.weight_concentration_prior_type == 'dirichlet_process':
            log_norm_weight = -np.sum(betaln(self.weight_concentration_[0],
                                             self.weight_concentration_[1]))
        else:
            log_norm_weight = _log_dirichlet_norm(self.weight_concentration_)

        return (-np.sum(np.exp(log_resp) * log_resp) -
                log_wishart - log_norm_weight -
                0.5 * n_features * np.sum(np.log(self.mean_precision_))) 
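In the 'dirichlet_process' branch, weight_concentration_ holds the two parameter arrays of the stick-breaking Beta posteriors over the mixture weights, so -np.sum(betaln(...)) is the summed log normalization constant of those Beta factors entering the lower bound.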
Example #28
Source File: bayesian_mixture.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _compute_lower_bound(self, log_resp, log_prob_norm):
        """Estimate the lower bound of the model.

        The lower bound on the likelihood (of the training data with respect
        to the model) is used to detect the convergence and has to increase
        at each iteration.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        log_resp : array, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities)
            of each sample in X.

        log_prob_norm : float
            Logarithm of the probability of each sample in X.

        Returns
        -------
        lower_bound : float
        """
        # Contrary to the original formula, we have done some simplification
        # and removed all the constant terms.
        n_features, = self.mean_prior_.shape

        # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)`
        # because the precision matrix is normalized.
        log_det_precisions_chol = (_compute_log_det_cholesky(
            self.precisions_cholesky_, self.covariance_type, n_features) -
            .5 * n_features * np.log(self.degrees_of_freedom_))

        if self.covariance_type == 'tied':
            log_wishart = self.n_components * np.float64(_log_wishart_norm(
                self.degrees_of_freedom_, log_det_precisions_chol, n_features))
        else:
            log_wishart = np.sum(_log_wishart_norm(
                self.degrees_of_freedom_, log_det_precisions_chol, n_features))

        if self.weight_concentration_prior_type == 'dirichlet_process':
            log_norm_weight = -np.sum(betaln(self.weight_concentration_[0],
                                             self.weight_concentration_[1]))
        else:
            log_norm_weight = _log_dirichlet_norm(self.weight_concentration_)

        return (-np.sum(np.exp(log_resp) * log_resp) -
                log_wishart - log_norm_weight -
                0.5 * n_features * np.sum(np.log(self.mean_precision_))) 
Example #29
Source File: bayesian_mixture.py    From twitter-stock-recommendation with MIT License
def _compute_lower_bound(self, log_resp, log_prob_norm):
        """Estimate the lower bound of the model.

        The lower bound on the likelihood (of the training data with respect
        to the model) is used to detect the convergence and has to increase
        at each iteration.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        log_resp : array, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities)
            of each sample in X.

        log_prob_norm : float
            Logarithm of the probability of each sample in X.

        Returns
        -------
        lower_bound : float
        """
        # Contrary to the original formula, we have done some simplification
        # and removed all the constant terms.
        n_features, = self.mean_prior_.shape

        # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)`
        # because the precision matrix is normalized.
        log_det_precisions_chol = (_compute_log_det_cholesky(
            self.precisions_cholesky_, self.covariance_type, n_features) -
            .5 * n_features * np.log(self.degrees_of_freedom_))

        if self.covariance_type == 'tied':
            log_wishart = self.n_components * np.float64(_log_wishart_norm(
                self.degrees_of_freedom_, log_det_precisions_chol, n_features))
        else:
            log_wishart = np.sum(_log_wishart_norm(
                self.degrees_of_freedom_, log_det_precisions_chol, n_features))

        if self.weight_concentration_prior_type == 'dirichlet_process':
            log_norm_weight = -np.sum(betaln(self.weight_concentration_[0],
                                             self.weight_concentration_[1]))
        else:
            log_norm_weight = _log_dirichlet_norm(self.weight_concentration_)

        return (-np.sum(np.exp(log_resp) * log_resp) -
                log_wishart - log_norm_weight -
                0.5 * n_features * np.sum(np.log(self.mean_precision_)))