Python scipy.special.psi() Examples

The following are 28 code examples of scipy.special.psi(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.special, or try the search function.
Example #1
Source File: hdpmodel.py    From topical_word_embeddings with MIT License
def update_expectations(self):
        """
        Since we're doing lazy updates on lambda, at any given moment
        the current state of lambda may not be accurate. This function
        updates all of the elements of lambda and Elogbeta
        so that if (for example) we want to print out the
        topics we've learned we'll get the correct behavior.
        """
        for w in range(self.m_W):
            self.m_lambda[:, w] *= np.exp(self.m_r[-1] -
                                          self.m_r[self.m_timestamp[w]])
        self.m_Elogbeta = sp.psi(self.m_eta + self.m_lambda) - \
            sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])

        self.m_timestamp[:] = self.m_updatect
        self.m_status_up_to_date = True 
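
The sp.psi calls above apply the standard Dirichlet identity: for theta ~ Dir(alpha), E[log theta_k] = psi(alpha_k) - psi(sum_j alpha_j), computed row-wise over the topic-word matrix. A minimal sketch (my addition, not part of the original project) verifying the identity by Monte Carlo:

import numpy as np
from scipy.special import psi

rng = np.random.default_rng(0)
alpha = np.array([0.5, 2.0, 3.5])
analytic = psi(alpha) - psi(alpha.sum())
monte_carlo = np.log(rng.dirichlet(alpha, size=200000)).mean(axis=0)
print(analytic)      # [-3.670 -1.283 -0.603] (rounded)
print(monte_carlo)   # agrees to Monte Carlo accuracy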
Example #2
Source File: _multivariate.py    From lambda-packs with MIT License
def entropy(self, alpha):
        """
        Compute the differential entropy of the Dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Dirichlet distribution

        """

        alpha = _dirichlet_check_parameters(alpha)

        alpha0 = np.sum(alpha)
        lnB = _lnB(alpha)
        K = alpha.shape[0]

        out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
            (alpha - 1) * scipy.special.psi(alpha))
        return _squeeze_output(out) 
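
scipy.stats.dirichlet exposes the same quantity through its public API, which gives a quick cross-check of the closed form above (my addition, not from the original file):

import numpy as np
from scipy.stats import dirichlet

# For the flat Dirichlet (all alpha_i = 1) the entropy reduces to ln B(alpha);
# on the 2-simplex that is ln(Gamma(1)**3 / Gamma(3)) = -ln 2.
alpha = np.array([1.0, 1.0, 1.0])
print(dirichlet.entropy(alpha))  # -0.6931...
print(-np.log(2.0))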
Example #3
Source File: entropy.py    From mdentropy with MIT License
def knn_entropy(*args, k=None):
    """Entropy calculation

    Parameters
    ----------
    args : numpy.ndarray, shape = (n_samples, ) or (n_samples, n_dims)
        Data of which to calculate entropy. Each array must have the same
        number of samples.
    k : int
        Number of nearest neighbors (defaults to 1% of n_samples, at least 3).

    Returns
    -------
    entropy : float
    """
    data = vstack((args)).T
    n_samples, n_dims = data.shape
    k = k if k else max(3, int(n_samples * 0.01))

    nneighbor = nearest_distances(data, k=k)
    const = psi(n_samples) - psi(k) + n_dims * log(2.)

    return (const + n_dims * log(nneighbor).mean()) 
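
nearest_distances is an mdentropy helper not shown here. A hypothetical stand-in (my sketch; the n_dims * log(2) term in the constant suggests the max norm) could be built on scipy's cKDTree:

import numpy as np
from scipy.spatial import cKDTree

def nearest_distances(data, k=3):
    # Distance from each point to its k-th nearest neighbour under the
    # Chebyshev (max) norm, excluding the point itself.
    tree = cKDTree(data)
    d, _ = tree.query(data, k=k + 1, p=np.inf)
    return d[:, -1]

With that helper in place, knn_entropy behaves like a Kozachenko-Leonenko style estimator.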
Example #4
Source File: test_basic.py    From Computable with MIT License
def test_polygamma(self):
        poly2 = special.polygamma(2,1)
        poly3 = special.polygamma(3,1)
        assert_almost_equal(poly2,-2.4041138063,10)
        assert_almost_equal(poly3,6.4939394023,10)

        # Test polygamma(0, x) == psi(x)
        x = [2, 3, 1.1e14]
        assert_almost_equal(special.polygamma(0, x), special.psi(x))

        # Test broadcasting
        n = [0, 1, 2]
        x = [0.5, 1.5, 2.5]
        expected = [-1.9635100260214238, 0.93480220054467933,
                    -0.23620405164172739]
        assert_almost_equal(special.polygamma(n, x), expected)
        expected = np.row_stack([expected]*2)
        assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
                            expected)
        assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
                            expected) 
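
Besides polygamma(0, x) == psi(x), the digamma recurrence psi(x + 1) = psi(x) + 1/x gives another easy spot check (a small illustration, my addition):

import numpy as np
from scipy.special import psi

x = np.array([0.5, 1.5, 2.5])
print(np.allclose(psi(x + 1.0), psi(x) + 1.0 / x))  # True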
Example #5
Source File: network.py    From pyhawkes with MIT License
def expected_log_p(self):
        """
        Compute the expected log probability of a connection, averaging over c
        :return:
        """
        if self.fixed:
            E_ln_p = np.log(self.P)
        else:
            E_ln_p = np.zeros((self.K, self.K))
            for c1 in range(self.C):
                for c2 in range(self.C):
                    # Get the KxK matrix of joint class assignment probabilities
                    pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                    # Get the probability of a connection for this pair of classes
                    E_ln_p += pc1c2 * (psi(self.mf_tau1[c1,c2])
                                       - psi(self.mf_tau0[c1,c2] + self.mf_tau1[c1,c2]))

        if not self.allow_self_connections:
            np.fill_diagonal(E_ln_p, -np.inf)

        return E_ln_p 
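
The psi difference above is the standard Beta-distribution result E[ln p] = psi(tau1) - psi(tau1 + tau0) for p ~ Beta(tau1, tau0). A quick Monte Carlo check (my addition, not from pyhawkes):

import numpy as np
from scipy.special import psi

rng = np.random.default_rng(1)
tau1, tau0 = 3.0, 5.0
samples = rng.beta(tau1, tau0, size=100000)
print(np.log(samples).mean())        # ~ -1.093
print(psi(tau1) - psi(tau1 + tau0))  # -1.0929...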
Example #6
Source File: network.py    From pyhawkes with MIT License
def expected_log_notp(self):
        """
        Compute the expected log probability of NO connection, averaging over c
        :return:
        """
        if self.fixed:
            E_ln_notp = np.log(1.0 - self.P)
        else:
            E_ln_notp = np.zeros((self.K, self.K))
            for c1 in range(self.C):
                for c2 in range(self.C):
                    # Get the KxK matrix of joint class assignment probabilities
                    pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                    # Get the probability of a connection for this pair of classes
                    E_ln_notp += pc1c2 * (psi(self.mf_tau0[c1,c2])
                                          - psi(self.mf_tau0[c1,c2] + self.mf_tau1[c1,c2]))

        if not self.allow_self_connections:
            np.fill_diagonal(E_ln_notp, 0.0)

        return E_ln_notp 
Example #7
Source File: _multivariate.py    From GraphicDesignPatternByPython with MIT License
def entropy(self, alpha):
        """
        Compute the differential entropy of the Dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Dirichlet distribution

        """

        alpha = _dirichlet_check_parameters(alpha)

        alpha0 = np.sum(alpha)
        lnB = _lnB(alpha)
        K = alpha.shape[0]

        out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
            (alpha - 1) * scipy.special.psi(alpha))
        return _squeeze_output(out) 
Example #8
Source File: test_basic.py    From GraphicDesignPatternByPython with MIT License
def test_polygamma(self):
        poly2 = special.polygamma(2,1)
        poly3 = special.polygamma(3,1)
        assert_almost_equal(poly2,-2.4041138063,10)
        assert_almost_equal(poly3,6.4939394023,10)

        # Test polygamma(0, x) == psi(x)
        x = [2, 3, 1.1e14]
        assert_almost_equal(special.polygamma(0, x), special.psi(x))

        # Test broadcasting
        n = [0, 1, 2]
        x = [0.5, 1.5, 2.5]
        expected = [-1.9635100260214238, 0.93480220054467933,
                    -0.23620405164172739]
        assert_almost_equal(special.polygamma(n, x), expected)
        expected = np.row_stack([expected]*2)
        assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
                            expected)
        assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
                            expected) 
Example #9
Source File: sigTests.py    From PePr with GNU General Public License v3.0
def weighted_log_likelihood(v_hat, m, n, reads, diff_test):
    '''Return the derivative of the log likelihood over the current and
       adjacent windows, combined using a triangular weight.'''
    equation = 0
    n_window = len(reads)
    baseline = numpy.mean(reads[int(n_window/2),0:m])
    for idx in range(n_window):
        x = reads[idx, 0:m]  # x and m refer to the test sample
        y = reads[idx, m:(m+n)]  # y and n refer to the control sample
        weight_x = numpy.mean(x)/baseline
        weight_y = numpy.mean(y)/baseline
        if n == 1: 
            weight_y = 0
        log_likelihood = (-(m*weight_x+n*weight_y)*psi(v_hat) +
                numpy.sum(psi(v_hat+x))*weight_x + 
                numpy.sum(psi(v_hat+y))*weight_y + 
                m*numpy.log(v_hat/(v_hat+numpy.mean(x)))*weight_x + 
                n*numpy.log(v_hat/(v_hat+numpy.mean(y)))*weight_y)
        equation = (equation + 
                log_likelihood*(1-(abs(float(n_window)/2-idx-0.5)/(float(n_window)/2+1))))
    return equation 
Example #10
Source File: _multivariate.py    From Splunking-Crime with GNU Affero General Public License v3.0
def entropy(self, alpha):
        """
        Compute the differential entropy of the Dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Dirichlet distribution

        """

        alpha = _dirichlet_check_parameters(alpha)

        alpha0 = np.sum(alpha)
        lnB = _lnB(alpha)
        K = alpha.shape[0]

        out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
            (alpha - 1) * scipy.special.psi(alpha))
        return _squeeze_output(out) 
Example #11
Source File: features.py    From CausalDiscoveryToolbox with MIT License
def normalized_entropy(x, tx, m=2):
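    """Spacing-based entropy estimate of x (normalized according to its type
    tx), with digamma (psi) bias corrections."""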
    x = normalize(x, tx)
    try:
        cx = Counter(x)
    except TypeError:
        cx = Counter(x.flat)

    if len(cx) < 2:
        return 0
    xk = np.array(list(cx.keys()), dtype=float)
    xk.sort()
    delta = (xk[1:] - xk[:-1]) / m
    counter = np.array([cx[i] for i in xk], dtype=float)
    hx = np.sum(counter[1:] * np.log(delta / counter[1:])) / len(x)
    hx += (psi(len(delta)) - np.log(len(delta)))
    hx += np.log(len(x))
    hx -= (psi(m) - np.log(m))
    return hx 
Example #12
Source File: network.py    From pyhawkes with MIT License
def expected_log_v(self):
        """
        Compute the expected log scale of a connection, averaging over c
        :return:
        """
        if self.fixed:
            return np.log(self.V)

        E_log_v = np.zeros((self.K, self.K))
        for c1 in range(self.C):
            for c2 in range(self.C):
                # Get the KxK matrix of joint class assignment probabilities
                pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                # Get the probability of a connection for this pair of classes
                E_log_v += pc1c2 * (psi(self.mf_alpha[c1,c2])
                                    - np.log(self.mf_beta[c1,c2]))
        return E_log_v 
Example #13
Source File: features.py    From CausalDiscoveryToolbox with MIT License
def uniform_divergence(x, tx, m=2):
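    """Spacing-based estimate of the divergence of x (normalized according to
    its type tx) from a uniform distribution."""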
    x = normalize(x, tx)
    try:
        cx = Counter(x)
    except TypeError:
        cx = Counter(x.flat)
    xk = np.array(list(cx.keys()), dtype=float)
    xk.sort()
    delta = np.zeros(len(xk))
    if len(xk) > 1:
        delta[0] = xk[1] - xk[0]
        delta[1:-1] = (xk[m:] - xk[:-m]) / m
        delta[-1] = xk[-1] - xk[-2]
    else:
        delta = np.array(np.sqrt(12))
    counter = np.array([cx[i] for i in xk], dtype=float)
    delta = delta / np.sum(delta)
    hx = np.sum(counter * np.log(counter / delta)) / len(x)
    hx -= np.log(len(x))
    hx += (psi(m) - np.log(m))
    return hx 
Example #14
Source File: entropy.py    From mdentropy with MIT License
def grassberger(counts):
    """Entropy calculation using Grassberger correction.
    doi:10.1016/0375-9601(88)90193-4

    Parameters
    ----------
    counts : list
        bin counts

    Returns
    -------
    entropy : float
    """
    n_samples = npsum(counts)
    return npsum(counts * (log(n_samples) -
                           nan_to_num(psi(counts)) -
                           ((-1.) ** counts / (counts + 1.)))) / n_samples 
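
A usage sketch (my addition) comparing the naive plug-in entropy with the Grassberger-corrected estimate on a small count vector, where the correction matters most; npsum, log and nan_to_num are the numpy names mdentropy imports:

import numpy as np
from numpy import log, nan_to_num
from numpy import sum as npsum
from scipy.special import psi

counts = np.array([1, 2, 3, 4])
p = counts / counts.sum()
print(-(p * np.log(p)).sum())  # naive plug-in estimate
print(grassberger(counts))     # bias-corrected estimate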
Example #15
Source File: hdp.py    From online-hdp with GNU General Public License v2.0
def expect_log_sticks(sticks):
    dig_sum = sp.psi(np.sum(sticks, 0))
    ElogW = sp.psi(sticks[0]) - dig_sum
    Elog1_W = sp.psi(sticks[1]) - dig_sum

    n = len(sticks[0]) + 1
    Elogsticks = np.zeros(n)
    Elogsticks[0:n-1] = ElogW
    Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
    return Elogsticks 
Example #16
Source File: hdp.py    From online-hdp with GNU General Public License v2.0
def dirichlet_expectation(alpha):
    if (len(alpha.shape) == 1):
        return(sp.psi(alpha) - sp.psi(np.sum(alpha)))
    return(sp.psi(alpha) - sp.psi(np.sum(alpha, 1))[:, np.newaxis]) 
Example #17
Source File: onlinehdp.py    From online-hdp with GNU General Public License v2.0
def update_expectations(self):
        """
        Since we're doing lazy updates on lambda, at any given moment
        the current state of lambda may not be accurate. This function
        updates all of the elements of lambda and Elogbeta so that if (for
        example) we want to print out the topics we've learned we'll get the
        correct behavior.
        """
        for w in range(self.m_W):
            self.m_lambda[:, w] *= np.exp(self.m_r[-1] - 
                                          self.m_r[self.m_timestamp[w]])
        self.m_Elogbeta = sp.psi(self.m_eta + self.m_lambda) - \
            sp.psi(self.m_W*self.m_eta + self.m_lambda_sum[:, np.newaxis])
        self.m_timestamp[:] = self.m_updatect
        self.m_status_up_to_date = True 
Example #18
Source File: _continuous_distns.py    From lambda-packs with MIT License
def _beta_mle_ab(theta, n, s1, s2):
    # Zeros of this function are critical points of
    # the maximum likelihood function.  Solving this system
    # for theta (which contains a and b) gives the MLE for a and b
    # given `n`, `s1` and `s2`.  `s1` is the sum of the logs of the data,
    # and `s2` is the sum of the logs of 1 - data.  `n` is the number
    # of data points.
    a, b = theta
    psiab = sc.psi(a + b)
    func = [s1 - n * (-psiab + sc.psi(a)),
            s2 - n * (-psiab + sc.psi(b))]
    return func 
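
A usage sketch (my addition; in the original module sc is scipy.special): the Beta MLE follows by root-finding on these score equations, for example with scipy.optimize.fsolve.

import numpy as np
import scipy.special as sc
from scipy.optimize import fsolve

rng = np.random.default_rng(2)
data = rng.beta(2.0, 5.0, size=5000)
n, s1, s2 = data.size, np.log(data).sum(), np.log1p(-data).sum()
a_hat, b_hat = fsolve(_beta_mle_ab, x0=[1.0, 1.0], args=(n, s1, s2))
print(a_hat, b_hat)  # close to the true (2, 5) for a sample this large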
Example #19
Source File: test_basic.py    From GraphicDesignPatternByPython with MIT License
def test_psi(self):
        cephes.psi(1) 
Example #20
Source File: hdpmodel.py    From topical_word_embeddings with MIT License
def expect_log_sticks(sticks):
    """
    For the stick-breaking HDP, return E[log(sticks)].
    """
    dig_sum = sp.psi(np.sum(sticks, 0))
    ElogW = sp.psi(sticks[0]) - dig_sum
    Elog1_W = sp.psi(sticks[1]) - dig_sum

    n = len(sticks[0]) + 1
    Elogsticks = np.zeros(n)
    Elogsticks[0: n - 1] = ElogW
    Elogsticks[1:] = Elogsticks[1:] + np.cumsum(Elog1_W)
    return Elogsticks 
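
Usage sketch (my addition): sticks is a 2 x (K-1) array of Beta variational parameters, row 0 holding the a parameters and row 1 the b parameters. With a truncation of K = 4 and uniform Beta(1, 1) posteriors:

import numpy as np
import scipy.special as sp

sticks = np.ones((2, 3))
print(expect_log_sticks(sticks))  # [-1. -2. -3. -3.]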
Example #21
Source File: onlinehdp.py    From online-hdp with GNU General Public License v2.0
def dirichlet_expectation(alpha):
    """
    For a vector theta ~ Dir(alpha), computes E[log(theta)] given alpha.
    """
    if (len(alpha.shape) == 1):
        return(sp.psi(alpha) - sp.psi(np.sum(alpha)))
    return(sp.psi(alpha) - sp.psi(np.sum(alpha, 1))[:, np.newaxis]) 
Example #22
Source File: hdp.py    From online-hdp with GNU General Public License v2.0
def em(self, c, var_converge, fresh):
        ss = suff_stats(self.m_T, self.m_size_vocab)
        ss.set_zero()
        
        # prepare all needs for a single doc
        Elogbeta = dirichlet_expectation(self.m_beta) # the topics
        Elogsticks_1st = expect_log_sticks(self.m_var_sticks) # global sticks
        likelihood = 0.0
        for doc in c.docs:
            likelihood += self.doc_e_step(doc, ss, Elogbeta, Elogsticks_1st, var_converge, fresh=fresh)
        
        # collect the likelihood from other parts
        # the prior for gamma
        if self.m_hdp_hyperparam.m_hyper_opt:
            log_gamma = sp.psi(self.m_var_gamma_a) -  np.log(self.m_var_gamma_b)
            likelihood += self.m_hdp_hyperparam.m_gamma_a * log(self.m_hdp_hyperparam.m_gamma_b) \
                    - sp.gammaln(self.m_hdp_hyperparam.m_gamma_a) 

            likelihood -= self.m_var_gamma_a * log(self.m_var_gamma_b) \
                    - sp.gammaln(self.m_var_gamma_a) 

            likelihood += (self.m_hdp_hyperparam.m_gamma_a - self.m_var_gamma_a) * log_gamma \
                    - (self.m_hdp_hyperparam.m_gamma_b - self.m_var_gamma_b) * self.m_gamma
        else:
            log_gamma = np.log(self.m_gamma)

        # the W/sticks part
        likelihood += (self.m_T-1) * log_gamma
        dig_sum = sp.psi(np.sum(self.m_var_sticks, 0))
        likelihood += np.sum((np.array([1.0, self.m_gamma])[:,np.newaxis] - self.m_var_sticks) * (sp.psi(self.m_var_sticks) - dig_sum))
        likelihood -= np.sum(sp.gammaln(np.sum(self.m_var_sticks, 0))) - np.sum(sp.gammaln(self.m_var_sticks))
        
        # the beta part    
        likelihood += np.sum((self.m_eta - self.m_beta) * Elogbeta)
        likelihood += np.sum(sp.gammaln(self.m_beta) - sp.gammaln(self.m_eta))
        likelihood += np.sum(sp.gammaln(self.m_eta*self.m_size_vocab) - sp.gammaln(np.sum(self.m_beta, 1)))

        self.do_m_step(ss) # run m step
        return likelihood 
Example #23
Source File: pmf.py    From stochastic_PMF with GNU General Public License v3.0
def _compute_expectations(alpha, beta):
    '''
    Given x ~ Gam(alpha, beta), compute E[x] and E[log x]
    '''
    return (alpha / beta, special.psi(alpha) - np.log(beta)) 
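
A Monte Carlo check (my addition) of the shape/rate identities above: E[x] = alpha/beta and E[log x] = psi(alpha) - log(beta).

import numpy as np
from scipy import special

rng = np.random.default_rng(3)
alpha, beta = 2.5, 4.0
x = rng.gamma(alpha, scale=1.0 / beta, size=100000)
e_x, e_logx = _compute_expectations(alpha, beta)
print(x.mean(), e_x)             # both ~ 0.625
print(np.log(x).mean(), e_logx)  # both ~ -0.683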
Example #24
Source File: bcpd.py    From probreg with MIT License
def _maximization_step(source, target, rigid_trans, estep_res,
                           gmat, lmd, k, sigma2_p=None):
        nu_d, nu, n_p, px, x_hat = estep_res
        dim = source.shape[1]
        m = source.shape[0]
        s2s2 = rigid_trans.scale**2 / (sigma2_p**2)
        sigma_mat_inv = lmd * gmat + s2s2 * np.diag(nu)
        sigma_mat = np.linalg.inv(sigma_mat_inv)
        residual = rigid_trans.inverse().transform(x_hat) - source
        v_hat = s2s2 * np.matmul(np.multiply(np.kron(sigma_mat, np.identity(dim)),
                                             np.kron(nu, np.ones(dim))),
                                 residual.ravel()).reshape(-1, dim)
        u_hat = source + v_hat
        alpha = np.exp(spsp.psi(k + nu) - spsp.psi(k * m + n_p))
        x_m = np.sum(nu * x_hat.T, axis=1) / n_p
        sigma2_m = np.sum(nu * np.diag(sigma_mat), axis=0) / n_p
        u_m = np.sum(nu * u_hat.T, axis=1) / n_p
        u_hm = u_hat - u_m
        s_xu = np.matmul(np.multiply(nu, (x_hat - x_m).T), u_hm) / n_p
        s_uu = np.matmul(np.multiply(nu, u_hm.T), u_hm) / n_p + sigma2_m * np.identity(dim)
        phi, s_xu_d, psih = np.linalg.svd(s_xu, full_matrices=True)
        c = np.ones(dim)
        c[-1] = np.linalg.det(np.dot(phi, psih))
        rot = np.matmul(phi * c, psih)
        tr_rsxu = np.trace(np.matmul(rot, s_xu))
        scale = tr_rsxu / np.trace(s_uu)
        t = x_m - scale * np.dot(rot, u_m)
        y_hat = rigid_trans.transform(source + v_hat)
        s1 = np.dot(target.ravel(), np.kron(nu_d, np.ones(dim)) * target.ravel())
        s2 = np.dot(px.ravel(), y_hat.ravel())
        s3 = np.dot(y_hat.ravel(), np.kron(nu, np.ones(dim)) * y_hat.ravel())
        sigma2 = (s1 - 2.0 * s2 + s3) / (n_p * dim) + scale**2 * sigma2_m
        return MstepResult(tf.CombinedTransformation(rot, t, scale, v_hat), u_hat, sigma_mat, alpha, sigma2) 
Example #25
Source File: utils.py    From stochasticLDA with GNU General Public License v3.0
def dirichlet_expectation(alpha):
	'''see onlineldavb.py by Blei et al'''
	if (len(alpha.shape) == 1):
		return (psi(alpha) - psi(n.sum(alpha)))
	return (psi(alpha) - psi(n.sum(alpha, 1))[:, n.newaxis]) 
Example #26
Source File: utils.py    From stochasticLDA with GNU General Public License v3.0
def beta_expectation(a, b, k):
	mysum = psi(a + b)
	Elog_a = psi(a) - mysum
	Elog_b = psi(b) - mysum
	Elog_beta = n.zeros(k)
	Elog_beta[0] = Elog_a[0]
	# print Elog_beta
	for i in range(1, k):
		Elog_beta[i] = Elog_a[i] + n.sum(Elog_b[0:i])
		# print Elog_beta
	# print Elog_beta
	return Elog_beta 
Example #27
Source File: _multivariate.py    From GraphicDesignPatternByPython with MIT License
def _entropy(self, dim, df, log_det_scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        log_det_scale : float
            Logarithm of the determinant of the scale matrix

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'entropy' instead.

        """
        return (
            0.5 * (dim+1) * log_det_scale +
            0.5 * dim * (dim+1) * _LOG_2 +
            multigammaln(0.5*df, dim) -
            0.5 * (df - dim - 1) * np.sum(
                [psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
            ) +
            0.5 * df * dim
        ) 
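
This private helper is reached through the public entropy method, so the psi-based term can be exercised via scipy.stats.wishart directly (usage sketch, my addition):

import numpy as np
from scipy.stats import wishart

print(wishart.entropy(df=5, scale=np.eye(3)))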
Example #28
Source File: _continuous_distns.py    From lambda-packs with MIT License
def _beta_mle_a(a, b, n, s1):
    # The zeros of this function give the MLE for `a`, with
    # `b`, `n` and `s1` given.  `s1` is the sum of the logs of
    # the data. `n` is the number of data points.
    psiab = sc.psi(a + b)
    func = s1 - n * (-psiab + sc.psi(a))
    return func
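
Usage sketch (my addition): with b, n and s1 fixed, the MLE for a is the one-dimensional root of this function, which scipy.optimize.brentq finds on a bracketing interval.

import numpy as np
import scipy.special as sc
from scipy.optimize import brentq

rng = np.random.default_rng(4)
data = rng.beta(2.0, 5.0, size=5000)
a_hat = brentq(_beta_mle_a, 1e-3, 100.0,
               args=(5.0, data.size, np.log(data).sum()))
print(a_hat)  # close to 2 when b is held at its true value of 5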