Python scipy.special.entr() Examples

The following are 13 code examples of scipy.special.entr(). Each example is an excerpt from an open-source project; the source file and license are noted above it. You may also want to check out all available functions/classes of the module scipy.special.
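As a quick orientation before the examples: scipy.special.entr computes the elementwise entropy function, entr(x) = -x*log(x) for x > 0, 0 at x == 0, and -inf for x < 0 (Example #9 below tests exactly this behavior). A minimal usage sketch:

import numpy as np
from scipy.special import entr

# entr(x) = -x*log(x) for x > 0, 0 at x == 0, -inf for x < 0.
p = np.array([0.5, 0.5])
print(entr(p))        # [0.34657359 0.34657359], i.e. 0.5*log(2) each
print(entr(p).sum())  # 0.6931... = log(2), the entropy of a fair coin in nats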
Example #1
Source File: _multivariate.py    From lambda-packs with MIT License
def entropy(self, n, p):
        r"""
        Compute the entropy of the multinomial distribution.

        The entropy is computed using this expression:

        .. math::

            H(X) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
            \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Multinomial distribution

        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, p, npcond = self._process_parameters(n, p)

        x = np.r_[1:np.max(n)+1]

        # entr(p) = -p*log(p), so term1 = -log(n!) - n*sum_i p_i*log(p_i).
        term1 = n*np.sum(entr(p), axis=-1)
        term1 -= gammaln(n+1)

        # Reshape so the binomial pmf broadcasts over every component of p
        # and every possible count x at once.
        n = n[..., np.newaxis]
        new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
        x.shape += (1,)*new_axes_needed

        # term2 = sum_i sum_x C(n, x) p_i^x (1-p_i)^(n-x) * log(x!),
        # with log(x!) computed as gammaln(x+1).
        term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
                       axis=(-1, -1-new_axes_needed))

        return self._checkresult(term1 + term2, npcond, np.nan) 
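In context, entr and gammaln come from scipy.special and binom is scipy.stats.binom; the method itself is exposed publicly as scipy.stats.multinomial.entropy(n, p). Below is a brute-force sanity check of the closed-form expression through the public API; the parameter values are arbitrary:

import numpy as np
from itertools import product
from scipy.stats import multinomial

n, p = 5, [1/3, 1/3, 1/3]
h = multinomial.entropy(n, p)

# H = -sum_x P(x)*log(P(x)) over all count vectors x with sum(x) == n.
brute = 0.0
for x in product(range(n + 1), repeat=len(p)):
    if sum(x) == n:
        pmf = multinomial.pmf(x, n, p)
        brute -= pmf * np.log(pmf)

np.testing.assert_allclose(h, brute)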
Example #2
Source File: _discrete_distns.py    From lambda-packs with MIT License
def _entropy(self, n, p):
        # Sum entr(pmf(k)) = -pmf(k)*log(pmf(k)) over the full support
        # k = 0, 1, ..., n; the result is the entropy in nats.
        k = np.r_[0:n + 1]
        vals = self._pmf(k, n, p)
        return np.sum(entr(vals), axis=0)
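This is the binomial distribution's entropy. The same computation through the public API, with arbitrary n and p:

import numpy as np
from scipy.special import entr
from scipy.stats import binom

n, p = 10, 0.3
k = np.arange(n + 1)
h_manual = entr(binom.pmf(k, n, p)).sum()   # -sum pmf*log(pmf)
np.testing.assert_allclose(h_manual, binom.entropy(n, p))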
Example #3
Source File: _discrete_distns.py    From lambda-packs with MIT License
def _entropy(self, p):
        # Binary entropy: H(p) = -p*log(p) - (1-p)*log(1-p).
        return entr(p) + entr(1-p)
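This is the Bernoulli distribution's entropy, i.e. the binary entropy function, which peaks at log(2) nats for p = 0.5. A quick check against the public API:

import numpy as np
from scipy.special import entr
from scipy.stats import bernoulli

p = 0.25
np.testing.assert_allclose(entr(p) + entr(1 - p), bernoulli.entropy(p))
print(entr(0.5) + entr(0.5))  # 0.6931... = log(2), a fair coin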
Example #4
Source File: _discrete_distns.py    From lambda-packs with MIT License
def _entropy(self, M, n, N):
        # Support of the hypergeometric pmf. The lower endpoint can be
        # negative, but pmf() returns 0 there and entr(0) == 0, so any
        # out-of-support terms contribute nothing to the sum.
        k = np.r_[N - (M - n):min(n, N) + 1]
        vals = self.pmf(k, M, n, N)
        return np.sum(entr(vals), axis=0)
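This is the hypergeometric distribution's entropy (M objects in total, n of them successes, N drawn without replacement). The same check through the public API, clamping the lower endpoint of the support at 0 for clarity; the parameter values are arbitrary:

import numpy as np
from scipy.special import entr
from scipy.stats import hypergeom

M, n, N = 20, 7, 12
k = np.arange(max(0, N - (M - n)), min(n, N) + 1)
h_manual = entr(hypergeom.pmf(k, M, n, N)).sum()
np.testing.assert_allclose(h_manual, hypergeom.entropy(M, n, N))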
Example #5
Source File: _multivariate.py    From GraphicDesignPatternByPython with MIT License
def entropy(self, n, p):
        r"""
        Compute the entropy of the multinomial distribution.

        The entropy is computed using this expression:

        .. math::

            H(X) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
            \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Multinomial distribution

        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, p, npcond = self._process_parameters(n, p)

        x = np.r_[1:np.max(n)+1]

        term1 = n*np.sum(entr(p), axis=-1)
        term1 -= gammaln(n+1)

        n = n[..., np.newaxis]
        new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
        x.shape += (1,)*new_axes_needed

        term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
                       axis=(-1, -1-new_axes_needed))

        return self._checkresult(term1 + term2, npcond, np.nan) 
Example #6
Source File: _discrete_distns.py    From GraphicDesignPatternByPython with MIT License
def _entropy(self, n, p):
        k = np.r_[0:n + 1]
        vals = self._pmf(k, n, p)
        return np.sum(entr(vals), axis=0) 
Example #7
Source File: _discrete_distns.py    From GraphicDesignPatternByPython with MIT License
def _entropy(self, p):
        return entr(p) + entr(1-p) 
Example #8
Source File: _discrete_distns.py    From GraphicDesignPatternByPython with MIT License
def _entropy(self, M, n, N):
        k = np.r_[N - (M - n):min(n, N) + 1]
        vals = self.pmf(k, M, n, N)
        return np.sum(entr(vals), axis=0) 
Example #9
Source File: test_basic.py    From GraphicDesignPatternByPython with MIT License
def test_entr():
    def xfunc(x):
        if x < 0:
            return -np.inf
        else:
            return -special.xlogy(x, x)
    values = (0, 0.5, 1.0, np.inf)
    signs = [-1, 1]
    arr = []
    for sgn, v in itertools.product(signs, values):
        arr.append(sgn * v)
    z = np.array(arr, dtype=float)
    w = np.vectorize(xfunc, otypes=[np.float64])(z)
    assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13) 
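The test pins down the definition (assert_func_equal is scipy's internal test helper from scipy.special._testutils): for x >= 0, entr(x) equals -xlogy(x, x), and every negative input maps to -inf. A standalone spot check:

import numpy as np
from scipy import special

x = np.array([0.0, 0.5, 1.0, 2.0])
np.testing.assert_allclose(special.entr(x), -special.xlogy(x, x))
assert special.entr(-1.0) == -np.inf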
Example #10
Source File: _multivariate.py    From Splunking-Crime with GNU Affero General Public License v3.0
def entropy(self, n, p):
        r"""
        Compute the entropy of the multinomial distribution.

        The entropy is computed using this expression:

        .. math::

            H(X) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
            \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Multinomial distribution

        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, p, npcond = self._process_parameters(n, p)

        x = np.r_[1:np.max(n)+1]

        term1 = n*np.sum(entr(p), axis=-1)
        term1 -= gammaln(n+1)

        n = n[..., np.newaxis]
        new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
        x.shape += (1,)*new_axes_needed

        term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
                       axis=(-1, -1-new_axes_needed))

        return self._checkresult(term1 + term2, npcond, np.nan) 
Example #11
Source File: _discrete_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _entropy(self, n, p):
        k = np.r_[0:n + 1]
        vals = self._pmf(k, n, p)
        return np.sum(entr(vals), axis=0) 
Example #12
Source File: _discrete_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _entropy(self, p):
        return entr(p) + entr(1-p) 
Example #13
Source File: _discrete_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _entropy(self, M, n, N):
        k = np.r_[N - (M - n):min(n, N) + 1]
        vals = self.pmf(k, M, n, N)
        return np.sum(entr(vals), axis=0)