Python scipy.special.legendre() Examples

The following are 14 code examples of scipy.special.legendre(), drawn from open-source projects. Each example lists its original project and source file. You may also want to check out the other available functions and classes of the scipy.special module.
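scipy.special.legendre(n) returns an orthopoly1d object (a numpy poly1d subclass) holding the coefficients of the Legendre polynomial P_n, and it can be called directly on scalars or arrays. A minimal sketch of that behaviour:

import numpy as np
from scipy.special import legendre

P2 = legendre(2)                 # orthopoly1d for P_2(x) = (3*x**2 - 1)/2
print(P2.c)                      # coefficients, highest power first: [ 1.5  0.  -0.5]
x = np.linspace(-1.0, 1.0, 5)
print(P2(x))                     # evaluate P_2 at the sample points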
Example #1
Source File: densityorthopoly.py    From vnpy_crypto with MIT License
def density_orthopoly(x, polybase, order=5, xeval=None):
    from scipy.special import legendre, hermitenorm, chebyt, chebyu, hermite
    #polybase = legendre  #chebyt #hermitenorm#
    #polybase = chebyt
    #polybase = FPoly
    #polybase = ChtPoly
    #polybase = hermite
    #polybase = HPoly

    if xeval is None:
        xeval = np.linspace(x.min(),x.max(),50)

    #polys = [legendre(i) for i in range(order)]
    polys = [polybase(i) for i in range(order)]
    #coeffs = [(p(x)*(1-x**2)**(-1/2.)).mean() for p in polys]
    #coeffs = [(p(x)*np.exp(-x*x)).mean() for p in polys]
    coeffs = [(p(x)).mean() for p in polys]
    res = sum(c*p(xeval) for c, p in zip(coeffs, polys))
    #res *= (1-xeval**2)**(-1/2.)
    #res *= np.exp(-xeval**2./2)
    return res, xeval, coeffs, polys 
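The coefficients above are sample means of each basis polynomial, and the estimate is their weighted sum on the xeval grid (an orthogonal-series density estimate). A hypothetical call, assuming the function above is in scope and numpy is imported as np at module level as in the source file:

import numpy as np
from scipy.special import legendre

# hypothetical samples roughly confined to [-1, 1], where the Legendre basis is orthogonal
x = np.clip(np.random.default_rng(0).normal(scale=0.4, size=500), -1, 1)
f_hat, xeval, coeffs, polys = density_orthopoly(x, legendre, order=5)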
Example #2
Source File: iterfit.py    From specidentify with BSD 3-Clause "New" or "Revised" License
def set_func(self, function):
        """Set the function that will be used.
        * function - name of function to be used

        It will throw an error if an inappropriate function is given
        """
        self.function = function
        if self.function == 'poly' or self.function == 'polynomial' or self.function == 'power':
            self.func = power
        elif self.function == 'legendre':
            self.func = legendre
        elif self.function == 'chebyshev':
            self.func = chebyt
        elif self.function == 'spline':
            self.func = None
        else:
            msg = '%s is not a valid function' % self.function
            raise SpecError(msg) 
Example #3
Source File: densityorthopoly.py    From Splunking-Crime with GNU Affero General Public License v3.0
def density_orthopoly(x, polybase, order=5, xeval=None):
    from scipy.special import legendre, hermitenorm, chebyt, chebyu, hermite
    #polybase = legendre  #chebyt #hermitenorm#
    #polybase = chebyt
    #polybase = FPoly
    #polybase = ChtPoly
    #polybase = hermite
    #polybase = HPoly

    if xeval is None:
        xeval = np.linspace(x.min(),x.max(),50)

    #polys = [legendre(i) for i in range(order)]
    polys = [polybase(i) for i in range(order)]
    #coeffs = [(p(x)*(1-x**2)**(-1/2.)).mean() for p in polys]
    #coeffs = [(p(x)*np.exp(-x*x)).mean() for p in polys]
    coeffs = [(p(x)).mean() for p in polys]
    res = sum(c*p(xeval) for c, p in zip(coeffs, polys))
    #res *= (1-xeval**2)**(-1/2.)
    #res *= np.exp(-xeval**2./2)
    return res, xeval, coeffs, polys 
Example #4
Source File: Utility.py    From fuku-ml with MIT License
def feature_transform(X, mode='polynomial', degree=1):

        poly = PolynomialFeatures(degree)
        process_X = poly.fit_transform(X)

        if mode == 'legendre':
            lege = legendre(degree)
            process_X = lege(process_X)

        return process_X 
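Here the Legendre polynomial of the requested degree is applied elementwise to the polynomially expanded features. A small sketch of that elementwise behaviour with hypothetical inputs, assuming PolynomialFeatures comes from sklearn.preprocessing:

import numpy as np
from scipy.special import legendre
from sklearn.preprocessing import PolynomialFeatures

X = np.array([[0.1, 0.2], [0.3, 0.4]])
process_X = PolynomialFeatures(degree=2).fit_transform(X)
lege = legendre(2)
print(lege(process_X))   # P_2 applied to every entry of the expanded matrix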
Example #5
Source File: angular.py    From sfa-numpy with MIT License
def Legendre_matrix(N, ctheta):
    r"""Matrix of weighted Legendre Polynominals.

    Computes a matrix of weighted Legendre polynominals up to order N for
    the given angles

    .. math::

        L_n(\theta) = \frac{2n+1}{4 \pi} P_n(\theta)

    Parameters
    ----------
    N : int
        Maximum order.
    ctheta : (Q,) array_like
        Angles.

    Returns
    -------
    Lmn : ((N+1), Q) numpy.ndarray
        Matrix containing Legendre polynomials.
    """
    if ctheta.ndim == 0:
        M = 1
    else:
        M = len(ctheta)
    Lmn = np.zeros([N+1, M], dtype=complex)
    for n in range(N+1):
        Lmn[n, :] = (2*n+1)/(4*np.pi) * np.polyval(special.legendre(n), ctheta)

    return Lmn 
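A hypothetical call of the helper above, assuming numpy as np and from scipy import special are imported at module level as in the source file:

import numpy as np

# weighted Legendre polynomials up to order N = 4 at five cosine values
ctheta = np.cos(np.linspace(0, np.pi, 5))
Lmn = Legendre_matrix(4, ctheta)
print(Lmn.shape)   # (5, 5): orders 0..4 evaluated at 5 points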
Example #6
Source File: angular.py    From sfa-numpy with MIT License
def grid_gauss(n):
    """ Gauss-Legendre sampling points on sphere.

    Sampling grid according to the Rafaely book, sec. 3.3.

    Parameters
    ----------
    n : int
        Maximum order.

    Returns
    -------
    azi : array_like
        Azimuth.
    colat : array_like
        Colatitude.
    weights : array_like
        Quadrature weights.
    """
    azi = np.linspace(0, 2*np.pi, 2*n+2, endpoint=False)
    x, weights = np.polynomial.legendre.leggauss(n+1)
    colat = np.arccos(x)
    azi = np.tile(azi, n+1)
    colat = np.repeat(colat, 2*n+2)
    weights = np.repeat(weights, 2*n+2)
    weights *= np.pi / (n+1)      # sum(weights) == 4pi
    return azi, colat, weights 
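A quick sanity check of the grid above (a sketch, assuming the function is in scope): the grid has 2*(n+1)**2 points and the quadrature weights integrate to the surface area of the unit sphere, 4*pi:

import numpy as np

azi, colat, weights = grid_gauss(3)
print(len(azi), len(colat), len(weights))    # 32 32 32, i.e. 2*(n+1)**2 points
print(np.isclose(weights.sum(), 4*np.pi))    # True: weights cover the full sphere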
Example #7
Source File: test_basic.py    From Computable with MIT License
def test_legendre(self):
        leg0 = special.legendre(0)
        leg1 = special.legendre(1)
        leg2 = special.legendre(2)
        leg3 = special.legendre(3)
        leg4 = special.legendre(4)
        leg5 = special.legendre(5)
        assert_equal(leg0.c,[1])
        assert_equal(leg1.c,[1,0])
        assert_equal(leg2.c,array([3,0,-1])/2.0)
        assert_almost_equal(leg3.c,array([5,0,-3,0])/2.0)
        assert_almost_equal(leg4.c,array([35,0,-30,0,3])/8.0)
        assert_almost_equal(leg5.c,array([63,0,-70,0,15,0])/8.0) 
Example #8
Source File: test_basic.py    From GraphicDesignPatternByPython with MIT License
def test_legendre(self):
        leg0 = special.legendre(0)
        leg1 = special.legendre(1)
        leg2 = special.legendre(2)
        leg3 = special.legendre(3)
        leg4 = special.legendre(4)
        leg5 = special.legendre(5)
        assert_equal(leg0.c, [1])
        assert_equal(leg1.c, [1,0])
        assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
        assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
        assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
        assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0) 
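The tests above rely on the poly1d coefficient convention (highest power first). A short cross-check of P_3 against its closed form (5*x**3 - 3*x)/2:

import numpy as np
from scipy.special import legendre

leg3 = legendre(3)
print(np.allclose(leg3.c, np.array([5, 0, -3, 0]) / 2.0))   # True
x = np.linspace(-1, 1, 7)
print(np.allclose(leg3(x), (5*x**3 - 3*x) / 2.0))           # True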
Example #9
Source File: iterfit.py    From specidentify with BSD 3-Clause "New" or "Revised" License
def set_coef(self, coef=None):
        """Set the coefficients for the fits for poly, legendre, and chebyshev"""
        if coef is None: coef = np.ones(self.order + 1)
        self.coef = coef 
Example #10
Source File: vmi.py    From PyAbel with MIT License
def harmonics(self):
            r"""
            Radial distributions of spherical harmonics
            (Legendre polynomials :math:`P_n(\cos \theta)`).

            Spherical harmonics are orthogonal with respect to integration over
            the full sphere:

            .. math::
                \iint P_n P_m \,d\Omega =
                \int_0^{2\pi} \int_0^\pi P_n(\cos \theta) P_m(\cos \theta)
                    \,\sin\theta d\theta \,d\varphi = 0

            for *n* ≠ *m*; and :math:`P_0(\cos \theta)` is the spherically
            averaged intensity.

            Returns
            -------
            Pn : (# terms) × (rmax + 1) numpy array
                radial dependences of the :math:`P_n(\cos \theta)` terms
            """
            terms = self.cn.shape[0]
            # conversion matrix (cos^k → P_n)
            CH = np.zeros((terms, terms))
            for i in range(terms):
                if self.odd:
                    c = legendre(i).c[::-1]
                else:
                    c = legendre(2 * i).c[::-2]
                CH[:len(c), i] = c
            CH = inv(CH)
            # apply to all radii
            harm = CH.dot(self.cn)
            return harm 
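The matrix CH maps Legendre coefficients onto powers of cos(theta), and its inverse converts the measured cos-power coefficients cn into P_n terms. A standalone sketch of the even-order conversion matrix, using only numpy and scipy.special:

import numpy as np
from numpy.linalg import inv
from scipy.special import legendre

terms = 3                             # P_0, P_2, P_4 in powers cos^0, cos^2, cos^4
CH = np.zeros((terms, terms))
for i in range(terms):
    c = legendre(2 * i).c[::-2]       # even-power coefficients, lowest power first
    CH[:len(c), i] = c
conv = inv(CH)                        # maps cos-power coefficients to P_n coefficients
print(np.allclose(conv @ CH, np.eye(terms)))   # True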
Example #11
Source File: design_matrix.py    From nltools with MIT License
def add_poly(self, order=0, include_lower=True):
        """Add nth order Legendre polynomial terms as columns to design matrix. Good for adding constant/intercept to model (order = 0) and accounting for slow-frequency nuisance artifacts e.g. linear, quadratic, etc drifts. Care is recommended when using this with `.add_dct_basis()` as some columns will be highly correlated.

        Args:
            order (int): what order terms to add; 0 = constant/intercept
                        (default), 1 = linear, 2 = quadratic, etc
            include_lower: (bool) whether to add lower order terms if order > 0

        """
        if order < 0:
            raise ValueError("Order must be 0 or greater")

        if self.polys and any(elem.count('_') == 2 for elem in self.polys):
            raise AmbiguityError("It appears that this Design Matrix contains polynomial terms that were kept separate from a previous append operation. This makes it ambiguous to add polynomial terms. Try calling .add_poly() on each separate Design Matrix before appending them instead.")

        polyDict = {}
        # Normal/canonical legendre polynomials on the range -1,1 but with size defined by number of observations; keeps all polynomials on similar scales (i.e. big polys don't blow up) and betas are better behaved
        norm_order = np.linspace(-1, 1, self.shape[0])

        if 'poly_'+str(order) in self.polys:
            print("Design Matrix already has {}th order polynomial...skipping".format(order))
            return self

        if include_lower:
            for i in range(order+1):
                if 'poly_'+str(i) in self.polys:
                    print("Design Matrix already has {}th order polynomial...skipping".format(i))
                else:
                    polyDict['poly_' + str(i)] = legendre(i)(norm_order)
        else:
            polyDict['poly_' + str(order)] = legendre(order)(norm_order)

        toAdd = Design_Matrix(polyDict, sampling_freq=self.sampling_freq)
        out = self.append(toAdd, axis=1)
        if out.polys:
            new_polys = out.polys + list(polyDict.keys())
            out.polys = new_polys
        else:
            out.polys = list(polyDict.keys())
        return out 
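The polynomial regressors are simply Legendre polynomials evaluated on a length-n grid over [-1, 1]. A minimal sketch of that building block, independent of the Design_Matrix class:

import numpy as np
from scipy.special import legendre

n_obs = 100
norm_order = np.linspace(-1, 1, n_obs)
# constant, linear and quadratic Legendre regressors, one column per order
regressors = np.column_stack([legendre(i)(norm_order) for i in range(3)])
print(regressors.shape)   # (100, 3)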
Example #12
Source File: estimators.py    From nbodykit with GNU General Public License v3.0
def to_poles(self, poles):
        r"""
        Invert the measured wedges :math:`\xi(r,\mu)` into correlation
        multipoles, :math:`\xi_\ell(r)`.

        To select a mu_range, use

        .. code:: python

            poles = self.sel(mu=slice(*mu_range), method='nearest').to_poles(poles)

        Parameters
        ----------
        poles: array_like
            the list of multipoles to compute

        Returns
        -------
        xi_ell : BinnedStatistic
            a data set holding the :math:`\xi_\ell(r)` multipoles
        """
        from scipy.special import legendre
        from scipy.integrate import quad

        # new data array
        x = str(self.dims[0])
        dtype = numpy.dtype([(x, 'f8')] + [('corr_%d' %ell, 'f8') for ell in poles])
        data = numpy.zeros((self.shape[0]), dtype=dtype)
        dims = [x]
        edges = [self.edges[x]]

        # FIXME: use something fancier than the central point.
        mu_bins = numpy.diff(self.edges['mu'])
        mu_mid = (self.edges['mu'][1:] + self.edges['mu'][:-1])/2.

        for ell in poles:
            legendrePolynomial = (2.*ell+1.)*legendre(ell)(mu_mid)
            data['corr_%d' %ell] = numpy.sum(self['corr']*legendrePolynomial*mu_bins,axis=-1)/numpy.sum(mu_bins)

        data[x] = numpy.mean(self[x],axis=-1)

        return BinnedStatistic(dims=dims, edges=edges, data=data, poles=poles) 
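The loop above is the discrete form of :math:`\xi_\ell(r) = (2\ell+1) \int_0^1 \xi(r,\mu) P_\ell(\mu) \, d\mu`, using bin midpoints as nodes and bin widths as weights. A standalone sketch with hypothetical data:

import numpy as np
from scipy.special import legendre

# hypothetical xi(r, mu) on 10 r-bins x 20 mu-bins over mu in [0, 1]
mu_edges = np.linspace(0, 1, 21)
mu_mid = 0.5 * (mu_edges[1:] + mu_edges[:-1])
mu_bins = np.diff(mu_edges)
xi = np.random.default_rng(1).random((10, 20))

ell = 2
weight = (2*ell + 1) * legendre(ell)(mu_mid)
xi_2 = np.sum(xi * weight * mu_bins, axis=-1) / np.sum(mu_bins)   # quadrupole estimate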
Example #13
Source File: fkp.py    From nbodykit with GNU General Public License v3.0
def to_pkmu(self, mu_edges, max_ell):
        """
        Invert the measured multipoles :math:`P_\ell(k)` into power
        spectrum wedges, :math:`P(k,\mu)`.

        Parameters
        ----------
        mu_edges : array_like
            the edges of the :math:`\mu` bins
        max_ell : int
            the maximum multipole to use when computing the wedges;
            all even multipoles with :math:`\ell` less than or equal
            to this number are included

        Returns
        -------
        pkmu : BinnedStatistic
            a data set holding the :math:`P(k,\mu)` wedges
        """
        from scipy.special import legendre
        from scipy.integrate import quad

        def compute_coefficient(ell, mumin, mumax):
            """
            Compute how much each multipole contributes to a given wedge.
            This returns:

            .. math::
                \frac{1}{\mu_{max} - \mu_{min}} \int_{\mu_{min}}^{\mu_{max}} \mathcal{L}_\ell(\mu) \, d\mu
            """
            norm = 1.0 / (mumax - mumin)
            return norm * quad(lambda mu: legendre(ell)(mu), mumin, mumax)[0]

        # make sure we have all the poles measured
        ells = list(range(0, max_ell+1, 2))
        if any('power_%d' %ell not in self.poles for ell in ells):
            raise ValueError("measurements for ells=%s required if max_ell=%d" %(ells, max_ell))

        # new data array
        dtype = numpy.dtype([('power', 'c8'), ('k', 'f8'), ('mu', 'f8')])
        data = numpy.zeros((self.poles.shape[0], len(mu_edges)-1), dtype=dtype)

        # loop over each wedge
        bounds = list(zip(mu_edges[:-1], mu_edges[1:]))
        for imu, mulims in enumerate(bounds):

            # add the contribution from each Pell
            for ell in ells:
                coeff = compute_coefficient(ell, *mulims)
                data['power'][:,imu] += coeff * self.poles['power_%d' %ell]

            data['k'][:,imu] = self.poles['k']
            data['mu'][:,imu] = numpy.ones(len(data))*0.5*(mulims[1]+mulims[0])

        dims = ['k', 'mu']
        edges = [self.poles.edges['k'], mu_edges]
        return BinnedStatistic(dims=dims, edges=edges, data=data, coords=[self.poles.coords['k'], None], **self.attrs) 
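Each wedge coefficient is the average of the Legendre polynomial over the mu bin. The inner helper can be exercised on its own: for the monopole it is always 1, while higher multipoles partially cancel within a bin:

from scipy.special import legendre
from scipy.integrate import quad

def compute_coefficient(ell, mumin, mumax):
    norm = 1.0 / (mumax - mumin)
    return norm * quad(lambda mu: legendre(ell)(mu), mumin, mumax)[0]

print(compute_coefficient(0, 0.0, 0.5))   # 1.0: the monopole enters every wedge fully
print(compute_coefficient(2, 0.0, 0.5))   # -0.375: P_2 averages below zero near mu = 0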
Example #14
Source File: pymm_legendre.py    From geoist with MIT License
def schmidt_norm_legendre(x, degree):
        """ Evaluate Schmidt semi-normalised Legendre functions and their
        derivatives.
        """
        # pylint: disable=invalid-name

        # TODO: Get more robust algorithm.
        # NOTE: This simple reference implementation is prone to truncation
        # errors and overflows for higher degrees.

        def _eval_functions():
            y = (1.0 - x*x)
            yield legendre_polynomial(0)(x)
            for n in range(1, degree + 1):
                leg_pol = legendre_polynomial(n)
                yield leg_pol(x)
                scale = sqrt(y / ((n * (n + 1))//2))
                yield scale * leg_pol.deriv(1)(x)
                for m in range(2, n + 1):
                    scale *= sqrt(y / ((n + m) * (n - m + 1)))
                    yield scale * leg_pol.deriv(m)(x)

        def _eval_derivatives():
            yield 0.0
            yield p_series[2]
            yield -p_series[1]
            for n in range(2, degree + 1):
                offset = (n*(n + 1))//2
                yield sqrt((n*(n + 1))//2) * p_series[offset + 1]
                yield 0.5 * (
                    sqrt((n+2)*(n-1)) * p_series[offset + 2] -
                    sqrt(2*n*(n + 1)) * p_series[offset]
                )
                for m in range(2, n):
                    yield 0.5 * (
                        sqrt((n+m+1)*(n-m)) * p_series[offset + m + 1] -
                        sqrt((n+m)*(n-m+1)) * p_series[offset + m - 1]
                    )
                yield -sqrt(n/2.0) * p_series[offset + n - 1]

        p_series = list(_eval_functions())
        dp_series = list(_eval_derivatives())

        return array(p_series), array(dp_series)
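The excerpt relies on module-level imports that are not shown; a reasonable guess (an assumption, not confirmed by the excerpt) is the set below, followed by a hypothetical call:

from math import sqrt                                        # assumed module-level import
from numpy import array                                      # assumed module-level import
from scipy.special import legendre as legendre_polynomial    # assumed alias

# hypothetical call: functions and derivatives up to degree 3 at x = cos(theta)
p, dp = schmidt_norm_legendre(0.5, 3)
print(p.shape, dp.shape)   # both ((degree+1)*(degree+2)//2,) = (10,)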