Python numdifftools.Gradient() Examples

The following are 22 code examples of numdifftools.Gradient(), drawn from open-source projects; the originating project and source file are noted above each example. You may also want to check out the other available functions and classes of the numdifftools module.
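For orientation, here is a minimal, self-contained sketch of the typical call pattern; the function f and the point x0 are made up for illustration and do not come from any of the projects listed below.

import numpy as np
import numdifftools as nd

def f(x):
    # Scalar-valued function of a 3-vector with a known analytic gradient.
    return np.sum(x ** 2) + np.sin(x[0])

x0 = np.array([1.0, -2.0, 0.5])
grad_fd = nd.Gradient(f)(x0)                      # numerical gradient at x0
grad_true = 2 * x0 + np.array([np.cos(x0[0]), 0.0, 0.0])
np.testing.assert_allclose(grad_fd, grad_true, rtol=1e-6)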
Example #1
Source File: test.py    From qpth with Apache License 2.0
def test_dl_dG():
    nz, neq, nineq = 10, 0, 3
    [p, Q, G, h, A, b, truez], [dQ, dp, dG, dh, dA, db] = get_grads(
        nz=nz, neq=neq, nineq=nineq)

    def f(G):
        G = G.reshape(nineq, nz)
        _, zhat, nu, lam, slacks = qp_cvxpy.forward_single_np(Q, p, G, h, A, b)
        return 0.5 * np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    dG_fd = df(G.ravel()).reshape(nineq, nz)
    if verbose:
        # print('dG_fd[1,:]: ', dG_fd[1,:])
        # print('dG[1,:]: ', dG[1,:])
        print('dG_fd: ', dG_fd)
        print('dG: ', dG)
    npt.assert_allclose(dG_fd, dG, rtol=RTOL, atol=ATOL) 
Example #2
Source File: test.py    From qpth with Apache License 2.0
def test_dl_dA():
    nz, neq, nineq = 10, 3, 1
    [p, Q, G, h, A, b, truez], [dQ, dp, dG, dh, dA, db] = get_grads(
        nz=nz, neq=neq, nineq=nineq, Qscale=100., Gscale=100., Ascale=100.)

    def f(A):
        A = A.reshape(neq, nz)
        _, zhat, nu, lam, slacks = qp_cvxpy.forward_single_np(Q, p, G, h, A, b)
        return 0.5 * np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    dA_fd = df(A.ravel()).reshape(neq, nz)
    if verbose:
        # print('dA_fd[0,:]: ', dA_fd[0,:])
        # print('dA[0,:]: ', dA[0,:])
        print('dA_fd: ', dA_fd)
        print('dA: ', dA)
    npt.assert_allclose(dA_fd, dA, rtol=RTOL, atol=ATOL) 
Example #3
Source File: test_autograd.py    From momi2 with GNU General Public License v3.0
def check_gradient(f, x):
    print(x, "\n", f(x))

    print("# grad2")
    grad2 = Gradient(f)(x)
    print("# building grad1")
    g = grad(f)
    print("# computing grad1")
    grad1 = g(x)

    print("gradient1\n", grad1, "\ngradient2\n", grad2)
    assert np.allclose(grad1, grad2)

    # check Hessian vector product
    y = np.random.normal(size=x.shape)
    gdot = lambda u: np.dot(g(u), y)
    hess1, hess2 = grad(gdot)(x), Gradient(gdot)(x)
    print("hess1\n", hess1, "\nhess2\n", hess2)
    assert np.allclose(hess1, hess2)
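A hypothetical call of this helper, assuming the excerpt's own imports are in scope (autograd's grad, numdifftools' Gradient, and autograd.numpy as np) and the test function is written with autograd.numpy; the function and point are invented for illustration.

def quadratic_plus_sin(x):
    # Smooth scalar function of a vector, differentiable by autograd.
    return np.dot(x, x) + np.sum(np.sin(x))

check_gradient(quadratic_plus_sin, np.array([0.1, 0.2, 0.3]))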
Example #4
Source File: optnet-np.py    From optnet with Apache License 2.0
def test_dl_dz0():
    def f(z0):
        zhat, nu, lam = af.forward_single_np(p, L, G, A, z0, s0)
        return 0.5*np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    dz0_fd = df(z0)
    if verbose:
        print('dz0_fd: ', dz0_fd)
        print('dz0: ', dz0)
    npt.assert_allclose(dz0_fd, dz0, rtol=RTOL, atol=ATOL) 
Example #5
Source File: test_uv_bayes.py    From hawkeslib with MIT License
def test_gradient_0_at_map(self):
        A = self.arr[:500]

        x = np.array([0.0099429, 0.59019621, 0.16108526])
        g = self.bhp._log_posterior_grad(A, A[-1])(x)

        assert np.linalg.norm(g, ord=1) < 10, "Gradient not zero!" + str(g) 
Example #6
Source File: test_uv_bayes.py    From hawkeslib with MIT License
def test_gradient_correct_finite_difference(self):
        A = self.arr
        f = self.bhp._log_posterior(A, A[-1])
        g = self.bhp._log_posterior_grad(A, A[-1])

        gr_numeric = nd.Gradient(f)([.3, .2, 5.])
        gr_manual = g([.3, .2, 5.])

        np.testing.assert_allclose(gr_manual, gr_numeric, rtol=1e-2) 
Example #7
Source File: vector_calculus.py    From dynamo-release with BSD 3-Clause "New" or "Revised" License
def grad(f, x):
    """Gradient of scalar-valued function f evaluated at x"""
    return nd.Gradient(f)(x) 
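A hypothetical use of this wrapper on a simple scalar field, assuming numpy and numdifftools are imported as np and nd in the surrounding module; the field and the evaluation point are chosen only for illustration.

def potential(x):
    return x[0] ** 2 + np.sin(x[1])

# Analytic gradient at (1, 0) is [2, cos(0)] = [2, 1].
print(grad(potential, np.array([1.0, 0.0])))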
Example #8
Source File: numdiff.py    From Splunking-Crime with GNU Affero General Public License v3.0
def approx_fprime_cs(x, f, epsilon=None, args=(), kwargs={}):
    '''
    Calculate gradient or Jacobian with complex step derivative approximation

    Parameters
    ----------
    x : array
        parameters at which the derivative is evaluated
    f : function
        `f(*((x,)+args), **kwargs)` returning either one value or 1d array
    epsilon : float, optional
        Stepsize; if None, the optimal stepsize EPS*x is used. See Notes.
    args : tuple
        Tuple of additional arguments for function `f`.
    kwargs : dict
        Dictionary of additional keyword arguments for function `f`.

    Returns
    -------
    partials : ndarray
       array of partial derivatives, Gradient or Jacobian

    Notes
    -----
    The complex-step derivative has truncation error O(epsilon**2), so
    truncation error can be eliminated by choosing epsilon to be very small.
    The complex-step derivative avoids the problem of round-off error with
    small epsilon because there is no subtraction.
    '''
    # From Guilherme P. de Freitas, numpy mailing list
    # May 04 2010 thread "Improvement of performance"
    # http://mail.scipy.org/pipermail/numpy-discussion/2010-May/050250.html
    n = len(x)
    epsilon = _get_epsilon(x, 1, epsilon, n)
    increments = np.identity(n) * 1j * epsilon
    # TODO: see if this can be vectorized, but usually dim is small
    partials = [f(x+ih, *args, **kwargs).imag / epsilon[i]
                for i, ih in enumerate(increments)]
    return np.array(partials).T 
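The complex-step idea described in the Notes is easy to reproduce without the module's private helpers; the following is a minimal, self-contained sketch of the same trick (not the statsmodels implementation) with an invented test function.

import numpy as np

def complex_step_grad(f, x, h=1e-20):
    # One purely imaginary perturbation per coordinate; because there is no
    # subtraction, a tiny step causes no cancellation error.
    n = len(x)
    out = np.empty(n)
    for i in range(n):
        step = np.zeros(n, dtype=complex)
        step[i] = 1j * h
        out[i] = f(x + step).imag / h
    return out

f = lambda x: np.exp(x[0]) * np.sin(x[1])
x0 = np.array([0.5, 1.2])
print(complex_step_grad(f, x0))
print(np.array([np.exp(0.5) * np.sin(1.2), np.exp(0.5) * np.cos(1.2)]))  # analytic gradient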
Example #9
Source File: optnet-np.py    From optnet with Apache License 2.0
def test_dl_dL():
    def f(l0):
        L_ = np.copy(L)
        L_[:,0] = l0
        zhat, nu, lam = af.forward_single_np(p, L_, G, A, z0, s0)
        return 0.5*np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    dL_fd = df(L[:,0])
    dl0 = np.array(dL[:,0]).ravel()
    if verbose:
        print('dL_fd: ', dL_fd)
        print('dL: ', dl0)
    npt.assert_allclose(dL_fd, dl0, rtol=RTOL, atol=ATOL) 
Example #10
Source File: optnet-np.py    From optnet with Apache License 2.0
def test_dl_dA():
    def f(A):
        A = A.reshape(neq,nz)
        zhat, nu, lam = af.forward_single_np(p, L, G, A, z0, s0)
        return 0.5*np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    dA_fd = df(A.ravel()).reshape(neq, nz)
    if verbose:
        print('dA_fd[1,:]: ', dA_fd[1,:])
        print('dA[1,:]: ', dA[1,:])
    npt.assert_allclose(dA_fd, dA, rtol=RTOL, atol=ATOL) 
Example #11
Source File: optnet-np.py    From optnet with Apache License 2.0
def test_dl_dp_batch():
    def f(p):
        zhat, nu, lam = af.forward_single_np(p, L, G, A, z0, s0)
        return 0.5*np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    dp_fd = df(p)
    if verbose:
        print('dp_fd: ', dp_fd)
        print('dp: ', dp)
    npt.assert_allclose(dp_fd, dp, rtol=RTOL, atol=ATOL) 
Example #12
Source File: optnet-np.py    From optnet with Apache License 2.0
def test_dl_dp():
    def f(p):
        zhat, nu, lam = af.forward_single_np(p, L, G, A, z0, s0)
        return 0.5*np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    dp_fd = df(p)
    if verbose:
        print('dp_fd: ', dp_fd)
        print('dp: ', dp)
    npt.assert_allclose(dp_fd, dp, rtol=RTOL, atol=ATOL) 
Example #13
Source File: optnet-np.py    From optnet with Apache License 2.0
def test_dl_ds0():
    def f(s0):
        zhat, nu, lam = af.forward_single_np(p, L, G, A, z0, s0)
        return 0.5*np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    ds0_fd = df(s0)
    if verbose:
        print('ds0_fd: ', ds0_fd)
        print('ds0: ', ds0)
    npt.assert_allclose(ds0_fd, ds0, rtol=RTOL, atol=ATOL) 
Example #14
Source File: test_numdiff_np.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def test_first_derivative_gradient_richardson(example_function_gradient_fixtures):
    f = example_function_gradient_fixtures["func"]
    fprime = example_function_gradient_fixtures["func_prime"]

    true_grad = fprime(np.ones(3))
    numdifftools_grad = Gradient(f, order=2, n=3, method="central")(np.ones(3))
    grad = first_derivative(f, np.ones(3), n_steps=3, method="central")

    aaae(numdifftools_grad, grad)
    aaae(true_grad, grad) 
Example #15
Source File: test_numdiff_np.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def example_function_gradient_fixtures():
    def f(x):
        """f:R^3 -> R"""
        x1, x2, x3 = x[0], x[1], x[2]
        y1 = np.sin(x1) + np.cos(x2) + x3 - x3
        return y1

    def fprime(x):
        """Gradient(f)(x):R^3 -> R^3"""
        x1, x2, x3 = x[0], x[1], x[2]
        grad = np.array([np.cos(x1), -np.sin(x2), x3 - x3])
        return grad

    return {"func": f, "func_prime": fprime} 
Example #16
Source File: test.py    From qpth with Apache License 2.0
def test_dl_db():
    nz, neq, nineq = 10, 3, 1
    [p, Q, G, h, A, b, truez], [dQ, dp, dG, dh, dA, db] = get_grads(
        nz=nz, neq=neq, nineq=nineq, Qscale=100., Gscale=100., Ascale=100.)

    def f(b):
        _, zhat, nu, lam, slacks = qp_cvxpy.forward_single_np(Q, p, G, h, A, b)
        return 0.5 * np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    db_fd = df(b)
    if verbose:
        print('db_fd: ', db_fd)
        print('db: ', db)
    npt.assert_allclose(db_fd, db, rtol=RTOL, atol=ATOL) 
Example #17
Source File: test.py    From qpth with Apache License 2.0
def test_dl_dh():
    nz, neq, nineq = 10, 0, 3
    [p, Q, G, h, A, b, truez], [dQ, dp, dG, dh, dA, db] = get_grads(
        nz=nz, neq=neq, nineq=nineq, Qscale=1., Gscale=1.)

    def f(h):
        _, zhat, nu, lam, slacks = qp_cvxpy.forward_single_np(Q, p, G, h, A, b)
        return 0.5 * np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    dh_fd = df(h)
    if verbose:
        print('dh_fd: ', dh_fd)
        print('dh: ', dh)
    npt.assert_allclose(dh_fd, dh, rtol=RTOL, atol=ATOL) 
Example #18
Source File: test.py    From qpth with Apache License 2.0
def test_dl_dp():
    nz, neq, nineq = 10, 2, 3
    [p, Q, G, h, A, b, truez], [dQ, dp, dG, dh, dA, db] = get_grads(
        nz=nz, neq=neq, nineq=nineq, Qscale=100., Gscale=100., Ascale=100.)

    def f(p):
        _, zhat, nu, lam, slacks = qp_cvxpy.forward_single_np(Q, p, G, h, A, b)
        return 0.5 * np.sum(np.square(zhat - truez))

    df = nd.Gradient(f)
    dp_fd = df(p)
    if verbose:
        print('dp_fd: ', dp_fd)
        print('dp: ', dp)
    npt.assert_allclose(dp_fd, dp, rtol=RTOL, atol=ATOL) 
Example #19
Source File: numdiff.py    From vnpy_crypto with MIT License
def approx_fprime_cs(x, f, epsilon=None, args=(), kwargs={}):
    '''
    Calculate gradient or Jacobian with complex step derivative approximation

    Parameters
    ----------
    x : array
        parameters at which the derivative is evaluated
    f : function
        `f(*((x,)+args), **kwargs)` returning either one value or 1d array
    epsilon : float, optional
        Stepsize; if None, the optimal stepsize EPS*x is used. See Notes.
    args : tuple
        Tuple of additional arguments for function `f`.
    kwargs : dict
        Dictionary of additional keyword arguments for function `f`.

    Returns
    -------
    partials : ndarray
       array of partial derivatives, Gradient or Jacobian

    Notes
    -----
    The complex-step derivative has truncation error O(epsilon**2), so
    truncation error can be eliminated by choosing epsilon to be very small.
    The complex-step derivative avoids the problem of round-off error with
    small epsilon because there is no subtraction.
    '''
    # From Guilherme P. de Freitas, numpy mailing list
    # May 04 2010 thread "Improvement of performance"
    # http://mail.scipy.org/pipermail/numpy-discussion/2010-May/050250.html
    n = len(x)
    epsilon = _get_epsilon(x, 1, epsilon, n)
    increments = np.identity(n) * 1j * epsilon
    # TODO: see if this can be vectorized, but usually dim is small
    partials = [f(x+ih, *args, **kwargs).imag / epsilon[i]
                for i, ih in enumerate(increments)]
    return np.array(partials).T 
Example #20
Source File: numdiff.py    From vnpy_crypto with MIT License
def approx_fprime(x, f, epsilon=None, args=(), kwargs={}, centered=False):
    '''
    Gradient of function, or Jacobian if function f returns 1d array

    Parameters
    ----------
    x : array
        parameters at which the derivative is evaluated
    f : function
        `f(*((x,)+args), **kwargs)` returning either one value or 1d array
    epsilon : float, optional
        Stepsize; if None, the optimal stepsize is used: EPS**(1/2)*x for
        `centered` == False and EPS**(1/3)*x for `centered` == True.
    args : tuple
        Tuple of additional arguments for function `f`.
    kwargs : dict
        Dictionary of additional keyword arguments for function `f`.
    centered : bool
        Whether central difference should be returned. If not, does forward
        differencing.

    Returns
    -------
    grad : array
        gradient or Jacobian

    Notes
    -----
    If f returns a 1d array, it returns a Jacobian. If a 2d array is returned
    by f (e.g., with a value for each observation), it returns a 3d array
    with the Jacobian of each observation with shape xk x nobs x xk. I.e.,
    the Jacobian of the first observation would be [:, 0, :]
    '''
    n = len(x)
    # TODO:  add scaled stepsize
    f0 = f(*((x,)+args), **kwargs)
    dim = np.atleast_1d(f0).shape  # it could be a scalar
    grad = np.zeros((n,) + dim, np.promote_types(float, x.dtype))
    ei = np.zeros((n,), float)
    if not centered:
        epsilon = _get_epsilon(x, 2, epsilon, n)
        for k in range(n):
            ei[k] = epsilon[k]
            grad[k, :] = (f(*((x+ei,) + args), **kwargs) - f0)/epsilon[k]
            ei[k] = 0.0
    else:
        epsilon = _get_epsilon(x, 3, epsilon, n) / 2.
        for k in range(len(x)):
            ei[k] = epsilon[k]
            grad[k, :] = (f(*((x+ei,)+args), **kwargs) -
                          f(*((x-ei,)+args), **kwargs))/(2 * epsilon[k])
            ei[k] = 0.0
    return grad.squeeze().T 
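The forward/central distinction in the docstring can be illustrated with a small self-contained sketch that skips the library's _get_epsilon machinery; the step size and the test function are chosen only for illustration.

import numpy as np

def fd_grad(f, x, h=1e-6, centered=False):
    # Forward difference has O(h) error; central difference has O(h**2) error.
    n = len(x)
    g = np.empty(n)
    f0 = f(x)
    for k in range(n):
        e = np.zeros(n)
        e[k] = h
        if centered:
            g[k] = (f(x + e) - f(x - e)) / (2 * h)
        else:
            g[k] = (f(x + e) - f0) / h
    return g

f = lambda x: np.sin(x).sum()
x0 = np.array([0.3, 1.1])
exact = np.cos(x0)
print(np.abs(fd_grad(f, x0) - exact).max())                 # forward-difference error
print(np.abs(fd_grad(f, x0, centered=True) - exact).max())  # central-difference error (smaller)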
Example #21
Source File: differentiation.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def gradient(
    func,
    params,
    method="central",
    extrapolation=True,
    func_kwargs=None,
    step_options=None,
):
    """
    Calculate the gradient of *func*.

    Args:
        func (function): A function that maps params into a float.
        params (DataFrame): see :ref:`params`
        method (str): The method for the computation of the derivative. Default is
            central as it gives the highest accuracy.
        extrapolation (bool): Whether to use Richardson extrapolation.
        func_kwargs (dict): additional keyword arguments for func.
        step_options (dict): Options for the numdifftools step generator.
            See :ref:`step_options`


    Returns:
        Series: The index is the index of params, the values contain the estimated
            gradient.

    """
    step_options = step_options if step_options is not None else {}

    if method not in ["central", "forward", "backward"]:
        raise ValueError("Method has to be in ['central', 'forward', 'backward']")

    func_kwargs = {} if func_kwargs is None else func_kwargs

    internal_func = _create_internal_func(func, params, func_kwargs)
    params_value = params["value"].to_numpy()

    if extrapolation:
        grad_np = nd.Gradient(internal_func, method=method, **step_options)(
            params_value
        )
    else:
        grad_np = _no_extrapolation_gradient(internal_func, params_value, method)
    return pd.Series(data=grad_np, index=params.index, name="gradient") 
Example #22
Source File: numdiff.py    From Splunking-Crime with GNU Affero General Public License v3.0
def approx_fprime(x, f, epsilon=None, args=(), kwargs={}, centered=False):
    '''
    Gradient of function, or Jacobian if function f returns 1d array

    Parameters
    ----------
    x : array
        parameters at which the derivative is evaluated
    f : function
        `f(*((x,)+args), **kwargs)` returning either one value or 1d array
    epsilon : float, optional
        Stepsize; if None, the optimal stepsize is used: EPS**(1/2)*x for
        `centered` == False and EPS**(1/3)*x for `centered` == True.
    args : tuple
        Tuple of additional arguments for function `f`.
    kwargs : dict
        Dictionary of additional keyword arguments for function `f`.
    centered : bool
        Whether central difference should be returned. If not, does forward
        differencing.

    Returns
    -------
    grad : array
        gradient or Jacobian

    Notes
    -----
    If f returns a 1d array, it returns a Jacobian. If a 2d array is returned
    by f (e.g., with a value for each observation), it returns a 3d array
    with the Jacobian of each observation with shape xk x nobs x xk. I.e.,
    the Jacobian of the first observation would be [:, 0, :]
    '''
    n = len(x)
    # TODO:  add scaled stepsize
    f0 = f(*((x,)+args), **kwargs)
    dim = np.atleast_1d(f0).shape  # it could be a scalar
    grad = np.zeros((n,) + dim, np.promote_types(float, x.dtype))
    ei = np.zeros((n,), float)
    if not centered:
        epsilon = _get_epsilon(x, 2, epsilon, n)
        for k in range(n):
            ei[k] = epsilon[k]
            grad[k, :] = (f(*((x+ei,) + args), **kwargs) - f0)/epsilon[k]
            ei[k] = 0.0
    else:
        epsilon = _get_epsilon(x, 3, epsilon, n) / 2.
        for k in range(len(x)):
            ei[k] = epsilon[k]
            grad[k, :] = (f(*((x+ei,)+args), **kwargs) -
                          f(*((x-ei,)+args), **kwargs))/(2 * epsilon[k])
            ei[k] = 0.0
    return grad.squeeze().T