Python autograd.grad() Examples

The following are 30 code examples of autograd.grad(), collected from open-source projects. You can go to the original project or source file by following the reference above each example. You may also want to check out all available functions/classes of the module autograd, or try the search function.
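For orientation before the project code below, a minimal sketch of what autograd.grad() does: it takes a scalar-valued Python function and returns a new function that evaluates its gradient with respect to the first argument (or the argument chosen with argnum). The function must be written with autograd.numpy so the operations can be traced.

import autograd.numpy as np   # thinly wrapped NumPy that records operations
from autograd import grad

def f(x):
    return np.sum(np.tanh(x) ** 2)   # scalar-valued function of an array

df = grad(f)                  # gradient with respect to the first argument
x = np.array([0.1, 0.5, 1.0])
print(df(x))                  # same shape as x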
Example #1
Source File: test_reverse_mode.py    From tangent with Apache License 2.0
def test_logistic_regression(motion, optimized):
  func = logistic_regression
  w = np.random.randn(3, 5)
  b = np.random.randn(5)
  input_ = np.random.rand(3)
  label = np.zeros(5)
  label[1] = 1

  func.__globals__['np'] = np
  df = tangent.autodiff(
      func,
      wrt=(2, 3),
      motion=motion,
      optimized=optimized,
      verbose=True,
      input_derivative=INPUT_DERIVATIVE.DefaultOne)
  dw, db = df(input_, label, w, b)

  func.__globals__['np'] = ag_np
  ag_dw = ag_grad(func, argnum=2)(input_, label, w, b)
  ag_db = ag_grad(func, argnum=3)(input_, label, w, b)
  assert np.allclose(ag_dw, dw)
  assert np.allclose(ag_db, db) 
Example #2
Source File: test_quantum_gradients.py    From pennylane with Apache License 2.0
def test_cv_gradients_parameters_inside_array(self, gaussian_dev, tol):
        "Tests that free parameters inside an array passed to an Operation yield correct gradients."
        par = [0.4, 1.3]

        def qf(x, y):
            qml.Displacement(0.5, 0, wires=[0])
            qml.Squeezing(x, 0, wires=[0])
            M = np.zeros((5, 5), dtype=object)
            M[1,1] = y
            M[1,2] = 1.0
            M[2,1] = 1.0
            return qml.expval(qml.PolyXP(M, [0, 1]))

        q = qml.QNode(qf, gaussian_dev)
        grad = q.jacobian(par)
        grad_F = q.jacobian(par, method='F')
        grad_A = q.jacobian(par, method="best")
        grad_A2 = q.jacobian(par, method="best", options={"force_order2": True})

        # par[0] can use the 'A' method, par[1] cannot
        assert q.par_to_grad_method == {0:'A', 1:'F'}
        # the different methods agree
        assert grad == pytest.approx(grad_F, abs=tol)
        assert grad_A == pytest.approx(grad_F, abs=tol)
        assert grad_A2 == pytest.approx(grad_F, abs=tol) 
Example #3
Source File: test_reverse_mode.py    From tangent with Apache License 2.0
def test_inlining_contextmanager(motion, optimized, a):
  func = inlining_contextmanager
  func = tangent.tangent(func)

  func.__globals__['np'] = np
  df = tangent.autodiff(
      func,
      motion=motion,
      optimized=optimized,
      verbose=True,
      input_derivative=INPUT_DERIVATIVE.DefaultOne)
  dx = df(a)

  func.__globals__['np'] = ag_np
  df_ag = ag_grad(func)
  df_ag(a)
  assert np.allclose(dx, 2.9 * a**2) 
Example #4
Source File: likelihood.py    From momi2 with GNU General Public License v3.0
def rearrange_dict_grad(fun):
    """
    Decorator that allows us to save memory on the forward pass,
    by precomputing the gradient
    """
    @primitive
    def wrapped_fun_helper(xdict, dummy):
        ## ag.value_and_grad() to avoid second forward pass
        ## ag.checkpoint() ensures hessian gets properly checkpointed
        val, grad = ag.checkpoint(ag.value_and_grad(fun))(xdict)
        assert len(val.shape) == 0
        dummy.cache = grad
        return val

    def wrapped_fun_helper_grad(ans, xdict, dummy):
        def grad(g):
            #print("foo")
            return {k:g*v for k,v in dummy.cache.items()}
        return grad
    defvjp(wrapped_fun_helper, wrapped_fun_helper_grad, None)

    @functools.wraps(fun)
    def wrapped_fun(xdict):
        return wrapped_fun_helper(ag.dict(xdict), lambda:None)
    return wrapped_fun 
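The decorator above leans on autograd's extension API: @primitive marks a function as opaque to the tracer and defvjp registers its vector-Jacobian product. A minimal, self-contained sketch of that mechanism (the square function is purely illustrative, not part of momi2):

import autograd.numpy as np
from autograd import grad
from autograd.extend import primitive, defvjp

@primitive
def square(x):                      # autograd will not trace inside this function
    return x ** 2

# register the VJP: upstream gradient g times d(square)/dx = 2x
defvjp(square, lambda ans, x: lambda g: g * 2 * x)

print(grad(square)(3.0))            # 6.0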
Example #5
Source File: test_list.py    From autograd with MIT License
def test_grads():
    def fun(input_list):
        A = np.sum(np.sin(input_list[0]))
        B = np.sum(np.cos(input_list[1]))
        return A + B

    def d_fun(input_list):
        g = grad(fun)(input_list)
        A = np.sum(g[0])
        B = np.sum(np.sin(g[0]))
        C = np.sum(np.sin(g[1]))
        return A + B + C

    input_list = [npr.randn(5, 6),
                  npr.randn(4, 3),
                  npr.randn(2, 4)]

    check_grads(fun)(input_list)
    check_grads(d_fun)(input_list) 
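For context on what this test exercises: when the argument is a list, grad returns a list of gradients with the same structure as the input. A minimal sketch:

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad

fun = lambda lst: np.sum(np.sin(lst[0])) + np.sum(np.cos(lst[1]))
lst = [npr.randn(3), npr.randn(4)]
g = grad(fun)(lst)                     # a list with the same structure as lst
print([gi.shape for gi in g])          # [(3,), (4,)]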
Example #6
Source File: test_autograd.py    From momi2 with GNU General Public License v3.0
def check_gradient(f, x):
    print(x, "\n", f(x))

    print("# grad2")
    grad2 = Gradient(f)(x)
    print("# building grad1")
    g = grad(f)
    print("# computing grad1")
    grad1 = g(x)

    print("gradient1\n", grad1, "\ngradient2\n", grad2)
    assert np.allclose(grad1, grad2)

    # check Hessian vector product
    y = np.random.normal(size=x.shape)
    gdot = lambda u: np.dot(g(u), y)
    hess1, hess2 = grad(gdot)(x), Gradient(gdot)(x)
    print("hess1\n", hess1, "\nhess2\n", hess2)
    assert np.allclose(hess1, hess2) 
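The gdot construction above is the usual way to obtain Hessian-vector products from two applications of grad. A standalone sketch with a simple cubic, where the expected answer is easy to verify by hand:

import autograd.numpy as np
from autograd import grad

f = lambda x: np.sum(x ** 3)
x = np.array([1.0, 2.0])
v = np.array([0.5, -1.0])

hvp = grad(lambda u: np.dot(grad(f)(u), v))   # u -> H(u) @ v
print(hvp(x), 6 * x * v)                      # Hessian of f is diag(6x), so both match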
Example #7
Source File: test_list.py    From autograd with MIT License
def test_getter():
    def fun(input_list):
        A = np.sum(input_list[0])
        B = np.sum(input_list[1])
        C = np.sum(input_list[1])
        return A + B + C

    d_fun = grad(fun)
    input_list = [npr.randn(5, 6),
                   npr.randn(4, 3),
                   npr.randn(2, 4)]

    result = d_fun(input_list)
    assert np.allclose(result[0], np.ones((5, 6)))
    assert np.allclose(result[1], 2 * np.ones((4, 3)))
    assert np.allclose(result[2], np.zeros((2, 4))) 
Example #8
Source File: gradient_descent.py    From pennylane with Apache License 2.0
def compute_grad(objective_fn, x, grad_fn=None):
        r"""Compute gradient of the objective_fn at the point x.

        Args:
            objective_fn (function): the objective function for optimization
            x (array): NumPy array containing the current values of the variables to be updated
            grad_fn (function): Optional gradient function of the
                objective function with respect to the variables ``x``.
                If ``None``, the gradient function is computed automatically.

        Returns:
            array: NumPy array containing the gradient :math:`\nabla f(x^{(t)})`
        """
        if grad_fn is not None:
            g = grad_fn(x)  # just call the supplied grad function
        else:
            # default is autograd
            g = autograd.grad(objective_fn)(x)  # pylint: disable=no-value-for-parameter
        return g 
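A hypothetical call of the default branch (the objective and values below are illustrative, not from PennyLane): when grad_fn is None, the objective is differentiated with autograd directly.

import autograd
import autograd.numpy as np

def objective(x):
    return np.sum(x ** 2)

x = np.array([1.0, 2.0, 3.0])
g = autograd.grad(objective)(x)   # what compute_grad does when grad_fn is None
print(g)                          # 2 * x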
Example #9
Source File: gradient_descent.py    From pennylane with Apache License 2.0
def apply_grad(self, grad, x):
        r"""Update the variables x to take a single optimization step. Flattens and unflattens
        the inputs to maintain nested iterables as the parameters of the optimization.

        Args:
            grad (array): The gradient of the objective
                function at point :math:`x^{(t)}`: :math:`\nabla f(x^{(t)})`
            x (array): the current value of the variables :math:`x^{(t)}`

        Returns:
            array: the new values :math:`x^{(t+1)}`
        """

        x_flat = _flatten(x)
        grad_flat = _flatten(grad)

        x_new_flat = [e - self._stepsize * g for g, e in zip(grad_flat, x_flat)]

        return unflatten(x_new_flat, x) 
Example #10
Source File: test_autograd.py    From pennylane with Apache License 2.0
def test_rotation_gradient(self, theta, tol):
        """Tests that the automatic gradient of a phase space rotation is correct."""

        def circuit(y):
            qml.Displacement(alpha, 0., wires=[0])
            qml.Rotation(y, wires=[0])
            return qml.expval(qml.X(0))

        dev = qml.device('default.gaussian', wires=1)
        circuit = to_autograd(QubitQNode(circuit, dev))
        grad_fn = autograd.grad(circuit)

        autograd_val = grad_fn(theta)
        # qfunc evaluates to hbar * alpha * cos(theta)
        manualgrad_val = - hbar * alpha * np.sin(theta)
        assert autograd_val == pytest.approx(manualgrad_val, abs=tol) 
Example #11
Source File: test_autograd.py    From pennylane with Apache License 2.0
def test_displacement_gradient(self, mag, theta, tol):
        """Tests that the automatic gradient of a phase space displacement is correct."""

        def circuit(r, phi):
            qml.Displacement(r, phi, wires=[0])
            return qml.expval(qml.X(0))

        dev = qml.device('default.gaussian', wires=1)
        circuit = to_autograd(CVQNode(circuit, dev))
        grad_fn = autograd.grad(circuit)

        #alpha = mag * np.exp(1j * theta)
        autograd_val = grad_fn(mag, theta)
        # qfunc evaluates to hbar * Re(alpha)
        manualgrad_val = hbar * np.cos(theta)
        assert autograd_val == pytest.approx(manualgrad_val, abs=tol) 
Example #12
Source File: test_autograd.py    From pennylane with Apache License 2.0
def test_squeeze_gradient(self, r, tol):
        """Tests that the automatic gradient of a phase space squeezing is correct."""

        def circuit(y):
            qml.Displacement(alpha, 0., wires=[0])
            qml.Squeezing(y, 0., wires=[0])
            return qml.expval(qml.X(0))

        dev = qml.device('default.gaussian', wires=1)
        circuit = to_autograd(CVQNode(circuit, dev))
        grad_fn = autograd.grad(circuit)

        autograd_val = grad_fn(r)
        # qfunc evaluates to exp(-r) * hbar * Re(alpha)
        manualgrad_val = -np.exp(-r) * hbar * alpha
        assert autograd_val == pytest.approx(manualgrad_val, abs=tol) 
Example #13
Source File: train.py    From tree-regularization-public with MIT License
def build_batched_grad_fences(grad, batch_size, inputs, fences, targets):
    """Return grad on batched gradient. 

    @param grad: gradient function.
    @param batch_size: integer
                       batch size
    @param inputs: NumPy Array
                   size D x N
    @param fences: NumPy Array
    @param targets: NumPy Array
                    size O x N
    @return batched_grad: function
                          function to compute gradients on inputs with fenceposts.
    """
    def batched_grad(weights, i):
        cur_idxs, cur_slice = get_ith_minibatch_ixs_fences(i, batch_size, fences)
        batched_inputs = inputs[:, cur_slice]
        batched_targets = None if targets is None else targets[:, cur_slice]
        batched_fences = fences[cur_idxs.start:cur_idxs.stop+1] - fences[cur_idxs.start]
        return grad(weights, batched_inputs, batched_fences, batched_targets)
    return batched_grad 
Example #14
Source File: bench_rnn.py    From autograd with MIT License
def setup(self):
        self.batch_size = 16
        self.dtype = "float32"
        self.D = 2**10
        self.x = 0.01 * np.random.randn(self.batch_size,self.D).astype(self.dtype)
        self.W1 = 0.01 * np.random.randn(self.D,self.D).astype(self.dtype)
        self.b1 = 0.01 * np.random.randn(self.D).astype(self.dtype)
        self.Wout = 0.01 * np.random.randn(self.D,1).astype(self.dtype)
        self.bout = 0.01 * np.random.randn(1).astype(self.dtype)
        self.l = (np.random.rand(self.batch_size,1) > 0.5).astype(self.dtype)
        self.n = 50

        def autograd_rnn(params, x, label, n):
            W, b, Wout, bout = params
            h1 = x
            for i in range(n):
                h1 = np.tanh(np.dot(h1, W) + b)
            logit = np.dot(h1, Wout) + bout
            loss = -np.sum(label * logit - (
                    logit + np.log(1 + np.exp(-logit))))
            return loss

        self.fn = autograd_rnn
        self.grad_fn = grad(self.fn) 
Example #15
Source File: test_quantum_gradients.py    From pennylane with Apache License 2.0
def test_Rot(self, qubit_device_1_wire, tol):
        "Tests that the automatic gradient of a arbitrary Euler-angle-parameterized gate is correct."

        @qml.qnode(qubit_device_1_wire)
        def circuit(x,y,z):
            qml.Rot(x,y,z, wires=[0])
            return qml.expval(qml.PauliZ(0))

        grad_fn = autograd.grad(circuit, argnum=[0,1,2])

        eye = np.eye(3)
        for theta in thetas:
            angle_inputs = np.array([theta, theta ** 3, np.sqrt(2) * theta])
            autograd_val = grad_fn(*angle_inputs)
            for idx in range(3):
                onehot_idx = eye[idx]
                param1 = angle_inputs + np.pi / 2 * onehot_idx
                param2 = angle_inputs - np.pi / 2 * onehot_idx
                manualgrad_val = (circuit(*param1) - circuit(*param2)) / 2
                assert autograd_val[idx] == pytest.approx(manualgrad_val, abs=tol) 
Example #16
Source File: test_reverse_over_reverse.py    From tangent with Apache License 2.0
def _test_gradgrad_array(func, optimized, *args):
  """Test gradients of functions with NumPy-compatible signatures."""

  def tangent_func():
    func.__globals__['np'] = np
    df = tangent.grad(func, optimized=optimized, verbose=True)
    ddf = tangent.grad(df, optimized=optimized, verbose=True)
    return ddf(*args)

  def reference_func():
    func.__globals__['np'] = ag_np
    return ag_grad(ag_grad(func))(*args)

  def backup_reference_func():
    return utils.numeric_grad(utils.numeric_grad(func))(*args)

  utils.assert_result_matches_reference(
      tangent_func, reference_func, backup_reference_func,
      tolerance=1e-2)  # extra loose bounds for 2nd order grad 
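The reference path ag_grad(ag_grad(func)) is plain reverse-over-reverse differentiation in autograd. A minimal scalar sketch:

import autograd.numpy as np
from autograd import grad

f = lambda x: np.sin(x)
d2f = grad(grad(f))                 # second derivative via nested reverse mode
print(d2f(0.5), -np.sin(0.5))       # the two values should agree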
Example #17
Source File: test_quantum_gradients.py    From pennylane with Apache License 2.0
def test_U2(self, tol):
        """Tests that the gradient of an arbitrary U2 gate is correct"""
        dev = qml.device("default.qubit", wires=1)

        @qml.qnode(dev)
        def circuit(x, y):
            qml.QubitStateVector(1j*np.array([1, -1])/np.sqrt(2), wires=[0])
            qml.U2(x, y, wires=[0])
            return qml.expval(qml.PauliX(0))

        phi = -0.234
        lam = 0.654

        res = circuit(phi, lam)
        expected = np.sin(lam)*np.sin(phi)
        assert np.allclose(res, expected, atol=tol, rtol=0)

        grad_fn = autograd.grad(circuit, argnum=[0, 1])
        res = grad_fn(phi, lam)
        expected = np.array([
            np.sin(lam)*np.cos(phi),
            np.cos(lam)*np.sin(phi)
        ])
        assert np.allclose(res, expected, atol=tol, rtol=0) 
Example #18
Source File: test_autograd.py    From pennylane with Apache License 2.0
def test_no_differentiable_parameters(self):
        """If there are no differentiable parameters, the output of the gradient
        function is an empty tuple, and a warning is emitted."""
        dev = qml.device("default.qubit", wires=2)

        @qml.qnode(dev, interface="autograd")
        def circuit(data1):
            qml.templates.AmplitudeEmbedding(data1, wires=[0, 1])
            return qml.expval(qml.PauliZ(0))

        grad_fn = qml.grad(circuit)
        data1 = qml.numpy.array([0, 1, 1, 0], requires_grad=False) / np.sqrt(2)

        with pytest.warns(UserWarning, match="Output seems independent of input"):
            res = grad_fn(data1)

        assert res == tuple() 
Example #19
Source File: train.py    From tree-regularization-public with MIT License
def train(self, X_train, F_train, y_train, batch_size=32, num_iters=1000, 
              lr=1e-3, param_scale=0.01, log_every=100, init_weights=None):
        grad_fun = build_batched_grad_fences(grad(self.objective), batch_size, 
                                             X_train, F_train, y_train)
        if init_weights is None:
            init_weights = self.init_weights(param_scale)
        saved_weights = np.zeros((num_iters, self.num_weights))

        def callback(weights, i, gradients):
            apl = self.average_path_length(weights, X_train, F_train, y_train)
            saved_weights[i, :] = weights
            loss_train = self.objective(weights, X_train, F_train, y_train)
            if i % log_every == 0: 
                print('model: gru | iter: {} | loss: {:.2f} | apl: {:.2f}'.format(i, loss_train, apl))

        optimized_weights = adam(grad_fun, init_weights, num_iters=num_iters, 
                                 step_size=lr, callback=callback)
        self.saved_weights = saved_weights
        self.weights = optimized_weights
        return optimized_weights 
Example #20
Source File: train.py    From tree-regularization-public with MIT License
def train(self, X_train, y_train, batch_size=32, num_iters=1000, 
              lr=1e-3, param_scale=0.01, log_every=100, init_weights=None):
        grad_fun = build_batched_grad(grad(self.objective), batch_size, 
                                      X_train, y_train)
        if init_weights is None:
            init_weights = self.init_weights(param_scale)
    
        def callback(weights, i, gradients):
            loss_train = self.objective(weights, X_train, y_train)
            if i % log_every == 0: 
                print('model: mlp | iter: {} | loss: {:.2f}'.format(i, loss_train))

        optimized_weights = adam(grad_fun, init_weights, num_iters=num_iters,
                                 step_size=lr, callback=callback)
        self.weights = optimized_weights
        return optimized_weights 
Example #21
Source File: train.py    From tree-regularization-public with MIT License
def build_batched_grad(grad, batch_size, inputs, targets):
    """Return grad on batched gradient. 

    @param grad: gradient function.
    @param batch_size: integer
                       batch size
    @param inputs: NumPy Array
                   size D x N
    @param targets: NumPy Array
                    size O x N
    @return batched_grad: function
                          function to compute gradients on inputs.
    """
    def batched_grad(weights, i):
        cur_idxs = get_ith_minibatch_ixs(i, targets.shape[1], batch_size)
        return grad(weights, inputs[:, cur_idxs], targets[:, cur_idxs])
    return batched_grad 
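A hedged usage sketch of the same minibatch pattern with autograd's Adam optimizer, which expects a gradient function with the signature g(params, iteration). The toy loss and shapes below are assumptions for illustration, and the slicing is done inline rather than via the project's get_ith_minibatch_ixs helper:

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
from autograd.misc.optimizers import adam

D, O, N, batch_size = 4, 1, 128, 32
inputs = npr.randn(D, N)                       # D x N, as in the docstring above
targets = npr.randn(O, N)                      # O x N

def loss(weights, x, y):                       # illustrative objective
    return np.mean((np.dot(weights, x) - y) ** 2)

def batched_grad(weights, i):                  # same (params, i) signature adam expects
    sl = slice((i * batch_size) % N, (i * batch_size) % N + batch_size)
    return grad(loss)(weights, inputs[:, sl], targets[:, sl])

weights = adam(batched_grad, np.zeros((O, D)), num_iters=100, step_size=0.01)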
Example #22
Source File: black_box_svi.py    From autograd with MIT License
def black_box_variational_inference(logprob, D, num_samples):
    """Implements http://arxiv.org/abs/1401.0118, and uses the
    local reparameterization trick from http://arxiv.org/abs/1506.02557"""

    def unpack_params(params):
        # Variational dist is a diagonal Gaussian.
        mean, log_std = params[:D], params[D:]
        return mean, log_std

    def gaussian_entropy(log_std):
        return 0.5 * D * (1.0 + np.log(2*np.pi)) + np.sum(log_std)

    rs = npr.RandomState(0)
    def variational_objective(params, t):
        """Provides a stochastic estimate of the variational lower bound."""
        mean, log_std = unpack_params(params)
        samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
        lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples, t))
        return -lower_bound

    gradient = grad(variational_objective)

    return variational_objective, gradient, unpack_params 
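A hedged usage sketch: the returned gradient has the (params, t) signature that autograd's Adam optimizer expects, so it can be passed in directly. The standard-normal logprob and the initialization below are illustrative assumptions, not part of the original example:

import autograd.numpy as np
from autograd.misc.optimizers import adam

D, num_samples = 2, 100
logprob = lambda samples, t: -0.5 * np.sum(samples ** 2, axis=1)   # target: standard normal
objective, gradient, unpack_params = black_box_variational_inference(logprob, D, num_samples)

init_params = np.concatenate([np.zeros(D), -3 * np.ones(D)])        # [mean, log_std]
params = adam(gradient, init_params, num_iters=300, step_size=0.1)
mean, log_std = unpack_params(params)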
Example #23
Source File: test_quantum_gradients.py    From pennylane with Apache License 2.0
def test_U3(self, tol):
        """Tests that the gradient of an arbitrary U3 gate is correct"""
        dev = qml.device("default.qubit", wires=1)

        @qml.qnode(dev)
        def circuit(x, y, z):
            qml.QubitStateVector(1j*np.array([1, -1])/np.sqrt(2), wires=[0])
            qml.U3(x, y, z, wires=[0])
            return qml.expval(qml.PauliX(0))

        theta = 0.543
        phi = -0.234
        lam = 0.654

        res = circuit(theta, phi, lam)
        expected = np.sin(lam)*np.sin(phi) - np.cos(theta)*np.cos(lam)*np.cos(phi)
        assert np.allclose(res, expected, atol=tol, rtol=0)

        grad_fn = autograd.grad(circuit, argnum=[0, 1, 2])
        res = grad_fn(theta, phi, lam)
        expected = np.array([
            np.sin(theta)*np.cos(lam)*np.cos(phi),
            np.cos(theta)*np.cos(lam)*np.sin(phi) + np.sin(lam)*np.cos(phi),
            np.cos(theta)*np.sin(lam)*np.cos(phi) + np.cos(lam)*np.sin(phi)
        ])
        assert np.allclose(res, expected, atol=tol, rtol=0) 
Example #24
Source File: test_graphs.py    From autograd with MIT License
def test_grad_identity():
    fun = lambda x : x
    df = grad(fun)
    ddf = grad(df)
    assert np.allclose(df(2.0), 1.0)
    assert np.allclose(ddf(2.0), 0.0) 
Example #25
Source File: test_graphs.py    From autograd with MIT License
def test_enclosing_scope_ref():
    def fun(x):
        inner_fun = lambda y : x * y
        return x * grad(inner_fun)(2.0)
    check_grads(fun)(1.0) 
Example #26
Source File: test_quantum_gradients.py    From pennylane with Apache License 2.0
def test_qfunc_gradients(self, qubit_device_2_wires, tol):
        "Tests that the various ways of computing the gradient of a qfunc all agree."

        def circuit(x, y, z):
            qml.RX(x, wires=[0])
            qml.CNOT(wires=[0, 1])
            qml.RY(-1.6, wires=[0])
            qml.RY(y, wires=[1])
            qml.CNOT(wires=[1, 0])
            qml.RX(z, wires=[0])
            qml.CNOT(wires=[0, 1])
            return qml.expval(qml.PauliZ(0))

        qnode = qml.QNode(circuit, qubit_device_2_wires)
        params = np.array([0.1, -1.6, np.pi / 5])

        # manual gradients
        grad_fd1 = qnode.jacobian(params, method='F', options={"order": 1})
        grad_fd2 = qnode.jacobian(params, method='F', options={"order": 2})
        grad_angle = qnode.jacobian(params, method='A')

        # automatic gradient
        # Note: the lambda function is required as evaluate now receives a required `kwargs` argument
        # that cannot be differentiated by autograd.
        grad_fn = autograd.grad(lambda x: qnode.evaluate(x, {}))
        grad_auto = grad_fn(params)[np.newaxis, :]  # so shapes will match

        # gradients computed with different methods must agree
        assert grad_fd1 == pytest.approx(grad_fd2, abs=tol)
        assert grad_fd1 == pytest.approx(grad_angle, abs=tol)
        assert grad_fd1 == pytest.approx(grad_auto, abs=tol) 
Example #27
Source File: test_quantum_gradients.py    From pennylane with Apache License 2.0
def test_RX_gradient(self, qubit_device_1_wire, tol):
        "Tests that the automatic gradient of a Pauli X-rotation is correct."

        @qml.qnode(qubit_device_1_wire)
        def circuit(x):
            qml.RX(x, wires=[0])
            return qml.expval(qml.PauliZ(0))

        grad_fn = autograd.grad(circuit)

        for theta in thetas:
            autograd_val = grad_fn(theta)
            manualgrad_val = (circuit(theta + np.pi / 2) - circuit(theta - np.pi / 2)) / 2
            assert autograd_val == pytest.approx(manualgrad_val, abs=tol) 
Example #28
Source File: test_quantum_gradients.py    From pennylane with Apache License 2.0
def test_RY_gradient(self, qubit_device_1_wire, tol):
        "Tests that the automatic gradient of a Pauli Y-rotation is correct."

        @qml.qnode(qubit_device_1_wire)
        def circuit(x):
            qml.RY(x, wires=[0])
            return qml.expval(qml.PauliZ(0))

        grad_fn = autograd.grad(circuit)

        for theta in thetas:
            autograd_val = grad_fn(theta)
            manualgrad_val = (circuit(theta + np.pi / 2) - circuit(theta - np.pi / 2)) / 2
            assert autograd_val == pytest.approx(manualgrad_val, abs=tol) 
Example #29
Source File: test_graphs.py    From autograd with MIT License
def test_complex_separate_real_and_imaginary():
    def fun(a):
        r, i = np.real(a), np.imag(a)
        a = np.abs(r)**1.4 + np.abs(i)**1.3
        return np.sum(np.sin(a))
    d_fun = lambda x : grad(fun)(x)
    A = npr.randn(5, 3) + 0.1j*npr.randn(5, 3)
    check_grads(fun)(A)
    check_grads(d_fun)(A) 
Example #30
Source File: test_graphs.py    From autograd with MIT License
def test_third_derivative():
    fun = lambda x : np.sin(np.sin(x) + np.sin(x))
    df = grad(fun)
    ddf = grad(df)
    dddf = grad(ddf)
    check_grads(fun)(npr.randn())
    check_grads(df)(npr.rand())
    check_grads(ddf)(npr.rand())
    check_grads(dddf)(npr.rand())