Python scipy.optimize.approx_fprime() Examples

The following are 30 code examples of scipy.optimize.approx_fprime(). The source file, the originating project, and its license are noted above each example. You may also want to check out the other available functions and classes of the scipy.optimize module.
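Before the individual examples, here is a minimal, self-contained sketch of the basic call; the quadratic f, its hand-written gradient grad_f, and the test point x0 are illustrative choices, not code from any project below. approx_fprime(xk, f, epsilon) returns a forward-difference approximation of the gradient of f at xk.

import numpy as np
from scipy.optimize import approx_fprime

# f(x) = x^T A x with a fixed symmetric A; its gradient is 2 A x.
A = np.array([[3.0, 1.0],
              [1.0, 2.0]])

def f(x):
    return x @ A @ x

def grad_f(x):
    return 2.0 * A @ x

x0 = np.array([0.5, -1.0])
numerical = approx_fprime(x0, f, 1.5e-8)   # forward-difference gradient estimate
analytical = grad_f(x0)
print(np.allclose(numerical, analytical, atol=1e-5))  # expected: True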
Example #1
Source File: test_gpr.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def test_std_gradient():
    length_scale = np.arange(1, 6)
    X = rng.randn(10, 5)
    y = rng.randn(10)
    X_new = rng.randn(5)

    rbf = RBF(length_scale=length_scale, length_scale_bounds="fixed")
    gpr = GaussianProcessRegressor(rbf, random_state=0).fit(X, y)

    _, _, _, std_grad = gpr.predict(
        np.expand_dims(X_new, axis=0),
        return_std=True, return_cov=False, return_mean_grad=True,
        return_std_grad=True)
    num_grad = optimize.approx_fprime(
        X_new, lambda x: predict_wrapper(x, gpr)[1], 1e-4)
    assert_array_almost_equal(std_grad, num_grad, decimal=3) 
Example #2
Source File: cost.py    From ilqr with GNU General Public License v3.0
def l_x(self, x, u, i, terminal=False):
        """Partial derivative of cost function with respect to x.

        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.

        Returns:
            dl/dx [state_size].
        """
        if terminal:
            return approx_fprime(x, lambda x: self._l_terminal(x, i),
                                 self._x_eps)

        return approx_fprime(x, lambda x: self._l(x, u, i), self._x_eps) 
Example #3
Source File: cost.py    From ilqr with GNU General Public License v3.0
def l_u(self, x, u, i, terminal=False):
        """Partial derivative of cost function with respect to u.

        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.

        Returns:
            dl/du [action_size].
        """
        if terminal:
            # Not a function of u, so the derivative is zero.
            return np.zeros(self._action_size)

        return approx_fprime(u, lambda u: self._l(x, u, i), self._u_eps) 
Example #4
Source File: cost.py    From ilqr with GNU General Public License v3.0
def l_xx(self, x, u, i, terminal=False):
        """Second partial derivative of cost function with respect to x.

        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.

        Returns:
            d^2l/dx^2 [state_size, state_size].
        """
        eps = self._x_eps_hess
        Q = np.vstack([
            approx_fprime(x, lambda x: self.l_x(x, u, i, terminal)[m], eps)
            for m in range(self._state_size)
        ])
        return Q 
Example #5
Source File: cost.py    From ilqr with GNU General Public License v3.0
def l_ux(self, x, u, i, terminal=False):
        """Second partial derivative of cost function with respect to u and x.

        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.

        Returns:
            d^2l/dudx [action_size, state_size].
        """
        if terminal:
            # Not a function of u, so the derivative is zero.
            return np.zeros((self._action_size, self._state_size))

        eps = self._x_eps_hess
        Q = np.vstack([
            approx_fprime(x, lambda x: self.l_u(x, u, i)[m], eps)
            for m in range(self._action_size)
        ])
        return Q 
Example #6
Source File: cost.py    From ilqr with GNU General Public License v3.0
def l_uu(self, x, u, i, terminal=False):
        """Second partial derivative of cost function with respect to u.

        Args:
            x: Current state [state_size].
            u: Current control [action_size]. None if terminal.
            i: Current time step.
            terminal: Compute terminal cost. Default: False.

        Returns:
            d^2l/du^2 [action_size, action_size].
        """
        if terminal:
            # Not a function of u, so the derivative is zero.
            return np.zeros((self._action_size, self._action_size))

        eps = self._u_eps_hess
        Q = np.vstack([
            approx_fprime(u, lambda u: self.l_u(x, u, i)[m], eps)
            for m in range(self._action_size)
        ])
        return Q 
Example #7
Source File: test_soft_dtw.py    From soft-dtw with BSD 2-Clause "Simplified" License
def test_soft_dtw_grad_X():
    def make_func(gamma):
        def func(x):
            X_ = x.reshape(*X.shape)
            D_ = SquaredEuclidean(X_, Y)
            return SoftDTW(D_, gamma).compute()
        return func

    for gamma in (0.001, 0.01, 0.1, 1, 10, 100, 1000):
        dist = SquaredEuclidean(X, Y)
        sdtw = SoftDTW(dist, gamma)
        sdtw.compute()
        E = sdtw.grad()
        G = dist.jacobian_product(E)

        func = make_func(gamma)
        G_num = approx_fprime(X.ravel(), func, 1e-6).reshape(*G.shape)
        assert_array_almost_equal(G, G_num, 5) 
Example #8
Source File: markovChain.py    From credit-risk-modelling with GNU General Public License v3.0
def hessian(x0,epsilon,M):
    # The first derivative
    f1 = approx_fprime(x0,tpLikelihood,epsilon,M) 
    n = x0.shape[0]
    hessian = np.zeros([n,n])
    xx = x0
    for j in range(0,n):
        xx0 = xx[j] # Store old value
        xx[j] = xx0 + epsilon # Perturb with finite difference
        # Recalculate the partial derivatives for this new point
        f2 = approx_fprime(xx, tpLikelihood,epsilon,M) 
        hessian[:, j] = (f2 - f1)/epsilon # scale...
        xx[j] = xx0 # Restore initial value of x0        
    return hessian 
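Example #8 above (and Example #13 below) builds a Hessian by differencing approx_fprime one coordinate at a time. Here is a minimal, self-contained sketch of the same pattern, checked against a quadratic whose Hessian is known in closed form; the helper name finite_difference_hessian, the test quadratic, and the step size are illustrative assumptions, not code from credit-risk-modelling.

import numpy as np
from scipy.optimize import approx_fprime

def finite_difference_hessian(x0, func, epsilon):
    # Approximate the Hessian of func at x0 by differencing approx_fprime columns.
    f1 = approx_fprime(x0, func, epsilon)
    n = x0.shape[0]
    hess = np.zeros((n, n))
    xx = x0.copy()  # work on a copy so the caller's x0 is left untouched
    for j in range(n):
        xx[j] += epsilon                      # perturb the j-th coordinate
        f2 = approx_fprime(xx, func, epsilon)  # gradient at the perturbed point
        hess[:, j] = (f2 - f1) / epsilon       # j-th column of the Hessian
        xx[j] -= epsilon                       # restore
    return hess

# f(x) = 0.5 x^T A x has Hessian exactly A for symmetric A.
A = np.array([[4.0, 1.0], [1.0, 3.0]])
f = lambda x: 0.5 * x @ A @ x
H = finite_difference_hessian(np.array([0.2, -0.7]), f, 1e-5)
print(np.allclose(H, A, atol=1e-3))  # expected: True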
Example #9
Source File: test_kernels.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def numerical_gradient(X, Y, kernel, step_size=1e-4):
    grad = []
    for y in Y:
        num_grad = optimize.approx_fprime(X, kernel_X_Y, step_size, y, kernel)
        grad.append(num_grad)
    return np.asarray(grad) 
Example #10
Source File: test_acquisition.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def check_gradient_correctness(X_new, model, acq_func, y_opt):
    analytic_grad = gaussian_acquisition_1D(
        X_new, model, y_opt, acq_func)[1]
    num_grad_func = lambda x:  gaussian_acquisition_1D(
        x, model, y_opt, acq_func=acq_func)[0]
    num_grad = optimize.approx_fprime(X_new, num_grad_func, 1e-5)
    assert_array_almost_equal(analytic_grad, num_grad, 3) 
Example #11
Source File: test_soft_dtw.py    From soft-dtw with BSD 2-Clause "Simplified" License
def test_soft_dtw_grad():
    def make_func(gamma):
        def func(d):
            D_ = d.reshape(*D.shape)
            return SoftDTW(D_, gamma).compute()
        return func

    for gamma in (0.001, 0.01, 0.1, 1, 10, 100, 1000):
        sdtw = SoftDTW(D, gamma)
        sdtw.compute()
        E = sdtw.grad()
        func = make_func(gamma)
        E_num = approx_fprime(D.ravel(), func, 1e-6).reshape(*E.shape)
        assert_array_almost_equal(E, E_num, 5) 
Example #12
Source File: test_loss_and_gradient.py    From alphacsc with BSD 3-Clause "New" or "Revised" License
def gradient_checker(func, grad, shape, args=(), kwargs={}, n_checks=10,
                     rtol=1e-5, grad_name='gradient', debug=False,
                     random_seed=None):
    """Check that the gradient correctly approximate the finite difference
    """

    rng = np.random.RandomState(random_seed)

    msg = ("Computed {} did not match gradient computed with finite "
           "difference. Relative error is {{}}".format(grad_name))

    func = partial(func, **kwargs)

    def test_grad(z0):
        grad_approx = approx_fprime(z0, func, 1e-8, *args)
        grad_compute = grad(z0, *args, **kwargs)
        error = np.sum((grad_approx - grad_compute) ** 2)
        error /= np.sum(grad_approx ** 2)
        error = np.sqrt(error)
        try:
            assert error < rtol, msg.format(error)
        except AssertionError:
            if debug:
                import matplotlib.pyplot as plt
                plt.plot(grad_approx)
                plt.plot(grad_compute)
                plt.show()
            raise

    z0 = np.zeros(shape)
    test_grad(z0)

    for _ in range(n_checks):
        z0 = rng.randn(shape)
        test_grad(z0) 
Example #13
Source File: assetCorrelation.py    From credit-risk-modelling with GNU General Public License v3.0
def hessian2r(x0,epsilon,T,pMat,nMat,kMat):
    # The first derivative
    f1 = approx_fprime(x0,logL2r,epsilon,T,pMat,nMat,kMat) 
    n = x0.shape[0]
    hessian = np.zeros([n,n])
    xx = x0
    for j in range(0,n):
        xx0 = xx[j] # Store old value
        xx[j] = xx0 + epsilon # Perturb with finite difference
        # Recalculate the partial derivatives for this new point
        f2 = approx_fprime(xx, logL2r,epsilon,T,pMat,nMat,kMat) 
        hessian[:, j] = (f2 - f1)/epsilon # scale...
        xx[j] = xx0 # Restore initial value of x0        
    return hessian 
Example #14
Source File: assetCorrelation.py    From credit-risk-modelling with GNU General Public License v3.0
def score2r(x0,epsilon,T,pMat,nMat,kMat):
    score = approx_fprime(x0,logL2r,epsilon,T,pMat,nMat,kMat) 
    return score 
Example #15
Source File: ipopt_wrapper.py    From cyipopt with Eclipse Public License 1.0
def __init__(self, fun, args=(), kwargs=None, jac=None, hess=None, hessp=None,
                 constraints=(), eps=1e-8):
        if not SCIPY_INSTALLED:
            raise ImportError('Install SciPy to use the `IpoptProblemWrapper` class.')
        self.fun_with_jac = None
        self.last_x = None
        if hess is not None or hessp is not None:
            raise NotImplementedError('Using Hessian matrices is not yet implemented!')
        if jac is None:
            #fun = FunctionWithApproxJacobian(fun, epsilon=eps, verbose=False)
            jac = lambda x0, *args, **kwargs: approx_fprime(x0, fun, eps, *args, **kwargs)
        elif jac is True:
            self.fun_with_jac = fun
        elif not callable(jac):
            raise NotImplementedError('jac has to be bool or a function')
        self.fun = fun
        self.jac = jac
        self.args = args
        self.kwargs = kwargs or {}
        self._constraint_funs = []
        self._constraint_jacs = []
        self._constraint_args = []
        if isinstance(constraints, dict):
            constraints = (constraints, )
        for con in constraints:
            con_fun = con['fun']
            con_jac = con.get('jac', None)
            if con_jac is None:
                con_fun = FunctionWithApproxJacobian(con_fun, epsilon=eps, verbose=False)
                con_jac = con_fun.jac
            con_args = con.get('args', [])
            self._constraint_funs.append(con_fun)
            self._constraint_jacs.append(con_jac)
            self._constraint_args.append(con_args)
        # Set up evaluation counts
        self.nfev = 0
        self.njev = 0
        self.nit = 0 
Example #16
Source File: sheet_vertex_solver.py    From tyssue with GNU General Public License v3.0
def approx_grad(cls, sheet, geom, model):
        pos0 = sheet.vert_df[sheet.coords].values.ravel()
        pos_idx = sheet.vert_df[sheet.vert_df["is_active"] == 1].index

        grad = optimize.approx_fprime(
            pos0, cls.opt_energy, 1e-9, pos_idx, sheet, geom, model
        )
        return grad 
Example #17
Source File: quasistatic.py    From tyssue with GNU General Public License v3.0
def approx_grad(self, eptm, geom, model):
        pos0 = eptm.vert_df.loc[
            eptm.vert_df.is_active.astype(bool), eptm.coords
        ].values.ravel()
        grad = optimize.approx_fprime(pos0, self._opt_energy, 1e-9, eptm, geom, model)
        return grad 
Example #18
Source File: test_logistic.py    From twitter-stock-recommendation with MIT License
def test_logistic_loss_and_grad():
    X_ref, y = make_classification(n_samples=20, random_state=0)
    n_features = X_ref.shape[1]

    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)

        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)

        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        assert_array_almost_equal(loss, loss_interp)

        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2) 
Example #19
Source File: test_gpr.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def test_mean_gradient():
    length_scale = np.arange(1, 6)
    X = rng.randn(10, 5)
    y = rng.randn(10)
    X_new = rng.randn(5)

    rbf = RBF(length_scale=length_scale, length_scale_bounds="fixed")
    gpr = GaussianProcessRegressor(rbf, random_state=0).fit(X, y)

    mean, std, mean_grad = gpr.predict(
        np.expand_dims(X_new, axis=0),
        return_std=True, return_cov=False, return_mean_grad=True)
    num_grad = optimize.approx_fprime(
        X_new, lambda x: predict_wrapper(x, gpr)[0], 1e-4)
    assert_array_almost_equal(mean_grad, num_grad, decimal=3) 
Example #20
Source File: nbla_test_utils.py    From nnabla with Apache License 2.0
def compute_analytical_and_numerical_grad_graph(terminal, inputs,
                                                epsilon=1e-3,
                                                recompute_graph=True):
    def set_inputs(x0):
        begin = 0
        for i in inputs:
            end = begin + i.size
            i.d = x0[begin:end].reshape(i.shape)
            begin = end

    def func(x0):
        set_inputs(x0)
        terminal.forward()
        return terminal.d.copy()

    def grad(x0):
        set_inputs(x0)
        backups = [i.g.copy() for i in inputs]
        if recompute_graph:
            terminal.forward()
            terminal.backward()
        gx0 = []
        for i, b in zip(inputs, backups):
            if recompute_graph:
                gx0.append((i.g.copy() - b).flatten())
            else:
                gx0.append(i.g.copy().flatten())
            i.g = b
        return np.concatenate(gx0)

    inputs0 = np.concatenate([i.d.flatten() for i in inputs])
    analytical_grad = grad(inputs0)
    from scipy.optimize import approx_fprime
    numerical_grad = approx_fprime(inputs0, func, epsilon)
    return analytical_grad, numerical_grad 
Example #21
Source File: constraint.py    From relaxed_ik with MIT License
def func_nlopt(self, x, grad):
        numDOF = len(x)
        g = O.approx_fprime(x, self.func, numDOF * [0.001])
        if grad.size > 0:
            for i in range(numDOF):
                grad[i] = -g[i]
        return -self.func(x) 
Example #22
Source File: objective.py    From relaxed_ik with MIT License
def objective_master_nlopt(x, grad):
    vars = get_groove_global_vars()
    numDOF = len(x)
    g = O.approx_fprime(x, vars.objective_function, numDOF * [0.001])
    if grad.size > 0:
        for i in range(numDOF):
            grad[i] = g[i]

    return vars.objective_function(x)
Example #23
Source File: test_pyglmnet.py    From pyglmnet with MIT License
def test_gradients(distr):
    """Test gradient accuracy."""
    # data
    scaler = StandardScaler()
    n_samples, n_features = 1000, 100
    X = np.random.normal(0.0, 1.0, [n_samples, n_features])
    X = scaler.fit_transform(X)

    density = 0.1
    beta_ = np.zeros(n_features + 1)
    beta_[0] = np.random.rand()
    beta_[1:] = sps.rand(n_features, 1, density=density).toarray()[:, 0]

    reg_lambda = 0.1

    glm = GLM(distr=distr, reg_lambda=reg_lambda)
    y = simulate_glm(glm.distr, beta_[0], beta_[1:], X)

    func = partial(_L2loss, distr, glm.alpha,
                   glm.Tau, reg_lambda, X, y, glm.eta, glm.group)
    grad = partial(_grad_L2loss, distr, glm.alpha, glm.Tau,
                   reg_lambda, X, y,
                   glm.eta)
    approx_grad = approx_fprime(beta_, func, 1.5e-8)
    analytical_grad = grad(beta_)
    assert_allclose(approx_grad, analytical_grad, rtol=1e-5, atol=1e-3) 
Example #24
Source File: test_environments.py    From safe-exploration with MIT License
def test_gradients(before_test_inv_pend):
    env = before_test_inv_pend
    n_s = env.n_s
    n_u = env.n_u

    for i in range(n_s):
        f = lambda z: env._dynamics(0, z[:env.n_s], z[env.n_s:])[i]
        f_grad = env._jac_dynamics()[i, :]
        grad_finite_diff = approx_fprime(np.zeros((n_s + n_u,)), f, 1e-8)

        # err = check_grad(f,f_grad,np.zeros((n_s+n_u,)))

        assert np.allclose(f_grad, grad_finite_diff), \
            'Is the gradient of the {}-th dynamics dimension correct?'.format(i)
Example #25
Source File: test_opt.py    From choix with MIT License
def _test_hessian(n_items, fcts):
    """Helper for testing the hessian of objective functions."""
    for sigma in np.linspace(1, 20, num=10):
        xs = sigma * RND.randn(n_items)
        for i in range(n_items):
            obj = lambda xs: fcts.gradient(xs)[i]
            grad = lambda xs: fcts.hessian(xs)[i]
            val = approx_fprime(xs, obj, EPS)
            err = check_grad(obj, grad, xs, epsilon=EPS)
            assert abs(err / np.linalg.norm(val)) < 1e-5 
Example #26
Source File: test_opt.py    From choix with MIT License
def _test_gradient(n_items, fcts):
    """Helper for testing the gradient of objective functions."""
    for sigma in np.linspace(1, 20, num=10):
        xs = sigma * RND.randn(n_items)
        val = approx_fprime(xs, fcts.objective, EPS)
        err = check_grad(fcts.objective, fcts.gradient, xs, epsilon=EPS)
        assert abs(err / np.linalg.norm(val)) < 1e-5 
Example #27
Source File: test_logistic.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_logistic_loss_and_grad():
    X_ref, y = make_classification(n_samples=20, random_state=0)
    n_features = X_ref.shape[1]

    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)

        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)

        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        assert_array_almost_equal(loss, loss_interp)

        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2) 
Example #28
Source File: simple_convnet_tests.py    From simple_convnet with MIT License
def _check_gradients(layer_args, input_shape):
    rand = np.random.RandomState(0)
    net = cn.SoftmaxNet(layer_args=layer_args, input_shape=input_shape, rand_state=rand)
    x = rand.randn(*(10,)+net.input_shape)/100
    y = rand.randn(10) > 0
    by = net.binarize_labels(y)

    g1 = approx_fprime(net.get_params(), net.cost_for_params, 1e-5, x, by)
    g2 = net.param_grad(x, by)
    err = np.max(np.abs(g1-g2))/np.abs(g1).max()
    print(err)
    assert err < 1e-3, 'incorrect gradient!' 
Example #29
Source File: test_ml_factor.py    From vnpy_crypto with MIT License
def test_score():

    uniq, load, corr, par = _toy()
    fa = Factor(n_factor=2, corr=corr)

    def f(par):
        return fa.loglike(par)

    par2 = np.r_[0.1, 0.2, 0.3, 0.4, 0.3, 0.1, 0.2, -0.2, 0, 0.8, 0.5, 0]

    for pt in (par, par2):
        g1 = approx_fprime(pt, f, 1e-8)
        g2 = fa.score(pt)
        assert_allclose(g1, g2, atol=1e-3) 
Example #30
Source File: test_bayes_mixed_glm.py    From vnpy_crypto with MIT License
def test_elbo_grad():

    for f in range(2):
        for j in range(2):

            if f == 0:
                if j == 0:
                    y, exog_fe, exog_vc, ident = gen_simple_logit(10, 10, 2)
                else:
                    y, exog_fe, exog_vc, ident = gen_crossed_logit(
                        10, 10, 1, 2)
            elif f == 1:
                if j == 0:
                    y, exog_fe, exog_vc, ident = gen_simple_poisson(
                        10, 10, 0.5)
                else:
                    y, exog_fe, exog_vc, ident = gen_crossed_poisson(
                        10, 10, 1, 0.5)

            exog_vc = sparse.csr_matrix(exog_vc)

            if f == 0:
                glmm1 = BinomialBayesMixedGLM(y, exog_fe, exog_vc, ident,
                                              vcp_p=0.5)
            else:
                glmm1 = PoissonBayesMixedGLM(y, exog_fe, exog_vc, ident,
                                             vcp_p=0.5)

            rslt1 = glmm1.fit_map()

            for k in range(3):

                if k == 0:
                    vb_mean = rslt1.params
                    vb_sd = np.ones_like(vb_mean)
                elif k == 1:
                    vb_mean = np.zeros(len(vb_mean))
                    vb_sd = np.ones_like(vb_mean)
                else:
                    vb_mean = np.random.normal(size=len(vb_mean))
                    vb_sd = np.random.uniform(1, 2, size=len(vb_mean))

                mean_grad, sd_grad = glmm1.vb_elbo_grad(vb_mean, vb_sd)

                def elbo(vec):
                    n = len(vec) // 2
                    return glmm1.vb_elbo(vec[:n], vec[n:])

                x = np.concatenate((vb_mean, vb_sd))
                g1 = approx_fprime(x, elbo, 1e-5)
                n = len(x) // 2

                mean_grad_n = g1[:n]
                sd_grad_n = g1[n:]

                assert_allclose(mean_grad, mean_grad_n, atol=1e-2,
                                rtol=1e-2)
                assert_allclose(sd_grad, sd_grad_n, atol=1e-2,
                                rtol=1e-2)