Python scipy.optimize.minimize() Examples

The following are 30 code examples of scipy.optimize.minimize(), drawn from open-source projects. Each example lists its source file, project, and license; the original repositories provide the full context. For everything else the module offers, see the scipy.optimize documentation.
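Before the project excerpts, here is a minimal self-contained call for orientation. It is only illustrative; the Rosenbrock test function and the starting point are arbitrary choices, though rosen and rosen_der really do ship with scipy.optimize.

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])  # arbitrary starting point

# gradient-based minimization; the analytic gradient is passed via jac
res = minimize(rosen, x0, method='BFGS', jac=rosen_der)

print(res.x)        # approximate minimizer (all ones for Rosenbrock)
print(res.fun)      # objective value at the minimizer
print(res.success)  # True if the optimizer reports convergence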
Example #1
Source File: conftest.py    From NiBetaSeries with MIT License
def betaseries_file(tmpdir_factory,
                    deriv_betaseries_fname=deriv_betaseries_fname):
    bfile = tmpdir_factory.mktemp("beta").ensure(deriv_betaseries_fname)
    np.random.seed(3)
    num_trials = 40
    tgt_corr = 0.1
    bs1 = np.random.rand(num_trials)
    # create another betaseries with a target correlation
    bs2 = minimize(lambda x: abs(tgt_corr - pearsonr(bs1, x)[0]),
                   np.random.rand(num_trials)).x

    # stack the two beta series (bs2 was optimized toward the target correlation)
    bs_data = np.array([[[bs1, bs2]]])

    # the nifti image
    bs_img = nib.Nifti1Image(bs_data, np.eye(4))
    bs_img.to_filename(str(bfile))

    return bfile 
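The excerpt above depends on its conftest imports (numpy as np, nibabel as nib, pearsonr from scipy.stats, and minimize itself). A standalone sketch of the correlation-matching trick it uses, with illustrative names:

import numpy as np
from scipy.stats import pearsonr
from scipy.optimize import minimize

rng = np.random.RandomState(3)
bs1 = rng.rand(40)
tgt_corr = 0.1

# search for a second series whose correlation with bs1 approaches tgt_corr
bs2 = minimize(lambda x: abs(tgt_corr - pearsonr(bs1, x)[0]),
               rng.rand(40)).x
print(pearsonr(bs1, bs2)[0])  # close to 0.1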
Example #2
Source File: test_optimize.py    From revrand with Apache License 2.0
def test_structured_params(make_quadratic, make_random):

    random = make_random
    a, b, c, data, _ = make_quadratic
    w0 = [Parameter(random.randn(2), Bound()),
          Parameter(random.randn(1), Bound())
          ]

    qobj_struc = lambda w12, w3, data: q_struc(w12, w3, data, qobj)
    assert_opt = lambda Eab, Ec: \
        np.allclose((a, b, c), (Eab[0], Eab[1], Ec), atol=1e-3, rtol=0)

    nmin = structured_minimizer(minimize)
    res = nmin(qobj_struc, w0, args=(data,), jac=True, method='L-BFGS-B')
    assert_opt(*res.x)

    nsgd = structured_sgd(sgd)
    res = nsgd(qobj_struc, w0, data, eval_obj=True,
               random_state=make_random)
    assert_opt(*res.x)

    qf_struc = lambda w12, w3, data: q_struc(w12, w3, data, qfun)
    qg_struc = lambda w12, w3, data: q_struc(w12, w3, data, qgrad)
    res = nmin(qf_struc, w0, args=(data,), jac=qg_struc, method='L-BFGS-B')
    assert_opt(*res.x) 
Example #3
Source File: test_optimize.py    From Computable with MIT License
def test_minimize_l_bfgs_b_ftol(self):
        # Check that the `ftol` parameter in l_bfgs_b works as expected
        v0 = None
        for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
            opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
            sol = optimize.minimize(self.func, self.startparams,
                                    method='L-BFGS-B', jac=self.grad,
                                    options=opts)
            v = self.func(sol.x)

            if v0 is None:
                v0 = v
            else:
                assert_(v < v0)

            assert_allclose(v, self.func(self.solution), rtol=tol) 
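For L-BFGS-B, ftol is a relative tolerance: iteration stops when (f^k - f^{k+1}) / max(|f^k|, |f^{k+1}|, 1) <= ftol, which is why the loop above expects each tighter tolerance to yield a lower final objective value.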
Example #4
Source File: construct_portoflio.py    From Risk_Budgeting with GNU General Public License v3.0
def rb_p_weights(asset_rets, rb):
	# number of ARP series
	num_arp = asset_rets.shape[1]
	# covariance matrix of asset returns
	p_cov = asset_rets.cov()
	# initial weights
	w0 = 1.0 * np.ones((num_arp, 1)) / num_arp
	# constraints
	cons = ({'type': 'eq', 'fun': cons_sum_weight}, {'type': 'ineq', 'fun': cons_long_only_weight})
	# portfolio optimisation
	return minimize(obj_fun, w0, args=(p_cov, rb), method='SLSQP', constraints=cons) 
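The helpers cons_sum_weight, cons_long_only_weight and obj_fun are defined elsewhere in construct_portoflio.py. A plausible reconstruction of a standard risk-budgeting setup (my sketch under that assumption, not the project's exact code):

import numpy as np

def cons_sum_weight(w):
    # equality constraint: weights sum to one
    return np.sum(w) - 1.0

def cons_long_only_weight(w):
    # inequality constraint: weights are non-negative
    return w

def obj_fun(w, p_cov, rb):
    # squared deviation of each asset's risk contribution from its budget rb
    w = np.asarray(w).flatten()
    cov = np.asarray(p_cov)
    port_var = w.dot(cov).dot(w)
    risk_contrib = w * cov.dot(w) / port_var
    return np.sum((risk_contrib - np.asarray(rb)) ** 2)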
Example #5
Source File: recastlib.py    From nevergrad with MIT License
def _optimization_function(self, objective_function: Callable[[base.ArrayLike], float]) -> base.ArrayLike:
        # pylint:disable=unused-argument
        budget = np.inf if self.budget is None else self.budget
        best_res = np.inf
        best_x: np.ndarray = self.current_bests["average"].x  # np.zeros(self.dimension)
        if self.initial_guess is not None:
            best_x = np.array(self.initial_guess, copy=True)  # copy, just to make sure it is not modified
        remaining = budget - self._num_ask
        while remaining > 0:  # try to restart if budget is not elapsed
            options: Dict[str, int] = {} if self.budget is None else {"maxiter": remaining}
            res = scipyoptimize.minimize(
                objective_function,
                best_x if not self.random_restart else self._rng.normal(0.0, 1.0, self.dimension),
                method=self.method,
                options=options,
                tol=0,
            )
            if res.fun < best_res:
                best_res = res.fun
                best_x = res.x
            remaining = budget - self._num_ask
        return best_x 
Example #6
Source File: fisheye.py    From DualFisheye with MIT License
def optimize(self, psize=256, wt_pixel=1000, wt_blank=1000):
        # Precalculate raster-order XYZ coordinates at given resolution.
        [xyz, rows, cols] = self._get_equirectangular_raster(psize)
        # Scoring function gives bonus points per overlapping pixel.
        score = lambda svec: self._score(svec, xyz, wt_pixel, wt_blank)
        # Multivariable optimization using gradient-descent or similar.
        # https://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html
        svec0 = self._get_state_vector()
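        # note: SciPy 1.0 renamed this Nelder-Mead option to 'xatol';
        # 'xtol' is the pre-1.0 spelling used here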
        final = minimize(score, svec0, method='Nelder-Mead',
                         options={'xtol':1e-4, 'disp':True})
        # Store final lens parameters.
        self._set_state_vector(final.x)

    # Render combined panorama in equirectangular projection mode.
    # See also: https://en.wikipedia.org/wiki/Equirectangular_projection 
Example #7
Source File: test_optimize.py    From Computable with MIT License
def test_minimize(self):
        """Tests for the minimize wrapper."""
        self.setUp()
        self.test_bfgs(True)
        self.setUp()
        self.test_bfgs_infinite(True)
        self.setUp()
        self.test_cg(True)
        self.setUp()
        self.test_ncg(True)
        self.setUp()
        self.test_ncg_hess(True)
        self.setUp()
        self.test_ncg_hessp(True)
        self.setUp()
        self.test_neldermead(True)
        self.setUp()
        self.test_powell(True) 
Example #8
Source File: test_optimize.py    From Computable with MIT License
def test_minimize_tol_parameter(self):
        # Check that the minimize() tol= argument does something
        def func(z):
            x, y = z
            return x**2*y**2 + x**4 + 1

        def dfunc(z):
            x, y = z
            return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])

        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
                       'newton-cg', 'anneal', 'l-bfgs-b', 'tnc',
                       'cobyla', 'slsqp']:
            if method in ('nelder-mead', 'powell', 'anneal', 'cobyla'):
                jac = None
            else:
                jac = dfunc
            sol1 = optimize.minimize(func, [1,1], jac=jac, tol=1e-10,
                                     method=method)
            sol2 = optimize.minimize(func, [1,1], jac=jac, tol=1.0,
                                     method=method)
            assert_(func(sol1.x) < func(sol2.x),
                    "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x))) 
Example #9
Source File: iGAN_predict.py    From iGAN with MIT License
def invert_bfgs(gen_model, invert_model, ftr_model, im, z_predict=None, npx=64):
    _f, z = invert_model
    nz = gen_model.nz
    if z_predict is None:
        z_predict = np_rng.uniform(-1., 1., size=(1, nz))
    else:
        z_predict = floatX(z_predict)
    z_predict = np.arctanh(z_predict)
    im_t = gen_model.transform(im)
    ftr = ftr_model(im_t)

    prob = optimize.minimize(f_bfgs, z_predict, args=(_f, im_t, ftr),
                             tol=1e-6, jac=True, method='L-BFGS-B', options={'maxiter': 200})
    print('n_iters = %3d, f = %.3f' % (prob.nit, prob.fun))
    z_opt = prob.x
    z_opt_n = floatX(z_opt[np.newaxis, :])
    [f_opt, g, gx] = _f(z_opt_n, im_t, ftr)
    gx = gen_model.inverse_transform(gx, npx=npx)
    z_opt = np.tanh(z_opt)
    return gx, z_opt, f_opt 
Example #10
Source File: test_anneal.py    From Computable with MIT License
def anneal_schedule(self, schedule='fast', use_wrapper=False):
        """ Call anneal algorithm using specified schedule """
        n = 0  # index of test function
        if use_wrapper:
            opts = {'upper': self.upper[n],
                    'lower': self.lower[n],
                    'ftol': 1e-3,
                    'maxiter': self.maxiter,
                    'schedule': schedule,
                    'disp': False}
            res = minimize(self.fun[n], self.x0[n], method='anneal',
                               options=opts)
            x, retval = res['x'], res['status']
        else:
            x, retval = anneal(self.fun[n], self.x0[n], full_output=False,
                               upper=self.upper[n], lower=self.lower[n],
                               feps=1e-3, maxiter=self.maxiter,
                               schedule=schedule, disp=False)

        assert_almost_equal(x, self.sol[n], 2)
        return retval 
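Note: the 'anneal' method and scipy.optimize.anneal were deprecated in SciPy 0.14 and later removed, so this example only runs against the old SciPy that Computable targets; scipy.optimize.basinhopping or scipy.optimize.dual_annealing are the modern replacements.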
Example #11
Source File: env.py    From fragile with MIT License
def minimize_point(self, x: numpy.ndarray) -> Tuple[numpy.ndarray, Scalar]:
        """
        Minimize the target function passing one starting point.

        Args:
            x: Array representing a single point of the function to be minimized.

        Returns:
            Tuple containing a numpy array representing the best solution found, \
            and the numerical value of the function at that point.

        """
        optim_result = self.minimize(x)
        point = optim_result["x"]
        reward = float(optim_result["fun"])
        return point, reward 
Example #12
Source File: env.py    From fragile with MIT License
def minimize(self, x: numpy.ndarray):
        """
        Apply ``scipy.optimize.minimize`` to a single point.

        Args:
            x: Array representing a single point of the function to be minimized.

        Returns:
            Optimization result object returned by ``scipy.optimize.minimize``.

        """

        def _optimize(_x):
            try:
                _x = _x.reshape((1,) + _x.shape)
                y = self.function(_x)
            except (ZeroDivisionError, RuntimeError):
                y = numpy.inf
            return y

        bounds = ScipyBounds(
            ub=self.bounds.high if self.bounds is not None else None,
            lb=self.bounds.low if self.bounds is not None else None,
        )
        return minimize(_optimize, x, bounds=bounds, *self.args, **self.kwargs) 
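ScipyBounds appears to be scipy.optimize.Bounds imported under another name (an assumption based on the lb/ub keywords above). The box-bounded pattern in isolation, with illustrative names:

import numpy as np
from scipy.optimize import Bounds, minimize

def sphere(x):
    # simple convex objective with its unconstrained minimum at the origin
    return float(np.sum(x ** 2))

bounds = Bounds(lb=np.full(3, 0.5), ub=np.full(3, 2.0))
res = minimize(sphere, x0=np.array([1.0, 1.5, 0.7]), bounds=bounds)
print(res.x)  # pushed to the lower bound, approximately [0.5, 0.5, 0.5]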
Example #13
Source File: env.py    From fragile with MIT License
def __init__(self, function: Function, bounds=None, *args, **kwargs):
        """
        Initialize a :class:`Minimizer`.

        Args:
            function: :class:`Function` that will be minimized.
            bounds: :class:`Bounds` defining the domain of the minimization \
                    process. If it is ``None`` the :class:`Function` :class:`Bounds` \
                    will be used.
            *args: Passed to ``scipy.optimize.minimize``.
            **kwargs: Passed to ``scipy.optimize.minimize``.

        """
        self.env = function
        self.function = function.function
        self.bounds = self.env.bounds if bounds is None else bounds
        self.args = args
        self.kwargs = kwargs 
Example #14
Source File: sampler.py    From phoenics with Apache License 2.0
def _proposal_optimization_thread(self, batch_index, return_dict = None):
		print('starting process for ', batch_index)
		# prepare penalty function
		def penalty(x):
			num, den = self.penalty_contributions(x)
			return (num + self.lambda_values[batch_index]) / den

		optimized = []
		for sample in self.proposals:

			# set some entries to zero!
			set_to_zero = self._gen_set_to_zero_vector(sample)
			nulls = np.where(set_to_zero == 0)[0]

			opt = self.local_opt.optimize(penalty, sample * set_to_zero, max_iter = 10, ignore = nulls)

			optimized.append(opt)


		optimized = np.array(optimized)
		optimized[:, self._ints] = np.around(optimized[:, self._ints])
		optimized[:, self._cats] = np.around(optimized[:, self._cats])

		print('finished process for ', batch_index)
		if return_dict.__class__.__name__ == 'DictProxy':
			return_dict[batch_index] = optimized
		else:
			return optimized 
Example #15
Source File: test_optimize.py    From revrand with Apache License 2.0
def test_unbounded(make_quadratic, make_random):

    random = make_random
    a, b, c, data, _ = make_quadratic
    w0 = random.randn(3)

    assert_opt = lambda Ea, Eb, Ec: \
        np.allclose((a, b, c), (Ea, Eb, Ec), atol=1e-3, rtol=0)

    for updater in [SGDUpdater, AdaDelta, AdaGrad, Momentum, Adam]:
        res = sgd(qobj, w0, data, eval_obj=True, updater=updater(),
                  random_state=make_random)
        assert_opt(*res['x'])

    res = minimize(qobj, w0, args=(data,), jac=True, method='L-BFGS-B')
    assert_opt(*res['x'])

    res = minimize(qfun, w0, args=(data,), jac=qgrad, method='L-BFGS-B')
    assert_opt(*res['x'])

    res = minimize(qfun, w0, args=(data), jac=False, method=None)
    assert_opt(*res['x']) 
Example #16
Source File: test_optimize.py    From revrand with Apache License 2.0
def test_bounded(make_quadratic, make_random):

    random = make_random
    a, b, c, data, bounds = make_quadratic
    w0 = np.concatenate((random.randn(2), [1.5]))

    res = minimize(qobj, w0, args=(data,), jac=True, bounds=bounds,
                   method='L-BFGS-B')
    Ea_bfgs, Eb_bfgs, Ec_bfgs = res['x']

    res = sgd(qobj, w0, data, bounds=bounds, eval_obj=True,
              random_state=random)
    Ea_sgd, Eb_sgd, Ec_sgd = res['x']

    assert np.allclose((Ea_bfgs, Eb_bfgs, Ec_bfgs),
                       (Ea_sgd, Eb_sgd, Ec_sgd),
                       atol=5e-2, rtol=0) 
Example #17
Source File: test_optimize.py    From revrand with Apache License 2.0
def test_log_params(make_quadratic, make_random):

    random = make_random
    a, b, c, data, _ = make_quadratic
    w0 = np.abs(random.randn(3))
    bounds = [Positive(), Bound(), Positive()]

    assert_opt = lambda Ea, Eb, Ec: \
        np.allclose((a, b, c), (Ea, Eb, Ec), atol=1e-3, rtol=0)

    nmin = logtrick_minimizer(minimize)
    res = nmin(qobj, w0, args=(data,), jac=True, method='L-BFGS-B',
               bounds=bounds)
    assert_opt(*res.x)

    nsgd = logtrick_sgd(sgd)
    res = nsgd(qobj, w0, data, eval_obj=True, bounds=bounds,
               random_state=make_random)
    assert_opt(*res.x)

    nmin = logtrick_minimizer(minimize)
    res = nmin(qfun, w0, args=(data,), jac=qgrad, method='L-BFGS-B',
               bounds=bounds)
    assert_opt(*res.x) 
Example #18
Source File: test_optimize.py    From revrand with Apache License 2.0
def test_logstruc_params(make_quadratic, make_random):

    random = make_random
    a, b, c, data, _ = make_quadratic

    w0 = [Parameter(random.gamma(2, size=(2,)), Positive()),
          Parameter(random.randn(), Bound())
          ]

    qobj_struc = lambda w12, w3, data: q_struc(w12, w3, data, qobj)
    assert_opt = lambda Eab, Ec: \
        np.allclose((a, b, c), (Eab[0], Eab[1], Ec), atol=1e-3, rtol=0)

    nmin = structured_minimizer(logtrick_minimizer(minimize))
    res = nmin(qobj_struc, w0, args=(data,), jac=True, method='L-BFGS-B')
    assert_opt(*res.x)

    nsgd = structured_sgd(logtrick_sgd(sgd))
    res = nsgd(qobj_struc, w0, data, eval_obj=True, random_state=make_random)
    assert_opt(*res.x)

    qf_struc = lambda w12, w3, data: q_struc(w12, w3, data, qfun)
    qg_struc = lambda w12, w3, data: q_struc(w12, w3, data, qgrad)
    res = nmin(qf_struc, w0, args=(data,), jac=qg_struc, method='L-BFGS-B')
    assert_opt(*res.x) 
Example #19
Source File: test_optimize.py    From Computable with MIT License
def test_bfgs(self, use_wrapper=False):
        """ Broyden-Fletcher-Goldfarb-Shanno optimization routine """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams,
                                    jac=self.grad, method='BFGS', args=(),
                                    options=opts)

            params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
                    res['x'], res['fun'], res['jac'], res['hess_inv'], \
                    res['nfev'], res['njev'], res['status']
        else:
            retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
                                        args=(), maxiter=self.maxiter,
                                        full_output=True, disp=False, retall=False)

            (params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 10, self.funccalls)
        assert_(self.gradcalls == 8, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[6:8],
                        [[0, -5.25060743e-01, 4.87748473e-01],
                         [0, -5.24885582e-01, 4.87530347e-01]],
                        atol=1e-14, rtol=1e-7) 
Example #20
Source File: equitysectorweights.py    From systematictradingexamples with GNU General Public License v2.0
def basic_opt(std,corr,mus):
    number_assets=mus.shape[0]
    sigma=sigma_from_corr(std, corr)
    start_weights=[1.0/number_assets]*number_assets
    
    ## Constraints - positive weights, adding to 1.0
    bounds=[(0.0,1.0)]*number_assets
    cdict=[{'type':'eq', 'fun':addem}]

    return minimize(neg_SR_riskfree, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001) 
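sigma_from_corr, neg_SR_riskfree and addem live elsewhere in the systematictradingexamples code; a hedged reconstruction of what they most plausibly do:

import numpy as np

def addem(weights):
    # equality-constraint helper: portfolio weights must sum to one
    return 1.0 - sum(weights)

def sigma_from_corr(std, corr):
    # covariance matrix from standard deviations and a correlation matrix
    return np.diag(std).dot(corr).dot(np.diag(std))

def neg_SR_riskfree(weights, sigma, mus):
    # negative Sharpe ratio, negated because minimize() minimizes
    weights = np.asarray(weights)
    expected_return = weights.dot(np.asarray(mus).ravel())
    stdev = np.sqrt(weights.dot(sigma).dot(weights))
    return -expected_return / stdev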
Example #21
Source File: bayes_mixed_glm.py    From vnpy_crypto with MIT License
def fit_map(self, method="BFGS", minim_opts=None):
        """
        Construct the Laplace approximation to the posterior
        distribution.

        Parameters
        ----------
        method : string
            Optimization method for finding the posterior mode.
        minim_opts : dict-like
            Options passed to scipy.optimize.minimize.

        Returns
        -------
        BayesMixedGLMResults instance.
        """

        def fun(params):
            return -self.logposterior(params)

        def grad(params):
            return -self.logposterior_grad(params)

        start = self._get_start()

        r = minimize(fun, start, method=method, jac=grad, options=minim_opts)
        if not r.success:
            msg = ("Laplace fitting did not converge, |gradient|=%.6f" %
                   np.sqrt(np.sum(r.jac**2)))
            warnings.warn(msg)

        from statsmodels.tools.numdiff import approx_fprime
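        # differentiate the gradient numerically to get the Hessian of the
        # negative log posterior; its inverse is the Laplace covariance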
        hess = approx_fprime(r.x, grad)
        hess_inv = np.linalg.inv(hess)

        return BayesMixedGLMResults(self, r.x, hess_inv, optim_retvals=r) 
Example #22
Source File: test_optimize.py    From Computable with MIT License 5 votes vote down vote up
def test_powell(self, use_wrapper=False):
        """ Powell (direction set) optimization routine
        """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            res = optimize.minimize(self.func, self.startparams, args=(),
                                    method='Powell', options=opts)
            params, fopt, direc, numiter, func_calls, warnflag = \
                    res['x'], res['fun'], res['direc'], res['nit'], \
                    res['nfev'], res['status']
        else:
            retval = optimize.fmin_powell(self.func, self.startparams,
                                        args=(), maxiter=self.maxiter,
                                        full_output=True, disp=False, retall=False)

            (params, fopt, direc, numiter, func_calls, warnflag) = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        #
        # However, some leeway must be added: the exact evaluation
        # count is sensitive to numerical error, and floating-point
        # computations are not bit-for-bit reproducible across
        # machines, and when using e.g. MKL, data alignment
        # etc. affect the rounding error.
        #
        assert_(self.funccalls <= 116 + 20, self.funccalls)
        assert_(self.gradcalls == 0, self.gradcalls)

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[34:39],
                        [[0.72949016, -0.44156936, 0.47100962],
                         [0.72949016, -0.44156936, 0.48052496],
                         [1.45898031, -0.88313872, 0.95153458],
                         [0.72949016, -0.44156936, 0.47576729],
                         [1.72949016, -0.44156936, 0.47576729]],
                        atol=1e-14, rtol=1e-7) 
Example #23
Source File: optimizer.py    From vnpy_crypto with MIT License
def _fit_minimize(f, score, start_params, fargs, kwargs, disp=True,
                        maxiter=100, callback=None, retall=False,
                        full_output=True, hess=None):
    kwargs.setdefault('min_method', 'BFGS')

    # prepare options dict for minimize
    filter_opts = ['extra_fit_funcs', 'niter', 'min_method', 'tol']
    options = dict((k,v) for k,v in kwargs.items() if k not in filter_opts)
    options['disp']    = disp
    options['maxiter'] = maxiter

    # Use Hessian/Jacobian only if they're required by the method
    no_hess = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'COBYLA', 'SLSQP']
    no_jac  = ['Nelder-Mead', 'Powell', 'COBYLA']
    if kwargs['min_method'] in no_hess: hess = None
    if kwargs['min_method'] in no_jac: score = None

    res = optimize.minimize(f, start_params, args=fargs, method=kwargs['min_method'],
                            jac=score, hess=hess, callback=callback, options=options)

    xopt    = res.x
    retvals = None
    if full_output:
        nit = getattr(res, 'nit', np.nan) # scipy 0.14 compat
        retvals = {'fopt': res.fun, 'iterations': nit,
                   'fcalls': res.nfev, 'warnflag': res.status,
                   'converged': res.success}
        if retall:
            retvals.update({'allvecs': res.values()})

    return xopt, retvals 
Example #24
Source File: optimizer.py    From heamy with MIT License
def minimize(self, method):
        starting_values = [0.5] * len(self.predictions)
        cons = {"type": "eq", "fun": lambda w: 1 - sum(w)}
        bounds = [(0, 1)] * len(self.predictions)
        res = minimize(
            self.loss_func, starting_values, method=method, bounds=bounds, constraints=cons
        )
        print("Best Score (%s): %s" % (self.scorer.__name__, res["fun"]))
        print("Best Weights: %s" % res["x"])
        return res["x"] 
Example #25
Source File: 4_multi_classification.py    From deep-learning-note with MIT License
def one_vs_all(X, y, num_labels, learning_rate):
    rows = X.shape[0]
    params = X.shape[1]
    
    # k X (n + 1) array for the parameters of each of the k classifiers
    all_theta = np.zeros((num_labels, params + 1))
    
    # insert a column of ones at the beginning for the intercept term
    X = np.insert(X, 0, values=np.ones(rows), axis=1)
    
    # labels are 1-indexed instead of 0-indexed
    for i in range(1, num_labels + 1):
        theta = np.zeros(params + 1)
        y_i = np.array([1 if label == i else 0 for label in y])
        y_i = np.reshape(y_i, (rows, 1))
        
        # minimize the objective function
        fmin = minimize(fun=cost, x0=theta, args=(X, y_i, learning_rate), method='TNC', jac=gradient)
        all_theta[i-1,:] = fmin.x
    
    return all_theta 
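cost and gradient are defined earlier in the script; a hedged sketch of the standard regularized logistic-regression pair they almost certainly implement (the intercept term is conventionally left unpenalized):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cost(theta, X, y, learning_rate):
    # regularized negative log-likelihood for logistic regression
    theta = theta.reshape(-1, 1)
    h = sigmoid(X @ theta)
    reg = (learning_rate / (2 * len(X))) * np.sum(theta[1:] ** 2)
    return float(-np.mean(y * np.log(h) + (1 - y) * np.log(1 - h)) + reg)

def gradient(theta, X, y, learning_rate):
    # gradient of the cost above; the intercept is not regularized
    theta = theta.reshape(-1, 1)
    error = sigmoid(X @ theta) - y
    grad = (X.T @ error) / len(X) + (learning_rate / len(X)) * theta
    grad[0] -= (learning_rate / len(X)) * theta[0]
    return grad.ravel()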
Example #26
Source File: test_optimize.py    From Computable with MIT License
def test_ncg_hessp(self, use_wrapper=False):
        """ Newton conjugate gradient with Hessian times a vector p """
        if use_wrapper:
            opts = {'maxiter': self.maxiter, 'disp': False,
                    'return_all': False}
            retval = optimize.minimize(self.func, self.startparams,
                                       method='Newton-CG', jac=self.grad,
                                       hessp=self.hessp,
                                       args=(), options=opts)['x']
        else:
            retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
                                       fhess_p=self.hessp,
                                       args=(), maxiter=self.maxiter,
                                       full_output=False, disp=False,
                                       retall=False)

        params = retval

        assert_allclose(self.func(params), self.func(self.solution),
                        atol=1e-6)

        # Ensure that function call counts are 'known good'; these are from
        # Scipy 0.7.0. Don't allow them to increase.
        assert_(self.funccalls == 7, self.funccalls)
        assert_(self.gradcalls <= 18, self.gradcalls)  # 0.9.0
        # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
        # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0

        # Ensure that the function behaves the same; this is from Scipy 0.7.0
        assert_allclose(self.trace[3:5],
                        [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                         [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
                        atol=1e-6, rtol=1e-7) 
Example #27
Source File: test_optimize.py    From Computable with MIT License
def test_no_increase(self):
        # Check that the solver doesn't return a value worse than the
        # initial point.

        def func(x):
            return (x - 1)**2

        def bad_grad(x):
            # purposefully invalid gradient function, simulates a case
            # where line searches start failing
            return 2*(x - 1) * (-1) - 2

        def check(method):
            x0 = np.array([2.0])
            f0 = func(x0)
            jac = bad_grad
            if method in ['nelder-mead', 'powell', 'anneal', 'cobyla']:
                jac = None
            sol = optimize.minimize(func, x0, jac=jac, method=method,
                                    options=dict(maxiter=20))
            assert_equal(func(sol.x), sol.fun)

            dec.knownfailureif(method == 'slsqp', "SLSQP returns slightly worse")(lambda: None)()
            assert_(func(sol.x) <= f0)

        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
                       'newton-cg', 'anneal', 'l-bfgs-b', 'tnc',
                       'cobyla', 'slsqp']:
            yield check, method 
Example #28
Source File: test_optimize.py    From Computable with MIT License
def test_slsqp_respect_bounds(self):
        # github issue 3108
        def f(x):
            return sum((x - np.array([1., 2., 3., 4.]))**2)
        def cons(x):
            a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
            return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])
        x0 = np.array([0.5, 1., 1.5, 2.])
        res = optimize.minimize(f, x0, method='slsqp',
                                constraints={'type': 'ineq', 'fun': cons})
        assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12) 
Example #29
Source File: test_optimize.py    From Computable with MIT License
def test_rosenbrock(self):
        x0 = np.array([-1.2, 1.0])
        sol = optimize.minimize(optimize.rosen, x0,
                                jac=optimize.rosen_der,
                                hess=optimize.rosen_hess,
                                tol=1e-5,
                                method='Newton-CG')
        assert_(sol.success, sol.message)
        assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4) 
Example #30
Source File: compareoptmethods.py    From systematictradingexamples with GNU General Public License v2.0
def basic_opt(std,corr,mus):
    number_assets=mus.shape[0]
    sigma=sigma_from_corr(std, corr)
    start_weights=[1.0/number_assets]*number_assets
    
    ## Constraints - positive weights, adding to 1.0
    bounds=[(0.0,1.0)]*number_assets
    cdict=[{'type':'eq', 'fun':addem}]

    return minimize(neg_SR_riskfree, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)