Python skopt.Optimizer() Examples

The following are 18 code examples of skopt.Optimizer(), drawn from open-source projects; the source file and originating project are noted above each example. You may also want to check out all available functions and classes of the skopt module.
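Most of the examples below revolve around skopt's ask/tell interface: the optimizer proposes a point with ask(), the caller evaluates the objective at that point, and the observation is fed back with tell(). As a minimal sketch of that loop (the one-dimensional quadratic objective is just a stand-in):

import skopt

opt = skopt.Optimizer(dimensions=[(-5.0, 5.0)], n_initial_points=5)

for _ in range(15):
    x = opt.ask()               # propose the next point to evaluate
    y = (x[0] - 1.0) ** 2       # evaluate the objective (stand-in)
    result = opt.tell(x, y)     # report the observation back

print(result.x, result.fun)     # best point and objective value seen so far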
Example #1
Source File: test_tune_restore.py    From ray with Apache License 2.0
def set_basic_conf(self):
        optimizer = skopt.Optimizer([(0, 20), (-100, 100)])
        previously_run_params = [[10, 0], [15, -20]]
        known_rewards = [-189, -1144]

        def cost(space, reporter):
            reporter(loss=(space["height"]**2 + space["width"]**2))

        search_alg = SkOptSearch(
            optimizer,
            ["width", "height"],
            metric="loss",
            mode="min",
            max_concurrent=1000,  # Here to avoid breaking back-compat.
            points_to_evaluate=previously_run_params,
            evaluated_rewards=known_rewards)
        return search_alg, cost 
Example #2
Source File: test_utils.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def test_dict_list_space_representation():
    """
    Tests whether the conversion of the dictionary and list representation
    of a point from a search space works properly.
    """

    chef_space = {
        'Cooking time': (0, 1200),  # in minutes
        'Main ingredient': [
            'cheese', 'cherimoya', 'chicken', 'chard', 'chocolate', 'chicory'
        ],
        'Secondary ingredient': [
            'love', 'passion', 'dedication'
        ],
        'Cooking temperature': (-273.16, 10000.0)  # in Celsius
    }

    opt = Optimizer(dimensions=dimensions_aslist(chef_space))
    point = opt.ask()

    # check if the back-transformed point and original one are equivalent
    assert_equal(
        point,
        point_aslist(chef_space, point_asdict(chef_space, point))
    ) 
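For intuition: a raw point as returned by opt.ask() is just a flat list of coordinate values, while point_asdict pairs each value with its dimension name, e.g. (hypothetical values) {'Cooking time': 900, 'Main ingredient': 'chicken', 'Secondary ingredient': 'love', 'Cooking temperature': 180.0}; point_aslist turns such a dictionary back into the flat list the Optimizer works with, which is exactly what the assertion above round-trips.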
Example #3
Source File: test_plots.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def test_names_dimensions():
    # Define objective
    def objective(x, noise_level=0.1):
        return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2)) +\
               np.random.randn() * noise_level

    # Initialize Optimizer
    opt = Optimizer([(-2.0, 2.0)], n_initial_points=1)

    # Optimize
    for i in range(2):
        next_x = opt.ask()
        f_val = objective(next_x)
        res = opt.tell(next_x, f_val)

    # Plot results
    plots.plot_objective(res) 
Example #4
Source File: tasks.py    From law with BSD 3-Clause "New" or "Revised" License
def run(self):
        import skopt
        optimizer = self.input().load() if self.branch != 0 else skopt.Optimizer(
            dimensions=[skopt.space.Real(-5.0, 10.0), skopt.space.Real(0.0, 15.0)],
            random_state=1, n_initial_points=self.n_initial_points
        )

        x = optimizer.ask(n_points=self.n_parallel)

        output = yield Objective.req(self, x=x, iteration=self.branch, branch=-1)

        y = [f.load()["y"] for f in output["collection"].targets.values()]

        optimizer.tell(x, y)

        print("minimum after {} iterations: {}".format(self.branch + 1, min(optimizer.yi)))

        with self.output().localize("w") as tmp:
            tmp.dump(optimizer) 
Example #5
Source File: skopt_searcher.py    From autogluon with Apache License 2.0
def config2skopt(self, config):
        """ Converts autogluon config (dict object) to skopt format (list object).

        Returns
        -------
        Object of the same type as returned by `skopt.Optimizer.ask()`.
        """
        point = []
        for hp in self.hp_ordering:
            point.append(config[hp])
        return point 
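As a usage illustration with hypothetical names: if self.hp_ordering is ['batch_size', 'lr'] and config is {'lr': 0.01, 'batch_size': 32}, the method returns [32, 0.01], i.e. the configured values extracted in the searcher's fixed hyperparameter order, which is the same list format that skopt.Optimizer.ask() produces and tell() consumes.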
Example #6
Source File: tune.py    From fastchess with GNU General Public License v3.0
def summarize(opt, steps):
    print('Summarizing best values')
    for kappa in [0] + list(np.logspace(-1, 1, steps-1)):
        new_opt = skopt.Optimizer(
            opt.space.dimensions,
            acq_func='LCB',
            acq_func_kwargs=dict(kappa=kappa / 5, n_jobs=-1),
            acq_optimizer='lbfgs',
            acq_optimizer_kwargs=dict(n_restarts_optimizer=100))
        new_opt.tell(opt.Xi, opt.yi)
        x = new_opt.ask()
        y, sigma = new_opt.models[-1].predict([x], return_std=True)
        y = -y # Change back from minimization to maximization
        def score_to_elo(score):
            if score <= -1:
                return float('inf')
            if score >= 1:
                return -float('inf')
            return 400 * math.log10((1+score)/(1-score))
        elo = score_to_elo(y)
        pm = max(abs(score_to_elo(y + sigma) - elo),
                 abs(score_to_elo(y - sigma) - elo))
        print(f'Best expectation (κ={kappa:.1f}): {x}'
              f' = {y[0]:.3f} ± {sigma[0]:.3f}'
              f' (ELO-diff {elo:.3f} ± {pm:.3f})') 
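The score_to_elo conversion is the standard logistic Elo model: a score s in (-1, 1) maps to 400 * log10((1 + s) / (1 - s)). For example, s = 0.5 gives 400 * log10(3), roughly 190.8 Elo, and s = 0 gives an Elo difference of 0; the guard clauses handle the degenerate endpoints s = ±1, where the formula diverges.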
Example #7
Source File: skopt_searcher.py    From autogluon with Apache License 2.0
def __init__(self, configspace, **kwargs):
        super().__init__(
            configspace, reward_attribute=kwargs.get('reward_attribute'))
        self.hp_ordering = configspace.get_hyperparameter_names()  # fix the order of hyperparameters in configspace
        skopt_hpspace = []
        for hp in self.hp_ordering:
            hp_obj = configspace.get_hyperparameter(hp)
            hp_type = str(type(hp_obj)).lower() # type of hyperparam
            if 'integer' in hp_type:
                hp_dimension = Integer(low=int(hp_obj.lower), high=int(hp_obj.upper), name=hp)
            elif 'float' in hp_type:
                if hp_obj.log: # log10-scale hyperparameter
                    hp_dimension = Real(low=float(hp_obj.lower), high=float(hp_obj.upper), prior='log-uniform', name=hp)
                else:
                    hp_dimension = Real(low=float(hp_obj.lower), high=float(hp_obj.upper), name=hp)
            elif 'categorical' in hp_type:
                hp_dimension = Categorical(hp_obj.choices, name=hp)
            elif 'ordinal' in hp_type:
                hp_dimension = Categorical(hp_obj.sequence, name=hp)
            else:
                raise ValueError("unknown hyperparameter type: %s" % hp)
            skopt_hpspace.append(hp_dimension)
        skopt_keys = {
            'base_estimator', 'n_random_starts', 'n_initial_points',
            'acq_func', 'acq_optimizer', 'random_state', 'model_queue_size',
            'acq_func_kwargs', 'acq_optimizer_kwargs'}
        skopt_kwargs = self._filter_skopt_kwargs(kwargs, skopt_keys)
        self.bayes_optimizer = Optimizer(
            dimensions=skopt_hpspace, **skopt_kwargs) 
Example #8
Source File: skopt.py    From optuna with MIT License
def __init__(
        self, search_space: Dict[str, distributions.BaseDistribution], skopt_kwargs: Dict[str, Any]
    ) -> None:

        self._search_space = search_space

        dimensions = []
        for name, distribution in sorted(self._search_space.items()):
            if isinstance(distribution, distributions.UniformDistribution):
                # Convert the upper bound from exclusive (optuna) to inclusive (skopt).
                high = np.nextafter(distribution.high, float("-inf"))
                dimension = space.Real(distribution.low, high)
            elif isinstance(distribution, distributions.LogUniformDistribution):
                # Convert the upper bound from exclusive (optuna) to inclusive (skopt).
                high = np.nextafter(distribution.high, float("-inf"))
                dimension = space.Real(distribution.low, high, prior="log-uniform")
            elif isinstance(distribution, distributions.IntUniformDistribution):
                count = (distribution.high - distribution.low) // distribution.step
                dimension = space.Integer(0, count)
            elif isinstance(distribution, distributions.IntLogUniformDistribution):
                low = distribution.low - 0.5
                high = distribution.high + 0.5
                dimension = space.Real(low, high, prior="log-uniform")
            elif isinstance(distribution, distributions.DiscreteUniformDistribution):
                count = int((distribution.high - distribution.low) // distribution.q)
                dimension = space.Integer(0, count)
            elif isinstance(distribution, distributions.CategoricalDistribution):
                dimension = space.Categorical(distribution.choices)
            else:
                raise NotImplementedError(
                    "The distribution {} is not implemented.".format(distribution)
                )

            dimensions.append(dimension)

        self._optimizer = skopt.Optimizer(dimensions, **skopt_kwargs) 
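Note how the two step-quantized branches (IntUniformDistribution and DiscreteUniformDistribution) do not hand skopt the raw bounds but an index range Integer(0, count); the suggested index must then be mapped back to an actual parameter value elsewhere in the wrapper (not shown here), presumably along the lines of value = low + index * step (respectively low + index * q), which keeps quantized parameters exactly on their grid.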
Example #9
Source File: skopt_learner.py    From adaptive with BSD 3-Clause "New" or "Revised" License
def tell_pending(self, x):
        # 'skopt.Optimizer' takes care of points for which
        # we do not yet have results.
        self.pending_points.add(tuple(x)) 
Example #10
Source File: tasks.py    From law with BSD 3-Clause "New" or "Revised" License
def requires(self):
        return Optimizer.req(self) 
Example #11
Source File: tasks.py    From law with BSD 3-Clause "New" or "Revised" License
def requires(self):
        if self.branch == 0:
            return None
        return Optimizer.req(self, branch=self.branch - 1) 
Example #12
Source File: scikit_optimizer.py    From bayesmark with Apache License 2.0
def __init__(self, api_config, base_estimator="GP", acq_func="gp_hedge", n_initial_points=5, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        base_estimator : {'GP', 'RF', 'ET', 'GBRT'}
            How to estimate the objective function.
        acq_func : {'LCB', 'EI', 'PI', 'gp_hedge', 'EIps', 'PIps'}
            Acquisition objective to decide next suggestion.
        n_initial_points : int
            Number of points to sample randomly before actual Bayes opt.
        """
        AbstractOptimizer.__init__(self, api_config)

        dimensions, self.round_to_values = ScikitOptimizer.get_sk_dimensions(api_config)

        # Older versions of skopt don't copy over the dimensions names during
        # normalization and hence the names are missing in
        # self.skopt.space.dimensions. Therefore, we save our own copy of
        # dimensions list to be safe. If we can commit to using the newer
        # versions of skopt we can delete self.dimensions_list.
        self.dimensions_list = tuple(dd.name for dd in dimensions)

        # Undecided where we want to pass the kwargs, so for now just make sure
        # they are blank
        assert len(kwargs) == 0

        self.skopt = SkOpt(
            dimensions,
            n_initial_points=n_initial_points,
            base_estimator=base_estimator,
            acq_func=acq_func,
            acq_optimizer="auto",
            acq_func_kwargs={},
            acq_optimizer_kwargs={},
        ) 
Example #13
Source File: scikit_optimizer.py    From bayesmark with Apache License 2.0
def __init__(self, api_config, base_estimator="GP", acq_func="gp_hedge", n_initial_points=5):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        base_estimator : {'GP', 'RF', 'ET', 'GBRT'}
            How to estimate the objective function.
        acq_func : {'LCB', 'EI', 'PI', 'gp_hedge', 'EIps', 'PIps'}
            Acquisition objective to decide next suggestion.
        n_initial_points : int
            Number of points to sample randomly before actual Bayes opt.
        """
        AbstractOptimizer.__init__(self, api_config)

        dimensions, self.round_to_values = ScikitOptimizer.get_sk_dimensions(api_config)

        # Older versions of skopt don't copy over the dimensions names during
        # normalization and hence the names are missing in
        # self.skopt.space.dimensions. Therefore, we save our own copy of
        # dimensions list to be safe. If we can commit to using the newer
        # versions of skopt we can delete self.dimensions_list.
        self.dimensions_list = tuple(dd.name for dd in dimensions)

        self.skopt = SkOpt(
            dimensions,
            n_initial_points=n_initial_points,
            base_estimator=base_estimator,
            acq_func=acq_func,
            acq_optimizer="auto",
            acq_func_kwargs={},
            acq_optimizer_kwargs={},
        ) 
Example #14
Source File: test_space.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def test_purely_categorical_space():
    # Test reproduces the bug in #908; make sure it doesn't come back
    dims = [Categorical(['a', 'b', 'c']), Categorical(['A', 'B', 'C'])]
    optimizer = Optimizer(dims, n_initial_points=1, random_state=3)

    x = optimizer.ask()
    # before the fix this call raised an exception
    optimizer.tell(x, 1.) 
Example #15
Source File: test_parallel_cl.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def test_reproducible_runs(strategy, surrogate):
    # two runs of the optimizer should yield exactly the same results

    optimizer = Optimizer(
        base_estimator=surrogate(random_state=1),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=1
    )

    points = []
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        points.append(x)
        optimizer.tell(x, [branin(v) for v in x])

    # the x's should be exactly as they are in `points`
    optimizer = Optimizer(
        base_estimator=surrogate(random_state=1),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=1
    )
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)

        assert points[i] == x

        optimizer.tell(x, [branin(v) for v in x]) 
Example #16
Source File: test_parallel_cl.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def test_same_set_of_points_ask(strategy, surrogate):
    """
    For n_points not None, tests whether two consecutive calls to ask
    return the same set of points.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """

    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=2
    )

    for i in range(n_steps):
        xa = optimizer.ask(n_points, strategy)
        xb = optimizer.ask(n_points, strategy)
        optimizer.tell(xa, [branin(v) for v in xa])
        assert_equal(xa, xb)  # check if the sets of points generated are equal 
Example #17
Source File: test_parallel_cl.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def test_all_points_different(strategy, surrogate):
    """
    Tests whether the parallel optimizer always generates
    different points to evaluate.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_optimizer='sampling',
        random_state=1
    )

    tolerance = 1e-3  # distance below which two points are considered identical
    for i in range(n_steps):
        x = optimizer.ask(n_points, strategy)
        optimizer.tell(x, [branin(v) for v in x])
        distances = pdist(x)
        assert all(distances > tolerance) 
Example #18
Source File: test_parallel_cl.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def test_constant_liar_runs(strategy, surrogate, acq_func):
    """
    Tests whether the optimizer runs properly during the random
    initialization phase and beyond.

    Parameters
    ----------
    * `strategy` [string]:
        Name of the strategy to use during optimization.

    * `surrogate` [scikit-optimize surrogate class]:
        A class of the scikit-optimize surrogate used in Optimizer.

    * `acq_func` [string]:
        Name of the acquisition function used by the Optimizer.
    """
    optimizer = Optimizer(
        base_estimator=surrogate(),
        dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
        acq_func=acq_func,
        acq_optimizer='sampling',
        random_state=0
    )

    # test arguments check
    assert_raises(ValueError, optimizer.ask, {"strategy": "cl_maen"})
    assert_raises(ValueError, optimizer.ask, {"n_points": "0"})
    assert_raises(ValueError, optimizer.ask, {"n_points": 0})

    for i in range(n_steps):
        x = optimizer.ask(n_points=n_points, strategy=strategy)
        # check that n_points points were actually generated
        assert_equal(len(x), n_points)

        if "ps" in acq_func:
            optimizer.tell(x, [[branin(v), 1.1] for v in x])
        else:
            optimizer.tell(x, [branin(v) for v in x])
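A closing note on the strategy argument threaded through these parallel tests: when asked for several points at once, skopt uses a "constant liar" heuristic, temporarily assigning each pending point a fake objective value (the minimum, maximum, or mean of the observations so far for 'cl_min', 'cl_max', and 'cl_mean', respectively) so that the remaining suggestions in the batch are pushed away from points already proposed.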