Python hyperopt.hp.loguniform() Examples

The following are 20 code examples of hyperopt.hp.loguniform(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module hyperopt.hp.
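As a quick orientation: hp.loguniform(label, low, high) defines a search-space variable drawn as exp(uniform(low, high)), so its arguments are the logarithms of the desired bounds. A minimal sketch, assuming a hypothetical "learning_rate" parameter (the printed value is illustrative):

import numpy as np
from hyperopt import hp
from hyperopt.pyll.stochastic import sample

# Drawn as exp(uniform(log(1e-4), log(1e-1))), i.e. log-uniformly
# distributed between 1e-4 and 1e-1.
space = {"learning_rate": hp.loguniform("learning_rate", np.log(1e-4), np.log(1e-1))}
print(sample(space))  # e.g. {'learning_rate': 0.0023}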
Example #1
Source File: automl.py    From kddcup2019-automl with MIT License
def hyperopt_lightgbm(X: pd.DataFrame, y: pd.Series, params: Dict, config: Config):
    X_train, X_val, y_train, y_val = data_split(X, y, test_size=0.5)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "max_depth": hp.choice("max_depth", np.arange(2, 10, 1, dtype=int)),
        # smaller than 2^(max_depth)
        "num_leaves": hp.choice("num_leaves", np.arange(4, 200, 4, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.2, 0.8, 0.1),
        # "bagging_fraction": hp.quniform("bagging_fraction", 0.2, 0.8, 0.1),
        # "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 10, 2, dtype=int)),
        # "scale_pos_weight":hp.uniform('scale_pos_weight',1.0, 10.0),
        # "colsample_by_tree":hp.uniform("colsample_bytree",0.5,1.0),
        "min_child_weight": hp.quniform('min_child_weight', 2, 50, 2),
        "reg_alpha": hp.uniform("reg_alpha", 2.0, 8.0),
        "reg_lambda": hp.uniform("reg_lambda", 2.0, 8.0),
        "learning_rate": hp.quniform("learning_rate", 0.05, 0.4, 0.01),
        # "learning_rate": hp.loguniform("learning_rate", np.log(0.04), np.log(0.5)),
        "min_data_in_leaf": hp.choice('min_data_in_leaf', np.arange(200, 2000, 100, dtype=int)),
        #"is_unbalance": hp.choice("is_unbalance", [True])
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 300,
                          valid_data, early_stopping_rounds=45, verbose_eval=0)

        score = model.best_score["valid_0"][params["metric"]]

        # hyperopt minimizes the loss, so negate the score (higher AUC is better)
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=150, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
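Note that fmin returns index positions for hp.choice parameters rather than the chosen values, which is why the snippet above maps best back through space_eval. A minimal sketch of that behavior with a dummy objective (printed values are illustrative):

import numpy as np
import hyperopt
from hyperopt import hp, tpe, space_eval, Trials

space = {"max_depth": hp.choice("max_depth", np.arange(2, 10, 1, dtype=int))}
best = hyperopt.fmin(fn=lambda hyperparams: 0.0, space=space, trials=Trials(),
                     algo=tpe.suggest, max_evals=2, verbose=0,
                     rstate=np.random.RandomState(1))
print(best)                     # e.g. {'max_depth': 3} -- an index into the choices
print(space_eval(space, best))  # e.g. {'max_depth': 5} -- the corresponding value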
Example #2
Source File: hyperparams.py    From pykg2vec with MIT License
def __init__(self):
        self.search_space = {
          'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
          'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256),1)),
          'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
          'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
          'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
          'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        } 
Example #3
Source File: cartpole_worker.py    From BOAH with Apache License 2.0
def tpe_configspace(self):

        import numpy as np
        from hyperopt import hp

        space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(1e-7), np.log(1e-1)),
            'batch_size': hp.qloguniform('batch_size', np.log(8), np.log(256), 1),
            'n_units_1': hp.qloguniform('n_units_1', np.log(8), np.log(128), 1),
            'n_units_2': hp.qloguniform('n_units_2', np.log(8), np.log(128), 1),
            'discount': hp.uniform('discount', 0, 1),
            'likelihood_ratio_clipping': hp.uniform('likelihood_ratio_clipping', 0, 1),
            'entropy_regularization': hp.uniform('entropy_regularization', 0, 1)
        }
        return space
Example #4
Source File: bnn_worker.py    From BOAH with Apache License 2.0
def tpe_configspace(self):
        from hyperopt import hp
        import numpy as np
        space = {
            'l_rate': hp.loguniform('l_rate', np.log(1e-6), np.log(1e-1)),
            'burn_in': hp.uniform('burn_in', 0, .8),
            'n_units_1': hp.qloguniform('n_units_1', np.log(16), np.log(512), 1),
            'n_units_2': hp.qloguniform('n_units_2', np.log(16), np.log(512), 1),
            'mdecay': hp.uniform('mdecay', 0, 1)
        }
        return space
Example #5
Source File: test_pyll_util.py    From HPOlib with GNU General Public License v3.0
def test_write_loguniform(self):
        c = configuration_space.UniformFloatHyperparameter("c", 0.001, 1, base=np.e)
        expected = ("c", 'param_0 = hp.loguniform("c", -6.90775527898, 0.0)')
        value = self.pyll_writer.write_hyperparameter(c, None)
        self.assertEqual(expected, value) 
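The bounds in the expected string follow directly from taking logarithms of the hyperparameter's range, which a quick check confirms:

import numpy as np
print(np.log(0.001))  # -6.907755278982137, the lower bound in the expected string
print(np.log(1))      # 0.0, the upper bound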
Example #6
Source File: test_pyll_util.py    From HPOlib with GNU General Public License v3.0
def test_read_loguniform(self):
        # 0 float
        # 1   hyperopt_param
        # 2     Literal{colnorm_thresh}
        # 3     loguniform
        # 4       Literal{-20.7232658369}
        # 5       Literal{-6.90775527898}
        loguniform = hp.loguniform('colnorm_thresh', np.log(1e-9),
            np.log(1e-3)).inputs()[0].inputs()[1]
        ret = self.pyll_reader.read_loguniform(loguniform, 'colnorm_thresh')
        expected = configuration_space.UniformFloatHyperparameter(
            'colnorm_thresh', 1e-9, 1e-3, base=np.e)
        self.assertEqual(expected, ret) 
Example #7
Source File: ch06-01-hopt.py    From kagglebook with BSD 3-Clause "New" or "Revised" License
def predict(self, x):
        data = xgb.DMatrix(x)
        pred = self.model.predict(data)
        return pred


# -----------------------------------
# Specifying the parameter search space
# -----------------------------------
# hp.choice picks one value from a list of options
# hp.uniform samples from a uniform distribution between given bounds. Arguments: lower bound, upper bound
# hp.quniform samples from evenly spaced points of a uniform distribution between given bounds. Arguments: lower bound, upper bound, interval
# hp.loguniform samples from a distribution whose logarithm is uniform between given bounds. Arguments: log of the lower bound, log of the upper bound
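A small sketch drawing one sample from each kind of expression (the parameter names here are made up for illustration):

import numpy as np
from hyperopt import hp
from hyperopt.pyll.stochastic import sample

space = {
    'activation': hp.choice('activation', ['relu', 'tanh']),   # one of the listed options
    'dropout': hp.uniform('dropout', 0.0, 0.5),                # uniform over [0.0, 0.5]
    'subsample': hp.quniform('subsample', 0.5, 1.0, 0.1),      # uniform, rounded to steps of 0.1
    'lr': hp.loguniform('lr', np.log(1e-5), np.log(1e-1)),     # log-uniform over [1e-5, 1e-1]
}
print(sample(space))  # e.g. {'activation': 'tanh', 'dropout': 0.31, 'lr': 0.004, 'subsample': 0.8}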
Example #8
Source File: hyperbandInterface.py    From helen with MIT License
def __init__(self, train_file, test_file, gpu_mode, model_out_dir, log_dir, max_epochs, batch_size, num_workers):
        """
        Initialize the object
        :param train_file: A train CSV file containing train data set information
        :param test_file: A test CSV file containing test data set information
        :param gpu_mode: If true, GPU will be used to train and test the models
        :param model_out_dir: Directory to save the model
        :param log_dir: Directory to save the log
        :param max_epochs: Maximum number of epochs to train
        :param batch_size: Batch size for training
        :param num_workers: Number of data-loader workers
        """
        # the hyper-parameter space is defined here
        self.space = {
            # hp.loguniform returns a value drawn according to exp(uniform(low, high)) so that the logarithm of the
            # return value is uniformly distributed.
            'lr': hp.loguniform('lr', -8, -4),
            'l2': hp.loguniform('l2', -12, -4)
        }
        self.train_file = train_file
        self.test_file = test_file
        self.gpu_mode = gpu_mode
        self.log_directory = log_dir
        self.model_out_dir = model_out_dir
        self.max_epochs = max_epochs
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.hidden_size = TrainOptions.HIDDEN_SIZE
        self.gru_layers = TrainOptions.GRU_LAYERS 
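Because the bounds here are passed directly as logarithms, the actual search ranges are the exponentials of -8, -4, and -12; a quick check (values rounded):

import numpy as np
# 'lr' ranges over [e**-8, e**-4]  ~ [3.4e-4, 1.8e-2]
# 'l2' ranges over [e**-12, e**-4] ~ [6.1e-6, 1.8e-2]
print(np.exp(-8), np.exp(-4), np.exp(-12))  # 0.000335... 0.0183... 6.14e-06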
Example #9
Source File: hyperparams.py    From pykg2vec with MIT License
def __init__(self):
        self.search_space = {
          'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
          'L1_flag': hp.choice('L1_flag', [True, False]),
          'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256),1)),
          'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
          'margin': hp.uniform('margin', 0.0, 2.0),
          'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
          'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        } 
Example #10
Source File: hyperparams.py    From pykg2vec with MIT License
def __init__(self):
        self.search_space = {
          'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
          'L1_flag': hp.choice('L1_flag', [True, False]),
          'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(256),1)),
          'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(256),1)),
          'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
          'margin': hp.uniform('margin', 0.0, 2.0),
          'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
          'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        } 
Example #11
Source File: hyperparams.py    From pykg2vec with MIT License
def __init__(self):
        self.search_space = {
          'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
          'L1_flag': hp.choice('L1_flag', [True, False]),
          'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(64),1)),
          'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(64),1)),
          'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
          'margin': hp.uniform('margin', 0.0, 2.0),
          'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
          'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        } 
Example #12
Source File: hyperparams.py    From pykg2vec with MIT License
def __init__(self):
        self.search_space = {
          'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
          'L1_flag': hp.choice('L1_flag', [True, False]),
          'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(256),1)),
          'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(256),1)),
          'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
          'margin': hp.uniform('margin', 0.0, 2.0),
          'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
          'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        } 
Example #13
Source File: hyperparams.py    From pykg2vec with MIT License
def __init__(self):
        self.search_space = {
          'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
          'L1_flag': hp.choice('L1_flag', [True, False]),
          'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256),1)),
          'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
          'margin': hp.uniform('margin', 0.0, 2.0),
          'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
          'bilinear': hp.choice('bilinear', [True, False]),
          'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        } 
Example #14
Source File: hyperparams.py    From pykg2vec with MIT License
def __init__(self):
        self.search_space = {
          'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
          'L1_flag': hp.choice('L1_flag', [True, False]),
          'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(128),1)),
          'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
          'margin': hp.uniform('margin', 0.0, 2.0),
          'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
          'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        } 
Example #15
Source File: hyperparams.py    From pykg2vec with MIT License
def __init__(self):
        self.search_space = {
          'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
          'L1_flag': hp.choice('L1_flag', [True, False]),
          'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256),1)),
          'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
          'margin': hp.uniform('margin', 0.0, 10.0),
          'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
          'epochs': hp.choice('epochs', [500]) # always choose 500 training epochs.
        } 
Example #16
Source File: automl.py    From KDDCup2019_admin with MIT License
def hyperopt_lightgbm(X_train: pd.DataFrame, y_train: pd.Series, params: Dict, config: Config, max_evals=10):
    X_train, X_test, y_train, y_test = data_split_by_time(X_train, y_train, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X_train, y_train, test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        #"max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        if config.time_left() < 50:
            return {'status': STATUS_FAIL}
        else:
            model = lgb.train({**params, **hyperparams}, train_data, 100,
                          valid_data, early_stopping_rounds=10, verbose_eval=0)
            pred = model.predict(X_test)
            score = roc_auc_score(y_test, pred)

            #score = model.best_score["valid_0"][params["metric"]]

            # hyperopt minimizes the loss, so negate the score (higher AUC is better)
            return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
Example #17
Source File: automl.py    From KDDCup2019_admin with MIT License
def hyperopt_lightgbm_basic(X, y, params, config, max_evals=50):
    X_train, X_test, y_train, y_test = data_split_by_time(X, y, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X_train, y_train, test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    val_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        #"forgetting_factor": hp.loguniform("forgetting_factor", 0.01, 0.1)
        #"max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 100,
                        val_data, early_stopping_rounds=30, verbose_eval=0)
        pred = model.predict(X_test)
        score = roc_auc_score(y_test, pred)
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
Example #18
Source File: hyperopt_optimizer.py    From bayesmark with Apache License 2.0
def get_hyperopt_dimensions(api_config):
        """Help routine to setup hyperopt search space in constructor.

        Take api_config as argument so this can be static.
        """
        # The ordering of iteration probably makes no difference, but just to
        # be safe and consistent with space.py, the keys are sorted.
        param_list = sorted(api_config.keys())

        space = {}
        round_to_values = {}
        for param_name in param_list:
            param_config = api_config[param_name]

            param_type = param_config["type"]

            param_space = param_config.get("space", None)
            param_range = param_config.get("range", None)
            param_values = param_config.get("values", None)

            # Some setup for case that whitelist of values is provided:
            values_only_type = param_type in ("cat", "ordinal")
            if (param_values is not None) and (not values_only_type):
                assert param_range is None
                param_values = np.unique(param_values)
                param_range = (param_values[0], param_values[-1])
                round_to_values[param_name] = interp1d(
                    param_values, param_values, kind="nearest", fill_value="extrapolate"
                )

            if param_type == "int":
                low, high = param_range
                if param_space in ("log", "logit"):
                    space[param_name] = hp.qloguniform(param_name, np.log(low), np.log(high), 1)
                else:
                    space[param_name] = hp.quniform(param_name, low, high, 1)
            elif param_type == "bool":
                assert param_range is None
                assert param_values is None
                space[param_name] = hp.choice(param_name, (False, True))
            elif param_type in ("cat", "ordinal"):
                assert param_range is None
                space[param_name] = hp.choice(param_name, param_values)
            elif param_type == "real":
                low, high = param_range
                if param_space in ("log", "logit"):
                    space[param_name] = hp.loguniform(param_name, np.log(low), np.log(high))
                else:
                    space[param_name] = hp.uniform(param_name, low, high)
            else:
                assert False, "type %s not handled in API" % param_type

        return space, round_to_values 
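A hedged usage sketch, assuming the routine is callable as a plain function and given an api_config in the dict-of-dicts shape it reads (the parameter names and values here are hypothetical):

api_config = {
    "learning_rate": {"type": "real", "space": "log", "range": (1e-5, 1e-1)},
    "n_estimators": {"type": "int", "space": "linear", "range": (10, 500)},
    "criterion": {"type": "cat", "values": ["gini", "entropy"]},
    "bootstrap": {"type": "bool"},
}
space, round_to_values = get_hyperopt_dimensions(api_config)
# space["learning_rate"] is hp.loguniform over [log(1e-5), log(1e-1)];
# space["n_estimators"] is hp.quniform with step 1; round_to_values stays
# empty because no numeric parameter was given an explicit value whitelist.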
Example #19
Source File: lale_hyperopt.py    From lale with Apache License 2.0
def visitSearchSpaceNumber(self, space:SearchSpaceNumber, path:str, counter=None):
        label = self.mk_label(path, counter)

        if space.pgo is not None:
            return scope.pgo_sample(space.pgo, hp.quniform(label, 0, len(space.pgo)-1, 1))

        dist = "uniform"
        if space.distribution:
            dist = space.distribution

        if space.maximum is None:
            raise SearchSpaceError(path, f"maximum not specified for a number with distribution {dist}")
        max = space.getInclusiveMax()

        # These distributions need only a maximum
        if dist == "integer":
            if not space.discrete:
                raise SearchSpaceError(path, "integer distribution specified for a non discrete numeric type")
            return hp.randint(label, max)

        if space.minimum is None:
            raise SearchSpaceError(path, f"minimum not specified for a number with distribution {dist}")
        min = space.getInclusiveMin()

        if dist == "uniform":
            if space.discrete:
                return scope.int(hp.quniform(label, min, max, 1))
            else:
                return hp.uniform(label, min, max)
        elif dist == "loguniform":
            # for log distributions, hyperopt requires that we provide the log of the min/max
            if min <= 0:
                raise SearchSpaceError(path, f"minimum of 0 specified with a {dist} distribution.  This is not allowed; please set it (possibly using minimumForOptimizer) to be positive")
            if min > 0:
                min = math.log(min)
            if max > 0:
                max = math.log(max)
            if space.discrete:
                return scope.int(hp.qloguniform(label, min, max, 1))
            else:
                return hp.loguniform(label, min, max)

        else:
            raise SearchSpaceError(path, f"Unknown distribution type: {dist}") 
Example #20
Source File: lale_hyperopt.py    From lale with Apache License 2.0
def visitSearchSpaceNumber(self, space:SearchSpaceNumber, path:str, counter=None, useCounter=True):
        label = self.mk_label(path, counter, useCounter=useCounter)

        if space.pgo is not None:
            self.pgo_dict[label] = space.pgo
            return f"scope.pgo_sample(pgo_{label}, hp.quniform('{label}', {0}, {len(space.pgo)-1}, 1))"


        dist = "uniform"
        if space.distribution:
            dist = space.distribution

        if space.maximum is None:
            raise SearchSpaceError(path, f"maximum not specified for a number with distribution {dist}")
        max = space.getInclusiveMax()

        # These distributions need only a maximum
        if dist == "integer":
            if not space.discrete:
                raise SearchSpaceError(path, "integer distribution specified for a non discrete numeric type....")

            return f"hp.randint('{label}', {max})"

        if space.minimum is None:
            raise SearchSpaceError(path, f"minimum not specified for a number with distribution {dist}")
        min = space.getInclusiveMin()

        if dist == "uniform":
            if space.discrete:
                return f"hp.quniform('{label}', {min}, {max}, 1)"
            else:
                return f"hp.uniform('{label}', {min}, {max})"
        elif dist == "loguniform":
            # for log distributions, hyperopt requires that we provide the log of the min/max
            if min <= 0:
                    raise SearchSpaceError(path, f"minimum of 0 specified with a {dist} distribution.  This is not allowed; please set it (possibly using minimumForOptimizer) to be positive")
            if min > 0:
                min = math.log(min)
            if max > 0:
                max = math.log(max)

            if space.discrete:
                return f"hp.qloguniform('{label}', {min}, {max}, 1)"
            else:
                return f"hp.loguniform('{label}', {min}, {max})"
        else:
            raise SearchSpaceError(path, f"Unknown distribution type: {dist}")