Python hyperopt.STATUS_OK Examples

The following are 30 code examples of the hyperopt.STATUS_OK status constant, drawn from open-source projects. The source file, project, and license for each example are listed above its code.
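Before the project examples, here is a minimal, self-contained sketch of the typical pattern: the objective function returns a dict containing a 'loss' to minimize and a 'status' flag set to STATUS_OK, and fmin drives the search. The quadratic objective below is illustrative only, not taken from any of the projects that follow.

from hyperopt import fmin, tpe, hp, Trials, STATUS_OK

def objective(x):
    # hyperopt minimizes the returned 'loss'; STATUS_OK marks the trial as successful
    return {'loss': (x - 2) ** 2, 'status': STATUS_OK}

trials = Trials()
best = fmin(fn=objective,
            space=hp.uniform('x', -10, 10),
            algo=tpe.suggest,
            max_evals=50,
            trials=trials)
print(best)  # best parameter found, close to {'x': 2.0}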
Example #1
Source File: automl.py    From kddcup2019-automl with MIT License
def hyperopt_lightgbm(X: pd.DataFrame, y: pd.Series, params: Dict, config: Config):
    X_train, X_val, y_train, y_val = data_split(X, y, test_size=0.5)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "max_depth": hp.choice("max_depth", np.arange(2, 10, 1, dtype=int)),
        # num_leaves should be smaller than 2^max_depth
        "num_leaves": hp.choice("num_leaves", np.arange(4, 200, 4, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.2, 0.8, 0.1),
        # "bagging_fraction": hp.quniform("bagging_fraction", 0.2, 0.8, 0.1),
        # "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 10, 2, dtype=int)),
        # "scale_pos_weight":hp.uniform('scale_pos_weight',1.0, 10.0),
        # "colsample_by_tree":hp.uniform("colsample_bytree",0.5,1.0),
        "min_child_weight": hp.quniform('min_child_weight', 2, 50, 2),
        "reg_alpha": hp.uniform("reg_alpha", 2.0, 8.0),
        "reg_lambda": hp.uniform("reg_lambda", 2.0, 8.0),
        "learning_rate": hp.quniform("learning_rate", 0.05, 0.4, 0.01),
        # "learning_rate": hp.loguniform("learning_rate", np.log(0.04), np.log(0.5)),
        #
        "min_data_in_leaf": hp.choice('min_data_in_leaf', np.arange(200, 2000, 100, dtype=int)),
        #"is_unbalance": hp.choice("is_unbalance", [True])
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 300,
                          valid_data, early_stopping_rounds=45, verbose_eval=0)

        score = model.best_score["valid_0"][params["metric"]]

        # hyperopt minimizes the objective, so return the negative score
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=150, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
Example #2
Source File: automl.py    From Kaggler with MIT License
def optimize_hyperparam(self, X, y, test_size=.2, n_eval=100):
        X_trn, X_val, y_trn, y_val = train_test_split(X, y, test_size=test_size, shuffle=self.shuffle)

        train_data = lgb.Dataset(X_trn, label=y_trn)
        valid_data = lgb.Dataset(X_val, label=y_val)

        def objective(hyperparams):
            model = lgb.train({**self.params, **hyperparams}, train_data, self.n_est,
                              valid_data, early_stopping_rounds=self.n_stop, verbose_eval=0)

            score = model.best_score["valid_0"][self.metric] * self.loss_sign

            return {'loss': score, 'status': STATUS_OK, 'model': model}

        trials = Trials()
        best = hyperopt.fmin(fn=objective, space=self.space, trials=trials,
                             algo=tpe.suggest, max_evals=n_eval, verbose=1,
                             rstate=self.random_state)

        hyperparams = space_eval(self.space, best)
        return hyperparams, trials 
Example #3
Source File: automl.py    From Kaggler with MIT License
def optimize_hyperparam(self, X, y, test_size=.2, n_eval=100):
        X_trn, X_val, y_trn, y_val = train_test_split(X, y, test_size=test_size, shuffle=self.shuffle)

        def objective(hyperparams):
            model = XGBModel(n_estimators=self.n_est, **self.params, **hyperparams)
            model.fit(X=X_trn, y=y_trn,
                      eval_set=[(X_val, y_val)],
                      eval_metric=self.metric,
                      early_stopping_rounds=self.n_stop,
                      verbose=False)
            score = model.evals_result()['validation_0'][self.metric][model.best_iteration] * self.loss_sign

            return {'loss': score, 'status': STATUS_OK, 'model': model}

        trials = Trials()
        best = hyperopt.fmin(fn=objective, space=self.space, trials=trials,
                             algo=tpe.suggest, max_evals=n_eval, verbose=1,
                             rstate=self.random_state)

        hyperparams = space_eval(self.space, best)
        return hyperparams, trials 
Example #4
Source File: topk_voting_classifier.py    From lale with Apache License 2.0
def fit(self, X_train, y_train):
        optimizer_instance = self.optimizer(estimator=self.estimator, **self.args_to_optimizer)
        trained_optimizer1 = optimizer_instance.fit(X_train, y_train)
        results = trained_optimizer1.summary()
        results = results[results['status'] == STATUS_OK]  # consider only successful trials
        results = results.sort_values(by=['loss'], axis=0)
        k = min(self.k, results.shape[0])
        top_k_pipelines = results.iloc[0:k]
        pipeline_tuples=[]
        for pipeline_name in top_k_pipelines.index:
            pipeline_instance = trained_optimizer1.get_pipeline(pipeline_name)
            pipeline_tuple = (pipeline_name, pipeline_instance)
            pipeline_tuples.append(pipeline_tuple)
        voting = VotingClassifier(estimators=pipeline_tuples)
        args_to_optimizer = copy.copy(self.args_to_optimizer)
        try:
            del args_to_optimizer['max_evals']
        except KeyError:
            pass
        args_to_optimizer['max_evals'] = 1  # currently, the voting classifier has no useful hyperparameters to tune
        optimizer_instance2 = self.optimizer(estimator=voting, **args_to_optimizer)
        trained_optimizer2 = optimizer_instance2.fit(X_train, y_train)
        self._best_estimator = trained_optimizer2.get_pipeline()
        return self 
Example #5
Source File: model_tta_hyperopt.py    From KagglePlanetPytorch with MIT License
def objective(space):
                estimator = XGBClassifier(
                    n_estimators=n_estimators,
                    max_depth=int(space['max_depth']),
                    min_child_weight=int(space['min_child_weight']),
                    gamma=space['gamma'],
                    subsample=space['subsample'],
                    colsample_bytree=space['colsample_bytree']
                )

                estimator.fit(
                    x_train,
                    y_train,
                    eval_set=[(x_train, y_train), (x_val, y_val)],
                    early_stopping_rounds=30,
                    verbose=False,
                    eval_metric='error'
                )

                score = accuracy_score(y_val, estimator.predict(x_val))

                return {'loss': 1 - score, 'status': STATUS_OK} 
Example #6
Source File: test_e2e.py    From hyperas with MIT License
def ensemble_model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    nb_epoch = 10
    batch_size = 128

    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model} 
Example #7
Source File: test_e2e.py    From hyperas with MIT License
def model(X_train, Y_train, X_test, Y_test):
    model = Sequential()
    model.add(Dense(50, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([20, 30, 40])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model} 
Example #8
Source File: test_lr_plateau.py    From hyperas with MIT License
def create_model(x_train, y_train, x_test, y_test):
    model = Sequential()
    model.add(Dense(44, input_shape=(784,)))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dense(44))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dense(10))

    model.compile(loss='mae', metrics=['mse'], optimizer="adam")

    es = EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=10)
    rlr = ReduceLROnPlateau(factor=0.1, patience=10)
    _ = model.fit(x_train, y_train, epochs=1, verbose=0, callbacks=[es, rlr],
                  batch_size=24, validation_data=(x_test, y_test))

    mae, mse = model.evaluate(x_test, y_test, verbose=0)
    print('MAE:', mae)
    return {'loss': mae, 'status': STATUS_OK, 'model': model} 
Example #9
Source File: test_functional_api.py    From hyperas with MIT License
def model(X_train, Y_train, X_test, Y_test):
    inputs = Input(shape=(784,))

    x = Dense({{choice([20, 30, 40])}}, activation='relu')(inputs)
    x = Dense(64, activation='relu')(x)
    predictions = Dense(10, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=predictions)

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model} 
Example #10
Source File: mnist_ensemble.py    From hyperas with MIT License
def model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    nb_epoch = 10
    batch_size = 128

    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model} 
Example #11
Source File: testh.py    From deep with Apache License 2.0
def model(X_train, Y_train, X_test, Y_test):
    model = Sequential()
    model.add(Dense({{choice([15, 512, 1024])}}, input_dim=8, init='uniform', activation='softplus'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid', 'softplus'])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    
    model.add(Dense(1, init='uniform', activation='sigmoid'))

    model.compile(loss='mse', metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(X_train, Y_train,
              batch_size={{choice([10, 50, 100])}},
              nb_epoch={{choice([1, 50])}},
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model} 
Example #12
Source File: hyperoptim.py    From scgen with GNU General Public License v3.0
def create_model(x_train):
    network = scgen.VAEArith(x_dimension=x_train.X.shape[1],
                             z_dimension={{choice([10, 20, 50, 75, 100])}},
                             learning_rate={{choice([0.1, 0.01, 0.001, 0.0001])}},
                             alpha={{choice([0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001])}},
                             dropout_rate={{choice([0.2, 0.25, 0.5, 0.75, 0.8])}},
                             model_path=f"./")

    result = network.train(x_train,
                           n_epochs={{choice([100, 150, 200, 250])}},
                           batch_size={{choice([32, 64, 128, 256])}},
                           verbose=2,
                           shuffle=True,
                           save=False)
    best_loss = np.amin(result.history['loss'])
    print('Best Loss of model:', best_loss)
    return {'loss': best_loss, 'status': STATUS_OK, 'model': network.vae_model} 
Example #13
Source File: hyperparam_search.py    From robotics-rl-srl with MIT License
def run(self):
        trials = hyperopt.Trials()
        hyperopt.fmin(fn=lambda kwargs: {'loss': self.train(kwargs), 'status': hyperopt.STATUS_OK},
                      space=self.search_space,
                      algo=hyperopt.tpe.suggest,
                      max_evals=self.num_eval,
                      trials=trials,
                      verbose=10)

        # from the trials, get the values for every parameter
        # set the number of iter to None as they are not changed in Hyperopt
        # and zip the loss
        self.history.extend(zip([(
            {name: val[0] for name, val in params["misc"]["vals"].items()}, None)
            for params in trials.trials], trials.losses()))
        return self.history[int(np.argmin([val[1] for val in self.history]))] 
Example #14
Source File: neural_network.py    From PES-Learn with BSD 3-Clause "New" or "Revised" License
def hyperopt_model(self, params):
        """
        A Hyperopt-friendly wrapper for build_model
        """
        # skip building this model if hyperparameter combination already attempted
        for i in self.hyperopt_trials.results:
            if 'memo' in i:
                if params == i['memo']:
                    return {'loss': i['loss'], 'status': STATUS_OK, 'memo': 'repeat'}
        if self.itercount > self.hp_maxit:
            return {'loss': 0.0, 'status': STATUS_FAIL, 'memo': 'max iters reached'}
        error_test, error_valid = self.build_model(params)
        self.itercount += 1
        if np.isnan(error_valid):
            return {'loss': 1e5, 'status': STATUS_FAIL, 'memo': 'nan'}
        else:
            return {'loss': error_valid, 'status': STATUS_OK, 'memo': params} 
Example #15
Source File: ch06-01-hopt.py    From kagglebook with BSD 3-Clause "New" or "Revised" License
def score(params):
    # Define the metric to be minimized for a given set of parameters.
    # Concretely, train the model with those parameters, predict, and return the resulting score.

    # Cast max_depth to an integer
    params['max_depth'] = int(params['max_depth'])

    # Assumes a Model class is defined elsewhere.
    # The Model class trains with fit and outputs predicted probabilities with predict.
    model = Model(params)
    model.fit(tr_x, tr_y, va_x, va_y)
    va_pred = model.predict(va_x)
    score = log_loss(va_y, va_pred)
    print(f'params: {params}, logloss: {score:.4f}')

    # Record the parameters and score
    history.append((params, score))

    return {'loss': score, 'status': STATUS_OK}


# Define the parameter space to search
Example #16
Source File: ch06-03-hopt_nn.py    From kagglebook with BSD 3-Clause "New" or "Revised" License
def score(params):
    # Define the function to be minimized for a given parameter set.
    # For model parameter search, this is the score from training the model with those parameters and predicting.
    model = MLP(params)
    model.fit(tr_x, tr_y, va_x, va_y)
    va_pred = model.predict(va_x)
    score = log_loss(va_y, va_pred)
    print(f'params: {params}, logloss: {score:.4f}')

    # Record the parameters and score
    history.append((params, score))

    return {'loss': score, 'status': STATUS_OK}


# Run the parameter search with hyperopt
Example #17
Source File: optimize_parameters.py    From facial-expression-recognition-svm with GNU General Public License v3.0
def function_to_minimize(hyperparams, gamma='auto', decision_function='ovr'):
    decision_function = hyperparams['decision_function']
    gamma = hyperparams['gamma']
    global current_eval 
    global max_evals
    print( "#################################")
    print( "       Evaluation {} of {}".format(current_eval, max_evals))
    print( "#################################")
    start_time = time.time()
    try:
        accuracy = train(epochs=HYPERPARAMS.epochs_during_hyperopt, decision_function=decision_function, gamma=gamma)
        training_time = int(round(time.time() - start_time))
        current_eval += 1
        train_history.append({'accuracy': accuracy, 'decision_function': decision_function, 'gamma': gamma, 'time': training_time})
    except Exception as e:
        print( "#################################")
        print( "Exception during training: {}".format(str(e)))
        print( "Saving train history in train_history.npy")
        np.save("train_history.npy", train_history)
        exit()
    return {'loss': -accuracy, 'time': training_time, 'status': STATUS_OK}

# launch the hyperparameter search
Example #18
Source File: bot.py    From ebisu with MIT License
def params_search(self):
        """
 ˜      function to search params
        """
        def objective(args):
            logger.info(f"Params : {args}")
            try:
                self.params = args
                self.exchange = BitMexBackTest()
                self.exchange.on_update(self.bin_size, self.strategy)
                profit_factor = self.exchange.win_profit/self.exchange.lose_loss
                logger.info(f"Profit Factor : {profit_factor}")
                ret = {
                    'status': STATUS_OK,
                    'loss': 1/profit_factor
                }
            except Exception as e:
                ret = {
                    'status': STATUS_FAIL
                }

            return ret

        trials = Trials()
        best_params = fmin(objective, self.options(), algo=tpe.suggest, trials=trials, max_evals=200)
        logger.info(f"Best params is {best_params}")
        logger.info(f"Best profit factor is {1/trials.best_trial['result']['loss']}") 
Example #19
Source File: task.py    From kaggle-HomeDepot with MIT License
def _obj(self, param_dict):
        self.trial_counter += 1
        param_dict = self.model_param_space._convert_int_param(param_dict)
        learner = Learner(self.learner_name, param_dict)
        suffix = "_[Id@%s]"%str(self.trial_counter)
        if self.task_mode == "single":
            self.task = Task(learner, self.feature, suffix, self.logger, self.verbose, self.plot_importance)
        elif self.task_mode == "stacking":
            self.task = StackingTask(learner, self.feature, suffix, self.logger, self.verbose, self.refit_once)
        self.task.go()
        ret = {
            "loss": self.task.rmse_cv_mean,
            "attachments": {
                "std": self.task.rmse_cv_std,
            },
            "status": STATUS_OK,
        }
        return ret 
Example #20
Source File: run_tpe.py    From nas_benchmarks with BSD 3-Clause "New" or "Revised" License
def objective(x):
    config = deepcopy(x)
    for h in cs.get_hyperparameters():
        if type(h) == ConfigSpace.hyperparameters.OrdinalHyperparameter:
            
            config[h.name] = h.sequence[int(x[h.name])]

        elif type(h) == ConfigSpace.hyperparameters.UniformIntegerHyperparameter:

            config[h.name] = int(x[h.name])
    y, c = b.objective_function(config)

    return {
        'config': config,
        'loss': y,
        'cost': c,
        'status': STATUS_OK} 
Example #21
Source File: model_train.py    From emnlp2017-relation-extraction with Apache License 2.0
def f_train(params):
    model = getattr(keras_models, model_name)(params, embedding_matrix, max_sent_len, n_out)
    callback_history = model.fit(train_as_indices[:-1],
                                 [train_y_properties_one_hot],
                                 epochs=20, batch_size=keras_models.model_params['batch_size'], verbose=1,
                                 validation_data=(
                                     val_as_indices[:-1], val_y_properties_one_hot),
                                 callbacks=[callbacks.EarlyStopping(monitor="val_loss", patience=1, verbose=1)])

    predictions = model.predict(val_as_indices[:-1], batch_size=16, verbose=1)
    predictions_classes = np.argmax(predictions, axis=1)
    _, _, acc = metrics.compute_micro_PRF(predictions_classes, val_as_indices[-1])
    return {'loss': -acc, 'status': hy.STATUS_OK} 
Example #22
Source File: base_worker.py    From BOAH with Apache License 2.0
def run_tpe(self, num_iterations):
        """
            Wrapper around TPE to return a HpBandSter Result object to integrate better with the other methods
        """
        try:
            from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
        except ImportError:
            raise ImportError('To run TPE, please install the hyperopt package!')
        except:
            raise

        def tpe_objective(config):
            loss = self.evaluate_and_log(config, budget=self.max_budget)
            return {'config': config,
                    'loss': loss,
                    'status': STATUS_OK}

        space = self.tpe_configspace()
        trials = Trials()
        best = fmin(tpe_objective,
                space=space,
                algo=tpe.suggest,
                max_evals=num_iterations,
                trials=trials)
        return self.get_result()
Example #23
Source File: tuning.py    From SGC with MIT License
def sgc_objective(space):
    model = get_model(args.model, features.size(1), labels.max().item()+1, args.hidden, args.dropout, args.cuda)
    model, acc_val, _ = train_regression(model, features[idx_train], labels[idx_train], features[idx_val], labels[idx_val],
                                      args.epochs, space['weight_decay'], args.lr, args.dropout)
    print('weight decay: {:.2e} '.format(space['weight_decay']) + 'accuracy: {:.4f}'.format(acc_val))
    return {'loss': -acc_val, 'status': STATUS_OK} 
Example #24
Source File: tuning.py    From SGC with MIT License
def linear_objective(space):
    model = get_model(args.model, nfeat=feat_dict["train"].size(1),
                      nclass=nclass,
                      nhid=0, dropout=0, cuda=args.cuda)
    val_acc, _, _ = train_linear(model, feat_dict, space['weight_decay'], args.dataset=="mr")
    print('weight decay ' + str(space['weight_decay']) + '\n' +
          'overall accuracy: ' + str(val_acc))
    return {'loss': -val_acc, 'status': STATUS_OK}

# Hyperparameter optimization 
Example #25
Source File: average.py    From fnc-1 with Apache License 2.0
def hyperopt_wrapper(param):
    
    print "++++++++++++++++++++++++++++++"
    for k, v in sorted(param.items()):
        print "%s: %s" % (k,v)

    loss = stack_cv(param)
    print "-cost: ", loss

    return {'loss': loss, 'status': STATUS_OK} 
Example #26
Source File: hyopt.py    From kopt with MIT License
def best_trial_tid(self, rank=0):
        """Get tid of the best trial

        rank=0 means the best model
        rank=1 means second best
        ...
        """
        candidates = [t for t in self.trials
                      if t['result']['status'] == STATUS_OK]
        if len(candidates) == 0:
            return None
        losses = [float(t['result']['loss']) for t in candidates]
        assert not np.any(np.isnan(losses))
        lid = np.where(np.argsort(losses).argsort() == rank)[0][0]
        return candidates[lid]["tid"] 
Example #27
Source File: hp_search.py    From Scene-Classification with MIT License
def create_model(train_generator, validation_generator):
    l2_reg = regularizers.l2({{loguniform(log(1e-6), log(1e-2))}})
    base_model = InceptionResNetV2(weights='imagenet', include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout({{uniform(0, 1)}})(x)
    x = Dense(1024, activation='relu', kernel_regularizer=l2_reg, activity_regularizer=l2_reg)(x)
    x = Dropout({{uniform(0, 1)}})(x)
    predictions = Dense(num_classes, activation='softmax', kernel_regularizer=l2_reg, activity_regularizer=l2_reg)(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    model_weights_path = os.path.join('models', best_model)
    model.load_weights(model_weights_path)

    for i in range(int(len(base_model.layers) * {{uniform(0, 1)}})):
        layer = base_model.layers[i]
        layer.trainable = False

    adam = keras.optimizers.Adam(lr={{loguniform(log(1e-6), log(1e-3))}})
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=adam)

    # print(model.summary())

    model.fit_generator(
        train_generator,
        steps_per_epoch=num_train_samples // batch_size,
        validation_data=validation_generator,
        validation_steps=num_valid_samples // batch_size)

    score, acc = model.evaluate_generator(validation_generator)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model} 
Example #28
Source File: automl.py    From KDDCup2019_admin with MIT License
def hyperopt_lightgbm(X_train: pd.DataFrame, y_train: pd.Series, params: Dict, config: Config, max_evals=10):
    X_train, X_test, y_train, y_test = data_split_by_time(X_train, y_train, test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X_train, y_train, test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        #"max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        if config.time_left() < 50:
            return {'status': STATUS_FAIL}
        else:
            model = lgb.train({**params, **hyperparams}, train_data, 100,
                              valid_data, early_stopping_rounds=10, verbose_eval=0)
            pred = model.predict(X_test)
            score = roc_auc_score(y_test, pred)

            #score = model.best_score["valid_0"][params["metric"]]

            # hyperopt minimizes the objective, so return the negative AUC
            return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams 
Example #29
Source File: bayesian_optimizer.py    From pykg2vec with MIT License
def _get_loss(self, params):
        """Function that defines and acquires the loss"""
        
        # copy the hyperparameters to trainer config and hyperparameter set. 
        for key, value in params.items():
          self.config_local.__dict__[key] = value

        model = self.model_obj(**self.config_local.__dict__)

        self.trainer = Trainer(model, self.config_local)

        # configure common setting for a tuning training. 
        self.config_local.disp_result = False
        self.config_local.disp_summary = False
        self.config_local.save_model = False

        # do not overwrite test numbers if set
        if self.config_local.test_num is None:
            self.config_local.test_num = 1000

        if self.kge_args.debug:
            self.config_local.epochs = 1

        # start the trial.
        self.trainer.build_model()
        loss = self.trainer.tune_model()

        return {'loss': loss, 'status': STATUS_OK} 
Example #30
Source File: cnn_lstm.py    From hyperas with MIT License
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}