Python sklearn.gaussian_process.GaussianProcessRegressor() Examples

The following are 30 code examples of sklearn.gaussian_process.GaussianProcessRegressor(), drawn from open-source projects; the source file and originating project for each example are noted above it. You may also want to check out all available functions/classes of the module sklearn.gaussian_process.
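Before the examples, a minimal sketch of the API most of them exercise: construct a regressor with a kernel, fit it, and request a predictive standard deviation. The kernel choice, data, and hyperparameters below are purely illustrative.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel

# Toy 1-D regression problem
X = np.linspace(0, 10, 25).reshape(-1, 1)
y = np.sin(X).ravel()

kernel = ConstantKernel(1.0) * RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, alpha=1e-6,
                               n_restarts_optimizer=5,
                               normalize_y=True).fit(X, y)

# Posterior mean and per-point standard deviation at the training inputs
y_mean, y_std = gpr.predict(X, return_std=True)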
Example #1
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_rbf_fitted_return_std_exp_sine_squared_double_true(self):

        gp = GaussianProcessRegressor(kernel=ExpSineSquared(),
                                      alpha=1e-7,
                                      n_restarts_optimizer=15,
                                      normalize_y=True)
        gp.fit(Xtrain_, Ytrain_)

        # return_cov=False, return_std=True
        options = {GaussianProcessRegressor: {"return_std": True}}
        gp.predict(Xtrain_, return_std=True)
        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            options=options, dtype=np.float64,
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            Xtest_.astype(np.float64), gp, model_onnx,
            verbose=False,
            basename="SklearnGaussianProcessExpSineSquaredStdDouble-Out0-Dec4")
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float64),
                           predict_attributes=options[
                             GaussianProcessRegressor],
                           decimal=4) 
Example #2
Source File: test_sklearn_grid_search_cv_converter.py    From sklearn-onnx with MIT License
def test_grid_search_gaussian_regressor_double(self):
        tuned_parameters = [{'alpha': np.logspace(-4, -0.5, 4)}]
        clf = GridSearchCV(GaussianProcessRegressor(),
                           tuned_parameters, cv=3)
        model, X = fit_regression_model(clf)
        model_onnx = convert_sklearn(
            model, "GridSearchCV",
            [("input", DoubleTensorType([None, X.shape[1]]))],
            dtype=np.float64)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(np.float64),
            model,
            model_onnx,
            basename="SklearnGridSearchGaussianRegressionDouble"
                     "-OneOffArray-Dec4",
            allow_failure="StrictVersion("
            "onnxruntime.__version__) "
            "<= StrictVersion('0.4.0') or "
            "StrictVersion(onnx.__version__) "
            "== StrictVersion('1.4.1')",
        ) 
Example #3
Source File: test_gaussian_process.py    From pandas-ml with BSD 3-Clause "New" or "Revised" License
def test_objectmapper(self):
        df = pdml.ModelFrame([])
        dgp = df.gaussian_process

        self.assertIs(dgp.GaussianProcessClassifier,
                      gp.GaussianProcessClassifier)
        self.assertIs(dgp.GaussianProcessRegressor,
                      gp.GaussianProcessRegressor)
        self.assertIs(dgp.correlation_models.absolute_exponential,
                      gp.correlation_models.absolute_exponential)
        self.assertIs(dgp.correlation_models.squared_exponential,
                      gp.correlation_models.squared_exponential)
        self.assertIs(dgp.correlation_models.generalized_exponential,
                      gp.correlation_models.generalized_exponential)
        self.assertIs(dgp.correlation_models.pure_nugget,
                      gp.correlation_models.pure_nugget)
        self.assertIs(dgp.correlation_models.cubic,
                      gp.correlation_models.cubic)
        self.assertIs(dgp.correlation_models.linear,
                      gp.correlation_models.linear) 
Example #4
Source File: test_sklearn_grid_search_cv_converter.py    From sklearn-onnx with MIT License
def test_grid_search_gaussian_regressor_float(self):
        tuned_parameters = [{'alpha': np.logspace(-4, -0.5, 4)}]
        clf = GridSearchCV(GaussianProcessRegressor(),
                           tuned_parameters, cv=5)
        model, X = fit_regression_model(clf)
        model_onnx = convert_sklearn(
            model, "GridSearchCV",
            [("input", FloatTensorType([None, X.shape[1]]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnGridSearchGaussianRegressionFloat"
                     "-OneOffArray-Dec4",
            allow_failure="StrictVersion("
            "onnxruntime.__version__) "
            "<= StrictVersion('0.4.0') or "
            "StrictVersion(onnx.__version__) "
            "== StrictVersion('1.4.1')",
        ) 
Example #5
Source File: CreateModel.py    From nni with MIT License
def create_model(samples_x, samples_y_aggregation,
                 n_restarts_optimizer=250, is_white_kernel=False):
    '''
    Trains GP regression model
    '''
    kernel = gp.kernels.ConstantKernel(constant_value=1,
                                       constant_value_bounds=(1e-12, 1e12)) * \
                                                gp.kernels.Matern(nu=1.5)
    if is_white_kernel:
        kernel += gp.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-12, 1e12))
    regressor = gp.GaussianProcessRegressor(kernel=kernel,
                                            n_restarts_optimizer=n_restarts_optimizer,
                                            normalize_y=True,
                                            alpha=1e-10)
    regressor.fit(numpy.array(samples_x), numpy.array(samples_y_aggregation))

    model = {}
    model['model'] = regressor
    model['kernel_prior'] = str(kernel)
    model['kernel_posterior'] = str(regressor.kernel_)
    model['model_loglikelihood'] = regressor.log_marginal_likelihood(regressor.kernel_.theta)

    return model 
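A hypothetical call, only to show the expected shapes; create_model accepts any array-likes that numpy.array() can consume, and the data below is illustrative.

samples_x = [[0.1], [0.5], [0.9], [1.3]]
samples_y = [1.2, 0.7, 1.9, 0.4]
model = create_model(samples_x, samples_y, n_restarts_optimizer=10,
                     is_white_kernel=True)
print(model['kernel_posterior'], model['model_loglikelihood'])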
Example #6
Source File: test_gpr.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_K_inv_reset(kernel):
    y2 = f(X2).ravel()

    # Test that self._K_inv is reset after a new fit
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert hasattr(gpr, '_K_inv')
    assert gpr._K_inv is None
    gpr.predict(X, return_std=True)
    assert gpr._K_inv is not None
    gpr.fit(X2, y2)
    assert gpr._K_inv is None
    gpr.predict(X2, return_std=True)
    gpr2 = GaussianProcessRegressor(kernel=kernel).fit(X2, y2)
    gpr2.predict(X2, return_std=True)
    # the value of K_inv should be independent of the first fit
    assert_array_equal(gpr._K_inv, gpr2._K_inv) 
Example #7
Source File: test_gpr.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_duplicate_input(kernel):
    # Test GPR can handle two different output-values for the same input.
    gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
    gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)

    X_ = np.vstack((X, X[0]))
    y_ = np.hstack((y, y[0] + 1))
    gpr_equal_inputs.fit(X_, y_)

    X_ = np.vstack((X, X[0] + 1e-15))
    y_ = np.hstack((y, y[0] + 1))
    gpr_similar_inputs.fit(X_, y_)

    X_test = np.linspace(0, 10, 100)[:, None]
    y_pred_equal, y_std_equal = \
        gpr_equal_inputs.predict(X_test, return_std=True)
    y_pred_similar, y_std_similar = \
        gpr_similar_inputs.predict(X_test, return_std=True)

    assert_almost_equal(y_pred_equal, y_pred_similar)
    assert_almost_equal(y_std_equal, y_std_similar) 
Example #8
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_fitted_partial_float64(self):
        data = load_iris()
        X = data.data
        y = data.target
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        gp = GaussianProcessRegressor(kernel=DotProduct(), alpha=10.)
        gp.fit(X_train, y_train)

        model_onnx = to_onnx(
            gp, initial_types=[('X', FloatTensorType([None, None]))])
        self.assertTrue(model_onnx is not None)
        try:
            self.check_outputs(gp, model_onnx, X_test.astype(np.float32), {})
        except AssertionError as e:
            assert "Max relative difference:" in str(e)

        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            dtype=np.float64)
        self.assertTrue(model_onnx is not None)
        self.check_outputs(gp, model_onnx, X_test, {}) 
Example #9
Source File: test_gpr.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_custom_optimizer(kernel):
    # Test that GPR can use externally defined optimizers.
    # Define a dummy optimizer that simply tests 50 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = \
            initial_theta, obj_func(initial_theta, eval_gradient=False)
        for _ in range(50):
            theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
                                              np.minimum(1, bounds[:, 1])))
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min

    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
    gpr.fit(X, y)
    # Checks that optimizer improved marginal likelihood
    assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                   gpr.log_marginal_likelihood(gpr.kernel.theta)) 
Example #10
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_rbf_fitted_return_std_rational_quadratic_true(self):

        gp = GaussianProcessRegressor(kernel=RationalQuadratic(),
                                      alpha=1e-7,
                                      n_restarts_optimizer=15,
                                      normalize_y=True)
        gp.fit(Xtrain_, Ytrain_)
        gp.predict(Xtrain_, return_std=True)

        # return_cov=False, return_std=True
        options = {GaussianProcessRegressor: {"return_std": True}}
        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            options=options, dtype=np.float64,
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            Xtest_.astype(np.float64), gp, model_onnx,
            basename="SklearnGaussianProcessRationalQuadraticStdDouble-Out0")
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float64),
                           predict_attributes=options[
                             GaussianProcessRegressor]) 
Example #11
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_rbf_fitted_return_std_dot_product_true(self):

        gp = GaussianProcessRegressor(kernel=DotProduct(),
                                      alpha=1.,
                                      n_restarts_optimizer=15,
                                      normalize_y=True)
        gp.fit(Xtrain_, Ytrain_)
        gp.predict(Xtrain_, return_std=True)

        # return_cov=False, return_std=True
        options = {GaussianProcessRegressor: {"return_std": True}}
        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            options=options, dtype=np.float64,
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            Xtest_.astype(np.float64), gp, model_onnx,
            basename="SklearnGaussianProcessDotProductStdDouble-Out0-Dec3")
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float64),
                           predict_attributes=options[
                             GaussianProcessRegressor],
                           decimal=3) 
Example #12
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_rbf_fitted_return_std_exp_sine_squared_true(self):

        gp = GaussianProcessRegressor(kernel=ExpSineSquared(),
                                      alpha=1e-7,
                                      n_restarts_optimizer=15,
                                      normalize_y=True)
        gp.fit(Xtrain_, Ytrain_)

        # return_cov=False, return_std=True
        options = {GaussianProcessRegressor: {"return_std": True}}
        gp.predict(Xtrain_, return_std=True)
        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            options=options, dtype=np.float64,
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            Xtest_.astype(np.float64), gp, model_onnx,
            verbose=False,
            basename="SklearnGaussianProcessExpSineSquaredStdT-Out0-Dec3")
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float64),
                           predict_attributes=options[
                             GaussianProcessRegressor],
                           decimal=4) 
Example #13
Source File: causal_mechanisms.py    From CausalDiscoveryToolbox with MIT License
def mechanism(self, x):
        """Mechanism function."""
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], 1))

        if self.nb_step < 5:
            # Warm-up calls: sample the output directly from the GP prior.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
        elif self.nb_step == 5:
            # Last prior sample; fit a regressor on it so the mechanism
            # stays fixed for all subsequent calls.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
            self.gpr = GaussianProcessRegressor()
            self.gpr.fit(x, y)
            y = self.gpr.predict(x)
        else:
            # Reuse the frozen mechanism.
            y = self.gpr.predict(x)

        return y
Example #14
Source File: causal_mechanisms.py    From CausalDiscoveryToolbox with MIT License
def mechanism(self, x):
        """Mechanism function."""
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], x.shape[1]))

        if self.nb_step < 2:
            # Warm-up call: sample the output directly from the GP prior.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
        elif self.nb_step == 2:
            # Last prior sample; fit a regressor on it so the mechanism
            # stays fixed for all subsequent calls.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
            self.gpr = GaussianProcessRegressor()
            self.gpr.fit(x, y)
            y = self.gpr.predict(x)
        else:
            # Reuse the frozen mechanism.
            y = self.gpr.predict(x)

        return y
Example #15
Source File: Bivariate_fit.py    From CausalDiscoveryToolbox with MIT License
def b_fit_score(self, x, y):
        """Compute the fit score of a GP regression from variable 1 to variable 2.

        Args:
            x (numpy.ndarray): Variable 1
            y (numpy.ndarray): Variable 2

        Returns:
            float: BF fit score (in-sample mean squared error)
        """
        x = np.reshape(scale(x), (-1, 1))
        y = np.reshape(scale(y), (-1, 1))
        gp = GaussianProcessRegressor().fit(x, y)
        y_predict = gp.predict(x)
        error = mean_squared_error(y, y_predict)

        return error 
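The same idea as a standalone sketch, assuming x and y are 1-D numpy arrays; the function name, toy data, and final comparison are illustrative and not taken from the project.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import scale

def fit_error(x, y):
    # Regress y on x with a default GP and return the in-sample MSE.
    x = np.reshape(scale(x), (-1, 1))
    y = np.reshape(scale(y), (-1, 1))
    gp = GaussianProcessRegressor().fit(x, y)
    return mean_squared_error(y, gp.predict(x))

rng = np.random.RandomState(0)
x = rng.uniform(size=200)
y = np.sin(3 * x) + 0.1 * rng.normal(size=200)
# A lower error in one direction is weak evidence for that causal orientation.
print(fit_error(x, y), fit_error(y, x))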
Example #16
Source File: gaussianproc.py    From pyFTS with GNU General Public License v3.0
def __init__(self, **kwargs):
        super(GPR, self).__init__(**kwargs)
        self.name = "GPR"
        self.detail = "Gaussian Process Regression"
        self.is_high_order = True
        self.has_point_forecasting = True
        self.has_interval_forecasting = True
        self.has_probability_forecasting = True
        self.uod_clip = False
        self.benchmark_only = True
        self.min_order = 1
        self.alpha = kwargs.get("alpha", 0.05)
        self.data = None

        self.lscale = kwargs.get('length_scale', 1)

        self.kernel = ConstantKernel(1.0) * RBF(length_scale=self.lscale)
        self.model = GaussianProcessRegressor(kernel=self.kernel, alpha=.05,
                                      n_restarts_optimizer=10,
                                      normalize_y=False)
        #self.model_fit = None 
Example #17
Source File: models.py    From mlearn with BSD 3-Clause "New" or "Revised" License
def __init__(self, describer, kernel_category='RBF', restarts=10, **kwargs):
        """

        Args:
            describer (Describer): Describer to convert
                input object to descriptors.
            kernel_category (str): Name of kernel from
                sklearn.gaussian_process.kernels. Default to 'RBF', i.e.,
                squared exponential.
            restarts (int): The number of restarts of the optimizer for
                finding the kernel’s parameters which maximize the
                log-marginal likelihood.
            kwargs: kwargs to be passed to kernel object, e.g. length_scale,
                length_scale_bounds.
        """
        self.describer = describer
        kernel = getattr(kernels, kernel_category)(**kwargs)
        self.model = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=restarts)
        self._xtrain = None
        self._xtest = None 
Example #18
Source File: test_gpr.py    From twitter-stock-recommendation with MIT License
def test_predict_cov_vs_std():
    # Test that predicted std.-dev. is consistent with cov's diagonal.
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        y_mean, y_cov = gpr.predict(X2, return_cov=True)
        y_mean, y_std = gpr.predict(X2, return_std=True)
        assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std) 
Example #19
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_fitted_partial_float64_operator_cdist_quad(self):
        data = load_iris()
        X = data.data
        y = data.target
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        gp = GaussianProcessRegressor(kernel=RationalQuadratic(), alpha=100.)
        gp.fit(X_train, y_train)

        try:
            to_onnx(
                gp, initial_types=[('X', FloatTensorType([None, None]))],
                options={GaussianProcessRegressor: {'optim': 'CDIST'}},
                target_opset=TARGET_OPSET)
            raise AssertionError("CDIST is not implemented")
        except ValueError:
            pass

        model_onnx = to_onnx(
            gp, initial_types=[('X', FloatTensorType([None, None]))],
            options={GaussianProcessRegressor: {'optim': 'cdist'}},
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        name_save = inspect.currentframe().f_code.co_name + '.onnx'
        with open(name_save, 'wb') as f:
            f.write(model_onnx.SerializeToString())
        try:
            self.check_outputs(gp, model_onnx, X_test.astype(np.float32), {})
        except RuntimeError as e:
            if "CDist is not a registered" in str(e):
                return
        except AssertionError as e:
            assert "Max relative difference:" in str(e)

        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            dtype=np.float64, target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        self.check_outputs(gp, model_onnx, X_test, {}) 
Example #20
Source File: test_gpr.py    From twitter-stock-recommendation with MIT License
def test_prior():
    # Test that GP prior has mean 0 and identical variances.
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel)

        y_mean, y_cov = gpr.predict(X, return_cov=True)

        assert_almost_equal(y_mean, 0, 5)
        if len(gpr.kernel.theta) > 1:
            # XXX: quite hacky, works only for current kernels
            assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
        else:
            assert_almost_equal(np.diag(y_cov), 1, 5) 
Example #21
Source File: test_gpr.py    From twitter-stock-recommendation with MIT License
def test_no_optimizer():
    # Test that kernel parameters are unmodified when optimizer is None.
    kernel = RBF(1.0)
    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
    assert_equal(np.exp(gpr.kernel_.theta), 1.0) 
Example #22
Source File: test_gpr.py    From twitter-stock-recommendation with MIT License
def test_sample_statistics():
    # Test that statistics of samples drawn from GP are correct.
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

        y_mean, y_cov = gpr.predict(X2, return_cov=True)

        samples = gpr.sample_y(X2, 300000)

        # More digits accuracy would require many more samples
        assert_almost_equal(y_mean, np.mean(samples, 1), 1)
        assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
                            np.var(samples, 1) / np.diag(y_cov).max(), 1) 
Example #23
Source File: bayes.py    From chocolate with BSD 3-Clause "New" or "Revised" License
def _fit_gp(self, X, Xpending, y):
        gp = gaussian_process.GaussianProcessRegressor(kernel=self.k)
        # Flatten the parameter dictionaries into plain feature matrices.
        X = numpy.array([[elem[k] for k in self.space.names()] for elem in X])
        Xpending = numpy.array([[elem[k] for k in self.space.names()] for elem in Xpending])
        y = numpy.array(y)
        gp.fit(X, y)
        if Xpending.size:
            # Impute outcomes for still-pending candidates with the model's
            # own predictions, then refit on the augmented data.
            y_predict = gp.predict(Xpending)
            X = numpy.concatenate([X, Xpending])
            y = numpy.concatenate([y, y_predict])
            gp.fit(X, y)
        return gp, y
Example #24
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_fitted_partial_float64_operator_cdist_sine(self):
        data = load_iris()
        X = data.data
        y = data.target
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        gp = GaussianProcessRegressor(kernel=ExpSineSquared(), alpha=100.)
        gp.fit(X_train, y_train)

        try:
            to_onnx(
                gp, initial_types=[('X', FloatTensorType([None, None]))],
                options={GaussianProcessRegressor: {'optim': 'CDIST'}},
                target_opset=TARGET_OPSET)
            raise AssertionError("CDIST is not implemented")
        except ValueError:
            pass

        model_onnx = to_onnx(
            gp, initial_types=[('X', FloatTensorType([None, None]))],
            options={GaussianProcessRegressor: {'optim': 'cdist'}},
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        name_save = inspect.currentframe().f_code.co_name + '.onnx'
        with open(name_save, 'wb') as f:
            f.write(model_onnx.SerializeToString())
        try:
            self.check_outputs(gp, model_onnx, X_test.astype(np.float32), {})
        except RuntimeError as e:
            if "CDist is not a registered" in str(e):
                return
        except AssertionError as e:
            assert "Max relative difference:" in str(e)

        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            dtype=np.float64, target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        self.check_outputs(gp, model_onnx, X_test, {}) 
Example #25
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_fitted_shapes(self):
        data = load_iris()
        X = data.data.astype(np.float32)
        y = data.target.astype(np.float32)
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        gp = GaussianProcessRegressor()
        gp.fit(X_train, y_train)

        model_onnx = to_onnx(
            gp, initial_types=[('X', FloatTensorType([None, None]))],
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        self.check_outputs(gp, model_onnx, X_test, {}, skip_if_float32=True) 
Example #26
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_rbf_fitted_return_std_true(self):
        gp = GaussianProcessRegressor(alpha=1e-7,
                                      n_restarts_optimizer=15,
                                      normalize_y=True)
        gp.fit(Xtrain_, Ytrain_)

        # return_cov=False, return_std=True
        options = {GaussianProcessRegressor: {"return_std": True}}
        try:
            to_onnx(
                gp, initial_types=[('X', FloatTensorType([None, None]))],
                options=options, dtype=np.float32,
                target_opset=TARGET_OPSET)
        except RuntimeError as e:
            assert "The method *predict* must be called" in str(e)
        gp.predict(Xtrain_, return_std=True)
        model_onnx = to_onnx(
            gp, initial_types=[('X', FloatTensorType([None, None]))],
            options=options, dtype=np.float32,
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float32),
                           predict_attributes=options[
                             GaussianProcessRegressor],
                           decimal=4)
        dump_data_and_model(Xtest_.astype(np.float32), gp, model_onnx,
                            verbose=False,
                            basename="SklearnGaussianProcessRBFStd-Out0") 
Example #27
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_rbf_fitted_false(self):

        gp = GaussianProcessRegressor(alpha=1e-7,
                                      n_restarts_optimizer=15,
                                      normalize_y=False)
        gp.fit(Xtrain_, Ytrain_)

        # return_cov=False, return_std=False
        model_onnx = to_onnx(
            gp, initial_types=[('X', FloatTensorType([None, None]))],
            dtype=np.float32)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(Xtest_.astype(np.float32), gp, model_onnx,
                            verbose=False,
                            basename="SklearnGaussianProcessRBF-Dec4") 
Example #28
Source File: test_sklearn_gaussian_process.py    From sklearn-onnx with MIT License
def test_gpr_rbf_fitted_true(self):

        gp = GaussianProcessRegressor(alpha=1e-7,
                                      n_restarts_optimizer=15,
                                      normalize_y=True)
        gp, X = fit_regression_model(gp)

        # return_cov=False, return_std=False
        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            dtype=np.float64)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(X, gp, model_onnx,
                            verbose=False,
                            basename="SklearnGaussianProcessRBFT") 
Example #29
Source File: bayesian.py    From keras-tuner with Apache License 2.0
def _make_gpr(self):
        return gaussian_process.GaussianProcessRegressor(
            kernel=gaussian_process.kernels.Matern(nu=2.5),
            n_restarts_optimizer=20,
            normalize_y=True,
            alpha=self.alpha,
            random_state=self.seed) 
Example #30
Source File: acquisition_function.py    From polyaxon with Apache License 2.0
def get_gaussian_process(config, random_generator):
        if not isinstance(config, GaussianProcessConfig):
            raise ValueError("Received a non valid configuration.")

        if GaussianProcessesKernels.is_rbf(config.kernel):
            kernel = RBF(length_scale=config.length_scale)
        else:
            kernel = Matern(length_scale=config.length_scale, nu=config.nu)

        return GaussianProcessRegressor(
            kernel=kernel,
            n_restarts_optimizer=config.num_restarts_optimizer,
            random_state=random_generator,
        )