Python sklearn.gaussian_process.GaussianProcessRegressor() Examples

The following are code examples showing how to use sklearn.gaussian_process.GaussianProcessRegressor(). They are extracted from open source projects; the originating project, author, file, and license are listed above each example.

You may also want to check out all available functions and classes of the module sklearn.gaussian_process.
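
Before the project examples, here is a minimal, self-contained sketch of the typical workflow they all share; the toy data and kernel choice are illustrative assumptions, not taken from any project below.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel

# Toy 1-D regression problem (assumed data, for illustration only).
X = np.linspace(0, 10, 20).reshape(-1, 1)
y = np.sin(X).ravel()

# Define a kernel, fit the regressor, then predict with uncertainty.
kernel = ConstantKernel(1.0) * RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=5)
gpr.fit(X, y)

X_test = np.linspace(0, 10, 50).reshape(-1, 1)
y_mean, y_std = gpr.predict(X_test, return_std=True)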

Example 1
Project: mlearn   Author: materialsvirtuallab   File: models.py    License: BSD 3-Clause "New" or "Revised" License
def __init__(self, describer, kernel_category='RBF', restarts=10, **kwargs):
        """

        Args:
            describer (Describer): Describer to convert
                input object to descriptors.
            kernel_category (str): Name of kernel from
                sklearn.gaussian_process.kernels. Defaults to 'RBF', i.e.,
                squared exponential.
            restarts (int): The number of restarts of the optimizer for
                finding the kernel’s parameters which maximize the
                log-marginal likelihood.
            kwargs: kwargs to be passed to kernel object, e.g. length_scale,
                length_scale_bounds.
        """
        self.describer = describer
        kernel = getattr(kernels, kernel_category)(**kwargs)
        self.model = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=restarts)
        self._xtrain = None
        self._xtest = None 
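
The getattr-based lookup above is a general way to select any kernel from sklearn.gaussian_process.kernels by its string name; a standalone sketch of the same pattern (the kernel name and kwargs here are arbitrary choices):

from sklearn.gaussian_process import GaussianProcessRegressor, kernels

# Resolve the kernel class by name, then instantiate it with its kwargs.
kernel = getattr(kernels, 'RBF')(length_scale=2.0,
                                 length_scale_bounds=(1e-2, 1e2))
model = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10)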
Example 2
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_custom_optimizer(kernel):
    # Test that GPR can use externally defined optimizers.
    # Define a dummy optimizer that simply tests 50 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = \
            initial_theta, obj_func(initial_theta, eval_gradient=False)
        for _ in range(50):
            theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
                                              np.minimum(1, bounds[:, 1])))
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min

    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
    gpr.fit(X, y)
    # Checks that optimizer improved marginal likelihood
    assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                   gpr.log_marginal_likelihood(gpr.kernel.theta)) 
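
The test_gpr.py excerpts in this section reference module-level fixtures (f, X, X2, y) and assertion helpers defined outside the snippets; a plausible reconstruction of that setup, modeled on scikit-learn's test suite, is sketched below (treat the exact values as assumptions).

import numpy as np

def f(x):
    return x * np.sin(x)

# Training inputs/targets and a disjoint test grid used by the tests.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = f(X).ravel()
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T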
Example 3
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_duplicate_input(kernel):
    # Test GPR can handle two different output-values for the same input.
    gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
    gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)

    X_ = np.vstack((X, X[0]))
    y_ = np.hstack((y, y[0] + 1))
    gpr_equal_inputs.fit(X_, y_)

    X_ = np.vstack((X, X[0] + 1e-15))
    y_ = np.hstack((y, y[0] + 1))
    gpr_similar_inputs.fit(X_, y_)

    X_test = np.linspace(0, 10, 100)[:, None]
    y_pred_equal, y_std_equal = \
        gpr_equal_inputs.predict(X_test, return_std=True)
    y_pred_similar, y_std_similar = \
        gpr_similar_inputs.predict(X_test, return_std=True)

    assert_almost_equal(y_pred_equal, y_pred_similar)
    assert_almost_equal(y_std_equal, y_std_similar) 
Example 4
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_K_inv_reset(kernel):
    y2 = f(X2).ravel()

    # Test that self._K_inv is reset after a new fit
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert hasattr(gpr, '_K_inv')
    assert gpr._K_inv is None
    gpr.predict(X, return_std=True)
    assert gpr._K_inv is not None
    gpr.fit(X2, y2)
    assert gpr._K_inv is None
    gpr.predict(X2, return_std=True)
    gpr2 = GaussianProcessRegressor(kernel=kernel).fit(X2, y2)
    gpr2.predict(X2, return_std=True)
    # the value of K_inv should be independent of the first fit
    assert_array_equal(gpr._K_inv, gpr2._K_inv) 
Example 5
Project: pyFTS   Author: PYFTS   File: gaussianproc.py    License: GNU General Public License v3.0
def __init__(self, **kwargs):
        super(GPR, self).__init__(**kwargs)
        self.name = "GPR"
        self.detail = "Gaussian Process Regression"
        self.is_high_order = True
        self.has_point_forecasting = True
        self.has_interval_forecasting = True
        self.has_probability_forecasting = True
        self.uod_clip = False
        self.benchmark_only = True
        self.min_order = 1
        self.alpha = kwargs.get("alpha", 0.05)
        self.data = None

        self.lscale = kwargs.get('length_scale', 1)

        self.kernel = ConstantKernel(1.0) * RBF(length_scale=self.lscale)
        self.model = GaussianProcessRegressor(kernel=self.kernel, alpha=.05,
                                      n_restarts_optimizer=10,
                                      normalize_y=False)
        #self.model_fit = None 
Example 6
Project: nni   Author: microsoft   File: CreateModel.py    License: MIT License
def create_model(samples_x, samples_y_aggregation,
                 n_restarts_optimizer=250, is_white_kernel=False):
    '''
    Trains GP regression model
    '''
    kernel = gp.kernels.ConstantKernel(constant_value=1,
                                       constant_value_bounds=(1e-12, 1e12)) * \
                                                gp.kernels.Matern(nu=1.5)
    if is_white_kernel:
        kernel += gp.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-12, 1e12))
    regressor = gp.GaussianProcessRegressor(kernel=kernel,
                                            n_restarts_optimizer=n_restarts_optimizer,
                                            normalize_y=True,
                                            alpha=1e-10)
    regressor.fit(numpy.array(samples_x), numpy.array(samples_y_aggregation))

    model = {}
    model['model'] = regressor
    model['kernel_prior'] = str(kernel)
    model['kernel_posterior'] = str(regressor.kernel_)
    model['model_loglikelihood'] = regressor.log_marginal_likelihood(regressor.kernel_.theta)

    return model 
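
A hypothetical call to create_model, with made-up sample points, would look like this; the returned dict bundles the fitted regressor with its prior and posterior kernels and the log-marginal likelihood.

# Invented toy samples, purely to illustrate the call signature of the
# create_model helper defined above.
samples_x = [[0.1], [0.4], [0.7], [0.9]]
samples_y_aggregation = [1.2, 0.8, 1.5, 2.0]
model = create_model(samples_x, samples_y_aggregation,
                     n_restarts_optimizer=25, is_white_kernel=True)
print(model['kernel_posterior'], model['model_loglikelihood'])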
Example 7
Project: CausalDiscoveryToolbox   Author: FenTechSolutions   File: causal_mechanisms.py    License: MIT License
def mechanism(self, x):
        """Mechanism function."""
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], 1))

        if self.nb_step < 5:
            # Warm-up: draw a fresh sample from the GP prior at each call.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
        elif self.nb_step == 5:
            # Final sampling step: fit a GPR on the last draw and cache it.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
            self.gpr = GaussianProcessRegressor()
            self.gpr.fit(x, y)
            y = self.gpr.predict(x)
        else:
            # Later calls reuse the cached, now-deterministic mechanism.
            y = self.gpr.predict(x)

        return y 
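
computeGaussKernel is defined elsewhere in the project; a plausible minimal stand-in, assuming it builds a unit-bandwidth Gaussian (RBF) Gram matrix over the inputs, is:

import numpy as np
from scipy.spatial.distance import cdist

def computeGaussKernel(x):
    # Pairwise squared Euclidean distances -> Gaussian Gram matrix.
    return np.exp(-0.5 * cdist(x, x, 'sqeuclidean'))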
Example 8
Project: CausalDiscoveryToolbox   Author: FenTechSolutions   File: causal_mechanisms.py    License: MIT License
def mechanism(self, x):
        """Mechanism function."""
        self.nb_step += 1
        x = np.reshape(x, (x.shape[0], x.shape[1]))

        if self.nb_step < 2:
            # Warm-up: draw a fresh sample from the GP prior at each call.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
        elif self.nb_step == 2:
            # Final sampling step: fit a GPR on the last draw and cache it.
            cov = computeGaussKernel(x)
            mean = np.zeros((1, self.points))[0, :]
            y = np.random.multivariate_normal(mean, cov)
            self.gpr = GaussianProcessRegressor()
            self.gpr.fit(x, y)
            y = self.gpr.predict(x)
        else:
            # Later calls reuse the cached, now-deterministic mechanism.
            y = self.gpr.predict(x)

        return y 
Example 9
Project: CausalDiscoveryToolbox   Author: FenTechSolutions   File: Bivariate_fit.py    License: MIT License
def b_fit_score(self, x, y):
        """ Computes the cds statistic from variable 1 to variable 2

        Args:
            a (numpy.ndarray): Variable 1
            b (numpy.ndarray): Variable 2

        Returns:
            float: BF fit score
        """
        x = np.reshape(scale(x), (-1, 1))
        y = np.reshape(scale(y), (-1, 1))
        gp = GaussianProcessRegressor().fit(x, y)
        y_predict = gp.predict(x)
        error = mean_squared_error(y_predict, y)

        return error 
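
In practice this score is computed in both causal directions and the lower regression error is taken as (weak) evidence for that direction; a hypothetical standalone version of the comparison:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import scale

def fit_error(a, b):
    # Regress b on a with a default-kernel GP and return the MSE.
    a = np.reshape(scale(a), (-1, 1))
    b = np.reshape(scale(b), (-1, 1))
    gp = GaussianProcessRegressor().fit(a, b)
    return mean_squared_error(gp.predict(a), b)

# err_xy < err_yx would favor the x -> y direction (and vice versa).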
Example 10
Project: sklearn-onnx   Author: onnx   File: test_sklearn_gaussian_process.py    License: MIT License
def test_gpr_rbf_fitted_return_std_exp_sine_squared_true(self):

        gp = GaussianProcessRegressor(kernel=ExpSineSquared(),
                                      alpha=1e-7,
                                      n_restarts_optimizer=15,
                                      normalize_y=True)
        gp.fit(Xtrain_, Ytrain_)

        # return_std=True, return_cov=False
        options = {GaussianProcessRegressor: {"return_std": True}}
        gp.predict(Xtrain_, return_std=True)
        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            options=options, dtype=np.float64,
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            Xtest_.astype(np.float64), gp, model_onnx,
            verbose=False,
            basename="SklearnGaussianProcessExpSineSquaredStdT-Out0-Dec3")
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float64),
                           predict_attributes=options[
                             GaussianProcessRegressor],
                           decimal=4) 
Example 11
Project: sklearn-onnx   Author: onnx   File: test_sklearn_gaussian_process.py    License: MIT License
def test_gpr_rbf_fitted_return_std_exp_sine_squared_double_true(self):

        gp = GaussianProcessRegressor(kernel=ExpSineSquared(),
                                      alpha=1e-7,
                                      n_restarts_optimizer=15,
                                      normalize_y=True)
        gp.fit(Xtrain_, Ytrain_)

        # return_std=True, return_cov=False
        options = {GaussianProcessRegressor: {"return_std": True}}
        gp.predict(Xtrain_, return_std=True)
        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            options=options, dtype=np.float64,
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            Xtest_.astype(np.float64), gp, model_onnx,
            verbose=False,
            basename="SklearnGaussianProcessExpSineSquaredStdDouble-Out0-Dec4")
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float64),
                           predict_attributes=options[
                             GaussianProcessRegressor],
                           decimal=4) 
Example 12
Project: sklearn-onnx   Author: onnx   File: test_sklearn_gaussian_process.py    License: MIT License
def test_gpr_rbf_fitted_return_std_dot_product_true(self):

        gp = GaussianProcessRegressor(kernel=DotProduct(),
                                      alpha=1.,
                                      n_restarts_optimizer=15,
                                      normalize_y=True)
        gp.fit(Xtrain_, Ytrain_)
        gp.predict(Xtrain_, return_std=True)

        # return_std=True, return_cov=False
        options = {GaussianProcessRegressor: {"return_std": True}}
        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            options=options, dtype=np.float64,
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            Xtest_.astype(np.float64), gp, model_onnx,
            basename="SklearnGaussianProcessDotProductStdDouble-Out0-Dec3")
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float64),
                           predict_attributes=options[
                             GaussianProcessRegressor],
                           decimal=3) 
Example 13
Project: sklearn-onnx   Author: onnx   File: test_sklearn_gaussian_process.py    License: MIT License
def test_gpr_rbf_fitted_return_std_rational_quadratic_true(self):

        gp = GaussianProcessRegressor(kernel=RationalQuadratic(),
                                      alpha=1e-7,
                                      n_restarts_optimizer=15,
                                      normalize_y=True)
        gp.fit(Xtrain_, Ytrain_)
        gp.predict(Xtrain_, return_std=True)

        # return_std=True, return_cov=False
        options = {GaussianProcessRegressor: {"return_std": True}}
        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            options=options, dtype=np.float64,
            target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            Xtest_.astype(np.float64), gp, model_onnx,
            basename="SklearnGaussianProcessRationalQuadraticStdDouble-Out0")
        self.check_outputs(gp, model_onnx, Xtest_.astype(np.float64),
                           predict_attributes=options[
                             GaussianProcessRegressor]) 
Example 14
Project: sklearn-onnx   Author: onnx   File: test_sklearn_gaussian_process.py    License: MIT License
def test_gpr_fitted_partial_float64(self):
        data = load_iris()
        X = data.data
        y = data.target
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        gp = GaussianProcessRegressor(kernel=DotProduct(), alpha=10.)
        gp.fit(X_train, y_train)

        model_onnx = to_onnx(
            gp, initial_types=[('X', FloatTensorType([None, None]))])
        self.assertTrue(model_onnx is not None)
        try:
            self.check_outputs(gp, model_onnx, X_test.astype(np.float32), {})
        except AssertionError as e:
            assert "Max relative difference:" in str(e)

        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            dtype=np.float64)
        self.assertTrue(model_onnx is not None)
        self.check_outputs(gp, model_onnx, X_test, {}) 
Example 15
Project: sklearn-onnx   Author: onnx   File: test_sklearn_grid_search_cv_converter.py    License: MIT License
def test_grid_search_gaussian_regressor_float(self):
        tuned_parameters = [{'alpha': np.logspace(-4, -0.5, 4)}]
        clf = GridSearchCV(GaussianProcessRegressor(),
                           tuned_parameters, cv=5)
        model, X = fit_regression_model(clf)
        model_onnx = convert_sklearn(
            model, "GridSearchCV",
            [("input", FloatTensorType([None, X.shape[1]]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnGridSearchGaussianRegressionFloat"
                     "-OneOffArray-Dec4",
            allow_failure="StrictVersion("
            "onnxruntime.__version__) "
            "<= StrictVersion('0.4.0') or "
            "StrictVersion(onnx.__version__) "
            "== StrictVersion('1.4.1')",
        ) 
Example 16
Project: sklearn-onnx   Author: onnx   File: test_sklearn_grid_search_cv_converter.py    License: MIT License
def test_grid_search_gaussian_regressor_double(self):
        tuned_parameters = [{'alpha': np.logspace(-4, -0.5, 4)}]
        clf = GridSearchCV(GaussianProcessRegressor(),
                           tuned_parameters, cv=3)
        model, X = fit_regression_model(clf)
        model_onnx = convert_sklearn(
            model, "GridSearchCV",
            [("input", DoubleTensorType([None, X.shape[1]]))],
            dtype=np.float64)
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(np.float64),
            model,
            model_onnx,
            basename="SklearnGridSearchGaussianRegressionDouble"
                     "-OneOffArray-Dec4",
            allow_failure="StrictVersion("
            "onnxruntime.__version__) "
            "<= StrictVersion('0.4.0') or "
            "StrictVersion(onnx.__version__) "
            "== StrictVersion('1.4.1')",
        ) 
Example 17
Project: pandas-ml   Author: pandas-ml   File: test_gaussian_process.py    License: BSD 3-Clause "New" or "Revised" License
def test_objectmapper(self):
        df = pdml.ModelFrame([])
        dgp = df.gaussian_process

        self.assertIs(dgp.GaussianProcessClassifier,
                      gp.GaussianProcessClassifier)
        self.assertIs(dgp.GaussianProcessRegressor,
                      gp.GaussianProcessRegressor)
        self.assertIs(dgp.correlation_models.absolute_exponential,
                      gp.correlation_models.absolute_exponential)
        self.assertIs(dgp.correlation_models.squared_exponential,
                      gp.correlation_models.squared_exponential)
        self.assertIs(dgp.correlation_models.generalized_exponential,
                      gp.correlation_models.generalized_exponential)
        self.assertIs(dgp.correlation_models.pure_nugget,
                      gp.correlation_models.pure_nugget)
        self.assertIs(dgp.correlation_models.cubic,
                      gp.correlation_models.cubic)
        self.assertIs(dgp.correlation_models.linear,
                      gp.correlation_models.linear) 
Example 18
Project: TNT   Author: GaoangW   File: track_lib.py    License: GNU General Public License v3.0
def GP_regression(tr_x, tr_y, test_x):
    # Fit a linear trend y = p[0]*x + p[1] by least squares (pseudo-inverse).
    A = np.ones((len(tr_x), 2))
    A[:, 0] = tr_x[:, 0]
    p = np.matmul(np.linalg.pinv(A), tr_y)
    mean_tr_y = np.matmul(A, p)
    A = np.ones((len(test_x), 2))
    A[:, 0] = test_x[:, 0]
    mean_test_y = np.matmul(A, p)
    # Model the residuals around the trend with a GP (constant*RBF + RBF).
    kernel = ConstantKernel(100, (1e-5, 1e5)) * RBF(1, (1e-5, 1e5)) + RBF(1, (1e-5, 1e5))
    gp = GaussianProcessRegressor(kernel=kernel, alpha=1, n_restarts_optimizer=9)
    gp.fit(tr_x, tr_y - mean_tr_y)
    test_y, sigma = gp.predict(test_x, return_std=True)
    # Add the linear trend back onto the GP prediction of the residuals.
    test_y = test_y + mean_test_y
    return test_y
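
A hypothetical call with synthetic 1-D tracks (shapes chosen to match the pinv-based detrending above):

import numpy as np
tr_x = np.linspace(0, 9, 10).reshape(-1, 1)
tr_y = 2.0 * tr_x[:, 0] + np.random.randn(10) * 0.1  # noisy linear track
test_x = np.linspace(0, 9, 25).reshape(-1, 1)
pred_y = GP_regression(tr_x, tr_y, test_x)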
Example 19
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_gpr_interpolation(kernel):
    # Test the interpolating property for different kernels.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_pred, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_pred, y)
    assert_almost_equal(np.diag(y_cov), 0.) 
Example 20
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_lml_improving(kernel):
    # Test that hyperparameter-tuning improves log-marginal likelihood.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                   gpr.log_marginal_likelihood(kernel.theta)) 
Example 21
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_lml_precomputed(kernel):
    # Test that lml of optimized kernel is stored correctly.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                 gpr.log_marginal_likelihood()) 
Example 22
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_converged_to_local_maximum(kernel):
    # Test that we are in local maximum after hyperparameter-optimization.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    lml, lml_gradient = \
        gpr.log_marginal_likelihood(gpr.kernel_.theta, True)

    assert np.all((np.abs(lml_gradient) < 1e-4) |
                  (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
                  (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])) 
Example 23
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_solution_inside_bounds(kernel):
    # Test that hyperparameter-optimization remains in bounds
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    bounds = gpr.kernel_.bounds
    max_ = np.finfo(gpr.kernel_.theta.dtype).max
    tiny = 1e-10
    bounds[~np.isfinite(bounds[:, 1]), 1] = max_

    assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
    assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny) 
Example 24
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_prior(kernel):
    # Test that GP prior has mean 0 and identical variances.
    gpr = GaussianProcessRegressor(kernel=kernel)

    y_mean, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_mean, 0, 5)
    if len(gpr.kernel.theta) > 1:
        # XXX: quite hacky, works only for current kernels
        assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
    else:
        assert_almost_equal(np.diag(y_cov), 1, 5) 
Example 25
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_sample_statistics(kernel):
    # Test that statistics of samples drawn from GP are correct.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    y_mean, y_cov = gpr.predict(X2, return_cov=True)

    samples = gpr.sample_y(X2, 300000)

    # More digits accuracy would require many more samples
    assert_almost_equal(y_mean, np.mean(samples, 1), 1)
    assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
                        np.var(samples, 1) / np.diag(y_cov).max(), 1) 
Example 26
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_no_optimizer():
    # Test that kernel parameters are unmodified when optimizer is None.
    kernel = RBF(1.0)
    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
    assert_equal(np.exp(gpr.kernel_.theta), 1.0) 
Example 27
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_predict_cov_vs_std(kernel):
    # Test that predicted std.-dev. is consistent with cov's diagonal.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_mean, y_cov = gpr.predict(X2, return_cov=True)
    y_mean, y_std = gpr.predict(X2, return_std=True)
    assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std) 
Example 28
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_anisotropic_kernel():
    # Test that GPR can identify meaningful anisotropic length-scales.
    # We learn a function which varies in one dimension ten-times slower
    # than in the other. The corresponding length-scales should differ by at
    # least a factor 5
    rng = np.random.RandomState(0)
    X = rng.uniform(-1, 1, (50, 2))
    y = X[:, 0] + 0.1 * X[:, 1]

    kernel = RBF([1.0, 1.0])
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert_greater(np.exp(gpr.kernel_.theta[1]),
                   np.exp(gpr.kernel_.theta[0]) * 5) 
Example 29
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_gpr.py    License: MIT License
def test_y_normalization(kernel):
    # Test normalization of the target values in GP

    # Fitting non-normalizing GP on normalized y and fitting normalizing GP
    # on unnormalized y should yield identical results
    y_mean = y.mean(0)
    y_norm = y - y_mean

    # Fit non-normalizing GP on normalized y
    gpr = GaussianProcessRegressor(kernel=kernel)
    gpr.fit(X, y_norm)
    # Fit normalizing GP on unnormalized y
    gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    gpr_norm.fit(X, y)

    # Compare predicted mean, std-devs and covariances
    y_pred, y_pred_std = gpr.predict(X2, return_std=True)
    y_pred = y_mean + y_pred
    y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)

    assert_almost_equal(y_pred, y_pred_norm)
    assert_almost_equal(y_pred_std, y_pred_std_norm)

    _, y_cov = gpr.predict(X2, return_cov=True)
    _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
    assert_almost_equal(y_cov, y_cov_norm)