Python sklearn.gaussian_process.GaussianProcess() Examples

The following are 12 code examples showing how to use sklearn.gaussian_process.GaussianProcess(). The examples are extracted from open source projects; you can go to the original project or source file by following the links above each example.


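Note that GaussianProcess is the legacy Gaussian process API: it was deprecated in scikit-learn 0.18 in favor of GaussianProcessRegressor and removed in 0.20, so the examples below only run on older scikit-learn versions. As orientation, here is a minimal sketch of the legacy interface on toy data (data values assumed for illustration):

import numpy as np
from sklearn.gaussian_process import GaussianProcess

X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = (X * np.sin(X)).ravel()

# theta0 is the initial correlation length scale; thetaL/thetaU bound the
# maximum-likelihood search over it
gp = GaussianProcess(corr='squared_exponential',
                     theta0=1e-2, thetaL=1e-4, thetaU=1e-1)
gp.fit(X, y)
y_pred, mse = gp.predict(X, eval_MSE=True)  # posterior mean and MSE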

Example 1
Project: Loan_Default_Prediction   Author: freedomljc   File: predict.py    License: BSD 3-Clause "New" or "Revised" License
def gbc_gp_predict_part(sub_x_Train, train_y, sub_x_Test_part):
    # Owing to memory limits, the model is trained on only part of the
    # training data. Note: this part was trained on a machine with more
    # than 96 GB of RAM. (pp is assumed to alias sklearn.preprocessing.)
    sub_x_Train[:, 16] = np.log(1 - sub_x_Train[:, 16])
    scaler = pp.StandardScaler()
    scaler.fit(sub_x_Train)
    sub_x_Train = scaler.transform(sub_x_Train)
    ind_train = np.where(train_y > 0)[0]
    part_size = int(0.7 * len(ind_train))
    gp = GaussianProcess(theta0=1e-3, thetaL=1e-5, thetaU=10,
                         corr='absolute_exponential')
    # Fit on log targets for the defaulted (train_y > 0) subset
    gp.fit(sub_x_Train[ind_train[:part_size]],
           np.log(train_y[ind_train[:part_size]]))
    # Rows with column 16 >= 1 cannot be log-transformed; they keep the
    # default prediction of 0
    flag = (sub_x_Test_part[:, 16] >= 1)
    ind_tmp0 = np.where(flag)[0]
    ind_tmp = np.where(~flag)[0]
    sub_x_Test_part[ind_tmp, 16] = np.log(1 - sub_x_Test_part[ind_tmp, 16])
    sub_x_Test_part[ind_tmp] = scaler.transform(sub_x_Test_part[ind_tmp])
    gp_preds_tmp = gp_predict(gp, sub_x_Test_part[ind_tmp])
    gp_preds = np.zeros(len(sub_x_Test_part))
    gp_preds[ind_tmp] = gp_preds_tmp
    return gp_preds

# Use a GBM classifier to predict whether the loan defaults or not, then invoke gbc_gp_predict_part
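The helper gp_predict used above is defined elsewhere in predict.py and is not shown on this page. Since the model is fit on np.log(train_y), a plausible reconstruction (hypothetical, not the project's actual code) simply inverts the log transform:

def gp_predict(gp, x):
    # Hypothetical sketch: undo the log transform applied to the targets
    return np.exp(gp.predict(x))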
Example 2
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_gaussian_process.py    License: MIT License
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
               random_start=10, beta0=None):
    # MLE estimation of a two-dimensional Gaussian Process model accounting for
    # anisotropy. Check random start optimization.
    # Test the GP interpolation for 2D output
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    f = lambda x: np.vstack((g(x), g(x))).T
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = f(X)
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
                         thetaU=[1e-1] * 2,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)

    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)) 
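The test examples on this page (Examples 2, 3, and 8 through 12) are copies of scikit-learn's own test_gaussian_process.py and assume module-level imports along these lines (legacy scikit-learn < 0.20):

import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_true, assert_greater, assert_raises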
Example 3
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_gaussian_process.py    License: MIT License
def test_random_starts():
    # Test that an increasing number of random-starts of GP fitting only
    # increases the reduced likelihood function of the optimal theta.
    n_samples, n_features = 50, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
    best_likelihood = -np.inf
    for random_start in range(1, 5):
        gp = GaussianProcess(regr="constant", corr="squared_exponential",
                             theta0=[1e-0] * n_features,
                             thetaL=[1e-4] * n_features,
                             thetaU=[1e+1] * n_features,
                             random_start=random_start, random_state=0,
                             verbose=False).fit(X, y)
        rlf = gp.reduced_likelihood_function()[0]
        assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
        best_likelihood = rlf 
Example 4
Project: pilco   Author: marcino239   File: GPS.py    License: GNU General Public License v2.0
def __init__(self, n_outputs, regr='constant', corr='squared_exponential',
             storage_mode='full', verbose=False, theta0=1e-1):
    # One independent GaussianProcess per output dimension
    self.gps = [gaussian_process.GaussianProcess(regr=regr, corr=corr,
                                                 storage_mode=storage_mode,
                                                 verbose=verbose,
                                                 theta0=theta0)
                for i in range(n_outputs)]
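The class keeps one independent GP per output dimension; its fit and predict methods are not shown here. A hypothetical sketch of how such a wrapper is typically completed (names assumed, not the original file's code; numpy imported as np):

def fit(self, X, Y):
    # Fit the i-th GP on the i-th output column
    for i, gp in enumerate(self.gps):
        gp.fit(X, Y[:, i])

def predict(self, X):
    # Stack the per-output predictive means column-wise
    return np.column_stack([gp.predict(X) for gp in self.gps])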
Example 5
Project: Load-Forecasting   Author: lbenning   File: gpr.py    License: MIT License
def gaussProcPred(xTrain, yTrain, xTest, covar):
    # Build features by concatenating each pair of consecutive input vectors
    xTrainAlter = []
    for i in range(1, len(xTrain)):
        tvec = xTrain[i - 1] + xTrain[i]
        xTrainAlter.append(tvec)
    # The first test vector pairs the last training vector with the first
    # test vector
    xTestAlter = []
    xTestAlter.append(xTrain[len(xTrain) - 1] + xTest[0])
    for i in range(1, len(xTest)):
        tvec = xTest[i - 1] + xTest[i]
        xTestAlter.append(tvec)
    clfr = gaussian_process.GaussianProcess(theta0=1e-2,
                                            thetaL=1e-4, thetaU=1e-1,
                                            corr=covar)
    clfr.fit(xTrainAlter, yTrain[1:])
    # predict returns (mean, MSE) when eval_MSE=True; keep only the mean
    return clfr.predict(xTestAlter, eval_MSE=True)[0]
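A call sketch, assuming xTrain and xTest are plain Python lists of feature lists (so + concatenates consecutive windows) and yTrain is the aligned target series; the data values here are made up for illustration:

xTrain = [[0.1, 0.2], [0.2, 0.3], [0.3, 0.4], [0.5, 0.1], [0.15, 0.25]]
yTrain = [1.0, 1.2, 1.1, 0.9, 1.05]
xTest = [[0.4, 0.5], [0.5, 0.6]]
preds = gaussProcPred(xTrain, yTrain, xTest, 'squared_exponential')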
Example 6
Project: XQuant   Author: X0Leon   File: bayesopt.py    License: MIT License
def __init__(self, f, pbounds):
        """
        Parameters:
        f: the black-box function to maximize
        pbounds: dict mapping each parameter name to a (min, max) tuple
        """
        self.pbounds = pbounds
        self.keys = list(pbounds.keys())
        self.dim = len(pbounds)
        self.bounds = []
        for key in self.pbounds.keys():
            self.bounds.append(self.pbounds[key])
        self.bounds = np.asarray(self.bounds)
        self.f = f

        self.initialized = False
        self.init_points = []
        self.x_init = []
        self.y_init = []

        self.X = None
        self.Y = None

        # Iteration counter
        self.i = 0

        # GaussianProcess from scikit-learn
        self.gp = GaussianProcess(corr=matern52,
                                  theta0=np.random.uniform(0.001, 0.05, self.dim),
                                  thetaL=1e-5 * np.ones(self.dim),
                                  thetaU=1e0 * np.ones(self.dim),
                                  random_start=30)

        # Utility (acquisition) function
        self.util = None
        # Output dictionary
        self.res = dict()
        self.res['max'] = {'max_val': None,
                           'max_params': None}
        self.res['all'] = {'values': [], 'params': []}
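matern52 is not defined in this snippet. It is passed through corr=, so it must follow the legacy custom-correlation signature corr(theta, d), where theta holds the autocorrelation parameters and d the componentwise feature differences. A plausible Matérn 5/2 sketch under that assumption (not necessarily the project's exact helper):

def matern52(theta, d):
    # Legacy corr(theta, d) interface: scale each feature difference by
    # theta, reduce to a distance r, then apply the Matern 5/2 kernel
    theta = np.asarray(theta, dtype=float).reshape(1, -1)
    d = np.asarray(d, dtype=float)
    r = np.sqrt(np.sum(theta * d ** 2, axis=1))
    return (1. + np.sqrt(5.) * r + 5. / 3. * r ** 2) * np.exp(-np.sqrt(5.) * r)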
Example 7
Project: AirTicketPredicting   Author: junlulocky   File: RegressionGaussianProcess.py    License: MIT License
def __init__(self, isTrain):
        super(RegressionGaussianProcess, self).__init__(isTrain)
        # data preprocessing
        #self.dataPreprocessing()

        # Create Gaussian process object
        self.gp = gaussian_process.GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1) 
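A hypothetical usage sketch of the wrapped regressor (class usage and the data names X_train, y_train, X_test are assumed, not shown in the original file):

model = RegressionGaussianProcess(isTrain=True)
model.gp.fit(X_train, y_train)
y_pred, mse = model.gp.predict(X_test, eval_MSE=True)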
Example 8
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_gaussian_process.py    License: MIT License
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    # MLE estimation of a one-dimensional Gaussian Process model.
    # Check random start optimization.
    # Test the interpolating property.
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                         random_start=random_start, verbose=False).fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)
    y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)

    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
                and np.allclose(MSE2, 0., atol=10)) 
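test_1d (and test_no_normalize below) relies on module-level fixtures defined at the top of the test file; in scikit-learn's original test_gaussian_process.py they are defined as:

f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()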
Example 9
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_gaussian_process.py    License: MIT License
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    # MLE estimation of a two-dimensional Gaussian Process model accounting for
    # anisotropy. Check random start optimization.
    # Test the interpolating property.
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = g(X).ravel()

    thetaL = [1e-4] * 2
    thetaU = [1e-1] * 2
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=thetaL,
                         thetaU=thetaU,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)

    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))

    eps = np.finfo(gp.theta_.dtype).eps
    assert_true(np.all(gp.theta_ >= thetaL - eps))  # Lower bounds of hyperparameters
    assert_true(np.all(gp.theta_ <= thetaU + eps))  # Upper bounds of hyperparameters 
Example 10
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_gaussian_process.py    License: MIT License
def test_wrong_number_of_outputs():
    gp = GaussianProcess()
    # Fitting 2 samples against 3 target values must raise ValueError
    assert_raises(ValueError, gp.fit, [[1, 2, 3], [4, 5, 6]], [1, 2, 3])
Example 11
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_gaussian_process.py    License: MIT License
def test_no_normalize():
    gp = GaussianProcess(normalize=False).fit(X, y)
    y_pred = gp.predict(X)
    assert_true(np.allclose(y_pred, y)) 
Example 12
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_gaussian_process.py    License: MIT License
def test_mse_solving():
    # Test that the MSE estimate is sane.
    # Non-regression test for ignoring the off-diagonals of the feature
    # covariance: a nugget that renders the covariance useless leaves only
    # the mean function, combined with low effective rank of the data.
    gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
                         thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
                         optimizer='Welch', regr="linear", random_state=0)

    X, y = make_regression(n_informative=3, n_features=60, noise=50,
                           random_state=0, effective_rank=1)

    gp.fit(X, y)
    assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())