Python sklearn.linear_model.LinearRegression() Examples

The following are 30 code examples of sklearn.linear_model.LinearRegression(), drawn from open-source projects. The attribution line above each example names the original project and source file. You may also want to check out the other available functions and classes of the sklearn.linear_model module.
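Before the project examples, here is a minimal, self-contained sketch of the estimator's basic fit/predict cycle (the synthetic data and expected values are illustrative, not taken from any of the projects below):

import numpy as np
from sklearn.linear_model import LinearRegression

# Synthetic data: y = 2x + 1 plus Gaussian noise (illustrative only)
rng = np.random.RandomState(0)
X = rng.uniform(0, 10, size=(100, 1))
y = 2 * X.ravel() + 1 + rng.normal(0, 0.5, size=100)

model = LinearRegression()
model.fit(X, y)                       # estimates coef_ and intercept_
print(model.coef_, model.intercept_)  # close to [2.] and 1.0
print(model.predict([[5.0]]))         # predict expects a 2D array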
Example #1
Source File: test_estimator.py    From heamy with MIT License
def test_stacking():
    model = Regressor(estimator=LinearRegression, parameters={}, dataset=RealDataset)
    ds = model.stack(10)

    assert ds.X_train.shape[0] == model.dataset.X_train.shape[0]
    assert ds.X_test.shape[0] == model.dataset.X_test.shape[0]
    assert ds.y_train.shape[0] == model.dataset.y_train.shape[0]

    model = Regressor(estimator=LinearRegression, parameters={}, dataset=RealDataset)
    ds = model.stack(10, full_test=False)
    assert np.isnan(ds.X_train).sum() == 0
    assert ds.X_train.shape[0] == model.dataset.X_train.shape[0]
    assert ds.X_test.shape[0] == model.dataset.X_test.shape[0]
    assert ds.y_train.shape[0] == model.dataset.y_train.shape[0]

    model = Regressor(estimator=LinearRegression, parameters={}, dataset=RealDataset)
    model.dataset.load()
    ds = model.stack(10, full_test=False)
    # Check cache
    assert np.isnan(ds.X_train).sum() == 0
    assert ds.X_train.shape[0] == model.dataset.X_train.shape[0]
    assert ds.X_test.shape[0] == model.dataset.X_test.shape[0]
    assert ds.y_train.shape[0] == model.dataset.y_train.shape[0] 
Example #2
Source File: test_LinearRegression.py    From differential-privacy-library with MIT License
def test_same_results(self):
        from sklearn import datasets
        from sklearn.model_selection import train_test_split
        from sklearn import linear_model

        dataset = datasets.load_iris()
        X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.2)

        clf = LinearRegression(data_norm=12, epsilon=float("inf"),
                               bounds_X=([4.3, 2.0, 1.0, 0.1], [7.9, 4.4, 6.9, 2.5]), bounds_y=(0, 2))
        clf.fit(X_train, y_train)

        predict1 = clf.predict(X_test)

        clf = linear_model.LinearRegression(normalize=False)
        clf.fit(X_train, y_train)

        predict2 = clf.predict(X_test)

        self.assertTrue(np.allclose(predict1, predict2)) 
Example #3
Source File: test_target.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_transform_target_regressor_2d_transformer_multioutput():
    # Check consistency with transformer accepting only 2D array and a 2D y
    # array.
    X = friedman[0]
    y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
    transformer = StandardScaler()
    regr = TransformedTargetRegressor(regressor=LinearRegression(),
                                      transformer=transformer)
    y_pred = regr.fit(X, y).predict(X)
    assert y.shape == y_pred.shape
    # consistency forward transform
    y_tran = regr.transformer_.transform(y)
    _check_standard_scaled(y, y_tran)
    assert y.shape == y_pred.shape
    # consistency inverse transform
    assert_allclose(y, regr.transformer_.inverse_transform(
        y_tran).squeeze())
    # consistency of the regressor
    lr = LinearRegression()
    transformer2 = clone(transformer)
    lr.fit(X, transformer2.fit_transform(y))
    y_lr_pred = lr.predict(X)
    assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
    assert_allclose(regr.regressor_.coef_, lr.coef_) 
Example #4
Source File: test_LinearRegression.py    From differential-privacy-library with MIT License
def test_accountant(self):
        from diffprivlib.accountant import BudgetAccountant

        acc = BudgetAccountant()
        X = np.linspace(-1, 1, 1000)
        y = X.copy()
        X = X[:, np.newaxis]

        clf = LinearRegression(epsilon=2, data_norm=1, fit_intercept=False, accountant=acc)
        clf.fit(X, y)
        self.assertEqual((2, 0), acc.total())

        with BudgetAccountant(3, 0) as acc2:
            clf = LinearRegression(epsilon=2, data_norm=1, fit_intercept=False)
            clf.fit(X, y)
            self.assertEqual((2, 0), acc2.total())

            with self.assertRaises(BudgetError):
                clf.fit(X, y) 
Example #5
Source File: tests_regression.py    From discomll with Apache License 2.0
def test_lin_reg(self):
        # python -m unittest tests_regression.Tests_Regression.test_lin_reg
        from sklearn import linear_model
        from discomll.regression import linear_regression

        x_train, y_train, x_test, y_test = datasets.ex3()
        train_data, test_data = datasets.ex3_discomll()

        lin_reg = linear_model.LinearRegression()  # Create linear regression object
        lin_reg.fit(x_train, y_train)  # Train the model using the training sets
        thetas1 = [lin_reg.intercept_] + lin_reg.coef_[1:].tolist()
        prediction1 = lin_reg.predict(x_test)

        thetas_url = linear_regression.fit(train_data)
        thetas2 = [v for k, v in result_iterator(thetas_url["linreg_fitmodel"])]
        results = linear_regression.predict(test_data, thetas_url)
        prediction2 = [v[0] for k, v in result_iterator(results)]

        self.assertTrue(np.allclose(thetas1, thetas2))
        self.assertTrue(np.allclose(prediction1, prediction2)) 
Example #6
Source File: 1_linear_basic.py    From deep-learning-note with MIT License
def trainModel(trainData, features, labels):
    """
    Estimate the model parameters from the training data.

    Parameters
    ----------
    trainData : DataFrame, training set containing both features and labels

    features : list of feature column names

    labels : list of label column names

    Returns
    -------
    model : LinearRegression, the fitted linear model
    """
    # Create a linear regression model
    model = linear_model.LinearRegression()
    # Fit the model, estimating its parameters
    model.fit(trainData[features], trainData[labels])
    return model
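A quick usage sketch for trainModel (the frame and the column names "x"/"y" are invented for illustration, not from the original script):

import pandas as pd
from sklearn import linear_model

# Hypothetical training frame with one feature column and one label column
trainData = pd.DataFrame({"x": [1.0, 2.0, 3.0, 4.0],
                          "y": [2.1, 3.9, 6.2, 7.8]})
model = trainModel(trainData, features=["x"], labels=["y"])
print(model.coef_, model.intercept_)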
Example #7
Source File: test_pipeline.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_pipeline_raise_set_params_error():
    # Test pipeline raises set params error message for nested models.
    pipe = Pipeline([('cls', LinearRegression())])

    # expected error message
    error_msg = ('Invalid parameter %s for estimator %s. '
                 'Check the list of available parameters '
                 'with `estimator.get_params().keys()`.')

    assert_raise_message(ValueError,
                         error_msg % ('fake', pipe),
                         pipe.set_params,
                         fake='nope')

    # nested model check
    assert_raise_message(ValueError,
                         error_msg % ("fake", pipe),
                         pipe.set_params,
                         fake__estimator='nope') 
Example #8
Source File: test_target.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_transform_target_regressor_error():
    X, y = friedman
    # provide a transformer and functions at the same time
    regr = TransformedTargetRegressor(regressor=LinearRegression(),
                                      transformer=StandardScaler(),
                                      func=np.exp, inverse_func=np.log)
    assert_raises_regex(ValueError, "'transformer' and functions"
                        " 'func'/'inverse_func' cannot both be set.",
                        regr.fit, X, y)
    # fit with sample_weight with a regressor which does not support it
    sample_weight = np.ones((y.shape[0],))
    regr = TransformedTargetRegressor(regressor=Lasso(),
                                      transformer=StandardScaler())
    assert_raises_regex(TypeError, r"fit\(\) got an unexpected keyword "
                        "argument 'sample_weight'", regr.fit, X, y,
                        sample_weight=sample_weight)
    # func is given but inverse_func is not
    regr = TransformedTargetRegressor(func=np.exp)
    assert_raises_regex(ValueError, "When 'func' is provided, 'inverse_func'"
                        " must also be provided", regr.fit, X, y) 
Example #9
Source File: test_bootstrap.py    From bayesian_bootstrap with MIT License
def test_parameter_estimation_resampling_low_memory(self):
        X = np.random.uniform(0, 4, 1000)
        y = X + np.random.normal(0, 1, 1000)
        m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=True)
        m.fit(X.reshape(-1, 1), y)
        coef_samples = [b.coef_ for b in m.base_models_]
        intercept_samples = [b.intercept_ for b in m.base_models_]
        self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
        l, r = central_credible_interval(coef_samples, alpha=0.05)
        self.assertLess(l, 1)
        self.assertGreater(r, 1)
        l, r = highest_density_interval(coef_samples, alpha=0.05)
        self.assertLess(l, 1)
        self.assertGreater(r, 1)
        self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
        l, r = central_credible_interval(intercept_samples, alpha=0.05)
        self.assertLess(l, 0)
        self.assertGreater(r, 0)
        self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
        l, r = highest_density_interval(intercept_samples, alpha=0.05)
        self.assertLess(l, 0)
        self.assertGreater(r, 0) 
Example #10
Source File: test_bootstrap.py    From bayesian_bootstrap with MIT License
def test_parameter_estimation_resampling(self):
        X = np.random.uniform(0, 4, 1000)
        y = X + np.random.normal(0, 1, 1000)
        m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=False)
        m.fit(X.reshape(-1, 1), y)
        coef_samples = [b.coef_ for b in m.base_models_]
        intercept_samples = [b.intercept_ for b in m.base_models_]
        self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
        l, r = central_credible_interval(coef_samples, alpha=0.05)
        self.assertLess(l, 1)
        self.assertGreater(r, 1)
        l, r = highest_density_interval(coef_samples, alpha=0.05)
        self.assertLess(l, 1)
        self.assertGreater(r, 1)
        self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
        l, r = central_credible_interval(intercept_samples, alpha=0.05)
        self.assertLess(l, 0)
        self.assertGreater(r, 0)
        self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
        l, r = highest_density_interval(intercept_samples, alpha=0.05)
        self.assertLess(l, 0)
        self.assertGreater(r, 0) 
Example #11
Source File: test_bootstrap.py    From bayesian_bootstrap with MIT License
def test_parameter_estimation_bayes_low_memory(self):
        X = np.random.uniform(0, 4, 1000)
        y = X + np.random.normal(0, 1, 1000)
        m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=True)
        m.fit(X.reshape(-1, 1), y)
        coef_samples = [b.coef_ for b in m.base_models_]
        intercept_samples = [b.intercept_ for b in m.base_models_]
        self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
        l, r = central_credible_interval(coef_samples, alpha=0.05)
        self.assertLess(l, 1)
        self.assertGreater(r, 1)
        l, r = highest_density_interval(coef_samples, alpha=0.05)
        self.assertLess(l, 1)
        self.assertGreater(r, 1)
        self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
        l, r = central_credible_interval(intercept_samples, alpha=0.05)
        self.assertLess(l, 0)
        self.assertGreater(r, 0)
        self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
        l, r = highest_density_interval(intercept_samples, alpha=0.05)
        self.assertLess(l, 0)
        self.assertGreater(r, 0) 
Example #12
Source File: test_gradient_boosting.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_gradient_boosting_with_init_pipeline():
    # Check that the init estimator can be a pipeline (see issue #13466)

    X, y = make_regression(random_state=0)
    init = make_pipeline(LinearRegression())
    gb = GradientBoostingRegressor(init=init)
    gb.fit(X, y)  # pipeline without sample_weight works fine

    with pytest.raises(
            ValueError,
            match='The initial estimator Pipeline does not support sample '
                  'weights'):
        gb.fit(X, y, sample_weight=np.ones(X.shape[0]))

    # Passing sample_weight to a pipeline raises a ValueError. This test makes
    # sure we make the distinction between ValueError raised by a pipeline that
    # was passed sample_weight, and a ValueError raised by a regular estimator
    # whose input checking failed.
    with pytest.raises(
            ValueError,
            match='nu <= 0 or nu > 1'):
        # Note that NuSVR properly supports sample_weight
        init = NuSVR(gamma='auto', nu=1.5)
        gb = GradientBoostingRegressor(init=init)
        gb.fit(X, y, sample_weight=np.ones(X.shape[0])) 
Example #13
Source File: LinearRegression_scikit-learn.py    From MachineLearning_Python with MIT License
def linearRegression():
    print("Loading data...\n")
    data = loadtxtAndcsv_data("data.txt", ",", np.float64)  # read the data
    X = np.array(data[:, 0:-1], dtype=np.float64)  # X: every column except the last
    y = np.array(data[:, -1], dtype=np.float64)    # y: the last column

    # Standardize the features
    scaler = StandardScaler()
    scaler.fit(X)
    x_train = scaler.transform(X)
    x_test = scaler.transform(np.array([[1650, 3]]))  # transform expects a 2D array

    # Fit the linear model
    model = linear_model.LinearRegression()
    model.fit(x_train, y)

    # Prediction
    result = model.predict(x_test)
    print(model.coef_)       # coefficients of the features in the decision function
    print(model.intercept_)  # the bias (intercept); 0 when fit_intercept=False
    print(result)            # predicted value


# Load txt and csv files
Example #14
Source File: test_linear.py    From m2cgen with MIT License
def test_two_features():
    estimator = linear_model.LinearRegression()
    estimator.coef_ = np.array([1, 2])
    estimator.intercept_ = np.array([3])

    assembler = assemblers.SklearnLinearModelAssembler(estimator)
    actual = assembler.assemble()

    expected = ast.BinNumExpr(
        ast.BinNumExpr(
            ast.NumVal(3),
            ast.BinNumExpr(
                ast.FeatureRef(0),
                ast.NumVal(1),
                ast.BinNumOpType.MUL),
            ast.BinNumOpType.ADD),
        ast.BinNumExpr(
            ast.FeatureRef(1),
            ast.NumVal(2),
            ast.BinNumOpType.MUL),
        ast.BinNumOpType.ADD)

    assert utils.cmp_exprs(actual, expected) 
Example #15
Source File: response_matrix.py    From ocelot with GNU General Public License v3.0
def retrieve_from_scan(self, df_scan):
        from sklearn.linear_model import LinearRegression

        bpm_x = [self.bpm2x_name(bpm) for bpm in self.bpm_names]
        bpm_y = [self.bpm2y_name(bpm) for bpm in self.bpm_names]
        bpm_names_xy = bpm_x + bpm_y

        x = df_scan.loc[:, self.cor_names].values
        y = df_scan.loc[:, bpm_names_xy].values

        reg = LinearRegression().fit(x, y)
        #x_test = np.eye(np.shape(x)[1])
        rm = reg.coef_
        #df_rm = pd.DataFrame(rm.T, columns=self.cor_names, index=bpm_x+bpm_y)
        self.df = self.data2df(matrix=rm, bpm_names=self.bpm_names, cor_names=self.cor_names)
        return self.df 
Example #16
Source File: test_contrasts.py    From nistats with BSD 3-Clause "New" or "Revised" License
def test_fixed_effect_contrast_nonzero_effect():
    X, y = make_regression(n_features=5, n_samples=20, random_state=0)
    y = y[:, None]
    labels, results = run_glm(y, X, 'ols')
    coef = LinearRegression(fit_intercept=False).fit(X, y).coef_
    for i in range(X.shape[1]):
        contrast = np.zeros(X.shape[1])
        contrast[i] = 1.
        fixed_effect = _compute_fixed_effect_contrast([labels],
                                                      [results],
                                                      [contrast],
                                                      )
        assert_almost_equal(fixed_effect.effect_size(), coef.ravel()[i])
        fixed_effect = _compute_fixed_effect_contrast(
            [labels] * 3, [results] * 3, [contrast] * 3)
        assert_almost_equal(fixed_effect.effect_size(), coef.ravel()[i]) 
Example #17
Source File: DimensionReduction.py    From FAE with GNU General Public License v3.0
def CalculateVIF2(self, df):
        # initialize dictionaries
        vif_dict, tolerance_dict = {}, {}

        # form input data for each exogenous variable
        for exog in df.columns:
            not_exog = [i for i in df.columns if i != exog]
            X, y = df[not_exog], df[exog]

            # extract r-squared from the fit
            r_squared = LinearRegression().fit(X, y).score(X, y)

            # calculate VIF
            vif = 1/(1 - r_squared)
            vif_dict[exog] = vif

            # calculate tolerance
            tolerance = 1 - r_squared
            tolerance_dict[exog] = tolerance

        # return VIF DataFrame
        df_vif = pd.DataFrame({'VIF': vif_dict, 'Tolerance': tolerance_dict})

        return df_vif 
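CalculateVIF2 only needs a numeric pandas DataFrame, so the VIF formula it implements can be checked standalone like this (the data and column names are invented; "a" and "b" are constructed to be collinear so their VIFs come out large):

import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
a = rng.normal(size=200)
df = pd.DataFrame({"a": a,
                   "b": 0.9 * a + rng.normal(scale=0.1, size=200),
                   "c": rng.normal(size=200)})

# Regress each column on the others; VIF = 1 / (1 - R^2)
for col in df.columns:
    X, y = df.drop(columns=col), df[col]
    r_squared = LinearRegression().fit(X, y).score(X, y)
    print(col, 1.0 / (1.0 - r_squared))  # 'a' and 'b' get large VIFs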
Example #18
Source File: top_factors.py    From healthcareai-py with MIT License
def prepare_fit_model_for_factors(model_type, x_train, y_train):
    """
    Given a model type, train and test data
    
    Args:
        model_type (str): 'classification' or 'regression'
        x_train:
        y_train:

    Returns:
        (sklearn.base.BaseEstimator): A fit model.
    """

    if model_type == 'classification':
        algorithm = LogisticRegression()
    elif model_type == 'regression':
        algorithm = LinearRegression()
    else:
        algorithm = None

    if algorithm is not None:
        algorithm.fit(x_train, y_train)

    return algorithm 
Example #19
Source File: test_anomaly_detectors.py    From gordo with GNU Affero General Public License v3.0
def test_diff_detector_cross_validate(return_estimator: bool):
    """
    DiffBasedAnomalyDetector.cross_validate implementation should be the
    same as sklearn.model_selection.cross_validate if called the same.

    And it always will update `return_estimator` to True, as it requires
    the intermediate models to calculate the thresholds
    """
    X = np.random.random((100, 10))
    y = np.random.random((100, 1))

    model = DiffBasedAnomalyDetector(base_estimator=LinearRegression())

    cv = TimeSeriesSplit(n_splits=3)
    cv_results_da = model.cross_validate(
        X=X, y=y, cv=cv, return_estimator=return_estimator
    )
    cv_results_sk = cross_validate(model, X=X, y=y, cv=cv, return_estimator=True)

    assert cv_results_da.keys() == cv_results_sk.keys() 
Example #20
Source File: test_anomaly_detectors.py    From gordo with GNU Affero General Public License v3.0
def test_diff_detector_require_thresholds(require_threshold: bool):
    """
    Should fail if requiring thresholds, but not calling cross_validate
    """
    X = pd.DataFrame(np.random.random((100, 5)))
    y = pd.DataFrame(np.random.random((100, 2)))

    model = DiffBasedAnomalyDetector(
        base_estimator=MultiOutputRegressor(LinearRegression()),
        require_thresholds=require_threshold,
    )

    model.fit(X, y)

    if require_threshold:
        # FAIL: Forgot to call .cross_validate to calculate thresholds.
        with pytest.raises(AttributeError):
            model.anomaly(X, y)

        model.cross_validate(X=X, y=y)
        model.anomaly(X, y)
    else:
        # thresholds not required
        model.anomaly(X, y) 
Example #21
Source File: test_run.py    From nyaggle with MIT License
def test_experiment_sklearn_regressor(tmpdir_name):
    X, y = make_regression_df(n_samples=1024, n_num_features=10, n_cat_features=0,
                              random_state=0, id_column='user_id')

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

    params = {
        'fit_intercept': True
    }

    result = run_experiment(params, X_train, y_train, X_test, tmpdir_name, with_auto_prep=False,
                            algorithm_type=LinearRegression)

    assert len(np.unique(result.oof_prediction)) > 5  # making sure prediction is not binarized
    assert len(np.unique(result.test_prediction)) > 5
    assert mean_squared_error(y_train, result.oof_prediction) == result.metrics[-1]

    _check_file_exists(tmpdir_name) 
Example #22
Source File: test_postprocess.py    From yatsm with MIT License
def test_refit_nochange_reg(sim_nochange):
    """ Test refit ``keep_regularized=False`` (i.e., not ignoring coef == 0)
    """
    from sklearn.linear_model import LinearRegression as OLS
    estimator = OLS()

    refit = refit_record(sim_nochange, 'ols', estimator,
                         keep_regularized=False)
    assert 'ols_coef' in refit.dtype.names
    assert 'ols_rmse' in refit.dtype.names

    coef = np.array([[-3.83016528e+03, -3.83016528e+03],
                     [5.24635240e-03, 5.24635240e-03]])
    rmse = np.array([0.96794599, 0.96794599])
    np.testing.assert_allclose(refit[0]['ols_coef'], coef)
    np.testing.assert_allclose(refit[0]['ols_rmse'], rmse) 
Example #23
Source File: test_linear.py    From m2cgen with MIT License
def test_single_feature():
    estimator = linear_model.LinearRegression()
    estimator.coef_ = np.array([1])
    estimator.intercept_ = np.array([3])

    assembler = assemblers.SklearnLinearModelAssembler(estimator)
    actual = assembler.assemble()

    expected = ast.BinNumExpr(
        ast.NumVal(3),
        ast.BinNumExpr(
            ast.FeatureRef(0),
            ast.NumVal(1),
            ast.BinNumOpType.MUL),
        ast.BinNumOpType.ADD)

    assert utils.cmp_exprs(actual, expected) 
Example #24
Source File: test_theil_sen.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_theil_sen_2d():
    X, y, w, c = gen_toy_problem_2d()
    # Check that Least Squares fails
    lstq = LinearRegression().fit(X, y)
    assert_greater(norm(lstq.coef_ - w), 1.0)
    # Check that Theil-Sen works
    theil_sen = TheilSenRegressor(max_subpopulation=1e3,
                                  random_state=0).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, w, 1)
    assert_array_almost_equal(theil_sen.intercept_, c, 1) 
Example #25
Source File: test_builder.py    From gordo with GNU Affero General Public License v3.0
def test_model_builder_metrics_list(metrics_: Optional[List[str]]):
    model_config = {
        "sklearn.multioutput.MultiOutputRegressor": {
            "estimator": "sklearn.linear_model.LinearRegression"
        }
    }
    data_config = get_random_data()

    evaluation_config: Dict[str, Any] = {"cv_mode": "full_build"}
    if metrics_:
        evaluation_config.update({"metrics": metrics_})

    machine = Machine(
        name="model-name",
        dataset=data_config,
        model=model_config,
        evaluation=evaluation_config,
        project_name="test",
    )
    _model, machine = ModelBuilder(machine).build()

    expected_metrics = metrics_ or [
        "sklearn.metrics.explained_variance_score",
        "sklearn.metrics.r2_score",
        "sklearn.metrics.mean_squared_error",
        "sklearn.metrics.mean_absolute_error",
    ]

    assert all(
        metric.split(".")[-1].replace("_", "-")
        in machine.metadata.build_metadata.model.cross_validation.scores
        for metric in expected_metrics
    ) 
Example #26
Source File: test_pipeline.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_classes_property():
    iris = load_iris()
    X = iris.data
    y = iris.target

    reg = make_pipeline(SelectKBest(k=1), LinearRegression())
    reg.fit(X, y)
    assert_raises(AttributeError, getattr, reg, "classes_")

    clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
    assert_raises(AttributeError, getattr, clf, "classes_")
    clf.fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y)) 
Example #27
Source File: example.py    From Python with MIT License
def linear_model_main(X_parameters, Y_parameters, predict_value):
    # Build the linear regression model
    regr = linear_model.LinearRegression()
    regr.fit(X_parameters, Y_parameters)
    predict_outcome = regr.predict(predict_value)
    predictions = {}
    # Intercept of the fitted plane
    predictions['intercept'] = regr.intercept_
    # Coefficients of the fitted plane
    predictions['coefficient'] = regr.coef_
    # Predicted value
    predictions['predicted_value'] = predict_outcome
    return predictions
Example #28
Source File: test_estimator_checks.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_check_estimator_clones():
    # check that check_estimator doesn't modify the estimator it receives
    from sklearn.datasets import load_iris
    iris = load_iris()

    for Estimator in [GaussianMixture, LinearRegression,
                      RandomForestClassifier, NMF, SGDClassifier,
                      MiniBatchKMeans]:
        with ignore_warnings(category=(FutureWarning, DeprecationWarning)):
            # when 'est = SGDClassifier()'
            est = Estimator()
            set_checking_parameters(est)
            set_random_state(est)
            # without fitting
            old_hash = _joblib.hash(est)
            check_estimator(est)
        assert_equal(old_hash, _joblib.hash(est))

        with ignore_warnings(category=(FutureWarning, DeprecationWarning)):
            # when 'est = SGDClassifier()'
            est = Estimator()
            set_checking_parameters(est)
            set_random_state(est)
            # with fitting
            est.fit(iris.data + 10, iris.target)
            old_hash = _joblib.hash(est)
            check_estimator(est)
        assert_equal(old_hash, _joblib.hash(est)) 
Example #29
Source File: test_estimator.py    From heamy with MIT License
def test_blending():
    model = Regressor(estimator=LinearRegression, parameters={}, dataset=RealDataset)
    _, _, X_t, y_t = model.dataset.split(test_size=0.2)
    ds = model.blend(proportion=0.2)
    assert ds.X_test.shape[0] == model.dataset.X_test.shape[0]
    assert ds.X_train.shape[0] == X_t.shape[0]

    # Check cache
    ds = model.blend(proportion=0.2)
    assert ds.X_test.shape[0] == model.dataset.X_test.shape[0]
    assert ds.X_train.shape[0] == X_t.shape[0] 
Example #30
Source File: test_theil_sen.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_theil_sen_1d():
    X, y, w, c = gen_toy_problem_1d()
    # Check that Least Squares fails
    lstq = LinearRegression().fit(X, y)
    assert_greater(np.abs(lstq.coef_ - w), 0.9)
    # Check that Theil-Sen works
    theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, w, 1)
    assert_array_almost_equal(theil_sen.intercept_, c, 1)