Python sklearn.svm.LinearSVR() Examples

The following are 27 code examples showing how to use sklearn.svm.LinearSVR(). They are extracted from open source projects; the project, author, source file, and license are listed above each example.

You may also want to check out all available functions and classes of the module sklearn.svm.
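
Before the project examples, here is a minimal, self-contained sketch of the basic LinearSVR workflow (the data and all hyperparameter values below are purely illustrative):

from sklearn.datasets import make_regression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVR

# Synthetic regression data, for illustration only.
X, y = make_regression(n_samples=200, n_features=10, noise=10.0, random_state=0)

# LinearSVR is sensitive to feature scale, so scaling first is common practice.
model = make_pipeline(StandardScaler(),
                      LinearSVR(C=1.0, epsilon=0.1, random_state=0, max_iter=10000))
model.fit(X, y)

print(model.predict(X[:5]))   # predictions for the first five rows
print(model.score(X, y))      # R^2 on the training data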

Example 1
Project: nyoka   Author: nyoka-pmml   File: testScoreWithAdapaSklearn.py    License: Apache License 2.0
def test_15_linearsvr(self):
        print("\ntest 15 (linear svr without preprocessing)\n")
        X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()

        model = LinearSVR()
        pipeline_obj = Pipeline([
            ("model", model)
        ])
        pipeline_obj.fit(X, y)
        file_name = 'test15sklearn.pmml'

        skl_to_pmml(pipeline_obj, features, target, file_name)
        model_name = self.adapa_utility.upload_to_zserver(file_name)
        predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
        model_pred = pipeline_obj.predict(X_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True) 
Example 2
Project: nyoka   Author: nyoka-pmml   File: testScoreWithAdapaSklearn.py    License: Apache License 2.0
def test_16_linearsvr(self):
        print("\ntest 16 (linear svr with preprocessing)\n")
        X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()

        model = LinearSVR()
        pipeline_obj = Pipeline([
            ("scaler", MinMaxScaler()),
            ("model", model)
        ])
        pipeline_obj.fit(X, y)
        file_name = 'test16sklearn.pmml'

        skl_to_pmml(pipeline_obj, features, target, file_name)
        model_name = self.adapa_utility.upload_to_zserver(file_name)
        predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
        model_pred = pipeline_obj.predict(X_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True) 
Example 3
Project: reveal-graph-embedding   Author: MKLab-ITI   File: classification.py    License: Apache License 2.0
def meta_model_fit(X_train, y_train, svm_hardness, fit_intercept, number_of_threads, regressor_type="LinearSVR"):
    """
    Trains meta-labeler for predicting number of labels for each user.

    Based on: Tang, L., Rajan, S., & Narayanan, V. K. (2009, April).
              Large scale multi-label classification via metalabeler.
              In Proceedings of the 18th international conference on World wide web (pp. 211-220). ACM.
    """
    if regressor_type == "LinearSVR":
        # liblinear heuristic: prefer the primal formulation (dual=False)
        # when there are more samples than features.
        if X_train.shape[0] > X_train.shape[1]:
            dual = False
        else:
            dual = True

        model = LinearSVR(C=svm_hardness, random_state=0, dual=dual,
                          fit_intercept=fit_intercept)
        y_train_meta = y_train.sum(axis=1)
        model.fit(X_train, y_train_meta)
    else:
        raise RuntimeError("Invalid regressor type: {}".format(regressor_type))

    return model 
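
A hedged usage sketch for meta_model_fit (the multi-label data here is synthetic and illustrative, and number_of_threads is accepted but unused in the excerpt above):

import numpy as np
from sklearn.datasets import make_multilabel_classification

# Y is a binary indicator matrix; Y.sum(axis=1) is the label count per sample.
X, Y = make_multilabel_classification(n_samples=300, n_features=20,
                                      n_classes=5, random_state=0)

meta = meta_model_fit(X, Y, svm_hardness=1.0, fit_intercept=True,
                      number_of_threads=1, regressor_type="LinearSVR")
label_counts = np.rint(meta.predict(X)).astype(int)  # predicted number of labels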
Example 4
Project: CausalDiscoveryToolbox   Author: FenTechSolutions   File: FSRegression.py    License: MIT License
def predict_features(self, df_features, df_target, idx=0, C=.1, **kwargs):
        """For one variable, predict its neighbouring nodes.

        Args:
            df_features (pandas.DataFrame):
            df_target (pandas.Series):
            idx (int): (optional) for printing purposes
            kwargs (dict): additional options for algorithms
            C (float): Penalty parameter of the error term

        Returns:
            list: scores of each feature relatively to the target
        """
        lsvc = LinearSVR(C=C).fit(df_features.values, np.ravel(df_target.values))

        return np.abs(lsvc.coef_) 
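
Outside the class, the same scoring idea is easy to exercise; a minimal sketch (the data and column names are illustrative):

import numpy as np
import pandas as pd
from sklearn.svm import LinearSVR

# Toy data in which the target depends only on column 'a'.
rng = np.random.RandomState(0)
df_features = pd.DataFrame(rng.randn(200, 3), columns=['a', 'b', 'c'])
df_target = pd.Series(2.0 * df_features['a'] + 0.1 * rng.randn(200))

lsvr = LinearSVR(C=0.1).fit(df_features.values, np.ravel(df_target.values))
print(np.abs(lsvr.coef_))  # the score for 'a' should dominate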
Example 5
Project: causallib   Author: IBM   File: test_standardization.py    License: Apache License 2.0
def ensure_many_models(self):
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        import warnings
        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        for learner in [GradientBoostingRegressor, RandomForestRegressor, MLPRegressor,
                        ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor,
                        KNeighborsRegressor, SVR, LinearSVR]:
            learner = learner()
            learner_name = str(learner).split("(", maxsplit=1)[0]
            with self.subTest("Test fit using {learner}".format(learner=learner_name)):
                model = self.estimator.__class__(learner)
                model.fit(self.data_lin["X"], self.data_lin["a"], self.data_lin["y"])
                self.assertTrue(True)  # Fit did not crash 
Example 6
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_svm.py    License: MIT License
def test_svr():
    # Test Support Vector Regression

    diabetes = datasets.load_diabetes()
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=10.),
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)

    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) 
Example 7
Project: onnxmltools   Author: onnx   File: test_cml_GLMRegressorConverter.py    License: MIT License
def test_glm_regressor(self):
        X, y = make_regression(n_features=4, random_state=0)

        lr = LinearRegression()
        lr.fit(X, y)
        lr_coreml = coremltools.converters.sklearn.convert(lr)
        lr_onnx = convert(lr_coreml.get_spec())
        self.assertTrue(lr_onnx is not None)
        dump_data_and_model(X.astype(numpy.float32), lr, lr_onnx, basename="CmlLinearRegression-Dec4")

        svr = LinearSVR()
        svr.fit(X, y)
        svr_coreml = coremltools.converters.sklearn.convert(svr)
        svr_onnx = convert(svr_coreml.get_spec())
        self.assertTrue(svr_onnx is not None)
        dump_data_and_model(X.astype(numpy.float32), svr, svr_onnx, basename="CmlLinearSvr-Dec4") 
Example 8
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_svm.py    License: MIT License
def test_svr():
    # Test Support Vector Regression

    diabetes = datasets.load_diabetes()
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=10.),
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)

    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) 
Example 9
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_svm.py    License: MIT License
def test_linearsvr():
    # check that SVR(kernel='linear') and LinearSVR() give
    # comparable results
    diabetes = datasets.load_diabetes()
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score1 = lsvr.score(diabetes.data, diabetes.target)

    svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
    score2 = svr.score(diabetes.data, diabetes.target)

    assert_allclose(np.linalg.norm(lsvr.coef_),
                    np.linalg.norm(svr.coef_), 1, 0.0001)
    assert_almost_equal(score1, score2, 2) 
Example 10
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_svm.py    License: MIT License
def test_linearsvr_fit_sampleweight():
    # check correct result when sample_weight is all ones
    # check that SVR(kernel='linear') and LinearSVR() give
    # comparable results
    diabetes = datasets.load_diabetes()
    n_samples = len(diabetes.target)
    unit_weight = np.ones(n_samples)
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
                                    sample_weight=unit_weight)
    score1 = lsvr.score(diabetes.data, diabetes.target)

    lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)

    assert_allclose(np.linalg.norm(lsvr.coef_),
                    np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
    assert_almost_equal(score1, score2, 2)

    # check that fit(X) = fit([X1, X2, X3], sample_weight=[n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times, and so forth
    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
                                           sample_weight=random_weight)
    score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
                               sample_weight=random_weight)

    X_flat = np.repeat(diabetes.data, random_weight, axis=0)
    y_flat = np.repeat(diabetes.target, random_weight, axis=0)
    lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
    score4 = lsvr_flat.score(X_flat, y_flat)

    assert_almost_equal(score3, score4, 2) 
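
The weighting/repetition equivalence checked in the second half of the test can be reproduced standalone; a minimal sketch with toy values:

import numpy as np
from sklearn.svm import LinearSVR

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0.0, 1.1, 1.9, 3.2])
counts = np.array([1, 3, 2, 1])

# Fitting with integer sample_weight ...
weighted = LinearSVR(C=1e3, random_state=0).fit(X, y, sample_weight=counts)

# ... is approximately the same as fitting on rows repeated `counts` times.
repeated = LinearSVR(C=1e3, random_state=0).fit(np.repeat(X, counts, axis=0),
                                                np.repeat(y, counts))
print(weighted.coef_, repeated.coef_)  # should be close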
Example 11
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_svm.py    License: MIT License
def test_linearsvx_loss_penalty_deprecations():
    X, y = [[0.0], [1.0]], [0, 1]

    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the %s will be removed in %s")

    # LinearSVC
    # loss l1 --> hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "hinge", "loss='l1'", "1.0"),
                         svm.LinearSVC(loss="l1").fit, X, y)

    # loss l2 --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
                         svm.LinearSVC(loss="l2").fit, X, y)

    # LinearSVR
    # loss l1 --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "epsilon_insensitive", "loss='l1'",
                                "1.0"),
                         svm.LinearSVR(loss="l1").fit, X, y)

    # loss l2 --> squared_epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_epsilon_insensitive",
                                "loss='l2'", "1.0"),
                         svm.LinearSVR(loss="l2").fit, X, y) 
Example 12
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_svm.py    License: MIT License
def test_linear_svm_convergence_warnings():
    # Test that warnings are raised if model does not converge

    lsvc = svm.LinearSVC(random_state=0, max_iter=2)
    assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
    assert_equal(lsvc.n_iter_, 2)

    lsvr = svm.LinearSVR(random_state=0, max_iter=2)
    assert_warns(ConvergenceWarning, lsvr.fit, iris.data, iris.target)
    assert_equal(lsvr.n_iter_, 2) 
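
The same behaviour is straightforward to reproduce outside the test suite; a minimal sketch:

import warnings
from sklearn.datasets import load_diabetes
from sklearn.exceptions import ConvergenceWarning
from sklearn.svm import LinearSVR

X, y = load_diabetes(return_X_y=True)
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always", ConvergenceWarning)
    lsvr = LinearSVR(random_state=0, max_iter=2).fit(X, y)  # too few iterations to converge
assert any(issubclass(w.category, ConvergenceWarning) for w in caught)
print(lsvr.n_iter_)  # capped at max_iter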
Example 13
Project: driverlessai-recipes   Author: h2oai   File: linear_svm.py    License: Apache License 2.0
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
        X = dt.Frame(X)

        orig_cols = list(X.names)

        if self.num_classes >= 2:
            mod = linsvc(random_state=self.random_state, C=self.params["C"], penalty=self.params["penalty"],
                         loss=self.params["loss"], dual=self.params["dual"])
            kf = StratifiedKFold(n_splits=3, shuffle=True, random_state=self.random_state)
            model = CalibratedClassifierCV(base_estimator=mod, method='isotonic', cv=kf)
            lb = LabelEncoder()
            lb.fit(self.labels)
            y = lb.transform(y)
        else:
            model = LinearSVR(epsilon=self.params["epsilon"], C=self.params["C"], loss=self.params["loss"],
                              dual=self.params["dual"], random_state=self.random_state)
        self.means = dict()
        self.standard_scaler = StandardScaler()
        for col in X.names:
            XX = X[:, col]
            self.means[col] = XX.mean1()
            if self.means[col] is None:
                self.means[col] = 0
            XX.replace(None, self.means[col])
            X[:, col] = XX
            assert X[dt.isna(dt.f[col]), col].nrows == 0
        X = X.to_numpy()
        X = self.standard_scaler.fit_transform(X)
        model.fit(X, y, sample_weight=sample_weight)
        importances = np.zeros(len(orig_cols))
        if self.num_classes >= 2:
            for classifier in model.calibrated_classifiers_:
                importances += np.array(abs(classifier.base_estimator.get_coeff()))
        else:
            # LinearSVR.coef_ is already 1-D of shape (n_features,); no [0] indexing needed.
            importances += np.abs(model.coef_)

        self.set_model_properties(model=model,
                                  features=orig_cols,
                                  importances=importances.tolist(),
                                  iterations=0) 
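
For the regression branch above, a rough scikit-learn-only equivalent of the preprocessing (per-column mean imputation followed by standard scaling) is a plain pipeline; a sketch with illustrative hyperparameters, dropping the datatable-specific handling:

from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVR

reg = Pipeline([
    ('impute', SimpleImputer(strategy='mean')),  # mirrors the per-column mean replacement
    ('scale', StandardScaler()),                 # mirrors self.standard_scaler
    ('model', LinearSVR(epsilon=0.0, C=1.0, loss='epsilon_insensitive',
                        dual=True, random_state=0)),
])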
Example 14
Project: nyoka   Author: nyoka-pmml   File: _validateSchema.py    License: Apache License 2.0
def test_validate_sklearn_linarsvr_models_regression(self):
        model = LinearSVR()
        pipe = Pipeline([
            ('model', model)
        ])
        pipe.fit(self.X_reg, self.y_reg)
        file_name = 'linearsvr_model_regression.pmml'
        skl_to_pmml(pipe, self.features_reg, 'target', file_name)
        self.assertEqual(self.schema.is_valid(file_name), True) 
Example 15
Project: nyoka   Author: nyoka-pmml   File: test_skl_to_pmml_UnitTest.py    License: Apache License 2.0
def test_sklearn_21(self):
        df = pd.read_csv('nyoka/tests/auto-mpg.csv')
        X = df.drop(['mpg', 'car name'], axis=1)
        y = df['mpg']

        features = X.columns
        target = 'mpg'
        f_name = "linearsvr_pmml.pmml"

        model = LinearSVR()
        pipeline_obj = Pipeline([
            ('model', model)
        ])

        pipeline_obj.fit(X, y)
        skl_to_pmml(pipeline_obj, features, target, f_name)
        pmml_obj = pml.parse(f_name, True)

        # 1
        self.assertEqual(os.path.isfile(f_name), True)

        # 2
        self.assertEqual("{:.16f}".format(model.intercept_[0]),
                         "{:.16f}".format(pmml_obj.RegressionModel[0].RegressionTable[0].intercept))

        # 3
        reg_tab = pmml_obj.RegressionModel[0].RegressionTable[0].NumericPredictor
        for model_val, pmml_val in zip(model.coef_, reg_tab):
            self.assertEqual("{:.16f}".format(model_val), "{:.16f}".format(pmml_val.coefficient)) 
Example 16
Project: coremltools   Author: apple   File: _LinearSVR.py    License: BSD 3-Clause "New" or "Revised" License
def convert(model, features, target):
    """Convert a LinearSVR model to the protobuf spec.
    Parameters
    ----------
    model: LinearSVR
        A trained LinearSVR model.

    features: [str]
        Names of the input columns.

    target: str
        Name of the output column.

    Returns
    -------
    model_spec: An object of type Model_pb.
        Protobuf representation of the model.
    """
    if not (_HAS_SKLEARN):
        raise RuntimeError(
            "scikit-learn not found. scikit-learn conversion API is disabled."
        )

    # Check the scikit learn model
    _sklearn_util.check_expected_type(model, _LinearSVR)
    _sklearn_util.check_fitted(model, lambda m: hasattr(m, "coef_"))

    return _MLModel(_linear_regression._convert(model, features, target)) 
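
In practice this converter is reached through the public coremltools entry point rather than called directly; a minimal sketch (mirroring Example 7 above):

from sklearn.datasets import make_regression
from sklearn.svm import LinearSVR
import coremltools

X, y = make_regression(n_features=4, random_state=0)
svr = LinearSVR().fit(X, y)

# Dispatches to the LinearSVR-specific converter shown above.
mlmodel = coremltools.converters.sklearn.convert(svr)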
Example 17
def test_linear_svr_evaluation(self):
        """
        Check that the evaluation results are the same in scikit-learn and coremltools.
        """
        ARGS = [
            {},
            {"C": 0.5, "epsilon": 0.25},
            {"dual": False, "loss": "squared_epsilon_insensitive"},
            {"tol": 0.005},
            {"fit_intercept": False},
            {"intercept_scaling": 1.5},
        ]

        input_names = self.scikit_data.feature_names
        df = pd.DataFrame(self.scikit_data.data, columns=input_names)

        for cur_args in ARGS:
            print(cur_args)
            cur_model = LinearSVR(**cur_args)
            cur_model.fit(self.scikit_data["data"], self.scikit_data["target"])
            spec = convert(cur_model, input_names, "target")

            df["prediction"] = cur_model.predict(self.scikit_data.data)

            metrics = evaluate_regressor(spec, df)
            self.assertAlmostEqual(metrics["max_error"], 0)
Example 18
Project: pycobra   Author: bhargavvader   File: cobra.py    License: MIT License
def load_default(self, machine_list='basic'):
        """
        Loads four scikit-learn regressors by default (the 'basic' list); the 'advanced' list adds more machines.

        Parameters
        ----------
        machine_list: optional, list of strings
            List of default machine names to be loaded.
        Returns
        -------
        self : returns an instance of self.
        """

        if machine_list == 'basic':
            machine_list = ['tree', 'ridge', 'random_forest', 'svm']
        if machine_list == 'advanced':
            machine_list = ['lasso', 'tree', 'ridge', 'random_forest', 'svm', 'bayesian_ridge', 'sgd']

        self.estimators_ = {}
        for machine in machine_list:
            try:
                if machine == 'lasso':
                    self.estimators_['lasso'] = linear_model.LassoCV(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'tree':
                    self.estimators_['tree'] = DecisionTreeRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'ridge':
                    self.estimators_['ridge'] = linear_model.RidgeCV().fit(self.X_k_, self.y_k_)
                if machine == 'random_forest':
                    self.estimators_['random_forest'] = RandomForestRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'svm':
                    self.estimators_['svm'] = LinearSVR(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'sgd':
                    self.estimators_['sgd'] = linear_model.SGDRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'bayesian_ridge':
                    self.estimators_['bayesian_ridge'] = linear_model.BayesianRidge().fit(self.X_k_, self.y_k_)
            except ValueError:
                continue
        return self 
Example 19
Project: causallib   Author: IBM   File: test_doublyrobust.py    License: Apache License 2.0
def ensure_many_models(self):
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
        from sklearn.neural_network import MLPClassifier
        from sklearn.neighbors import KNeighborsClassifier

        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        data = self.create_uninformative_ox_dataset()
        for propensity_learner in [GradientBoostingClassifier(n_estimators=10),
                                   RandomForestClassifier(n_estimators=100),
                                   MLPClassifier(hidden_layer_sizes=(5,)),
                                   KNeighborsClassifier(n_neighbors=20)]:
            weight_model = IPW(propensity_learner)
            propensity_learner_name = str(propensity_learner).split("(", maxsplit=1)[0]
            for outcome_learner in [GradientBoostingRegressor(n_estimators=10), RandomForestRegressor(n_estimators=10),
                                    MLPRegressor(hidden_layer_sizes=(5,)),
                                    ElasticNet(), RANSACRegressor(), HuberRegressor(), PassiveAggressiveRegressor(),
                                    KNeighborsRegressor(), SVR(), LinearSVR()]:
                outcome_learner_name = str(outcome_learner).split("(", maxsplit=1)[0]
                outcome_model = Standardization(outcome_learner)

                with self.subTest("Test fit & predict using {} & {}".format(propensity_learner_name,
                                                                            outcome_learner_name)):
                    model = self.estimator.__class__(outcome_model, weight_model)
                    model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)
                    model.estimate_individual_outcome(data["X"], data["a"])
                    self.assertTrue(True)  # Fit did not crash 
Example 20
Project: sklearn-onnx   Author: onnx   File: test_sklearn_glm_regressor_converter.py    License: MIT License
def test_model_linear_svr(self):
        model, X = fit_regression_model(LinearSVR())
        model_onnx = convert_sklearn(
            model, "linear SVR",
            [("input", FloatTensorType([None, X.shape[1]]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnLinearSvr-Dec4",
            allow_failure="StrictVersion("
            "onnxruntime.__version__)"
            "<= StrictVersion('0.2.1')",
        ) 
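
A hedged sketch of round-tripping the converted model through onnxruntime (assumes skl2onnx and onnxruntime are installed; the data and tensor names are illustrative):

import numpy as np
import onnxruntime as rt
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from sklearn.datasets import make_regression
from sklearn.svm import LinearSVR

X, y = make_regression(n_features=4, random_state=0)
model = LinearSVR().fit(X, y)

onx = convert_sklearn(model, "linear SVR",
                      [("input", FloatTensorType([None, X.shape[1]]))])
sess = rt.InferenceSession(onx.SerializeToString(),
                           providers=["CPUExecutionProvider"])
preds = sess.run(None, {"input": X[:5].astype(np.float32)})[0]
print(preds.ravel())         # should agree with ...
print(model.predict(X[:5]))  # ... to float32 precision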
Example 21
Project: sklearn-onnx   Author: onnx   File: test_sklearn_glm_regressor_converter.py    License: MIT License
def test_model_linear_svr_int(self):
        model, X = fit_regression_model(LinearSVR(), is_int=True)
        model_onnx = convert_sklearn(
            model, "linear SVR",
            [("input", Int64TensorType([None, X.shape[1]]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnLinearSvrInt-Dec4",
            allow_failure="StrictVersion("
            "onnxruntime.__version__)"
            "<= StrictVersion('0.2.1')",
        ) 
Example 22
Project: sklearn-onnx   Author: onnx   File: test_sklearn_glm_regressor_converter.py    License: MIT License
def test_model_linear_svr_bool(self):
        model, X = fit_regression_model(LinearSVR(), is_bool=True)
        model_onnx = convert_sklearn(
            model, "linear SVR",
            [("input", BooleanTensorType([None, X.shape[1]]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnLinearSVRBool",
            allow_failure="StrictVersion("
            "onnxruntime.__version__)"
            "<= StrictVersion('0.2.1')",
        ) 
Example 23
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_svm.py    License: MIT License
def test_linearsvr():
    # check that SVR(kernel='linear') and LinearSVR() give
    # comparable results
    diabetes = datasets.load_diabetes()
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score1 = lsvr.score(diabetes.data, diabetes.target)

    svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
    score2 = svr.score(diabetes.data, diabetes.target)

    assert_allclose(np.linalg.norm(lsvr.coef_),
                    np.linalg.norm(svr.coef_), 1, 0.0001)
    assert_almost_equal(score1, score2, 2) 
Example 24
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_svm.py    License: MIT License
def test_linearsvr_fit_sampleweight():
    # check correct result when sample_weight is all ones
    # check that SVR(kernel='linear') and LinearSVR() give
    # comparable results
    diabetes = datasets.load_diabetes()
    n_samples = len(diabetes.target)
    unit_weight = np.ones(n_samples)
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
                                    sample_weight=unit_weight)
    score1 = lsvr.score(diabetes.data, diabetes.target)

    lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)

    assert_allclose(np.linalg.norm(lsvr.coef_),
                    np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
    assert_almost_equal(score1, score2, 2)

    # check that fit(X) = fit([X1, X2, X3], sample_weight=[n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times, and so forth
    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
                                           sample_weight=random_weight)
    score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
                               sample_weight=random_weight)

    X_flat = np.repeat(diabetes.data, random_weight, axis=0)
    y_flat = np.repeat(diabetes.target, random_weight, axis=0)
    lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
    score4 = lsvr_flat.score(X_flat, y_flat)

    assert_almost_equal(score3, score4, 2) 
Example 25
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_svm.py    License: MIT License
def test_linearsvx_loss_penalty_deprecations():
    X, y = [[0.0], [1.0]], [0, 1]

    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the %s will be removed in %s")

    # LinearSVC
    # loss l1 --> hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "hinge", "loss='l1'", "1.0"),
                         svm.LinearSVC(loss="l1").fit, X, y)

    # loss l2 --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
                         svm.LinearSVC(loss="l2").fit, X, y)

    # LinearSVR
    # loss l1 --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "epsilon_insensitive", "loss='l1'",
                                "1.0"),
                         svm.LinearSVR(loss="l1").fit, X, y)

    # loss l2 --> squared_epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_epsilon_insensitive",
                                "loss='l2'", "1.0"),
                         svm.LinearSVR(loss="l2").fit, X, y) 
Example 26
Project: twitter-stock-recommendation   Author: alvarobartt   File: test_svm.py    License: MIT License
def test_svr_coef_sign():
    # Test that SVR(kernel="linear") has coef_ with the right sign.
    # Non-regression test for #2933.
    X = np.random.RandomState(21).randn(10, 3)
    y = np.random.RandomState(12).randn(10)

    for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
                svm.LinearSVR()]:
        svr.fit(X, y)
        assert_array_almost_equal(svr.predict(X),
                                  np.dot(X, svr.coef_.ravel()) + svr.intercept_) 
Example 27
Project: causallib   Author: IBM   File: test_doublyrobust.py    License: Apache License 2.0
def test_many_models(self):
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
        from sklearn.neural_network import MLPClassifier
        from sklearn.neighbors import KNeighborsClassifier

        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        data = self.create_uninformative_ox_dataset()

        for propensity_learner in [GradientBoostingClassifier(n_estimators=10),
                                   RandomForestClassifier(n_estimators=100),
                                   MLPClassifier(hidden_layer_sizes=(5,)),
                                   KNeighborsClassifier(n_neighbors=20)]:
            weight_model = IPW(propensity_learner)
            propensity_learner_name = str(propensity_learner).split("(", maxsplit=1)[0]
            for outcome_learner in [GradientBoostingRegressor(n_estimators=10),
                                    RandomForestRegressor(n_estimators=10),
                                    RANSACRegressor(), HuberRegressor(), SVR(), LinearSVR()]:
                outcome_learner_name = str(outcome_learner).split("(", maxsplit=1)[0]
                outcome_model = Standardization(outcome_learner)

                with self.subTest("Test fit using {} & {}".format(propensity_learner_name, outcome_learner_name)):
                    model = self.estimator.__class__(outcome_model, weight_model)
                    model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)
                    self.assertTrue(True)  # Fit did not crash

            for outcome_learner in [MLPRegressor(hidden_layer_sizes=(5,)), ElasticNet(),
                                    PassiveAggressiveRegressor(), KNeighborsRegressor()]:
                outcome_learner_name = str(outcome_learner).split("(", maxsplit=1)[0]
                outcome_model = Standardization(outcome_learner)

                with self.subTest("Test fit using {} & {}".format(propensity_learner_name, outcome_learner_name)):
                    model = self.estimator.__class__(outcome_model, weight_model)
                    with self.assertRaises(TypeError):
                        # Joffe forces learning with sample_weights,
                        # not all ML models support that and so calling should fail
                        model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)