Python sklearn.svm.SVR Examples
The following are 30 code examples of sklearn.svm.SVR(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the sklearn.svm module.
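Before the project examples, here is a minimal, self-contained sketch of the typical SVR workflow. The synthetic data and the hyperparameter values are illustrative assumptions, not taken from any project below:

import numpy as np
from sklearn.svm import SVR

# illustrative synthetic data: y = sin(x) plus noise
rng = np.random.RandomState(0)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel() + 0.1 * rng.randn(80)

# RBF-kernel support vector regressor; C, gamma and epsilon are example
# values and would normally be tuned (e.g. with GridSearchCV)
reg = SVR(kernel='rbf', C=10.0, gamma=0.5, epsilon=0.1)
reg.fit(X, y)
y_pred = reg.predict(X)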

Example #1
Source File: test_bagging.py From Mastering-Elasticsearch-7.0 with MIT License
def test_regression():
    # Check regression for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [0.5, 1.0],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyRegressor(),
                           DecisionTreeRegressor(),
                           KNeighborsRegressor(),
                           SVR(gamma='scale')]:
        for params in grid:
            BaggingRegressor(base_estimator=base_estimator,
                             random_state=rng,
                             **params).fit(X_train, y_train).predict(X_test)
Example #2
Source File: friedman_scores.py From mlens with MIT License
def build_ensemble(**kwargs):
    """Generate ensemble."""
    ens = SuperLearner(**kwargs)
    prep = {'Standard Scaling': [StandardScaler()],
            'Min Max Scaling': [MinMaxScaler()],
            'No Preprocessing': []}

    est = {'Standard Scaling': [ElasticNet(), Lasso(), KNeighborsRegressor()],
           'Min Max Scaling': [SVR()],
           'No Preprocessing': [RandomForestRegressor(random_state=SEED),
                                GradientBoostingRegressor()]}

    ens.add(est, prep)
    ens.add(GradientBoostingRegressor(), meta=True)
    return ens
Example #3
Source File: test_rfe.py From Mastering-Elasticsearch-7.0 with MIT License
def test_rfe_min_step():
    n_features = 10
    X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
    n_samples, n_features = X.shape
    estimator = SVR(kernel="linear")

    # Test when floor(step * n_features) <= 0
    selector = RFE(estimator, step=0.01)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)

    # Test when step is between (0, 1) and floor(step * n_features) > 0
    selector = RFE(estimator, step=0.20)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)

    # Test when step is an integer
    selector = RFE(estimator, step=5)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)
Example #4
Source File: testScoreWithAdapaSklearn.py From nyoka with Apache License 2.0
def test_21_svr(self):
    print("\ntest 21 (SVR without preprocessing)\n")
    X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
    model = SVR()
    pipeline_obj = Pipeline([
        ("model", model)
    ])
    pipeline_obj.fit(X, y)
    file_name = 'test21sklearn.pmml'
    skl_to_pmml(pipeline_obj, features, target, file_name)
    model_name = self.adapa_utility.upload_to_zserver(file_name)
    predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
    model_pred = pipeline_obj.predict(X_test)
    self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
Example #5
Source File: regression_svm_alternative.py From practicalDataAnalysisCookbook with GNU General Public License v2.0
def regression_svm(x_train, y_train, x_test, y_test, logC, logGamma):
    '''
        Estimate an SVM regressor
    '''
    # create the regressor object
    svm = sv.SVR(kernel='rbf', C=0.1 * logC, gamma=0.1 * logGamma)

    # estimate the model
    svm.fit(x_train, y_train)

    # decision function
    decision_values = svm.decision_function(x_test)

    # return the object
    return mt.roc_auc(y_test, decision_values)

# find the optimal values of C and gamma
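A caveat on this example: decision_function was deprecated for SVR and removed in later scikit-learn releases, so the call above fails on a current install. Because an SVR's decision values coincide with its predictions (Example #6 below checks exactly this), a drop-in replacement on a recent scikit-learn would be, as a sketch:

# on modern scikit-learn, use predict() in place of the removed
# decision_function(); for SVR both yield the same continuous values
decision_values = svm.predict(x_test)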
Example #6
Source File: test_svm.py From Mastering-Elasticsearch-7.0 with MIT License
def test_svr_predict():
    # Test SVR's decision_function.
    # Sanity check: test that predict implemented in python
    # returns the same as the one in libsvm.

    X = iris.data
    y = iris.target

    # linear kernel
    reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
    dec = np.dot(X, reg.coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())

    # rbf kernel
    reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
    rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
    dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
Example #7
Source File: scikitlearn.py From sia-cog with MIT License
def getModels():
    result = []
    result.append("LinearRegression")
    result.append("BayesianRidge")
    result.append("ARDRegression")
    result.append("ElasticNet")
    result.append("HuberRegressor")
    result.append("Lasso")
    result.append("LassoLars")
    result.append("Rigid")  # note: likely a typo for "Ridge" in the original source
    result.append("SGDRegressor")
    result.append("SVR")
    result.append("MLPClassifier")
    result.append("KNeighborsClassifier")
    result.append("SVC")
    result.append("GaussianProcessClassifier")
    result.append("DecisionTreeClassifier")
    result.append("RandomForestClassifier")
    result.append("AdaBoostClassifier")
    result.append("GaussianNB")
    result.append("LogisticRegression")
    result.append("QuadraticDiscriminantAnalysis")
    return result
Example #8
Source File: test_io_types.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_support_vector_regressor(self):
    for dtype in self.number_data_type.keys():
        scikit_model = SVR(kernel="rbf")
        data = self.scikit_data["data"].astype(dtype)
        target = self.scikit_data["target"].astype(dtype)
        scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target)
        test_data = data[0].reshape(1, -1)
        coreml_model = create_model(spec)
        try:
            self.assertEqual(
                scikit_model.predict(test_data)[0],
                coreml_model.predict({"data": test_data})["target"],
                msg="{} != {} for Dtype: {}".format(
                    scikit_model.predict(test_data)[0],
                    coreml_model.predict({"data": test_data})["target"],
                    dtype,
                ),
            )
        except RuntimeError:
            print("{} not supported. ".format(dtype))
Example #9
Source File: svm.py From tslearn with BSD 2-Clause "Simplified" License
def fit(self, X, y, sample_weight=None):
    """Fit the SVM model according to the given training data.

    Parameters
    ----------
    X : array-like of shape=(n_ts, sz, d)
        Time series dataset.

    y : array-like of shape=(n_ts, )
        Time series labels.

    sample_weight : array-like of shape (n_samples,), default=None
        Per-sample weights. Rescale C per sample. Higher weights
        force the classifier to put more emphasis on these points.
    """
    sklearn_X, y = self._preprocess_sklearn(X, y, fit_time=True)

    self.svm_estimator_ = SVR(
        C=self.C, kernel=self.estimator_kernel_, degree=self.degree,
        gamma=self.gamma_, coef0=self.coef0, shrinking=self.shrinking,
        tol=self.tol, cache_size=self.cache_size,
        verbose=self.verbose, max_iter=self.max_iter
    )
    self.svm_estimator_.fit(sklearn_X, y, sample_weight=sample_weight)
    return self
Example #10
Source File: ewa.py From pycobra with MIT License
def load_default(self, machine_list=['lasso', 'tree', 'ridge', 'random_forest', 'svm']):
    """
    Loads five different scikit-learn regressors by default.

    Parameters
    ----------
    machine_list: optional, list of strings
        List of default machine names to be loaded.
    """
    for machine in machine_list:
        try:
            if machine == 'lasso':
                self.estimators_['lasso'] = linear_model.LassoCV(
                    random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'tree':
                self.estimators_['tree'] = DecisionTreeRegressor(
                    random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'ridge':
                self.estimators_['ridge'] = linear_model.RidgeCV().fit(self.X_k_, self.y_k_)
            if machine == 'random_forest':
                self.estimators_['random_forest'] = RandomForestRegressor(
                    random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'svm':
                self.estimators_['svm'] = SVR().fit(self.X_k_, self.y_k_)
        except ValueError:
            continue
Example #11
Source File: backSPIN.py From BackSPIN with BSD 2-Clause "Simplified" License
def feature_selection(data, thrs, verbose=False):
    if thrs >= data.shape[0]:
        if verbose:
            print("Trying to select %i features but only %i genes available." % (thrs, data.shape[0]))
            print("Skipping feature selection")
        return arange(data.shape[0])

    ix_genes = arange(data.shape[0])
    threeperK = int(ceil(3 * data.shape[1] / 1000.))
    zerotwoperK = int(floor(0.3 * data.shape[1] / 1000.))
    # keep genes with at least 1 molecule in 0.3% of the cells and
    # at least 2 molecules in 0.03% of the cells
    condition = (sum(data >= 1, 1) >= threeperK) & (sum(data >= 2, 1) >= zerotwoperK)
    ix_genes = ix_genes[condition]

    mu = data[ix_genes, :].mean(1)
    sigma = data[ix_genes, :].std(1, ddof=1)
    cv = sigma / mu

    try:
        score, mu_linspace, cv_fit, params = fit_CV(mu, cv, fit_method='SVR', verbose=verbose)
    except ImportError:
        print("WARNING: Feature selection was skipped because scipy is required. "
              "Install scipy to run feature selection.")
        return arange(data.shape[0])

    return ix_genes[argsort(score)[::-1]][:thrs]
Example #12
Source File: FSRegression.py From CausalDiscoveryToolbox with MIT License
def predict_features(self, df_features, df_target, idx=0, **kwargs):
    """For one variable, predict its neighbouring nodes.

    Args:
        df_features (pandas.DataFrame):
        df_target (pandas.Series):
        idx (int): (optional) for printing purposes
        kwargs (dict): additional options for algorithms

    Returns:
        list: scores of each feature relative to the target
    """
    estimator = SVR(kernel='linear')
    selector = RFECV(estimator, step=1)
    selector = selector.fit(df_features.values, np.ravel(df_target.values))

    return selector.grid_scores_
Example #13
Source File: test_standardization.py From causallib with Apache License 2.0
def ensure_many_models(self):
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.svm import SVR, LinearSVR

    import warnings
    from sklearn.exceptions import ConvergenceWarning
    warnings.filterwarnings('ignore', category=ConvergenceWarning)

    for learner in [GradientBoostingRegressor, RandomForestRegressor, MLPRegressor,
                    ElasticNet, RANSACRegressor, HuberRegressor,
                    PassiveAggressiveRegressor, KNeighborsRegressor, SVR, LinearSVR]:
        learner = learner()
        learner_name = str(learner).split("(", maxsplit=1)[0]
        with self.subTest("Test fit using {learner}".format(learner=learner_name)):
            model = self.estimator.__class__(learner)
            model.fit(self.data_lin["X"], self.data_lin["a"], self.data_lin["y"])
            self.assertTrue(True)  # Fit did not crash
Example #14
Source File: models.py From ntua-slp-semeval2018 with MIT License
def nbow_model(task, embeddings, word2idx):
    if task == "clf":
        algo = LogisticRegression(C=0.6, random_state=0, class_weight='balanced')
    elif task == "reg":
        algo = SVR(kernel='linear', C=0.6)
    else:
        raise ValueError("invalid task!")

    embeddings_features = NBOWVectorizer(aggregation=["mean"],
                                         embeddings=embeddings,
                                         word2idx=word2idx,
                                         stopwords=False)

    model = Pipeline([
        ('embeddings-feats', embeddings_features),
        ('normalizer', Normalizer(norm='l2')),
        ('clf', algo)
    ])

    return model
Example #15
Source File: ILearner.py From aca with MIT License
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0, tol=0.001,
             C=1.0, epsilon=0.1, shrinking=True, cache_size=200, verbose=False,
             max_iter=-1):
    self.kernel = kernel
    self.C = C
    self.gamma = gamma
    self.coef0 = coef0
    self.tol = tol
    self.epsilon = epsilon
    self.shrinking = shrinking
    self.cache_size = cache_size
    self.verbose = verbose
    self.max_iter = max_iter
    # note: the degree argument is accepted but never stored or forwarded to SVR
    self.model = SVR(kernel=self.kernel, C=self.C, gamma=self.gamma,
                     coef0=self.coef0, tol=self.tol, epsilon=self.epsilon,
                     shrinking=self.shrinking, cache_size=self.cache_size,
                     verbose=self.verbose, max_iter=self.max_iter)
Example #16
Source File: test_sklearn_svm_converters.py From sklearn-onnx with MIT License
def test_convert_svr_linear(self):
    model, X = self._fit_binary_classification(SVR(kernel="linear"))
    model_onnx = convert_sklearn(
        model, "SVR",
        [("input", FloatTensorType([None, X.shape[1]]))])
    nodes = model_onnx.graph.node
    self.assertIsNotNone(nodes)
    self._check_attributes(
        nodes[0],
        {
            "coefficients": None,
            "kernel_params": None,
            "kernel_type": "LINEAR",
            "post_transform": None,
            "rho": None,
            "support_vectors": None,
        },
    )
    dump_data_and_model(X, model, model_onnx,
                        basename="SklearnRegSVRLinear-Dec3")
Example #17
Source File: test_sklearn_svm_converters.py From sklearn-onnx with MIT License
def test_convert_nusvr(self):
    model, X = self._fit_binary_classification(NuSVR())
    model_onnx = convert_sklearn(
        model, "SVR",
        [("input", FloatTensorType([None, X.shape[1]]))])
    node = model_onnx.graph.node[0]
    self.assertIsNotNone(node)
    self._check_attributes(
        node,
        {
            "coefficients": None,
            "kernel_params": None,
            "kernel_type": "RBF",
            "post_transform": None,
            "rho": None,
            "support_vectors": None,
        },
    )
    dump_data_and_model(X, model, model_onnx, basename="SklearnRegNuSVR")
Example #18
Source File: test_sklearn_svm_converters.py From sklearn-onnx with MIT License
def test_convert_svr_int(self):
    model, X = fit_regression_model(SVR(), is_int=True)
    model_onnx = convert_sklearn(
        model, "SVR",
        [("input", Int64TensorType([None, X.shape[1]]))],
    )
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X, model, model_onnx,
        basename="SklearnSVRInt-Dec4",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')"
    )
Example #19
Source File: test_sklearn_svm_converters.py From sklearn-onnx with MIT License
def test_convert_svr_bool(self):
    model, X = fit_regression_model(SVR(), is_bool=True)
    model_onnx = convert_sklearn(
        model, "SVR",
        [("input", BooleanTensorType([None, X.shape[1]]))],
    )
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X, model, model_onnx,
        basename="SklearnSVRBool-Dec4",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')"
    )
Example #20
Source File: test_sklearn_feature_selection_converters.py From sklearn-onnx with MIT License
def test_rfecv_int(self):
    model = RFECV(estimator=SVR(kernel="linear"), cv=3)
    X = np.array(
        [[1, 2, 3, 1], [0, 3, 1, 4], [3, 5, 6, 1], [1, 2, 1, 5]],
        dtype=np.int64,
    )
    y = np.array([0, 1, 0, 1])
    model.fit(X, y)
    model_onnx = convert_sklearn(
        model, "rfecv",
        [("input", Int64TensorType([None, X.shape[1]]))])
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        X, model, model_onnx,
        basename="SklearnRFECV",
        methods=["transform"],
        allow_failure="StrictVersion(onnx.__version__)"
                      " < StrictVersion('1.2') or "
                      "StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
    )
Example #21
Source File: hockey_front_to_back.py From kaggle-code with MIT License
def fit(self, X, y):
    """Load the data in, initiate the models."""
    self.X = X
    self.y = y
    self.opt_XGBoost_reg = xgb.XGBRegressor(**self.opt_xgb_params)
    self.opt_forest_reg = RandomForestRegressor(**self.opt_rf_params)
    self.opt_svm_reg = SVR(**self.opt_svm_params)

    # fit the models
    self.opt_XGBoost_reg.fit(self.X, self.y)
    self.opt_forest_reg.fit(self.X, self.y)
    self.opt_svm_reg.fit(self.X, self.y)
Example #22
Source File: vanilla_model.py From OpenChem with MIT License
def __init__(self, model_type='classifier', n_ensemble=5):
    super(SVMQSAR, self).__init__()
    self.n_ensemble = n_ensemble
    self.model = []
    self.model_type = model_type
    if self.model_type == 'classifier':
        for i in range(n_ensemble):
            self.model.append(SVC())
    elif self.model_type == 'regressor':
        for i in range(n_ensemble):
            self.model.append(SVR())
    else:
        raise ValueError('invalid value for argument')
Example #23
Source File: svm_regr.py From LearningX with MIT License
def __init__(self):
    from sklearn.svm import SVR
    self.model = SVR()
Example #24
Source File: run_benchmarks.py From deepchem with MIT License
def experiment(dataset_file, method='GraphConv', split='scaffold'):
    featurizer = 'ECFP'
    if method == 'GraphConv':
        featurizer = 'GraphConv'
    tasks, datasets, transformers = load_dataset(
        dataset_file, featurizer=featurizer, split=split)
    train, val, test = datasets

    model = None
    if method == 'GraphConv':
        model = GraphConvModel(len(tasks), batch_size=BATCH_SIZE, mode="regression")
    elif method == 'RF':
        def model_builder_rf(model_dir):
            sklearn_model = RandomForestRegressor(n_estimators=100)
            return dc.models.SklearnModel(sklearn_model, model_dir)
        model = dc.models.SingletaskToMultitask(tasks, model_builder_rf)
    elif method == 'SVR':
        def model_builder_svr(model_dir):
            sklearn_model = svm.SVR(kernel='linear')
            return dc.models.SklearnModel(sklearn_model, model_dir)
        model = dc.models.SingletaskToMultitask(tasks, model_builder_svr)

    return model, train, val, test, transformers

#======================================================================
# Run Benchmarks {GC-DNN, SVR, RF}
Example #25
Source File: svr_predict.py From stock-price-prediction with MIT License
def training(self, c, g):
    self.model = SVR(kernel='rbf', C=c, gamma=g)
    # fit the model to the training data points
    self.model.fit(self.train_date, self.train_price)
Example #26
Source File: svr_predict.py From stock-price-prediction with MIT License
def draw(self):
    plt.scatter(self.dates, self.prices, color='black', label='Data')
    plt.plot(self.dates, self.model.predict(self.dates), color='red', label='RBF model')
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('SVR test for SPY trimmed data (2014, 2015)')
    # plt.legend()
    plt.show()
Example #27
Source File: Sklearn_SVM_Regression.py From Machine-Learning-for-Beginner-by-Python3 with MIT License
def sk_svm_train(intr, labeltr, inte, kener):
    clf = svm.SVR(kernel=kener)
    # start training
    clf.fit(intr, labeltr)
    # training output
    tr = clf.predict(intr)
    # prediction output
    pr = clf.predict(inte)
    return tr, pr

# result output function
Example #28
Source File: MachineLearning.py From ann-writer with MIT License
def __init__(self):
    # self.clf = svm.SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
    #                    degree=3, gamma=0.0, kernel='rbf', max_iter=-1,
    #                    probability=False, random_state=None, shrinking=True,
    #                    tol=0.001, verbose=False)

    # self.clf = linear_model.SGDClassifier()  # 26% accuracy

    self.clf = KNeighborsClassifier()  # Bad accuracy (1%) but makes sense

    # self.clf = svm.SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1,
    #                    gamma=0.0, kernel='rbf', max_iter=-1, shrinking=True,
    #                    tol=0.001, verbose=False)
Example #29
Source File: scale_ens.py From mlens with MIT License
def build_ensemble(kls, **kwargs):
    """Generate ensemble of class kls."""
    ens = kls(**kwargs)
    ens.add([SVR(), RandomForestRegressor(),
             GradientBoostingRegressor(), Lasso(copy_X=False),
             MLPRegressor(shuffle=False, alpha=0.001)])
    ens.add_meta(Lasso(copy_X=False))
    return ens
Example #30
Source File: scale_cpu.py From mlens with MIT License
def build_ensemble(kls, **kwargs):
    """Generate ensemble of class kls."""
    ens = kls(**kwargs)
    ens.add([SVR() for _ in range(4)])
    ens.add_meta(SVR())
    return ens