Python sklearn.svm.SVR Examples

The following are 30 code examples of sklearn.svm.SVR(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the sklearn.svm module.
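For orientation before the project excerpts, here is a minimal, self-contained sketch of fitting and querying an SVR (synthetic data; all names are illustrative):

from sklearn.svm import SVR
from sklearn.datasets import make_regression

X, y = make_regression(n_samples=200, n_features=5, noise=0.1, random_state=0)
model = SVR(kernel='rbf', C=1.0, epsilon=0.1)  # epsilon sets the width of the no-penalty tube
model.fit(X, y)
print(model.predict(X[:5]))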
Example #1
Source File: test_bagging.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_regression():
    # Check regression for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [0.5, 1.0],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyRegressor(),
                           DecisionTreeRegressor(),
                           KNeighborsRegressor(),
                           SVR(gamma='scale')]:
        for params in grid:
            BaggingRegressor(base_estimator=base_estimator,
                             random_state=rng,
                             **params).fit(X_train, y_train).predict(X_test) 
Example #2
Source File: testScoreWithAdapaSklearn.py    From nyoka with Apache License 2.0
def test_21_svr(self):
        print("\ntest 21 (SVR without preprocessing)\n")
        X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()

        model = SVR()
        pipeline_obj = Pipeline([
            ("model", model)
        ])
        pipeline_obj.fit(X,y)
        file_name = 'test21sklearn.pmml'
        
        skl_to_pmml(pipeline_obj, features, target, file_name)
        model_name  = self.adapa_utility.upload_to_zserver(file_name)
        predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
        model_pred = pipeline_obj.predict(X_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True) 
Example #3
Source File: test_rfe.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_rfe_min_step():
    n_features = 10
    X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
    n_samples, n_features = X.shape
    estimator = SVR(kernel="linear")

    # Test when floor(step * n_features) <= 0
    selector = RFE(estimator, step=0.01)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)

    # Test when step is between (0,1) and floor(step * n_features) > 0
    selector = RFE(estimator, step=0.20)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2)

    # Test when step is an integer
    selector = RFE(estimator, step=5)
    sel = selector.fit(X, y)
    assert_equal(sel.support_.sum(), n_features // 2) 
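The assertions above rely on RFE's default of keeping half the features. A hedged standalone sketch of inspecting what RFE selected (attribute names as in scikit-learn):

from sklearn.datasets import make_friedman1
from sklearn.feature_selection import RFE
from sklearn.svm import SVR

X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
rfe = RFE(SVR(kernel="linear"), step=0.2).fit(X, y)
print(rfe.support_)   # boolean mask over the 10 features; 5 are kept by default
print(rfe.ranking_)   # rank 1 = selected; larger ranks were eliminated earlier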
Example #4
Source File: friedman_scores.py    From mlens with MIT License
def build_ensemble(**kwargs):
    """Generate ensemble."""

    ens = SuperLearner(**kwargs)
    prep = {'Standard Scaling': [StandardScaler()],
            'Min Max Scaling': [MinMaxScaler()],
            'No Preprocessing': []}

    est = {'Standard Scaling':
               [ElasticNet(), Lasso(), KNeighborsRegressor()],
           'Min Max Scaling':
               [SVR()],
           'No Preprocessing':
               [RandomForestRegressor(random_state=SEED),
                GradientBoostingRegressor()]}

    ens.add(est, prep)

    ens.add(GradientBoostingRegressor(), meta=True)

    return ens 
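A hedged usage sketch for the ensemble above (SEED, the data splits, and the SuperLearner keyword names are assumed from the surrounding mlens script):

from sklearn.metrics import mean_squared_error

ens = build_ensemble(scorer=mean_squared_error, random_state=SEED, folds=2)
ens.fit(X_train, y_train)
y_pred = ens.predict(X_test)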
Example #5
Source File: test_sklearn_svm_converters.py    From sklearn-onnx with MIT License
def test_convert_svr_linear(self):
        model, X = self._fit_binary_classification(SVR(kernel="linear"))
        model_onnx = convert_sklearn(
            model, "SVR", [("input", FloatTensorType([None, X.shape[1]]))])
        nodes = model_onnx.graph.node
        self.assertIsNotNone(nodes)
        self._check_attributes(
            nodes[0],
            {
                "coefficients": None,
                "kernel_params": None,
                "kernel_type": "LINEAR",
                "post_transform": None,
                "rho": None,
                "support_vectors": None,
            },
        )
        dump_data_and_model(X,
                            model,
                            model_onnx,
                            basename="SklearnRegSVRLinear-Dec3") 
Example #6
Source File: test_sklearn_svm_converters.py    From sklearn-onnx with MIT License
def test_convert_svr_int(self):
        model, X = fit_regression_model(
            SVR(), is_int=True)
        model_onnx = convert_sklearn(
            model,
            "SVR",
            [("input", Int64TensorType([None, X.shape[1]]))],
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnSVRInt-Dec4",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')"
        ) 
Example #7
Source File: ILearner.py    From aca with MIT License
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
                 tol=0.001, C=1.0, epsilon=0.1, shrinking=True, cache_size=200,
                 verbose=False, max_iter=-1):
        self.kernel = kernel
        self.C = C
        self.gamma = gamma
        self.coef0 = coef0
        self.tol = tol
        self.epsilon = epsilon
        self.shrinking = shrinking
        self.cache_size = cache_size
        self.verbose = verbose
        self.max_iter = max_iter
        self.model = SVR(kernel=self.kernel, C=self.C, gamma=self.gamma,
                         coef0=self.coef0, tol=self.tol, epsilon=self.epsilon,
                         shrinking=self.shrinking, cache_size=self.cache_size,
                         verbose=self.verbose, max_iter=self.max_iter) 
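The excerpt shows only the constructor; a wrapper like this usually forwards the rest of the estimator API to the underlying SVR. A minimal sketch of that delegation, assuming the class follows the scikit-learn convention (these methods are not part of the excerpt):

def fit(self, X, y):
    # delegate training to the wrapped sklearn.svm.SVR
    self.model.fit(X, y)
    return self

def predict(self, X):
    return self.model.predict(X)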
Example #8
Source File: models.py    From ntua-slp-semeval2018 with MIT License
def nbow_model(task, embeddings, word2idx):
    if task == "clf":
        algo = LogisticRegression(C=0.6, random_state=0,
                                  class_weight='balanced')
    elif task == "reg":
        algo = SVR(kernel='linear', C=0.6)
    else:
        raise ValueError("invalid task!")

    embeddings_features = NBOWVectorizer(aggregation=["mean"],
                                         embeddings=embeddings,
                                         word2idx=word2idx,
                                         stopwords=False)

    model = Pipeline([
        ('embeddings-feats', embeddings_features),
        ('normalizer', Normalizer(norm='l2')),
        ('clf', algo)
    ])

    return model 
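A hedged usage sketch for the pipeline returned above (embeddings, word2idx, and the tokenized document lists are placeholders assumed from the surrounding project):

model = nbow_model("reg", embeddings, word2idx)
model.fit(train_docs, train_scores)    # train_docs: tokenized texts, as NBOWVectorizer expects
predictions = model.predict(test_docs)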
Example #9
Source File: test_sklearn_svm_converters.py    From sklearn-onnx with MIT License
def test_convert_nusvr(self):
        model, X = self._fit_binary_classification(NuSVR())
        model_onnx = convert_sklearn(
            model, "SVR", [("input", FloatTensorType([None, X.shape[1]]))])
        node = model_onnx.graph.node[0]
        self.assertIsNotNone(node)
        self._check_attributes(
            node,
            {
                "coefficients": None,
                "kernel_params": None,
                "kernel_type": "RBF",
                "post_transform": None,
                "rho": None,
                "support_vectors": None,
            },
        )
        dump_data_and_model(X, model, model_onnx,
                            basename="SklearnRegNuSVR") 
Example #10
Source File: test_sklearn_svm_converters.py    From sklearn-onnx with MIT License
def test_convert_svr_bool(self):
        model, X = fit_regression_model(
            SVR(), is_bool=True)
        model_onnx = convert_sklearn(
            model,
            "SVR",
            [("input", BooleanTensorType([None, X.shape[1]]))],
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnSVRBool-Dec4",
            allow_failure="StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')"
        ) 
Example #11
Source File: test_sklearn_feature_selection_converters.py    From sklearn-onnx with MIT License
def test_rfecv_int(self):
        model = RFECV(estimator=SVR(kernel="linear"), cv=3)
        X = np.array(
            [[1, 2, 3, 1], [0, 3, 1, 4], [3, 5, 6, 1], [1, 2, 1, 5]],
            dtype=np.int64,
        )
        y = np.array([0, 1, 0, 1])
        model.fit(X, y)
        model_onnx = convert_sklearn(
            model, "rfecv", [("input", Int64TensorType([None, X.shape[1]]))])
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            basename="SklearnRFECV",
            methods=["transform"],
            allow_failure="StrictVersion(onnx.__version__)"
                          " < StrictVersion('1.2') or "
                          "StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Example #12
Source File: regression_svm_alternative.py    From practicalDataAnalysisCookbook with GNU General Public License v2.0
def regression_svm(
    x_train, y_train, x_test, y_test, logC, logGamma):
    '''
        Estimate an SVM regressor
    '''
    # create the regressor object
    svm = sv.SVR(kernel='rbf', 
        C=0.1 * logC, gamma=0.1 * logGamma)

    # estimate the model
    svm.fit(x_train,y_train)

    # decision function (note: newer scikit-learn removed SVR.decision_function;
    # for a regressor, predict() returns the same values)
    decision_values = svm.decision_function(x_test)

    # return the object
    return mt.roc_auc(y_test, decision_values)

# find the optimal values of C and gamma 
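A hedged sketch of the search that comment refers to (the grid bounds are illustrative, not from the source):

best_auc, best_c, best_gamma = max(
    (regression_svm(x_train, y_train, x_test, y_test, c, g), c, g)
    for c in range(1, 21) for g in range(1, 21))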
Example #13
Source File: test_standardization.py    From causallib with Apache License 2.0
def ensure_many_models(self):
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        import warnings
        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        for learner in [GradientBoostingRegressor, RandomForestRegressor, MLPRegressor,
                        ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor,
                        KNeighborsRegressor, SVR, LinearSVR]:
            learner = learner()
            learner_name = str(learner).split("(", maxsplit=1)[0]
            with self.subTest("Test fit using {learner}".format(learner=learner_name)):
                model = self.estimator.__class__(learner)
                model.fit(self.data_lin["X"], self.data_lin["a"], self.data_lin["y"])
                self.assertTrue(True)  # Fit did not crash 
Example #14
Source File: FSRegression.py    From CausalDiscoveryToolbox with MIT License
def predict_features(self, df_features, df_target, idx=0, **kwargs):
        """For one variable, predict its neighbouring nodes.

        Args:
            df_features (pandas.DataFrame):
            df_target (pandas.Series):
            idx (int): (optional) for printing purposes
            kwargs (dict): additional options for algorithms

        Returns:
            list: scores of each feature relative to the target
        """
        estimator = SVR(kernel='linear')
        selector = RFECV(estimator, step=1)
        selector = selector.fit(df_features.values, np.ravel(df_target.values))

        return selector.grid_scores_ 
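Note: RFECV.grid_scores_ was deprecated in scikit-learn 1.0 and removed in 1.2. On recent versions, roughly the equivalent return would be:

return selector.cv_results_['mean_test_score']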
Example #15
Source File: backSPIN.py    From BackSPIN with BSD 2-Clause "Simplified" License
def feature_selection(data,thrs, verbose=False):
    if thrs>= data.shape[0]:
        if verbose:
            print ("Trying to select %i features but only %i genes available." %( thrs, data.shape[0]))
            print ("Skipping feature selection")
        return arange(data.shape[0])
    ix_genes = arange(data.shape[0])
    threeperK = int(ceil(3*data.shape[1]/1000.))
    zerotwoperK = int(floor(0.3*data.shape[1]/1000.))
    # at least 1 molecule in 0.3% of the cells and at least 2 molecules in 0.03% of the cells
    condition = (sum(data>=1, 1)>= threeperK) & (sum(data>=2, 1)>=zerotwoperK) 
    ix_genes = ix_genes[condition]
    
    mu = data[ix_genes,:].mean(1)
    sigma = data[ix_genes,:].std(1, ddof=1)
    cv = sigma/mu

    try:
        score, mu_linspace, cv_fit , params = fit_CV(mu,cv,fit_method='SVR', verbose=verbose)
    except ImportError:
        print ("WARNING: Feature selection was skipped becouse scipy is required. Install scipy to run feature selection.")
        return arange(data.shape[0])
 
    return ix_genes[argsort(score)[::-1]][:thrs] 
Example #16
Source File: ewa.py    From pycobra with MIT License
def load_default(self, machine_list=['lasso', 'tree', 'ridge', 'random_forest', 'svm']):
        """
        Loads 5 different scikit-learn regressors by default.

        Parameters
        ----------
        machine_list: optional, list of strings
            List of default machine names to be loaded.

        """
        for machine in machine_list:
            try:
                if machine == 'lasso':
                    self.estimators_['lasso'] = linear_model.LassoCV(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'tree':
                    self.estimators_['tree'] = DecisionTreeRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'ridge':
                    self.estimators_['ridge'] = linear_model.RidgeCV().fit(self.X_k_, self.y_k_)
                if machine == 'random_forest':
                    self.estimators_['random_forest'] = RandomForestRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
                if machine == 'svm':
                    self.estimators_['svm'] = SVR().fit(self.X_k_, self.y_k_)
            except ValueError:
                continue 
Example #17
Source File: test_svm.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_svr_predict():
    # Test SVR's decision_function
    # Sanity check: test that the predict implemented in Python
    # returns the same values as the one in libsvm

    X = iris.data
    y = iris.target

    # linear kernel
    reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)

    dec = np.dot(X, reg.coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())

    # rbf kernel
    reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)

    rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
    dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel()) 
Example #18
Source File: svm.py    From tslearn with BSD 2-Clause "Simplified" License
def fit(self, X, y, sample_weight=None):
        """Fit the SVM model according to the given training data.

        Parameters
        ----------
        X : array-like of shape=(n_ts, sz, d)
            Time series dataset.
            
        y : array-like of shape=(n_ts, )
            Time series labels.
            
        sample_weight : array-like of shape (n_samples,), default=None
            Per-sample weights. Rescale C per sample. Higher weights force the 
            classifier to put more emphasis on these points.
        """
        sklearn_X, y = self._preprocess_sklearn(X, y, fit_time=True)

        self.svm_estimator_ = SVR(
            C=self.C, kernel=self.estimator_kernel_, degree=self.degree,
            gamma=self.gamma_, coef0=self.coef0, shrinking=self.shrinking,
            tol=self.tol, cache_size=self.cache_size,
            verbose=self.verbose, max_iter=self.max_iter
        )
        self.svm_estimator_.fit(sklearn_X, y, sample_weight=sample_weight)
        return self 
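A hedged usage sketch for the time-series estimator that wraps this fit method (class and generator names as published by tslearn; the target here is illustrative):

from tslearn.svm import TimeSeriesSVR
from tslearn.generators import random_walks

X = random_walks(n_ts=50, sz=32, d=1)   # dataset of shape (n_ts, sz, d)
y = X[:, -1, 0]                          # illustrative regression target
reg = TimeSeriesSVR(kernel="gak").fit(X, y)
print(reg.predict(X[:3]))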
Example #19
Source File: scikitlearn.py    From sia-cog with MIT License
def getModels():
    result = []
    result.append("LinearRegression")
    result.append("BayesianRidge")
    result.append("ARDRegression")
    result.append("ElasticNet")
    result.append("HuberRegressor")
    result.append("Lasso")
    result.append("LassoLars")
    result.append("Rigid")
    result.append("SGDRegressor")
    result.append("SVR")
    result.append("MLPClassifier")
    result.append("KNeighborsClassifier")
    result.append("SVC")
    result.append("GaussianProcessClassifier")
    result.append("DecisionTreeClassifier")
    result.append("RandomForestClassifier")
    result.append("AdaBoostClassifier")
    result.append("GaussianNB")
    result.append("LogisticRegression")
    result.append("QuadraticDiscriminantAnalysis")
    return result 
Example #20
Source File: test_io_types.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_support_vector_regressor(self):
        for dtype in self.number_data_type.keys():
            scikit_model = SVR(kernel="rbf")
            data = self.scikit_data["data"].astype(dtype)
            target = self.scikit_data["target"].astype(dtype)
            scikit_model, spec = self._sklearn_setup(scikit_model, dtype, data, target)
            test_data = data[0].reshape(1, -1)
            coreml_model = create_model(spec)
            try:
                self.assertEqual(
                    scikit_model.predict(test_data)[0],
                    coreml_model.predict({"data": test_data})["target"],
                    msg="{} != {} for Dtype: {}".format(
                        scikit_model.predict(test_data)[0],
                        coreml_model.predict({"data": test_data})["target"],
                        dtype,
                    ),
                )
            except RuntimeError:
                print("{} not supported. ".format(dtype)) 
Example #21
Source File: bench_ml.py    From scikit-optimize with BSD 3-Clause "New" or "Revised" License
def load_data_target(name):
    """
    Loads data and target given the name of the dataset.
    """
    if name == "Boston":
        data = load_boston()
    elif name == "Housing":
        data = fetch_california_housing()
        dataset_size = 1000 # this is necessary so that SVR does not slow down too much
        data["data"] = data["data"][:dataset_size]
        data["target"] =data["target"][:dataset_size]
    elif name == "digits":
        data = load_digits()
    elif name == "Climate Model Crashes":
        try:
            data = fetch_mldata("climate-model-simulation-crashes")
        except HTTPError as e:
            url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00252/pop_failures.dat"
            data = urlopen(url).read().decode().split('\n')[1:]  # decode bytes under Python 3
            data = [[float(v) for v in d.split()] for d in data]
            samples = np.array(data)
            data = dict()
            data["data"] = samples[:, :-1]
            data["target"] = np.array(samples[:, -1], dtype=np.int)
    else:
        raise ValueError("dataset not supported.")
    return data["data"], data["target"] 
Example #22
Source File: baseline_ridi.py    From ronin with GNU General Public License v3.0
def grid_search(args):
    features, targets, _ = get_dataset_from_list(args.root_dir, args.train_list, args, mode='train')
    print('Number of training samples:', features.shape[0])

    # Data normalization
    mean, std = np.mean(features, axis=0), np.std(features, axis=0)
    features = (features - mean) / std

    if args.c < 0:
        c_opt = [0.1, 1.0, 10.0, 100.0]
    else:
        c_opt = [args.c]
    search_dict = {'C': c_opt,
                   'epsilon': [1e-04, 1e-03, 1e-02, 1e-01],
                   'gamma': ['auto']}
    start_t = time.time()

    best_params = {}
    for i in range(targets.shape[1]):
        print('Channel {}'.format(i))
        grid_searcher = GridSearchCV(
            svm.SVR(), search_dict, cv=3, scoring='neg_mean_squared_error', n_jobs=args.num_workers, verbose=2)
        grid_searcher.fit(features, targets[:, i])
        best_params['chn_{}'.format(i)] = {'param': grid_searcher.best_params_, 'score': grid_searcher.best_score_}
    end_t = time.time()
    print('Time usage: {:.3f}'.format(end_t - start_t))
    print(best_params)

    if args.out_path is not None:
        best_params = {'best_params': best_params}
        with open(args.out_path, 'w') as f:
            json.dump(best_params, f) 
Example #23
Source File: test_sklearn_svm_converters.py    From sklearn-onnx with MIT License
def test_convert_nusvr_default(self):
        model, X = self._fit_binary_classification(NuSVR())
        model_onnx = convert_sklearn(
            model, "SVR", [("input", FloatTensorType([None, X.shape[1]]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(X, model, model_onnx, basename="SklearnRegNuSVR2") 
Example #24
Source File: test_utils.py    From causallib with Apache License 2.0
def test_check_regression_learner_is_fitted(self):
        from sklearn.linear_model import LinearRegression
        from sklearn.tree import ExtraTreeRegressor
        from sklearn.ensemble import GradientBoostingRegressor
        from sklearn.svm import SVR
        from sklearn.datasets import make_regression
        X, y = make_regression()
        for regr in [LinearRegression(), ExtraTreeRegressor(),
                     GradientBoostingRegressor(), SVR()]:
            self.ensure_learner_is_fitted(regr, X, y) 
Example #25
Source File: evaluation.py    From cddd with MIT License
def qsar_regression(emb, groups, labels):
    """Helper function that fits and scores a SVM regressor on the extracted molecular
    descriptor in a leave-one-group-out cross-validation manner.

    Args:
        emb: Embedding (molecular descriptor) that is used as input for the SVM
        groups: Array or list with n_samples entries defining the fold membership for the
        crossvalidtion.
        labels: Target values of the of the qsar task.
    Returns:
        The mean accuracy, F1-score, ROC-AUC and prescion-recall-AUC of the cross-validation.
    """
    r2 = []
    r = []
    mse = []
    mae = []
    logo = LeaveOneGroupOut()
    clf = SVR(kernel='rbf', C=5.0)
    for train_index, test_index in logo.split(emb, groups=groups):
        clf.fit(emb[train_index], labels[train_index])
        y_pred = clf.predict(emb[test_index])
        y_true = labels[test_index]
        r2.append(r2_score(y_true, y_pred))
        r.append(spearmanr(y_true, y_pred)[0])
        mse.append(mean_squared_error(y_true, y_pred))
        mae.append(mean_absolute_error(y_true, y_pred))
    return np.mean(r2), np.mean(r), np.mean(mse), np.mean(mae) 
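A hedged usage sketch (emb, labels, and the random fold assignment are placeholders):

import numpy as np

groups = np.random.randint(0, 5, size=len(labels))  # five cross-validation groups
r2, r, mse, mae = qsar_regression(emb, groups, labels)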
Example #26
Source File: unit_tests.py    From pynisher with MIT License
def svm_example(n_samples = 10000, n_features = 100):
	from sklearn.svm import SVR
	from sklearn.datasets import make_regression

	X,Y = make_regression(n_samples, n_features)
	m = SVR()

	m.fit(X,Y) 
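This helper comes from pynisher's unit tests, where it is wrapped with resource limits. A hedged sketch of that wrapping (pynisher 0.x API, as used by the project at the time):

import pynisher

# abort the fit if it exceeds 60 s of wall time or 2 GB of memory
limited_fit = pynisher.enforce_limits(wall_time_in_s=60, mem_in_mb=2048)(svm_example)
limited_fit(n_samples=5000, n_features=50)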
Example #27
Source File: svr.py    From Load-Forecasting with MIT License
def svrPredictions(xTrain,yTrain,xTest,k):
    clf = svm.SVR(C=2.0,kernel=k)
    clf.fit(xTrain,yTrain)
    return clf.predict(xTest)

# A scale invariant kernel (note only conditionally semi-definite) 
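The comment above introduces a custom kernel defined next in the original file. SVR also accepts a Python callable as the kernel; a hedged sketch of that mechanism (the kernel here is a plain linear one, not the scale-invariant kernel from the source):

import numpy as np
from sklearn import svm

def my_kernel(A, B):
    # must return the Gram matrix of shape (len(A), len(B))
    return np.dot(A, B.T)

clf = svm.SVR(C=2.0, kernel=my_kernel)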
Example #28
Source File: vanilla_model.py    From OpenChem with MIT License 5 votes vote down vote up
def __init__(self, model_type='classifier', n_ensemble=5):
        super(SVMQSAR, self).__init__()
        self.n_ensemble = n_ensemble
        self.model = []
        self.model_type = model_type
        if self.model_type == 'classifier':
            for i in range(n_ensemble):
                self.model.append(SVC())
        elif self.model_type == 'regressor':
            for i in range(n_ensemble):
                self.model.append(SVR())
        else:
            raise ValueError('invalid value for argument') 
Example #29
Source File: sklearn.py    From datastories-semeval2017-task4 with MIT License 5 votes vote down vote up
def nbow_model(task, embeddings, word2idx):
    if task == "clf":
        algo = LogisticRegression(C=0.6, random_state=0,
                                  class_weight='balanced')
    elif task == "reg":
        algo = SVR(kernel='linear', C=0.6)
    else:
        raise ValueError("invalid task!")

    embeddings_features = NBOWVectorizer(aggregation=["mean"],
                                         embeddings=embeddings,
                                         word2idx=word2idx,
                                         stopwords=False)

    preprocessor = TextPreProcessor(
        backoff=['url', 'email', 'percent', 'money', 'phone', 'user', 'time',
                 'url',
                 'date', 'number'],
        include_tags={"hashtag", "allcaps", "elongated", "repeated",
                      'emphasis',
                      'censored'},
        fix_html=True,
        segmenter="twitter",
        corrector="twitter",
        unpack_hashtags=True,
        unpack_contractions=True,
        spell_correct_elong=False,
        tokenizer=SocialTokenizer(lowercase=True).tokenize,
        dicts=[emoticons])

    model = Pipeline([
        ('preprocess', CustomPreProcessor(preprocessor, to_list=True)),
        ('embeddings-feats', embeddings_features),
        ('normalizer', Normalizer(norm='l2')),
        ('clf', algo)
    ])

    return model 
Example #30
Source File: test_SVR.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_conversion_bad_inputs(self):
        # Error on converting an untrained model
        with self.assertRaises(TypeError):
            model = SVR()
            spec = sklearn_converter.convert(model, "data", "out")

        # Check the expected class during conversion.
        with self.assertRaises(TypeError):
            model = OneHotEncoder()
            spec = sklearn_converter.convert(model, "data", "out")
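For contrast, a hedged sketch of the non-error path, converting a fitted SVR (converter entry point as exercised by the test above):

from sklearn.svm import SVR
from sklearn.datasets import make_regression

X, y = make_regression(n_samples=100, n_features=3, random_state=0)
model = SVR().fit(X, y)
spec = sklearn_converter.convert(model, "data", "out")   # succeeds once the model is trained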