Python sklearn.linear_model.Perceptron() Examples
The following are 30 code examples of sklearn.linear_model.Perceptron().
You may also want to check out all available functions and classes of the module sklearn.linear_model.
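Before the project-specific examples, here is a minimal, self-contained sketch of the basic fit/predict/score cycle; the dataset choice and parameter values are illustrative and not taken from any of the projects below:

    # Minimal usage sketch: train a Perceptron on iris and evaluate it.
    from sklearn.datasets import load_iris
    from sklearn.linear_model import Perceptron
    from sklearn.model_selection import train_test_split

    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    clf = Perceptron(max_iter=1000, tol=1e-3, random_state=0)
    clf.fit(X_train, y_train)
    print(clf.score(X_test, y_test))  # mean accuracy on the held-out split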

Example #1
Source File: perceptron.py From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def fit(self, X, y, classes=None, sample_weight=None):
    """ Calls the Perceptron fit function from sklearn.

    Parameters
    ----------
    X: numpy.ndarray of shape (n_samples, n_features)
        The feature matrix.

    y: Array-like
        The class labels for all samples in X.

    classes: Not used.

    sample_weight:
        Samples weight. If not provided, uniform weights are assumed.

    Returns
    -------
    PerceptronMask
        self
    """
    self.classifier.fit(X=X, y=y, sample_weight=sample_weight)
    return self
Example #2
Source File: test_bagging.py From Mastering-Elasticsearch-7.0 with MIT License
def test_classification():
    # Check classification for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyClassifier(),
                           Perceptron(tol=1e-3),
                           DecisionTreeClassifier(),
                           KNeighborsClassifier(),
                           SVC(gamma="scale")]:
        for params in grid:
            BaggingClassifier(base_estimator=base_estimator,
                              random_state=rng,
                              **params).fit(X_train, y_train).predict(X_test)
Example #3
Source File: test_kernel_pca.py From Mastering-Elasticsearch-7.0 with MIT License
def test_gridsearch_pipeline_precomputed():
    # Test if we can do a grid-search to find parameters to separate
    # circles with a perceptron model using a precomputed kernel.
    X, y = make_circles(n_samples=400, factor=.3, noise=.05, random_state=0)
    kpca = KernelPCA(kernel="precomputed", n_components=2)
    pipeline = Pipeline([("kernel_pca", kpca),
                         ("Perceptron", Perceptron(max_iter=5))])
    param_grid = dict(Perceptron__max_iter=np.arange(1, 5))
    grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
    X_kernel = rbf_kernel(X, gamma=2.)
    grid_search.fit(X_kernel, y)
    assert_equal(grid_search.best_score_, 1)

# 0.23. warning about tol not having its correct default value.
Example #4
Source File: test_sklearn_perceptron_converter.py From sklearn-onnx with MIT License
def test_model_perceptron_binary_class(self):
    model, X = fit_classification_model(
        Perceptron(random_state=42), 2)
    model_onnx = convert_sklearn(
        model,
        "scikit-learn Perceptron binary classifier",
        [("input", FloatTensorType([None, X.shape[1]]))],
        target_opset=TARGET_OPSET
    )
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X.astype(np.float32),
        model,
        model_onnx,
        basename="SklearnPerceptronClassifierBinary-Out0",
        allow_failure="StrictVersion(onnx.__version__)"
                      " < StrictVersion('1.2') or "
                      "StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
    )
Example #5
Source File: test_sklearn_perceptron_converter.py From sklearn-onnx with MIT License
def test_model_perceptron_multi_class(self):
    model, X = fit_classification_model(
        Perceptron(random_state=42), 5)
    model_onnx = convert_sklearn(
        model,
        "scikit-learn Perceptron multi-class classifier",
        [("input", FloatTensorType([None, X.shape[1]]))],
        target_opset=TARGET_OPSET
    )
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X.astype(np.float32),
        model,
        model_onnx,
        basename="SklearnPerceptronClassifierMulti-Out0",
        allow_failure="StrictVersion(onnx.__version__)"
                      " < StrictVersion('1.2') or "
                      "StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
    )
Example #6
Source File: test_sklearn_perceptron_converter.py From sklearn-onnx with MIT License
def test_model_perceptron_binary_class_int(self):
    model, X = fit_classification_model(
        Perceptron(random_state=42), 2, is_int=True)
    model_onnx = convert_sklearn(
        model,
        "scikit-learn Perceptron binary classifier",
        [("input", Int64TensorType([None, X.shape[1]]))],
        target_opset=TARGET_OPSET
    )
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X.astype(np.int64),
        model,
        model_onnx,
        basename="SklearnPerceptronClassifierBinaryInt-Out0",
        allow_failure="StrictVersion(onnx.__version__)"
                      " < StrictVersion('1.2') or "
                      "StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
    )
Example #7
Source File: test_sklearn_perceptron_converter.py From sklearn-onnx with MIT License
def test_model_perceptron_multi_class_int(self):
    model, X = fit_classification_model(
        Perceptron(random_state=42), 5, is_int=True)
    model_onnx = convert_sklearn(
        model,
        "scikit-learn Perceptron multi-class classifier",
        [("input", Int64TensorType([None, X.shape[1]]))],
        target_opset=TARGET_OPSET
    )
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X.astype(np.int64),
        model,
        model_onnx,
        basename="SklearnPerceptronClassifierMultiInt-Out0",
        allow_failure="StrictVersion(onnx.__version__)"
                      " < StrictVersion('1.2') or "
                      "StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
    )
Example #8
Source File: test_knop.py From DESlib with BSD 3-Clause "New" or "Revised" License
def test_fit(example_estimate_competence, create_pool_classifiers):
    X, y = example_estimate_competence[0:2]
    knop_test = KNOP(create_pool_classifiers)
    knop_test.fit(X, y)
    expected_scores = np.array([[0.5, 0.5], [1.0, 0.0], [0.33, 0.67]])
    expected_scores = np.tile(expected_scores, (15, 1, 1))
    assert np.array_equal(expected_scores, knop_test.dsel_scores_)

    # Assert the roc_algorithm_ is fitted to the scores (decision space)
    # rather than the features (feature space)
    expected_roc_data = knop_test.dsel_scores_[:, :, 0]
    assert np.array_equal(knop_test.op_knn_._fit_X, expected_roc_data)

# Test that the class raises an error when the base classifiers do not
# implement the predict_proba method. It should raise an exception when the
# base classifier cannot estimate posterior probabilities (predict_proba).
# The Perceptron classifier is used here because it does not implement
# predict_proba.
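As the trailing comment notes, Perceptron does not implement predict_proba. One common workaround, which Example #30 below also uses, is to wrap the estimator in CalibratedClassifierCV; a minimal sketch with illustrative parameter values:

    # Sketch: obtain probability estimates from a Perceptron via calibration.
    from sklearn.calibration import CalibratedClassifierCV
    from sklearn.datasets import make_classification
    from sklearn.linear_model import Perceptron

    X, y = make_classification(n_samples=200, random_state=0)

    # Perceptron alone has no predict_proba; the calibration wrapper adds one.
    model = CalibratedClassifierCV(Perceptron(max_iter=100, tol=1e-3))
    model.fit(X, y)
    print(model.predict_proba(X[:3]))  # each row sums to 1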
Example #9
Source File: test_bagging.py From twitter-stock-recommendation with MIT License
def test_classification():
    # Check classification for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyClassifier(),
                           Perceptron(tol=1e-3),
                           DecisionTreeClassifier(),
                           KNeighborsClassifier(),
                           SVC()]:
        for params in grid:
            BaggingClassifier(base_estimator=base_estimator,
                              random_state=rng,
                              **params).fit(X_train, y_train).predict(X_test)
Example #10
Source File: estimator.py From EDeN with MIT License
def set_params(self, r=3, d=8, nbits=16, discrete=True,
               balance=False, subsample_size=200, ratio=2,
               normalization=False, inner_normalization=False,
               penalty='elasticnet'):
    """setter."""
    self.r = r
    self.d = d
    self.nbits = nbits
    self.normalization = normalization
    self.inner_normalization = inner_normalization
    self.discrete = discrete
    self.balance = balance
    self.subsample_size = subsample_size
    self.ratio = ratio
    if penalty == 'perceptron':
        self.model = Perceptron(max_iter=5, tol=None)
    else:
        self.model = SGDClassifier(
            average=True, class_weight='balanced', shuffle=True,
            penalty=penalty, max_iter=5, tol=None)
    self.vectorizer = Vectorizer(
        r=self.r, d=self.d,
        normalization=self.normalization,
        inner_normalization=self.inner_normalization,
        discrete=self.discrete,
        nbits=self.nbits)
    return self
Example #11
Source File: perceptron.py From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def partial_fit(self, X, y, classes=None, sample_weight=None):
    """ partial_fit

    Calls the Perceptron partial_fit from sklearn.

    Parameters
    ----------
    X: numpy.ndarray of shape (n_samples, n_features)
        The feature matrix.

    y: Array-like
        The class labels for all samples in X.

    classes: Not used.

    sample_weight:
        Samples weight. If not provided, uniform weights are assumed.

    Returns
    -------
    PerceptronMask
        self
    """
    self.classifier.partial_fit(X=X, y=y,
                                classes=classes,
                                sample_weight=sample_weight)
    return self
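For context, a hedged sketch of how this wrapper might be driven in a streaming setting; it assumes the class is importable as skmultiflow.neural_networks.PerceptronMask (the path may differ between scikit-multiflow releases), and the synthetic batches are purely illustrative:

    import numpy as np
    # Assumed import path; check your scikit-multiflow version.
    from skmultiflow.neural_networks import PerceptronMask

    rng = np.random.RandomState(0)
    clf = PerceptronMask()

    # Feed the model in small batches, as a data stream would.
    for _ in range(10):
        X_batch = rng.rand(20, 4)
        y_batch = rng.randint(2, size=20)
        clf.partial_fit(X_batch, y_batch, classes=[0, 1])

    print(clf.predict(rng.rand(5, 4)))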
Example #12
Source File: test_base.py From Mastering-Elasticsearch-7.0 with MIT License
def test_base():
    # Check BaseEnsemble methods.
    ensemble = BaggingClassifier(
        base_estimator=Perceptron(tol=1e-3, random_state=None),
        n_estimators=3)

    iris = load_iris()
    ensemble.fit(iris.data, iris.target)
    ensemble.estimators_ = []  # empty the list and create estimators manually

    ensemble._make_estimator()
    random_state = np.random.RandomState(3)
    ensemble._make_estimator(random_state=random_state)
    ensemble._make_estimator(random_state=random_state)
    ensemble._make_estimator(append=False)

    assert_equal(3, len(ensemble))
    assert_equal(3, len(ensemble.estimators_))

    assert isinstance(ensemble[0], Perceptron)
    assert_equal(ensemble[0].random_state, None)
    assert isinstance(ensemble[1].random_state, int)
    assert isinstance(ensemble[2].random_state, int)
    assert_not_equal(ensemble[1].random_state, ensemble[2].random_state)

    np_int_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
                                        n_estimators=np.int32(3))
    np_int_ensemble.fit(iris.data, iris.target)
Example #13
Source File: test_base.py From Mastering-Elasticsearch-7.0 with MIT License
def test_base_zero_n_estimators():
    # Check that instantiating a BaseEnsemble with n_estimators<=0 raises
    # a ValueError.
    ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
                                 n_estimators=0)
    iris = load_iris()
    assert_raise_message(ValueError,
                         "n_estimators must be greater than zero, got 0.",
                         ensemble.fit, iris.data, iris.target)
Example #14
Source File: test_base.py From Mastering-Elasticsearch-7.0 with MIT License
def test_base_not_int_n_estimators():
    # Check that instantiating a BaseEnsemble with a string as n_estimators
    # raises a ValueError demanding n_estimators to be supplied as an integer.
    string_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
                                        n_estimators='3')
    iris = load_iris()
    assert_raise_message(ValueError, "n_estimators must be an integer",
                         string_ensemble.fit, iris.data, iris.target)
    float_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
                                       n_estimators=3.0)
    assert_raise_message(ValueError, "n_estimators must be an integer",
                         float_ensemble.fit, iris.data, iris.target)
Example #15
Source File: test_perceptron.py From Mastering-Elasticsearch-7.0 with MIT License
def test_perceptron_accuracy():
    for data in (X, X_csr):
        clf = Perceptron(max_iter=100, tol=None, shuffle=False)
        clf.fit(data, y)
        score = clf.score(data, y)
        assert_greater(score, 0.7)

# 0.23. warning about tol not having its correct default value.
Example #16
Source File: test_perceptron.py From Mastering-Elasticsearch-7.0 with MIT License
def test_perceptron_correctness():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    clf1 = MyPerceptron(n_iter=2)
    clf1.fit(X, y_bin)

    clf2 = Perceptron(max_iter=2, shuffle=False, tol=None)
    clf2.fit(X, y_bin)

    assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
Example #17
Source File: test_perceptron.py From Mastering-Elasticsearch-7.0 with MIT License
def test_undefined_methods():
    clf = Perceptron(max_iter=100)
    for meth in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
Example #18
Source File: test_kernel_pca.py From Mastering-Elasticsearch-7.0 with MIT License
def test_gridsearch_pipeline():
    # Test if we can do a grid-search to find parameters to separate
    # circles with a perceptron model.
    X, y = make_circles(n_samples=400, factor=.3, noise=.05, random_state=0)
    kpca = KernelPCA(kernel="rbf", n_components=2)
    pipeline = Pipeline([("kernel_pca", kpca),
                         ("Perceptron", Perceptron(max_iter=5))])
    param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
    grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
    grid_search.fit(X, y)
    assert_equal(grid_search.best_score_, 1)
Example #19
Source File: test_linear_model.py From pandas-ml with BSD 3-Clause "New" or "Revised" License
def test_objectmapper(self):
    df = pdml.ModelFrame([])
    self.assertIs(df.linear_model.ARDRegression, lm.ARDRegression)
    self.assertIs(df.linear_model.BayesianRidge, lm.BayesianRidge)
    self.assertIs(df.linear_model.ElasticNet, lm.ElasticNet)
    self.assertIs(df.linear_model.ElasticNetCV, lm.ElasticNetCV)
    self.assertIs(df.linear_model.HuberRegressor, lm.HuberRegressor)
    self.assertIs(df.linear_model.Lars, lm.Lars)
    self.assertIs(df.linear_model.LarsCV, lm.LarsCV)
    self.assertIs(df.linear_model.Lasso, lm.Lasso)
    self.assertIs(df.linear_model.LassoCV, lm.LassoCV)
    self.assertIs(df.linear_model.LassoLars, lm.LassoLars)
    self.assertIs(df.linear_model.LassoLarsCV, lm.LassoLarsCV)
    self.assertIs(df.linear_model.LassoLarsIC, lm.LassoLarsIC)
    self.assertIs(df.linear_model.LinearRegression, lm.LinearRegression)
    self.assertIs(df.linear_model.LogisticRegression, lm.LogisticRegression)
    self.assertIs(df.linear_model.LogisticRegressionCV, lm.LogisticRegressionCV)
    self.assertIs(df.linear_model.MultiTaskLasso, lm.MultiTaskLasso)
    self.assertIs(df.linear_model.MultiTaskElasticNet, lm.MultiTaskElasticNet)
    self.assertIs(df.linear_model.MultiTaskLassoCV, lm.MultiTaskLassoCV)
    self.assertIs(df.linear_model.MultiTaskElasticNetCV, lm.MultiTaskElasticNetCV)
    self.assertIs(df.linear_model.OrthogonalMatchingPursuit, lm.OrthogonalMatchingPursuit)
    self.assertIs(df.linear_model.OrthogonalMatchingPursuitCV, lm.OrthogonalMatchingPursuitCV)
    self.assertIs(df.linear_model.PassiveAggressiveClassifier, lm.PassiveAggressiveClassifier)
    self.assertIs(df.linear_model.PassiveAggressiveRegressor, lm.PassiveAggressiveRegressor)
    self.assertIs(df.linear_model.Perceptron, lm.Perceptron)
    self.assertIs(df.linear_model.RandomizedLasso, lm.RandomizedLasso)
    self.assertIs(df.linear_model.RandomizedLogisticRegression, lm.RandomizedLogisticRegression)
    self.assertIs(df.linear_model.RANSACRegressor, lm.RANSACRegressor)
    self.assertIs(df.linear_model.Ridge, lm.Ridge)
    self.assertIs(df.linear_model.RidgeClassifier, lm.RidgeClassifier)
    self.assertIs(df.linear_model.RidgeClassifierCV, lm.RidgeClassifierCV)
    self.assertIs(df.linear_model.RidgeCV, lm.RidgeCV)
    self.assertIs(df.linear_model.SGDClassifier, lm.SGDClassifier)
    self.assertIs(df.linear_model.SGDRegressor, lm.SGDRegressor)
    self.assertIs(df.linear_model.TheilSenRegressor, lm.TheilSenRegressor)
Example #20
Source File: test_linear_model.py From pandas-ml with BSD 3-Clause "New" or "Revised" License
def test_Perceptron(self):
    iris = datasets.load_iris()
    df = pdml.ModelFrame(iris)

    clf1 = lm.Perceptron(alpha=0.001, n_iter=100).fit(iris.data, iris.target)
    clf2 = df.lm.Perceptron(alpha=0.001, n_iter=100)
    df.fit(clf2)

    expected = clf1.predict(iris.data)
    predicted = df.predict(clf2)
    self.assertIsInstance(predicted, pdml.ModelSeries)
    self.assert_numpy_array_almost_equal(predicted.values, expected)
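Note that this example targets an older scikit-learn release: the n_iter constructor parameter was deprecated in 0.19 and later removed in favour of max_iter and tol, so on current versions the equivalent call would be roughly lm.Perceptron(alpha=0.001, max_iter=100, tol=None).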
Example #21
Source File: ClassificationPLA.py From AirTicketPredicting with MIT License
def __init__(self, isTrain, isOutlierRemoval=0):
    super(ClassificationPLA, self).__init__(isTrain, isOutlierRemoval)

    # data preprocessing
    self.dataPreprocessing()

    # PLA object
    self.clf = Perceptron()
Example #22
Source File: test_a_priori.py From DESlib with BSD 3-Clause "New" or "Revised" License
def test_fit(create_pool_classifiers, create_X_y):
    X, y = create_X_y

    a_priori_test = APriori(create_pool_classifiers)
    a_priori_test.fit(X, y)
    expected = np.array([[0.5, 0.5], [1.0, 0.0], [0.33, 0.67]])
    expected = np.tile(expected, (15, 1, 1))
    assert np.array_equal(a_priori_test.dsel_scores_, expected)

# Test that the class raises an error when the base classifiers do not
# implement the predict_proba method. It should raise an exception when the
# base classifier cannot estimate posterior probabilities (predict_proba).
# The Perceptron classifier is used here because it does not implement
# predict_proba.
Example #23
Source File: test_a_priori.py From DESlib with BSD 3-Clause "New" or "Revised" License
def test_not_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    with pytest.raises(ValueError):
        APriori([clf1, clf1]).fit(X, y)
Example #24
Source File: test_mcb.py From DESlib with BSD 3-Clause "New" or "Revised" License
def test_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    MCB([clf1, clf1]).fit(X, y)
Example #25
Source File: test_lca.py From DESlib with BSD 3-Clause "New" or "Revised" License
def test_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    LCA([clf1, clf1]).fit(X, y)
Example #26
Source File: test_ola.py From DESlib with BSD 3-Clause "New" or "Revised" License
def test_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    OLA([clf1, clf1]).fit(X, y)
Example #27
Source File: test_a_posteriori.py From DESlib with BSD 3-Clause "New" or "Revised" License
def test_not_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    with pytest.raises(ValueError):
        APosteriori([clf1, clf1]).fit(X, y)
Example #28
Source File: test_rank.py From DESlib with BSD 3-Clause "New" or "Revised" License
def test_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    Rank([clf1, clf1]).fit(X, y)
Example #29
Source File: test_mla.py From DESlib with BSD 3-Clause "New" or "Revised" License
def test_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    MLA([clf1, clf1]).fit(X, y)
Example #30
Source File: test_integration_DFP_IH.py From DESlib with BSD 3-Clause "New" or "Revised" License
def setup_classifiers():
    rng = np.random.RandomState(654321)

    # Generate a classification dataset
    X, y = make_classification(n_classes=2, n_samples=1000,
                               weights=[0.2, 0.8], random_state=rng)

    # Split the data into training and test data
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33,
                                                        random_state=rng)

    # Scale the variables to have 0 mean and unit variance
    scalar = StandardScaler()
    X_train = scalar.fit_transform(X_train)
    X_test = scalar.transform(X_test)

    # Split the data into training and DSEL for DS techniques
    X_train, X_dsel, y_train, y_dsel = train_test_split(X_train, y_train,
                                                        test_size=0.5,
                                                        random_state=rng)

    # Build a pool of 100 bagged base classifiers; Perceptron is wrapped in
    # CalibratedClassifierCV so the pool can provide predict_proba.
    model = CalibratedClassifierCV(Perceptron(max_iter=5))
    pool_classifiers = BaggingClassifier(model, n_estimators=100,
                                         random_state=rng)
    pool_classifiers.fit(X_train, y_train)
    return pool_classifiers, X_dsel, y_dsel, X_test, y_test
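As a follow-up, a hedged sketch of how such a pool is typically consumed by a dynamic-selection technique; it assumes DESlib's documented import path deslib.des.knora_e.KNORAE, and the choice of technique is illustrative:

    # Sketch: plug the pool into one DESlib dynamic-selection technique.
    from deslib.des.knora_e import KNORAE  # assumed import path

    pool_classifiers, X_dsel, y_dsel, X_test, y_test = setup_classifiers()

    knorae = KNORAE(pool_classifiers)  # dynamic ensemble selection
    knorae.fit(X_dsel, y_dsel)         # fit on the DSEL split
    print(knorae.score(X_test, y_test))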