Python sklearn.linear_model.Perceptron() Examples

The following are 30 code examples of sklearn.linear_model.Perceptron(), collected from open-source projects. Each example notes its source file, project, and license. You may also want to check out all available functions and classes of the module sklearn.linear_model.
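For orientation, here is a minimal, self-contained usage sketch (a toy iris example, not drawn from any of the projects below):

from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split

# Split the iris dataset into train/test folds
X_train, X_test, y_train, y_test = train_test_split(
    *load_iris(return_X_y=True), random_state=0)

# Fit a linear perceptron and report held-out accuracy
clf = Perceptron(tol=1e-3, random_state=0)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))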
Example #1
Source File: perceptron.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def fit(self, X, y, classes=None, sample_weight=None):
        """ Calls the Perceptron fit function from sklearn.

        Parameters
        ----------
        X: numpy.ndarray of shape (n_samples, n_features)
            The feature matrix.

        y: array-like
            The class labels for all samples in X.

        classes: Not used.

        sample_weight: array-like, optional
            Sample weights. If not provided, uniform weights are assumed.

        Returns
        -------
        PerceptronMask
            self

        """
        self.classifier.fit(X=X, y=y, sample_weight=sample_weight)
        return self 
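For context, a short usage sketch of this wrapper follows; it assumes scikit-multiflow exposes PerceptronMask under skmultiflow.neural_networks (verify against your installed version):

import numpy as np
from skmultiflow.neural_networks import PerceptronMask

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
y = (X[:, 0] > 0).astype(int)  # toy binary labels

mask = PerceptronMask()
mask.fit(X, y)                 # delegates to sklearn's Perceptron.fit, as above
print(mask.predict(X[:5]))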
Example #2
Source File: test_sklearn_perceptron_converter.py    From sklearn-onnx with MIT License
def test_model_perceptron_multi_class_int(self):
        model, X = fit_classification_model(
            Perceptron(random_state=42), 5, is_int=True)
        model_onnx = convert_sklearn(
            model,
            "scikit-learn Perceptron multi-class classifier",
            [("input", Int64TensorType([None, X.shape[1]]))],
            target_opset=TARGET_OPSET
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(np.int64),
            model,
            model_onnx,
            basename="SklearnPerceptronClassifierMultiInt-Out0",
            allow_failure="StrictVersion(onnx.__version__)"
                          " < StrictVersion('1.2') or "
                          "StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Example #3
Source File: test_sklearn_perceptron_converter.py    From sklearn-onnx with MIT License
def test_model_perceptron_binary_class_int(self):
        model, X = fit_classification_model(
            Perceptron(random_state=42), 2, is_int=True)
        model_onnx = convert_sklearn(
            model,
            "scikit-learn Perceptron binary classifier",
            [("input", Int64TensorType([None, X.shape[1]]))],
            target_opset=TARGET_OPSET
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(np.int64),
            model,
            model_onnx,
            basename="SklearnPerceptronClassifierBinaryInt-Out0",
            allow_failure="StrictVersion(onnx.__version__)"
                          " < StrictVersion('1.2') or "
                          "StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Example #4
Source File: test_sklearn_perceptron_converter.py    From sklearn-onnx with MIT License
def test_model_perceptron_multi_class(self):
        model, X = fit_classification_model(
            Perceptron(random_state=42), 5)
        model_onnx = convert_sklearn(
            model,
            "scikit-learn Perceptron multi-class classifier",
            [("input", FloatTensorType([None, X.shape[1]]))],
            target_opset=TARGET_OPSET
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(np.float32),
            model,
            model_onnx,
            basename="SklearnPerceptronClassifierMulti-Out0",
            allow_failure="StrictVersion(onnx.__version__)"
                          " < StrictVersion('1.2') or "
                          "StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Example #5
Source File: test_bagging.py    From twitter-stock-recommendation with MIT License
def test_classification():
    # Check classification for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyClassifier(),
                           Perceptron(tol=1e-3),
                           DecisionTreeClassifier(),
                           KNeighborsClassifier(),
                           SVC()]:
        for params in grid:
            BaggingClassifier(base_estimator=base_estimator,
                              random_state=rng,
                              **params).fit(X_train, y_train).predict(X_test) 
Example #6
Source File: test_bagging.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_classification():
    # Check classification for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyClassifier(),
                           Perceptron(tol=1e-3),
                           DecisionTreeClassifier(),
                           KNeighborsClassifier(),
                           SVC(gamma="scale")]:
        for params in grid:
            BaggingClassifier(base_estimator=base_estimator,
                              random_state=rng,
                              **params).fit(X_train, y_train).predict(X_test) 
Example #7
Source File: test_sklearn_perceptron_converter.py    From sklearn-onnx with MIT License
def test_model_perceptron_binary_class(self):
        model, X = fit_classification_model(
            Perceptron(random_state=42), 2)
        model_onnx = convert_sklearn(
            model,
            "scikit-learn Perceptron binary classifier",
            [("input", FloatTensorType([None, X.shape[1]]))],
            target_opset=TARGET_OPSET
        )
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X.astype(np.float32),
            model,
            model_onnx,
            basename="SklearnPerceptronClassifierBinary-Out0",
            allow_failure="StrictVersion(onnx.__version__)"
                          " < StrictVersion('1.2') or "
                          "StrictVersion(onnxruntime.__version__)"
                          " <= StrictVersion('0.2.1')",
        ) 
Example #8
Source File: test_kernel_pca.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_gridsearch_pipeline_precomputed():
    # Test that a grid search can find parameters that separate
    # circles with a perceptron model using a precomputed kernel.
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)
    kpca = KernelPCA(kernel="precomputed", n_components=2)
    pipeline = Pipeline([("kernel_pca", kpca),
                         ("Perceptron", Perceptron(max_iter=5))])
    param_grid = dict(Perceptron__max_iter=np.arange(1, 5))
    grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
    X_kernel = rbf_kernel(X, gamma=2.)
    grid_search.fit(X_kernel, y)
    assert_equal(grid_search.best_score_, 1)
Example #9
Source File: test_knop.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_fit(example_estimate_competence, create_pool_classifiers):
    X, y = example_estimate_competence[0:2]

    knop_test = KNOP(create_pool_classifiers)
    knop_test.fit(X, y)
    expected_scores = np.array([[0.5, 0.5], [1.0, 0.0], [0.33, 0.67]])
    expected_scores = np.tile(expected_scores, (15, 1, 1))

    assert np.array_equal(expected_scores, knop_test.dsel_scores_)

    # Assert that the roc_algorithm_ is fitted on the scores (decision space)
    # rather than on the features (feature space)
    expected_roc_data = knop_test.dsel_scores_[:, :, 0]
    assert np.array_equal(knop_test.op_knn_._fit_X, expected_roc_data)


# Test that the class raises an error when the base classifiers do not
# implement the predict_proba method. An exception should be raised when the
# base classifier cannot estimate posterior probabilities (predict_proba).
# The Perceptron classifier is used since it does not implement predict_proba.
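The sketch below illustrates the behavior this comment describes, together with the usual workaround of wrapping the Perceptron in CalibratedClassifierCV (the same pattern as Example #23 below); it uses toy data, not DESlib's fixtures:

import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import Perceptron

rng = np.random.RandomState(0)
X = rng.randn(60, 4)
y = (X[:, 0] > 0).astype(int)

clf = Perceptron().fit(X, y)
print(hasattr(clf, "predict_proba"))   # False: Perceptron is non-probabilistic

# Calibration adds a predict_proba on top of the decision function
calibrated = CalibratedClassifierCV(Perceptron()).fit(X, y)
print(calibrated.predict_proba(X[:3]))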
Example #10
Source File: test_meta_des.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_not_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    with pytest.raises(ValueError):
        meta = METADES([clf1, clf1])
        meta.fit(X, y) 
Example #11
Source File: test_des_clustering.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_predict_proba(example_estimate_competence):
    X, y = example_estimate_competence[0:2]

    clf1 = Perceptron()
    clf1.fit(X, y)
    DESClustering([clf1, clf1]).fit(X, y) 
Example #12
Source File: test_knorau.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    KNORAU([clf1, clf1]).fit(X, y) 
Example #13
Source File: test_knop.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_not_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    with pytest.raises(ValueError):
        knop = KNOP([clf1, clf1])
        knop.fit(X, y) 
Example #14
Source File: test_stacked.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_check_estimator():
    check_estimator(StackedClassifier)


# Test that the class raises an error when the base classifiers do not
# implement the predict_proba method. An exception should be raised when the
# base classifier cannot estimate posterior probabilities (predict_proba).
# The Perceptron classifier is used since it does not implement predict_proba.
Example #15
Source File: test_knorae.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    KNORAE([clf1, clf1]).fit(X, y) 
Example #16
Source File: test_probabilistic.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_not_predict_proba(create_X_y):
    X, y = create_X_y

    clf1 = Perceptron()
    clf1.fit(X, y)
    with pytest.raises(ValueError):
        BaseProbabilistic([clf1, clf1]).fit(X, y)


# With the competences being all ones, all base classifiers are deemed competent
Example #17
Source File: test_probabilistic.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_check_estimator_MinimumDifference():
    check_estimator(MinimumDifference)


# Test that the class raises an error when the base classifiers do not
# implement the predict_proba method. An exception should be raised when the
# base classifier cannot estimate posterior probabilities (predict_proba).
# The Perceptron classifier is used since it does not implement predict_proba.
Example #18
Source File: test_desp.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_predict_proba(create_X_y):
    X, y = create_X_y
    clf1 = Perceptron()
    clf1.fit(X, y)
    DESP([clf1, clf1]).fit(X, y) 
Example #19
Source File: test_stacked.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_not_predict_proba_meta(create_X_y, create_pool_classifiers):
    X, y = create_X_y

    pool = create_pool_classifiers
    with pytest.raises(ValueError):
        meta_clf = StackedClassifier(pool_classifiers=pool,
                                     meta_classifier=Perceptron())
        meta_clf.fit(X, y)
        meta_clf.predict_proba(X) 
Example #20
Source File: test_a_posteriori.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_not_predict_proba(create_X_y):
    X, y = create_X_y
    clf1 = Perceptron()
    clf1.fit(X, y)
    with pytest.raises(ValueError):
        APosteriori([clf1, clf1]).fit(X, y) 
Example #21
Source File: test_des_knn.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_predict_proba():
    X = np.random.randn(15, 5)
    y = np.array([0, 1, 0, 0, 0] * 3)
    clf1 = Perceptron()
    clf1.fit(X, y)
    DESKNN([clf1, clf1, clf1]).fit(X, y) 
Example #22
Source File: test_des_mi.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def test_require_proba():
    X = np.random.randn(5, 5)
    y = np.array([0, 1, 0, 0, 0])
    clf1 = Perceptron()
    clf1.fit(X, y)
    DESMI([clf1, clf1, clf1]) 
Example #23
Source File: test_des_integration.py    From DESlib with BSD 3-Clause "New" or "Revised" License
def setup_classifiers(encode_labels=None):
    rng = np.random.RandomState(123456)

    X_dsel, X_test, X_train, y_dsel, y_test, y_train = load_dataset(
        encode_labels, rng)
    model = CalibratedClassifierCV(Perceptron(max_iter=5))
    # Train a pool of 10 classifiers
    pool_classifiers = BaggingClassifier(model, n_estimators=10,
                                         random_state=rng)
    pool_classifiers.fit(X_train, y_train)
    return pool_classifiers, X_dsel, y_dsel, X_test, y_test 
Example #24
Source File: test_base.py    From twitter-stock-recommendation with MIT License
def test_base():
    # Check BaseEnsemble methods.
    ensemble = BaggingClassifier(
        base_estimator=Perceptron(tol=1e-3, random_state=None), n_estimators=3)

    iris = load_iris()
    ensemble.fit(iris.data, iris.target)
    ensemble.estimators_ = []  # empty the list and create estimators manually

    ensemble._make_estimator()
    random_state = np.random.RandomState(3)
    ensemble._make_estimator(random_state=random_state)
    ensemble._make_estimator(random_state=random_state)
    ensemble._make_estimator(append=False)

    assert_equal(3, len(ensemble))
    assert_equal(3, len(ensemble.estimators_))

    assert_true(isinstance(ensemble[0], Perceptron))
    assert_equal(ensemble[0].random_state, None)
    assert_true(isinstance(ensemble[1].random_state, int))
    assert_true(isinstance(ensemble[2].random_state, int))
    assert_not_equal(ensemble[1].random_state, ensemble[2].random_state)

    np_int_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
                                        n_estimators=np.int32(3))
    np_int_ensemble.fit(iris.data, iris.target) 
Example #25
Source File: test_base.py    From twitter-stock-recommendation with MIT License
def test_base_zero_n_estimators():
    # Check that instantiating a BaseEnsemble with n_estimators<=0 raises
    # a ValueError.
    ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
                                 n_estimators=0)
    iris = load_iris()
    assert_raise_message(ValueError,
                         "n_estimators must be greater than zero, got 0.",
                         ensemble.fit, iris.data, iris.target) 
Example #26
Source File: test_base.py    From twitter-stock-recommendation with MIT License
def test_base_not_int_n_estimators():
    # Check that instantiating a BaseEnsemble with a string as n_estimators
    # raises a ValueError demanding n_estimators to be supplied as an integer.
    string_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
                                        n_estimators='3')
    iris = load_iris()
    assert_raise_message(ValueError,
                         "n_estimators must be an integer",
                         string_ensemble.fit, iris.data, iris.target)
    float_ensemble = BaggingClassifier(base_estimator=Perceptron(tol=1e-3),
                                       n_estimators=3.0)
    assert_raise_message(ValueError,
                         "n_estimators must be an integer",
                         float_ensemble.fit, iris.data, iris.target) 
Example #27
Source File: test_perceptron.py    From twitter-stock-recommendation with MIT License
def test_perceptron_accuracy():
    for data in (X, X_csr):
        clf = Perceptron(max_iter=100, tol=None, shuffle=False)
        clf.fit(data, y)
        score = clf.score(data, y)
        assert_greater(score, 0.7) 
Example #28
Source File: test_perceptron.py    From twitter-stock-recommendation with MIT License
def test_perceptron_correctness():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    clf1 = MyPerceptron(n_iter=2)
    clf1.fit(X, y_bin)

    clf2 = Perceptron(max_iter=2, shuffle=False, tol=None)
    clf2.fit(X, y_bin)

    assert_array_almost_equal(clf1.w, clf2.coef_.ravel()) 
Example #29
Source File: test_perceptron.py    From twitter-stock-recommendation with MIT License
def test_undefined_methods():
    clf = Perceptron(max_iter=100)
    for meth in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, lambda x: getattr(clf, x), meth) 
Example #30
Source File: test_kernel_pca.py    From twitter-stock-recommendation with MIT License
def test_gridsearch_pipeline():
    # Test that a grid search can find parameters that separate
    # circles with a perceptron model.
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)
    kpca = KernelPCA(kernel="rbf", n_components=2)
    pipeline = Pipeline([("kernel_pca", kpca),
                         ("Perceptron", Perceptron(max_iter=5))])
    param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
    grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
    grid_search.fit(X, y)
    assert_equal(grid_search.best_score_, 1)