Python sklearn.ensemble.VotingClassifier() Examples

The following are 30 code examples of sklearn.ensemble.VotingClassifier(), drawn from open source projects; the originating project and source file are noted above each example. You may also want to check out all available functions/classes of the module sklearn.ensemble.
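Before the project examples, here is a minimal, self-contained sketch of typical VotingClassifier usage; the dataset and estimator choices are illustrative only, not taken from any project below:

from sklearn.datasets import load_iris
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)

# 'hard' voting takes a majority vote over predicted labels;
# 'soft' voting averages the estimators' predicted class probabilities.
eclf = VotingClassifier(
    estimators=[('lr', LogisticRegression(max_iter=1000)),
                ('dt', DecisionTreeClassifier(random_state=0)),
                ('gnb', GaussianNB())],
    voting='soft')
eclf.fit(X, y)
print(eclf.predict(X[:5]))        # ensemble predictions
print(eclf.predict_proba(X[:5]))  # averaged probabilities (soft voting only)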
Example #1
Source File: test_voting.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_notfitted():
    eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
                                        ('lr2', LogisticRegression())],
                            voting='soft')
    ereg = VotingRegressor([('dr', DummyRegressor())])
    msg = ("This %s instance is not fitted yet. Call \'fit\'"
           " with appropriate arguments before using this method.")
    assert_raise_message(NotFittedError, msg % 'VotingClassifier',
                         eclf.predict, X)
    assert_raise_message(NotFittedError, msg % 'VotingClassifier',
                         eclf.predict_proba, X)
    assert_raise_message(NotFittedError, msg % 'VotingClassifier',
                         eclf.transform, X)
    assert_raise_message(NotFittedError, msg % 'VotingRegressor',
                         ereg.predict, X_r)
    assert_raise_message(NotFittedError, msg % 'VotingRegressor',
                         ereg.transform, X_r) 
Example #2
Source File: test_sklearn_voting_classifier_converter.py    From sklearn-onnx with MIT License
def test_voting_hard_multi(self):
    # predict_proba is not defined when voting is hard.
    model = VotingClassifier(
        voting="hard",
        flatten_transform=False,
        estimators=[
            ("lr", LogisticRegression()),
            ("lr2", DecisionTreeClassifier()),
        ],
    )
    dump_multiple_classification(
        model,
        suffix="Hard",
        comparable_outputs=[0],
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.5.0')",
        target_opset=TARGET_OPSET
    )
Example #3
Source File: classifiers.py    From soweego with GNU General Public License v3.0
def __init__(self, num_features, **kwargs):
    super(VotingClassifier, self).__init__()

    kwargs = {**constants.VOTING_CLASSIFIER_PARAMS, **kwargs}

    voting = kwargs.pop('voting')

    self.num_features = num_features

    estimators = []
    for clf in constants.CLASSIFIERS_FOR_ENSEMBLE:
        model = utils.init_model(clf, num_features=num_features, **kwargs)

        estimators.append((clf, model.kernel))

    # use the VotingClassifier from sklearn as the kernel
    self.kernel = SKVotingClassifier(
        estimators=estimators, voting=voting, n_jobs=None
    )
Example #4
Source File: __init__.py    From marconibot with GNU General Public License v3.0
def __init__(self, api, lobes=False):
    """
    lobes = a dict of classifiers to use in the VotingClassifier
        defaults to RandomForestClassifier and DecisionTreeClassifier
    """
    self.api = api
    if not lobes:
        lobes = {'rf': RandomForestClassifier(n_estimators=7,
                                              random_state=666),
                 'dt': DecisionTreeClassifier()
                 }
    self.lobe = VotingClassifier(
        estimators=[(lobe, lobes[lobe]) for lobe in lobes],
        voting='hard',
        n_jobs=-1)
    self._trained = False
    self.split = splitTrainTestData
    self.prep = prepDataframe
Example #5
Source File: test_sklearn_voting_classifier_converter.py    From sklearn-onnx with MIT License
def test_voting_soft_multi_weighted42(self):
    model = VotingClassifier(
        voting="soft",
        flatten_transform=False,
        weights=numpy.array([27, 0.3, 0.5, 0.5]),
        estimators=[
            ("lr", LogisticRegression()),
            ("lra", LogisticRegression()),
            ("lrb", LogisticRegression()),
            ("lr2", LogisticRegression()),
        ],
    )
    dump_multiple_classification(
        model,
        suffix="Weighted42Soft",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
        target_opset=TARGET_OPSET
    )
Example #6
Source File: test_sklearn_voting_classifier_converter.py    From sklearn-onnx with MIT License
def test_voting_soft_multi_weighted4(self):
    model = VotingClassifier(
        voting="soft",
        flatten_transform=False,
        weights=numpy.array([2.7, 0.3, 0.5, 0.5]),
        estimators=[
            ("lr", LogisticRegression()),
            ("lra", LogisticRegression()),
            ("lrb", LogisticRegression()),
            ("lr2", LogisticRegression()),
        ],
    )
    dump_multiple_classification(
        model,
        suffix="Weighted4Soft",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
        target_opset=TARGET_OPSET
    )
Example #7
Source File: test_sklearn_voting_classifier_converter.py    From sklearn-onnx with MIT License
def test_voting_soft_multi_weighted(self):
    model = VotingClassifier(
        voting="soft",
        flatten_transform=False,
        weights=numpy.array([1.8, 0.2]),
        estimators=[
            ("lr", LogisticRegression()),
            ("lr2", LogisticRegression()),
        ],
    )
    dump_multiple_classification(
        model,
        suffix="WeightedSoft",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
        target_opset=TARGET_OPSET
    )
Example #8
Source File: test_sklearn_voting_classifier_converter.py    From sklearn-onnx with MIT License
def test_voting_soft_multi_string(self):
    model = VotingClassifier(
        voting="soft",
        flatten_transform=False,
        estimators=[
            ("lr", LogisticRegression()),
            ("lr2", LogisticRegression()),
        ],
    )
    dump_multiple_classification(
        model, label_string=True,
        suffix="Soft",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
        target_opset=TARGET_OPSET
    )
Example #9
Source File: test_sklearn_voting_classifier_converter.py    From sklearn-onnx with MIT License
def test_voting_soft_multi(self):
    model = VotingClassifier(
        voting="soft",
        flatten_transform=False,
        estimators=[
            ("lr", LogisticRegression()),
            ("lr2", LogisticRegression()),
        ],
    )
    dump_multiple_classification(
        model,
        suffix="Soft",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
        target_opset=TARGET_OPSET
    )
Example #10
Source File: test_voting.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_parallel_fit():
    """Check parallel backend of VotingClassifier on toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])

    eclf1 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        n_jobs=1).fit(X, y)
    eclf2 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        n_jobs=2).fit(X, y)

    assert_array_equal(eclf1.predict(X), eclf2.predict(X))
    assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) 
Example #11
Source File: test_sklearn_voting_classifier_converter.py    From sklearn-onnx with MIT License
def test_voting_soft_binary_weighted(self):
    model = VotingClassifier(
        voting="soft",
        flatten_transform=False,
        weights=numpy.array([1.8, 0.2]),
        estimators=[
            ("lr", LogisticRegression()),
            ("lr2", LogisticRegression(fit_intercept=False)),
        ],
    )
    dump_binary_classification(
        model,
        suffix="WeightedSoft",
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
        target_opset=TARGET_OPSET
    )
Example #12
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_parallel_fit():
    """Check parallel backend of VotingClassifier on toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])

    eclf1 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        n_jobs=1).fit(X, y)
    eclf2 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        n_jobs=2).fit(X, y)

    assert_array_equal(eclf1.predict(X), eclf2.predict(X))
    assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) 
Example #13
Source File: test_sklearn_voting_classifier_converter.py    From sklearn-onnx with MIT License
def test_voting_soft_binary(self):
    model = VotingClassifier(
        voting="soft",
        flatten_transform=False,
        estimators=[
            ("lr", LogisticRegression()),
            ("lr2", LogisticRegression(fit_intercept=False)),
        ],
    )
    dump_binary_classification(
        model,
        suffix="Soft",
        comparable_outputs=[0, 1],
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.2.1')",
        target_opset=TARGET_OPSET
    )
Example #14
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_set_params():
    """set_params should be able to set estimators"""
    clf1 = LogisticRegression(random_state=123, C=1.0)
    clf2 = RandomForestClassifier(random_state=123, max_depth=None)
    clf3 = GaussianNB()
    eclf1 = VotingClassifier([('lr', clf1), ('rf', clf2)], voting='soft',
                             weights=[1, 2])
    eclf1.fit(X, y)
    eclf2 = VotingClassifier([('lr', clf1), ('nb', clf3)], voting='soft',
                             weights=[1, 2])
    eclf2.set_params(nb=clf2).fit(X, y)
    assert_false(hasattr(eclf2, 'nb'))

    assert_array_equal(eclf1.predict(X), eclf2.predict(X))
    assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
    assert_equal(eclf2.estimators[0][1].get_params(), clf1.get_params())
    assert_equal(eclf2.estimators[1][1].get_params(), clf2.get_params())

    eclf1.set_params(lr__C=10.0)
    eclf2.set_params(nb__max_depth=5)

    assert_true(eclf1.estimators[0][1].get_params()['C'] == 10.0)
    assert_true(eclf2.estimators[1][1].get_params()['max_depth'] == 5)
    assert_equal(eclf1.get_params()["lr__C"],
                 eclf1.get_params()["lr"].get_params()['C']) 
Example #15
Source File: test_sklearn_voting_classifier_converter.py    From sklearn-onnx with MIT License
def test_voting_hard_binary_weights(self):
    model = VotingClassifier(
        voting="hard",
        flatten_transform=False,
        weights=numpy.array([1000, 1]),
        estimators=[
            ("lr", LogisticRegression()),
            ("lr2", LogisticRegression(fit_intercept=False)),
        ],
    )
    # predict_proba is not defined when voting is hard.
    dump_binary_classification(
        model,
        suffix="WeightsHard",
        comparable_outputs=[0],
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.5.0')",
        target_opset=TARGET_OPSET
    )
Example #16
Source File: test_sklearn_voting_classifier_converter.py    From sklearn-onnx with MIT License
def test_voting_hard_binary(self):
    model = VotingClassifier(
        voting="hard",
        flatten_transform=False,
        estimators=[
            ("lr", LogisticRegression()),
            ("lr2", LogisticRegression(fit_intercept=False)),
        ],
    )
    # predict_proba is not defined when voting is hard.
    dump_binary_classification(
        model,
        suffix="Hard",
        comparable_outputs=[0],
        allow_failure="StrictVersion(onnxruntime.__version__)"
                      " <= StrictVersion('0.5.0')",
        target_opset=TARGET_OPSET
    )
Example #17
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_predictproba_hardvoting():
    eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
                                        ('lr2', LogisticRegression())],
                            voting='hard')
    msg = "predict_proba is not available when voting='hard'"
    assert_raise_message(AttributeError, msg, eclf.predict_proba, X) 
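The test above relies on the fact that predict_proba is only defined for soft voting. A minimal sketch of the difference, with illustrative data:

import numpy as np
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression

X = np.array([[-1.0], [-0.5], [0.5], [1.0]])
y = np.array([0, 0, 1, 1])
estimators = [('lr1', LogisticRegression()), ('lr2', LogisticRegression())]

# Soft voting averages class probabilities, so predict_proba is available.
soft = VotingClassifier(estimators=estimators, voting='soft').fit(X, y)
print(soft.predict_proba(X))

# Hard voting only counts predicted labels, so predict_proba raises.
hard = VotingClassifier(estimators=estimators, voting='hard').fit(X, y)
try:
    hard.predict_proba(X)
except AttributeError as exc:
    print(exc)  # predict_proba is not available when voting='hard'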
Example #18
Source File: test_ensemble.py    From pandas-ml with BSD 3-Clause "New" or "Revised" License
def test_objectmapper(self):
    df = pdml.ModelFrame([])
    self.assertIs(df.ensemble.AdaBoostClassifier,
                  ensemble.AdaBoostClassifier)
    self.assertIs(df.ensemble.AdaBoostRegressor,
                  ensemble.AdaBoostRegressor)
    self.assertIs(df.ensemble.BaggingClassifier,
                  ensemble.BaggingClassifier)
    self.assertIs(df.ensemble.BaggingRegressor,
                  ensemble.BaggingRegressor)
    self.assertIs(df.ensemble.ExtraTreesClassifier,
                  ensemble.ExtraTreesClassifier)
    self.assertIs(df.ensemble.ExtraTreesRegressor,
                  ensemble.ExtraTreesRegressor)

    self.assertIs(df.ensemble.GradientBoostingClassifier,
                  ensemble.GradientBoostingClassifier)
    self.assertIs(df.ensemble.GradientBoostingRegressor,
                  ensemble.GradientBoostingRegressor)

    self.assertIs(df.ensemble.IsolationForest,
                  ensemble.IsolationForest)

    self.assertIs(df.ensemble.RandomForestClassifier,
                  ensemble.RandomForestClassifier)
    self.assertIs(df.ensemble.RandomTreesEmbedding,
                  ensemble.RandomTreesEmbedding)
    self.assertIs(df.ensemble.RandomForestRegressor,
                  ensemble.RandomForestRegressor)

    self.assertIs(df.ensemble.VotingClassifier,
                  ensemble.VotingClassifier)
Example #19
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_estimator_init():
    eclf = VotingClassifier(estimators=[])
    msg = ('Invalid `estimators` attribute, `estimators` should be'
           ' a list of (string, estimator) tuples')
    assert_raise_message(AttributeError, msg, eclf.fit, X, y)

    clf = LogisticRegression(random_state=1)

    eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
    msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
    assert_raise_message(ValueError, msg, eclf.fit, X, y)

    eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
    msg = ('Number of classifiers and weights must be equal'
           '; got 2 weights, 1 estimators')
    assert_raise_message(ValueError, msg, eclf.fit, X, y)

    eclf = VotingClassifier(estimators=[('lr', clf), ('lr', clf)],
                            weights=[1, 2])
    msg = "Names provided are not unique: ['lr', 'lr']"
    assert_raise_message(ValueError, msg, eclf.fit, X, y)

    eclf = VotingClassifier(estimators=[('lr__', clf)])
    msg = "Estimator names must not contain __: got ['lr__']"
    assert_raise_message(ValueError, msg, eclf.fit, X, y)

    eclf = VotingClassifier(estimators=[('estimators', clf)])
    msg = "Estimator names conflict with constructor arguments: ['estimators']"
    assert_raise_message(ValueError, msg, eclf.fit, X, y) 
Example #20
Source File: voting_classifier.py    From lale with Apache License 2.0
def __init__(self, estimators=None, voting='hard', weights=None, n_jobs=None, flatten_transform=True):
    # Store the hyperparameters and delegate to scikit-learn's
    # VotingClassifier (imported here as SKLModel).
    self._hyperparams = {
        'estimators': estimators,
        'voting': voting,
        'weights': weights,
        'n_jobs': n_jobs,
        'flatten_transform': flatten_transform}
    self._wrapped_model = SKLModel(**self._hyperparams)
Example #21
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_notfitted():
    eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
                                        ('lr2', LogisticRegression())],
                            voting='soft')
    msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
           " with appropriate arguments before using this method.")
    assert_raise_message(NotFittedError, msg, eclf.predict_proba, X) 
Example #22
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_majority_label_iris():
    """Check classification by majority label on dataset iris."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    eclf = VotingClassifier(estimators=[
                ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                voting='hard')
    scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
    assert_almost_equal(scores.mean(), 0.95, decimal=2) 
Example #23
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_tie_situation():
    """Check voting classifier selects smaller class label in tie situation."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
                            voting='hard')
    assert_equal(clf1.fit(X, y).predict(X)[73], 2)
    assert_equal(clf2.fit(X, y).predict(X)[73], 1)
    assert_equal(eclf.fit(X, y).predict(X)[73], 1) 
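The tie goes to the smaller label because hard voting reduces to an argmax over (weighted) vote counts, and numpy's argmax returns the first maximal index. A minimal sketch of that tie-breaking logic:

import numpy as np

# One vote for label 1 and one for label 2: bincount gives [0, 1, 1],
# and argmax returns the first maximal index, i.e. the smaller label.
votes = np.bincount([1, 2])
print(votes)             # [0 1 1]
print(np.argmax(votes))  # 1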
Example #24
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_predict_on_toy_problem():
    """Manually check predicted class labels for toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()

    X = np.array([[-1.1, -1.5],
                  [-1.2, -1.4],
                  [-3.4, -2.2],
                  [1.1, 1.2],
                  [2.1, 1.4],
                  [3.1, 2.3]])

    y = np.array([1, 1, 1, 2, 2, 2])

    assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
    assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
    assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))

    eclf = VotingClassifier(estimators=[
                            ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                            voting='hard',
                            weights=[1, 1, 1])
    assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))

    eclf = VotingClassifier(estimators=[
                            ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                            voting='soft',
                            weights=[1, 1, 1])
    assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2])) 
Example #25
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_multilabel():
    """Check if error is raised for multilabel classification."""
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          random_state=123)
    clf = OneVsRestClassifier(SVC(kernel='linear'))

    eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')

    try:
        eclf.fit(X, y)
    except NotImplementedError:
        return 
Example #26
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_gridsearch():
    """Check GridSearch support."""
    clf1 = LogisticRegression(random_state=1)
    clf2 = RandomForestClassifier(random_state=1)
    clf3 = GaussianNB()
    eclf = VotingClassifier(estimators=[
                ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                voting='soft')

    params = {'lr__C': [1.0, 100.0],
              'voting': ['soft', 'hard'],
              'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}

    grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
    grid.fit(iris.data, iris.target) 
Example #27
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_sample_weight_kwargs():
    """Check that VotingClassifier passes sample_weight as kwargs"""
    class MockClassifier(BaseEstimator, ClassifierMixin):
        """Mock Classifier to check that sample_weight is received as kwargs"""
        def fit(self, X, y, *args, **sample_weight):
            assert_true('sample_weight' in sample_weight)

    clf = MockClassifier()
    eclf = VotingClassifier(estimators=[('mock', clf)], voting='soft')

    # Should not raise an error.
    eclf.fit(X, y, sample_weight=np.ones((len(y),))) 
Example #28
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_estimator_weights_format():
    # Test estimator weights inputs as list and array
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    eclf1 = VotingClassifier(estimators=[
                ('lr', clf1), ('rf', clf2)],
                weights=[1, 2],
                voting='soft')
    eclf2 = VotingClassifier(estimators=[
                ('lr', clf1), ('rf', clf2)],
                weights=np.array((1, 2)),
                voting='soft')
    eclf1.fit(X, y)
    eclf2.fit(X, y)
    assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X)) 
Example #29
Source File: test_voting_classifier.py    From twitter-stock-recommendation with MIT License
def test_transform():
    """Check transform method of VotingClassifier on toy dataset."""
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    clf3 = GaussianNB()
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])

    eclf1 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft').fit(X, y)
    eclf2 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        flatten_transform=True).fit(X, y)
    eclf3 = VotingClassifier(estimators=[
        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
        voting='soft',
        flatten_transform=False).fit(X, y)

    warn_msg = ("'flatten_transform' default value will be "
                "changed to True in 0.21."
                "To silence this warning you may"
                " explicitly set flatten_transform=False.")
    res = assert_warns_message(DeprecationWarning, warn_msg,
                               eclf1.transform, X)
    assert_array_equal(res.shape, (3, 4, 2))
    assert_array_equal(eclf2.transform(X).shape, (4, 6))
    assert_array_equal(eclf3.transform(X).shape, (3, 4, 2))
    assert_array_equal(res.swapaxes(0, 1).reshape((4, 6)),
                       eclf2.transform(X))
    assert_array_equal(eclf3.transform(X).swapaxes(0, 1).reshape((4, 6)),
                       eclf2.transform(X)) 
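The shapes in this test follow from 3 estimators, 4 samples, and 2 classes: with flatten_transform=False, transform returns the stacked per-estimator probabilities of shape (n_classifiers, n_samples, n_classes) = (3, 4, 2), while flatten_transform=True yields (n_samples, n_classifiers * n_classes) = (4, 6). A minimal sketch against a scikit-learn release where flatten_transform already defaults to True:

import numpy as np
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB

X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
estimators = [('lr', LogisticRegression(random_state=123)),
              ('rf', RandomForestClassifier(random_state=123)),
              ('gnb', GaussianNB())]

flat = VotingClassifier(estimators, voting='soft',
                        flatten_transform=True).fit(X, y)
stacked = VotingClassifier(estimators, voting='soft',
                           flatten_transform=False).fit(X, y)

print(flat.transform(X).shape)     # (4, 6): (n_samples, n_classifiers * n_classes)
print(stacked.transform(X).shape)  # (3, 4, 2): (n_classifiers, n_samples, n_classes)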
Example #30
Source File: test_voting.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_estimator_weights_format():
    # Test estimator weights inputs as list and array
    clf1 = LogisticRegression(random_state=123)
    clf2 = RandomForestClassifier(random_state=123)
    eclf1 = VotingClassifier(estimators=[
                ('lr', clf1), ('rf', clf2)],
                weights=[1, 2],
                voting='soft')
    eclf2 = VotingClassifier(estimators=[
                ('lr', clf1), ('rf', clf2)],
                weights=np.array((1, 2)),
                voting='soft')
    eclf1.fit(X, y)
    eclf2.fit(X, y)
    assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))