Python sklearn.linear_model.Ridge() Examples

The following are 30 code examples showing how to use sklearn.linear_model.Ridge(). They are extracted from open source projects; the project, author, file, and license are listed above each example.

You may also want to check out the other functions and classes available in the sklearn.linear_model module.
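As a warm-up before the extracted examples, here is a minimal self-contained sketch of the basic Ridge workflow (toy data invented for illustration; alpha sets the strength of the L2 penalty on the coefficients):

import numpy as np
from sklearn.linear_model import Ridge

# Toy data generated from y = 2*x0 + 3*x1 + 1
X = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [2.0, 1.0]])
y = np.array([4.0, 3.0, 6.0, 8.0])

model = Ridge(alpha=1.0)
model.fit(X, y)

print(model.coef_)       # penalized estimates of the two weights
print(model.intercept_)  # fitted offset
print(model.predict([[3.0, 2.0]]))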

Example 1
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_validation.py    License: MIT License
def test_cross_val_score_with_score_func_regression():
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()

    # Default score of the Ridge regression estimator
    scores = cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)

    # R2 score (aka. coefficient of determination) - should be the
    # same as the default estimator score
    r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)

    # Mean squared error; this is a loss function, so "scores" are negative
    neg_mse_scores = cross_val_score(reg, X, y, cv=5,
                                     scoring="neg_mean_squared_error")
    expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)

    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) 
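The sign convention in the MSE block above is general: scikit-learn scorers are uniformly greater-is-better, so loss functions are exposed negated. A quick standalone sketch of that convention (synthetic data; assumes a reasonably recent scikit-learn):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.metrics import get_scorer, mean_squared_error

X, y = make_regression(n_samples=50, n_features=5, random_state=0)
reg = Ridge().fit(X, y)

# The scorer negates the loss so that greater is always better
scorer = get_scorer("neg_mean_squared_error")
assert np.isclose(scorer(reg, X, y), -mean_squared_error(y, reg.predict(X)))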
Example 2
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_search.py    License: MIT License
def test_classes__property():
    # Test that classes_ property matches best_estimator_.classes_
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    Cs = [.1, 1, 10]

    grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
    grid_search.fit(X, y)
    assert_array_equal(grid_search.best_estimator_.classes_,
                       grid_search.classes_)

    # Test that regressors do not have a classes_ attribute
    grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
    grid_search.fit(X, y)
    assert not hasattr(grid_search, 'classes_')

    # Test that the grid searcher has no classes_ attribute before it's fit
    grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
    assert not hasattr(grid_search, 'classes_')

    # Test that the grid searcher has no classes_ attribute without a refit
    grid_search = GridSearchCV(LinearSVC(random_state=0),
                               {'C': Cs}, refit=False)
    grid_search.fit(X, y)
    assert not hasattr(grid_search, 'classes_') 
Example 3
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_search.py    License: MIT License
def test_empty_cv_iterator_error():
    # Use global X, y

    # create cv
    cv = KFold(n_splits=3).split(X)

    # pop all of it, this should cause the expected ValueError
    [u for u in cv]
    # cv is empty now

    train_size = 100
    ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},
                               cv=cv, n_jobs=-1)

    # assert that this raises an error
    with pytest.raises(ValueError,
                       match='No fits were performed. '
                             'Was the CV iterator empty\\? '
                             'Were there no candidates\\?'):
        ridge.fit(X[:train_size], y[:train_size]) 
Example 4
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_search.py    License: MIT License
def test_random_search_bad_cv():
    # Use global X, y

    class BrokenKFold(KFold):
        def get_n_splits(self, *args, **kw):
            return 1

    # create bad cv
    cv = BrokenKFold(n_splits=3)

    train_size = 100
    ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},
                               cv=cv, n_jobs=-1)

    # assert that this raises an error
    with pytest.raises(ValueError,
                       match='cv.split and cv.get_n_splits returned '
                             'inconsistent results. Expected \\d+ '
                             'splits, got \\d+'):
        ridge.fit(X[:train_size], y[:train_size]) 
Example 5
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_common.py    License: MIT License
def _tested_estimators():
    for name, Estimator in all_estimators():
        if issubclass(Estimator, BiclusterMixin):
            continue
        if name.startswith("_"):
            continue
        # FIXME _skip_test should be used here (if we could)

        required_parameters = getattr(Estimator, "_required_parameters", [])
        if len(required_parameters):
            if required_parameters in (["estimator"], ["base_estimator"]):
                if issubclass(Estimator, RegressorMixin):
                    estimator = Estimator(Ridge())
                else:
                    estimator = Estimator(LinearDiscriminantAnalysis())
            else:
                warnings.warn("Can't instantiate estimator {} which requires "
                              "parameters {}".format(name,
                                                     required_parameters),
                              SkipTestWarning)
                continue
        else:
            estimator = Estimator()
        yield name, estimator 
Example 6
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_multioutput.py    License: MIT License
def test_base_chain_random_order():
    # Fit base chain with random order
    X, Y = generate_multilabel_dataset_with_correlations()
    for chain in [ClassifierChain(LogisticRegression()),
                  RegressorChain(Ridge())]:
        chain_random = clone(chain).set_params(order='random', random_state=42)
        chain_random.fit(X, Y)
        chain_fixed = clone(chain).set_params(order=chain_random.order_)
        chain_fixed.fit(X, Y)
        assert_array_equal(chain_fixed.order_, chain_random.order_)
        assert_not_equal(list(chain_random.order_), list(range(4)))  # fitted order_, not the 'random' string stored in order
        assert_equal(len(chain_random.order_), 4)
        assert_equal(len(set(chain_random.order_)), 4)
        # Randomly ordered chain should behave identically to a fixed order
        # chain with the same order.
        for est1, est2 in zip(chain_random.estimators_,
                              chain_fixed.estimators_):
            assert_array_almost_equal(est1.coef_, est2.coef_) 
Example 7
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_multioutput.py    License: MIT License
def test_base_chain_crossval_fit_and_predict():
    # Fit chain with cross_val_predict and verify predict
    # performance
    X, Y = generate_multilabel_dataset_with_correlations()

    for chain in [ClassifierChain(LogisticRegression()),
                  RegressorChain(Ridge())]:
        chain.fit(X, Y)
        chain_cv = clone(chain).set_params(cv=3)
        chain_cv.fit(X, Y)
        Y_pred_cv = chain_cv.predict(X)
        Y_pred = chain.predict(X)

        assert Y_pred_cv.shape == Y_pred.shape
        assert not np.all(Y_pred == Y_pred_cv)
        if isinstance(chain, ClassifierChain):
            assert jaccard_score(Y, Y_pred_cv, average='samples') > .4
        else:
            assert mean_squared_error(Y, Y_pred_cv) < .25 
Example 8
Project: SparkADMM   Author: yahoo   File: SparseLinearRegressionSolver.py    License: Apache License 2.0
def solveSingle(self,inputDF,outputDict,rho,beta_target):
        I,J,V,Y=[],[],[],[]
        fd = {} # mapping feature names to consecutive integers, starting with 0
        for i,(id, x) in enumerate(inputDF.items()):
            l = outputDict.get(id)
            for k,v in x.items():
                I.append(i)
                J.append(k)
                V.append(v)
                upd(fd,k)
            Y.append(l)
        J = [fd[k] for k in J]  # translate feature names to integer ids (a list, so this also works on Python 3)
        X = sparse.coo_matrix((V,(I,J)),shape=(I[-1]+1,len(fd)))
        fd_reverse = [k for k,v in sorted(fd.items(), key = lambda t: t[1])]
        # y_new = y - X . beta_target
        # converting a proximal least square problem to a ridge regression
        ZmUl = np.array([beta_target.get(k,0) for k in fd_reverse])
        y_new = np.array(Y) - X * ZmUl
        ridge = Ridge(alpha=rho, fit_intercept=False)
        ret = ridge.fit(X,y_new)
        #ret = self.lr.fit(X,y_new)
        # ordered list of feature names according to their integer ids in fd
        #raise ValueError('fd_reverse = %s \n X = %s \n J = %s \n I = %s \n V = %s \n Y = %s \n y_new = %s \n ret.coef_ = %s \n ZmUl = %s \n'\
        #            %(str(fd_reverse), str(X), str(J), str(I), str(V), str(Y), str(y_new), str(ret.coef_), str(ZmUl)))
        return dict(zip(fd_reverse, (ret.coef_ + ZmUl).tolist())) 
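The reduction in solveSingle is worth spelling out: substituting beta = beta_prime + beta_target turns the proximal problem min ||X*beta - y||^2 + rho*||beta - beta_target||^2 into an ordinary ridge fit on y_new = y - X*beta_target, which is why the coefficients are shifted back by ZmUl at the end. A small numpy sketch (synthetic data, for illustration only) verifying the equivalence:

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
y = rng.randn(20)
beta_target = rng.randn(3)
rho = 0.5

# Direct solve of the proximal problem via its normal equations:
#   (X'X + rho*I) beta = X'y + rho*beta_target
beta_direct = np.linalg.solve(X.T @ X + rho * np.eye(3),
                              X.T @ y + rho * beta_target)

# Reduction used above: ridge on the shifted target, then shift back
ridge = Ridge(alpha=rho, fit_intercept=False).fit(X, y - X @ beta_target)
beta_reduced = ridge.coef_ + beta_target

assert np.allclose(beta_direct, beta_reduced)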
Example 9
Project: manip-ml   Author: jagielski   File: gd_poisoners.py    License: MIT License
def learn_model(self, x, y, clf, lam=None):
        if lam is None and self.initlam != -1:  # hack for first training
            lam = self.initlam
        if clf is None:
            if lam is None:
                clf = linear_model.LassoCV(max_iter=10000)
                clf.fit(x, y)
                lam = clf.alpha_
            clf = linear_model.Lasso(alpha=lam, max_iter=10000,
                                     warm_start=True)
        clf.fit(x, y)
        return clf, lam


############################################################################################
# Implements GD Poisoning for Ridge Linear Regression
############################################################################################ 
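For the Ridge variant announced by the header above, the same select-alpha-once-then-reuse pattern would plausibly look like the following sketch (a hypothetical helper, not the project's actual code; RidgeCV picks the penalty by cross-validation):

from sklearn import linear_model

def learn_ridge_model(x, y, lam=None):
    # Pick the penalty once by cross-validation, then reuse it
    if lam is None:
        cv = linear_model.RidgeCV(alphas=[0.01, 0.1, 1.0, 10.0])
        cv.fit(x, y)
        lam = cv.alpha_
    clf = linear_model.Ridge(alpha=lam)
    clf.fit(x, y)
    return clf, lam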
Example 10
Project: nni   Author: microsoft   File: main.py    License: MIT License
def get_model(PARAMS):
    '''Get model according to parameters'''
    model_dict = {
        'LinearRegression': LinearRegression(),
        'Ridge': Ridge(),
        'Lars': Lars(),
        'ARDRegression': ARDRegression()
    }
    if not model_dict.get(PARAMS['model_name']):
        LOG.exception('Not supported model!')
        exit(1)

    model = model_dict[PARAMS['model_name']]
    model.normalize = bool(PARAMS['normalize'])

    return model 
Example 11
Project: Quadflor   Author: quadflor   File: neural_net.py    License: BSD 3-Clause "New" or "Revised" License
def __init__(self,
                 probabilistic_estimator,
                 stepsize=0.01,
                 verbose=0,
                 fit_intercept=False,
                 sparse_output=True,
                 **ridge_params
                 ):
        """
        Arguments:
            probabilistic_estimator -- Estimator capable of predict_proba

        Keyword Arguments:
            average -- averaging method for f1 score
            stepsize -- stepsize for the exhaustive search of optimal threshold
            fit_intercept -- fit intercept in Ridge regression
            sparse_output -- Predict returns csr in favor of ndarray
            **ridge_params -- Passed down to Ridge regression
        """
        self.model = probabilistic_estimator
        self.verbose = verbose
        self.ridge = Ridge(fit_intercept=fit_intercept, **ridge_params)
        self.stepsize = stepsize
        self.sparse_output = sparse_output 
Example 12
Project: hyperparameter_hunter   Author: HunterMcGushion   File: test_support.py    License: MIT License
def test_do_not_validate(env_boston):
    exp = CVExperiment(
        model_initializer=Ridge,
        model_init_params={},
        feature_engineer=FeatureEngineer([standard_scale], do_validate=False),
    )

    for step in exp.feature_engineer.steps:
        assert step.original_hashes == {}
        assert step.updated_hashes == {}


##################################################
# `FeatureEngineer.inverse_transform` TypeError Tests
##################################################
# noinspection PyUnusedLocal 
Example 13
Project: hyperparameter_hunter   Author: HunterMcGushion   File: test_support.py    License: MIT License
def test_feature_engineer_list_experiment_inequality(env_boston, steps_0, steps_1):
    """Test that the `feature_engineer` attribute constructed by
    :class:`~hyperparameter_hunter.experiments.CVExperiment` is NOT the same when given a list as
    input vs. a :class:`~hyperparameter_hunter.feature_engineering.FeatureEngineer` when the two are
    actually different. This is an insanity test to make sure that the related test in this module,
    :func:`test_feature_engineer_list_experiment_equality`, is not simply equating everything"""
    exp_0 = CVExperiment(Ridge, feature_engineer=steps_0)
    exp_1 = CVExperiment(Ridge, feature_engineer=FeatureEngineer(steps_1))
    assert exp_0.feature_engineer != exp_1.feature_engineer

    # Repeat above, but switch which steps are wrapped in `FeatureEngineer`
    exp_2 = CVExperiment(Ridge, feature_engineer=steps_1)
    exp_3 = CVExperiment(Ridge, feature_engineer=FeatureEngineer(steps_0))
    assert exp_2.feature_engineer != exp_3.feature_engineer


##################################################
# OptPros: `FeatureEngineer` as List
##################################################
#################### Equality #################### 
Example 14
Project: scikit-lego   Author: koaning   File: test_estimatortransformer.py    License: MIT License
def test_shape(random_xy_dataset_regr):
    X, y = random_xy_dataset_regr
    m = X.shape[0]
    pipeline = Pipeline(
        [
            (
                "ml_features",
                FeatureUnion(
                    [
                        ("model_1", EstimatorTransformer(LinearRegression())),
                        ("model_2", EstimatorTransformer(Ridge())),
                    ]
                ),
            )
        ]
    )

    assert pipeline.fit(X, y).transform(X).shape == (m, 2) 
Example 15
Project: dataiku-contrib   Author: dataiku   File: explanation.py    License: Apache License 2.0
def iter_explain(self, instances_df, nh_size):

        [Xs, Ys, isSparse] = self.preprocessor.generate_samples(nh_size)
        [Xe, Ye, isSparse] = self.preprocessor.preprocess(instances_df)

        sample_weights = self.compute_sample_weights_to_instance(Xe, Xs)
        classes = self.preprocessor.get_classes()
        predictor_features = self.preprocessor.get_predictor_features()
        coefs_cols = ['coef_{}'.format(c) for c in classes]
        predictor_features_df = pd.DataFrame(predictor_features, columns=['feature'])
        samples_cols = ['sample_{}'.format(s) for s in range(nh_size)]

        for row_idx, (to_exp, to_proba, w) in enumerate(zip(Xe, Ye, sample_weights)):  # zip rather than Python 2's itertools.izip
            Xs[0,:] = to_exp
            Ys[0,:] = to_proba
            model_regressor = Ridge(alpha=self.ridge_alpha, fit_intercept=True, random_state=self.random_state)
            #TODO: compare with train explanation learning
            model_regressor.fit(Xs, Ys, sample_weight=w)
            local_r2_score = model_regressor.score(Xs, Ys, sample_weight=None)
            intercept_np = model_regressor.intercept_
            model_coefs = model_regressor.coef_
            kernel_distance_avg = np.mean(w)
            kernel_distance_std = np.std(w)

            coefs_df = pd.DataFrame(model_coefs.T, columns=coefs_cols)
            explanation_df = pd.concat((predictor_features_df,coefs_df), axis=1)
            #TODO: optimize this
            explanation_df.insert(0, '_exp_id', row_idx)

            instance_df = pd.DataFrame(to_exp.reshape(-1, len(to_exp)), columns=predictor_features)
            instance_df['r2_score'] = local_r2_score
            instance_df['kernel_distance_avg'] = kernel_distance_avg
            instance_df['kernel_distance_std'] = kernel_distance_std
            #TODO: optimize this
            instance_df.insert(0, '_exp_id', row_idx)

            #FIXME: used only for debugging 
            #weights_df = pd.DataFrame(w.reshape(-1, len(w)), columns=samples_cols)
            #weights_df.insert(0, '_exp_id', row_idx)

            yield explanation_df, instance_df 
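Stripped of the DataFrame bookkeeping, the core of iter_explain is a LIME-style local surrogate: fit a Ridge model to perturbed samples, weighting each sample by its kernel distance to the instance being explained. A condensed sketch (local_surrogate is a hypothetical name):

from sklearn.linear_model import Ridge

def local_surrogate(X_samples, y_samples, weights, alpha=1.0):
    # Weighted Ridge fit over the sampled neighborhood
    reg = Ridge(alpha=alpha, fit_intercept=True)
    reg.fit(X_samples, y_samples, sample_weight=weights)
    # r2 over the neighborhood indicates how faithful the local
    # linear approximation is (the code above scores unweighted)
    fidelity = reg.score(X_samples, y_samples)
    return reg.coef_, reg.intercept_, fidelity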
Example 16
Project: deep_architect   Author: negrinho   File: hashing.py    License: MIT License
def _refit(self):
        if self.model is None:
            self.model = lm.Ridge(alpha=self.weight_decay_coeff)

        X = sp.vstack(self.vecs_lst, format='csr')
        y = np.array(self.vals_lst)
        self.model.fit(X, y)

    # TODO: improve 
Example 17
Project: kaggle-HomeDepot   Author: ChenglongChen   File: skl_utils.py    License: MIT License
def fit(self, X, y):
        sdim, fdim = X.shape
        for i in range(self.n_estimators):
            ridge = Ridge(alpha=self.alpha, normalize=self.normalize, random_state=self.random_state)
            fidx = self._random_feature_idx(fdim, self.random_state+i*100)
            sidx = self._random_sample_idx(sdim, self.random_state+i*10)
            X_tmp = X[sidx][:,fidx]
            if self.poly:
                X_tmp = PolynomialFeatures(degree=2).fit_transform(X_tmp)[:,1:]
            ridge.fit(X_tmp, y[sidx])
            self.ridge_list[i] = ridge
            self.feature_idx_list[i] = fidx
        return self 
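The fit above stores one Ridge per random patch of rows and features; a companion predict would apply each stored model to its own feature subset and average the results. A sketch of what that method might look like (hypothetical, not the project's actual code):

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

def predict(self, X):
    preds = []
    for ridge, fidx in zip(self.ridge_list, self.feature_idx_list):
        X_tmp = X[:, fidx]  # same feature subset used at fit time
        if self.poly:
            X_tmp = PolynomialFeatures(degree=2).fit_transform(X_tmp)[:, 1:]
        preds.append(ridge.predict(X_tmp))
    return np.mean(preds, axis=0)  # average over the ensemble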
Example 18
Project: nyaggle   Author: nyanp   File: test_cross_validate.py    License: MIT License
def test_cv_sklearn_regression():
    X, y = make_regression(n_samples=1024, n_features=20, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

    model = Ridge(alpha=1.0)

    pred_oof, pred_test, scores, _ = cross_validate(model, X_train, y_train, X_test, cv=5, eval_func=r2_score)

    print(scores)
    assert len(scores) == 5 + 1
    assert scores[-1] >= 0.95  # overall r2
    assert r2_score(y_train, pred_oof) == scores[-1]
    assert r2_score(y_test, pred_test) >= 0.95  # test r2 
Example 19
Project: causal-text-embeddings   Author: blei-lab   File: reddit_output_att.py    License: MIT License
def fit_conditional_expected_outcomes(outcomes, features):
	model = Ridge()
	model.fit(features, outcomes)
	predict = model.predict(features)
	if verbose:
		print("Training MSE:", mse(outcomes, predict))
	return model 
Example 20
Project: causal-text-embeddings   Author: blei-lab   File: reddit_output_att.py    License: MIT License
def fit_model(doc_embeddings, labels, is_binary=False):
	if is_binary:
		model = LogisticRegression(solver='liblinear')
	else:
		model = Ridge()
	model.fit(doc_embeddings, labels)
	return model 
Example 21
Project: causal-text-embeddings   Author: blei-lab   File: peerread_output_att.py    License: MIT License
def fit_model(doc_embeddings, labels, is_binary=False):
	if is_binary:
		model = LogisticRegression(solver='liblinear')
	else:
		model = Ridge()
	model.fit(doc_embeddings, labels)
	return model 
Example 22
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_score_objects.py    License: MIT License
def test_regression_scorers():
    # Test regression scorers.
    diabetes = load_diabetes()
    X, y = diabetes.data, diabetes.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = Ridge()
    clf.fit(X_train, y_train)
    score1 = get_scorer('r2')(clf, X_test, y_test)
    score2 = r2_score(y_test, clf.predict(X_test))
    assert_almost_equal(score1, score2) 
Example 23
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_score_objects.py    License: MIT License
def test_scoring_is_not_metric():
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         LogisticRegression(), f1_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         LogisticRegression(), roc_auc_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         Ridge(), r2_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         KMeans(), cluster_module.adjusted_rand_score) 
Example 24
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_split.py    License: MIT License
def test_nested_cv():
    # Test if nested cross validation works with different combinations of cv
    rng = np.random.RandomState(0)

    X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
    groups = rng.randint(0, 5, 15)

    cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
           StratifiedShuffleSplit(n_splits=3, random_state=0)]

    for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
        gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
                          cv=inner_cv, error_score='raise', iid=False)
        cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
                        fit_params={'groups': groups}) 
Example 25
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_validation.py    License: MIT License
def test_cross_validate():
    # Compute train and test mse/r2 scores
    cv = KFold(n_splits=5)

    # Regression
    X_reg, y_reg = make_regression(n_samples=30, random_state=0)
    reg = Ridge(random_state=0)

    # Classification
    X_clf, y_clf = make_classification(n_samples=30, random_state=0)
    clf = SVC(kernel="linear", random_state=0)

    for X, y, est in ((X_reg, y_reg, reg), (X_clf, y_clf, clf)):
        # It's okay to evaluate regression metrics on classification too
        mse_scorer = check_scoring(est, 'neg_mean_squared_error')
        r2_scorer = check_scoring(est, 'r2')
        train_mse_scores = []
        test_mse_scores = []
        train_r2_scores = []
        test_r2_scores = []
        fitted_estimators = []
        for train, test in cv.split(X, y):
            est = clone(est).fit(X[train], y[train])  # clone the estimator under test (reg or clf)
            train_mse_scores.append(mse_scorer(est, X[train], y[train]))
            train_r2_scores.append(r2_scorer(est, X[train], y[train]))
            test_mse_scores.append(mse_scorer(est, X[test], y[test]))
            test_r2_scores.append(r2_scorer(est, X[test], y[test]))
            fitted_estimators.append(est)

        train_mse_scores = np.array(train_mse_scores)
        test_mse_scores = np.array(test_mse_scores)
        train_r2_scores = np.array(train_r2_scores)
        test_r2_scores = np.array(test_r2_scores)
        fitted_estimators = np.array(fitted_estimators)

        scores = (train_mse_scores, test_mse_scores, train_r2_scores,
                  test_r2_scores, fitted_estimators)

        check_cross_validate_single_metric(est, X, y, scores)
        check_cross_validate_multi_metric(est, X, y, scores) 
Example 26
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_kernel_ridge.py    License: MIT License
def test_kernel_ridge():
    pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    assert_array_almost_equal(pred, pred2) 
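This comparison, and the ones in the remaining examples, rests on the primal/dual equivalence: with a linear kernel k(a, b) = a·b and no intercept, KernelRidge solves the dual of exactly the ridge problem above, so the two sets of predictions agree to numerical precision.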
Example 27
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_kernel_ridge.py    License: MIT License
def test_kernel_ridge_csc():
    pred = Ridge(alpha=1, fit_intercept=False,
                 solver="cholesky").fit(Xcsc, y).predict(Xcsc)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
    assert_array_almost_equal(pred, pred2) 
Example 28
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_kernel_ridge.py    License: MIT License
def test_kernel_ridge_singular_kernel():
    # alpha=0 causes a LinAlgError in computing the dual coefficients,
    # which causes a fallback to a lstsq solver. This is tested here.
    pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
    kr = KernelRidge(kernel="linear", alpha=0)
    ignore_warnings(kr.fit)(X, y)
    pred2 = kr.predict(X)
    assert_array_almost_equal(pred, pred2) 
Example 29
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_kernel_ridge.py    License: MIT License
def test_kernel_ridge_sample_weights():
    K = np.dot(X, X.T)  # precomputed kernel
    sw = np.random.RandomState(0).rand(X.shape[0])

    pred = Ridge(alpha=1,
                 fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
    pred2 = KernelRidge(kernel="linear",
                        alpha=1).fit(X, y, sample_weight=sw).predict(X)
    pred3 = KernelRidge(kernel="precomputed",
                        alpha=1).fit(K, y, sample_weight=sw).predict(K)
    assert_array_almost_equal(pred, pred2)
    assert_array_almost_equal(pred, pred3) 
Example 30
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_kernel_ridge.py    License: MIT License
def test_kernel_ridge_multi_output():
    pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
    pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
    assert_array_almost_equal(pred, pred2)

    pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    pred3 = np.array([pred3, pred3]).T
    assert_array_almost_equal(pred2, pred3)