Python sklearn.linear_model.RANSACRegressor() Examples

The following are 30 code examples of sklearn.linear_model.RANSACRegressor(), drawn from open-source projects. The project and source file each example was taken from are noted above it. You may also want to check out all available functions/classes of the module sklearn.linear_model.
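Before the project examples, here is a minimal, self-contained sketch of the basic RANSACRegressor API on synthetic data; the data, parameter values, and variable names are illustrative and not taken from any of the projects below.

import numpy as np
from sklearn.linear_model import LinearRegression, RANSACRegressor

# Synthetic 1-D data: a noisy line plus a handful of gross outliers.
rng = np.random.RandomState(0)
X = np.linspace(0, 10, 100)[:, np.newaxis]
y = 3.0 * X.ravel() + 1.0 + rng.normal(scale=0.5, size=100)
y[::10] += 30.0  # corrupt every tenth sample

# Robust fit: LinearRegression is refit on random minimal subsets and the
# consensus (inlier) set with the most support wins.
ransac = RANSACRegressor(LinearRegression(), min_samples=10,
                         residual_threshold=2.0, max_trials=100,
                         random_state=0)
ransac.fit(X, y)

print('slope: %.3f, intercept: %.3f'
      % (ransac.estimator_.coef_[0], ransac.estimator_.intercept_))
inlier_mask = ransac.inlier_mask_          # boolean mask of consensus samples
outlier_mask = np.logical_not(inlier_mask)
y_pred = ransac.predict(X)                 # predictions from the inlier-only fit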
Example #1
Source File: poiRegression.py    From python-urbanPlanning with MIT License
def RANSAC_m(X_ransac,y_ransac,predFeat=False):
    # max_trials: maximum number of iterations.
    # min_samples: minimum number of randomly drawn samples used to fit the candidate model.
    # residual_metric: lambda giving the absolute vertical distance between the fitted line and
    #   each sample (deprecated in later scikit-learn releases in favour of the `loss` parameter).
    # residual_threshold: samples with a residual below this value are counted as inliers, the
    #   rest as outliers; by default the threshold is estimated with the MAD (Median Absolute Deviation).
    ransac=RANSACRegressor(LinearRegression(),
                           max_trials=100,
                           min_samples=10,
                           residual_metric=lambda x: np.sum(np.abs(x), axis=1),
                           residual_threshold=1.0,
                           random_state=0)
    ransac.fit(X_ransac,y_ransac)
    print('Slope:%.3f;Intercept:%.3f'%(ransac.estimator_.coef_[0],ransac.estimator_.intercept_))  
    
    X=X_ransac
    y=y_ransac
    inlier_mask=ransac.inlier_mask_  # inlier mask
#    print(inlier_mask)
    outlier_mask=np.logical_not(inlier_mask)  # outlier mask
    line_X=np.arange(0,5,0.5)
    line_y_ransac=ransac.predict(line_X[:,np.newaxis])
    plt.scatter(X[inlier_mask],y[inlier_mask],c='blue',marker='o',label='Inliers')
    plt.scatter(X[outlier_mask],y[outlier_mask],c='lightgreen',marker='s',label='Outliers')
    plt.plot(line_X,line_y_ransac,color='red')
    plt.xlabel('hygiene_num')
    plt.ylabel('Price in $1000')
    plt.legend(loc='upper left')
    plt.show()
    
    if type(predFeat).__module__=='numpy': # check whether prediction features (spatial data) were passed in
        return ransac.predict(predFeat) 
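A hypothetical call of RANSAC_m might look as follows, assuming a scikit-learn version that still accepts the residual_metric keyword; the data is synthetic and only meant to show the expected shapes (a single-feature X_ransac and an optional NumPy array of new rows for predFeat).

rng = np.random.RandomState(0)
X_ransac = rng.uniform(0, 5, size=(200, 1))          # one feature, in the 0-5 range the plot uses
y_ransac = 2.0 * X_ransac.ravel() + rng.normal(scale=0.3, size=200)

RANSAC_m(X_ransac, y_ransac)                          # fit, print slope/intercept, show the plot
preds = RANSAC_m(X_ransac, y_ransac, np.array([[1.5], [3.0]]))  # additionally predict for new rows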
Example #2
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_max_trials():
    base_estimator = LinearRegression()

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, max_trials=0,
                                       random_state=0)
    assert_raises(ValueError, ransac_estimator.fit, X, y)

    # There is only a 1e-9 chance it will take this many trials; no deeper reason
    # for that value (1e-2 would not be enough, failures could still happen).
    # 2 is what RANSAC uses as min_samples, i.e. X.shape[1] + 1.
    max_trials = _dynamic_max_trials(
        len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9)
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2)
    for i in range(50):
        ransac_estimator.set_params(min_samples=2, random_state=i)
        ransac_estimator.fit(X, y)
        assert_less(ransac_estimator.n_trials_, max_trials + 1) 
Example #3
Source File: rectify.py    From facade-segmentation with MIT License
def _vlines(lines, ctrs=None, lengths=None, vecs=None, angle_lo=20, angle_hi=160, ransac_options=RANSAC_OPTIONS):
    ctrs = ctrs if ctrs is not None else lines.mean(1)
    vecs = vecs if vecs is not None else lines[:, 1, :] - lines[:, 0, :]
    lengths = lengths if lengths is not None else np.hypot(vecs[:, 0], vecs[:, 1])

    angles = np.degrees(np.arccos(vecs[:, 0] / lengths))
    points = np.column_stack([ctrs[:, 0], angles])
    point_indices, = np.nonzero((angles > angle_lo) & (angles < angle_hi))
    points = points[point_indices]
    if len(points) > 2:
        model_ransac = linear_model.RANSACRegressor(**ransac_options)
        model_ransac.fit(points[:, 0].reshape(-1, 1), points[:, 1].reshape(-1, 1))
        inlier_mask = model_ransac.inlier_mask_
        valid_lines = lines[point_indices[inlier_mask], :, :]
    else:
        valid_lines = []
    return valid_lines 
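A usage sketch for _vlines on invented segments of shape (N, 2, 2). RANSAC_OPTIONS is a module-level dict of RANSACRegressor keyword arguments that is not part of this excerpt, so the options are passed explicitly here; the coordinates are purely illustrative.

import numpy as np

# Three roughly vertical segments plus one near-horizontal segment that the
# 20-160 degree angle window discards before RANSAC is even fitted.
lines = np.array([
    [[10.0,  0.0], [11.0, 100.0]],
    [[50.0,  5.0], [49.0,  95.0]],
    [[90.0,  0.0], [90.5, 110.0]],
    [[ 0.0, 50.0], [100.0, 52.0]],
])

vertical = _vlines(lines, ransac_options={})
# `vertical` holds the near-vertical segments kept in the RANSAC consensus
# over their (x-centre, angle) pairs.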
Example #4
Source File: rectify.py    From facade-segmentation with MIT License
def _hlines(lines, ctrs=None, lengths=None, vecs=None, angle_lo=20, angle_hi=160, ransac_options=RANSAC_OPTIONS):
    ctrs = ctrs if ctrs is not None else lines.mean(1)
    vecs = vecs if vecs is not None else lines[:, 1, :] - lines[:, 0, :]
    lengths = lengths if lengths is not None else np.hypot(vecs[:, 0], vecs[:, 1])

    angles = np.degrees(np.arccos(vecs[:, 1] / lengths))
    points = np.column_stack([ctrs[:, 1], angles])
    point_indices, = np.nonzero((angles > angle_lo) & (angles < angle_hi))
    points = points[point_indices]
    if len(points) > 2:
        model_ransac = linear_model.RANSACRegressor(**ransac_options)
        model_ransac.fit(points[:, 0].reshape(-1, 1), points[:, 1].reshape(-1, 1))
        inlier_mask = model_ransac.inlier_mask_
        valid_lines = lines[point_indices[inlier_mask], :, :]
    else:
        valid_lines = []
    return valid_lines 
Example #5
Source File: test_ransac.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_ransac_warn_exceed_max_skips():
    global cause_skip
    cause_skip = False

    def is_data_valid(X, y):
        global cause_skip
        if not cause_skip:
            cause_skip = True
            return True
        else:
            return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_skips=3,
                                       max_trials=5)

    assert_warns(ConvergenceWarning, ransac_estimator.fit, X, y)
    assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
    assert_equal(ransac_estimator.n_skips_invalid_data_, 4)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 0) 
Example #6
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_warn_exceed_max_skips():
    global cause_skip
    cause_skip = False

    def is_data_valid(X, y):
        global cause_skip
        if not cause_skip:
            cause_skip = True
            return True
        else:
            return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_skips=3,
                                       max_trials=5)

    assert_warns(UserWarning, ransac_estimator.fit, X, y)
    assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
    assert_equal(ransac_estimator.n_skips_invalid_data_, 4)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 0) 
Example #7
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_is_data_valid():
    def is_data_valid(X, y):
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)
    y = rng.rand(10, 1)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       is_data_valid=is_data_valid,
                                       random_state=0)

    assert_raises(ValueError, ransac_estimator.fit, X, y) 
Example #8
Source File: test_ransac.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_ransac_multi_dimensional_targets():

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)

    # 3-D target values
    yyy = np.column_stack([y, y, y])

    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, yyy)

    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) 
Example #9
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_multi_dimensional_targets():

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)

    # 3-D target values
    yyy = np.column_stack([y, y, y])

    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, yyy)

    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) 
Example #10
Source File: test_ransac.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_ransac_max_trials():
    base_estimator = LinearRegression()

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, max_trials=0,
                                       random_state=0)
    assert_raises(ValueError, ransac_estimator.fit, X, y)

    # There is only a 1e-9 chance it will take this many trials; no deeper reason
    # for that value (1e-2 would not be enough, failures could still happen).
    # 2 is what RANSAC uses as min_samples, i.e. X.shape[1] + 1.
    max_trials = _dynamic_max_trials(
        len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9)
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2)
    for i in range(50):
        ransac_estimator.set_params(min_samples=2, random_state=i)
        ransac_estimator.fit(X, y)
        assert_less(ransac_estimator.n_trials_, max_trials + 1) 
Example #11
Source File: test_standardization.py    From causallib with Apache License 2.0
def ensure_many_models(self):
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        import warnings
        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        for learner in [GradientBoostingRegressor, RandomForestRegressor, MLPRegressor,
                        ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor,
                        KNeighborsRegressor, SVR, LinearSVR]:
            learner = learner()
            learner_name = str(learner).split("(", maxsplit=1)[0]
            with self.subTest("Test fit using {learner}".format(learner=learner_name)):
                model = self.estimator.__class__(learner)
                model.fit(self.data_lin["X"], self.data_lin["a"], self.data_lin["y"])
                self.assertTrue(True)  # Fit did not crash 
Example #12
Source File: test_ransac.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_ransac_is_data_valid():
    def is_data_valid(X, y):
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)
    y = rng.rand(10, 1)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       is_data_valid=is_data_valid,
                                       random_state=0)

    assert_raises(ValueError, ransac_estimator.fit, X, y) 
Example #13
Source File: test_sklearn_glm_regressor_converter.py    From sklearn-onnx with MIT License
def test_model_ransac_regressor_default(self):
        model, X = fit_regression_model(
            linear_model.RANSACRegressor())
        model_onnx = convert_sklearn(
            model, "ransac regressor",
            [("input", FloatTensorType([None, X.shape[1]]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            verbose=False,
            basename="SklearnRANSACRegressor-Dec4",
            allow_failure="StrictVersion("
            "onnxruntime.__version__)"
            "<= StrictVersion('0.2.1')",
        ) 
Example #14
Source File: test_sklearn_glm_regressor_converter.py    From sklearn-onnx with MIT License
def test_model_ransac_regressor_mlp(self):
        model, X = fit_regression_model(
            linear_model.RANSACRegressor(
                base_estimator=MLPRegressor(solver='lbfgs')))
        model_onnx = convert_sklearn(
            model, "ransac regressor",
            [("input", FloatTensorType([None, X.shape[1]]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            verbose=False,
            basename="SklearnRANSACRegressorMLP-Dec3",
            allow_failure="StrictVersion("
            "onnxruntime.__version__)"
            "<= StrictVersion('0.2.1')",
        ) 
Example #15
Source File: test_meta.py    From m2cgen with MIT License
def test_ransac_custom_base_estimator():
    base_estimator = DecisionTreeRegressor()
    estimator = linear_model.RANSACRegressor(
        base_estimator=base_estimator,
        random_state=1)
    estimator.fit([[1], [2], [3]], [1, 2, 3])

    assembler = assemblers.RANSACModelAssembler(estimator)
    actual = assembler.assemble()

    expected = ast.IfExpr(
        ast.CompExpr(
            ast.FeatureRef(0),
            ast.NumVal(2.5),
            ast.CompOpType.LTE),
        ast.NumVal(2.0),
        ast.NumVal(3.0))

    assert utils.cmp_exprs(actual, expected) 
Example #16
Source File: test_sklearn_glm_regressor_converter.py    From sklearn-onnx with MIT License
def test_model_ransac_regressor_tree(self):
        model, X = fit_regression_model(
            linear_model.RANSACRegressor(
                base_estimator=GradientBoostingRegressor()))
        model_onnx = convert_sklearn(
            model, "ransac regressor",
            [("input", FloatTensorType([None, X.shape[1]]))])
        self.assertIsNotNone(model_onnx)
        dump_data_and_model(
            X,
            model,
            model_onnx,
            verbose=False,
            basename="SklearnRANSACRegressorTree-Dec3",
            allow_failure="StrictVersion("
            "onnxruntime.__version__)"
            "<= StrictVersion('0.2.1')",
        ) 
Example #17
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_stop_n_inliers():
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, stop_n_inliers=2,
                                       random_state=0)
    ransac_estimator.fit(X, y)

    assert_equal(ransac_estimator.n_trials_, 1) 
Example #18
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_resid_thresh_no_inliers():
    # When residual_threshold=0.0 there are no inliers and a
    # ValueError with a message should be raised
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.0, random_state=0,
                                       max_trials=5)

    msg = ("RANSAC could not find a valid consensus set")
    assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
    assert_equal(ransac_estimator.n_skips_no_inliers_, 5)
    assert_equal(ransac_estimator.n_skips_invalid_data_, 0)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 0) 
Example #19
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_is_model_valid():
    def is_model_valid(estimator, X, y):
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5,
                                       is_model_valid=is_model_valid,
                                       random_state=0)

    assert_raises(ValueError, ransac_estimator.fit, X, y) 
Example #20
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_inliers_outliers():

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)

    # Estimate parameters of corrupted data
    ransac_estimator.fit(X, y)

    # Ground truth / reference inlier mask
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) 
Example #21
Source File: drive.py    From CO2MPAS-TA with European Union Public License 1.1
def fit(self, currents, voltages):
        from sklearn.linear_model import RANSACRegressor
        m = RANSACRegressor(random_state=0)
        m.fit(currents[:, None], voltages)
        r0, ocv = float(m.estimator_.coef_), float(m.estimator_.intercept_)
        self.r0 = r0 * self.np / self.ns
        self.ocv = ocv / self.ns
        self.compile()
        return self 
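The method above is an ordinary RANSAC line fit of voltage against current, with the slope taken as the internal resistance r0 and the intercept as the open-circuit voltage ocv (before the pack-level np/ns scaling). A standalone sketch of the same idea on synthetic data; the names and values are illustrative and not taken from CO2MPAS-TA.

import numpy as np
from sklearn.linear_model import RANSACRegressor

# Synthetic current/voltage samples for a cell with ocv = 3.7 V and r0 = 50 mOhm,
# plus a few corrupted readings that RANSAC should reject as outliers.
rng = np.random.RandomState(0)
currents = rng.uniform(-30, 30, 500)
voltages = 3.7 + 0.05 * currents + rng.normal(scale=0.01, size=500)
voltages[:10] += 1.0  # gross outliers

m = RANSACRegressor(random_state=0)
m.fit(currents[:, None], voltages)
r0 = float(m.estimator_.coef_[0])      # slope  -> ~0.05 ohm
ocv = float(m.estimator_.intercept_)   # intercept -> ~3.7 V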
Example #22
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_score():
    X = np.arange(100)[:, None]
    y = np.zeros((100, ))
    y[0] = 1
    y[1] = 100

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.5, random_state=0)
    ransac_estimator.fit(X, y)

    assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
    assert_less(ransac_estimator.score(X[:2], y[:2]), 1) 
Example #23
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_predict():
    X = np.arange(100)[:, None]
    y = np.zeros((100, ))
    y[0] = 1
    y[1] = 100

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.5, random_state=0)
    ransac_estimator.fit(X, y)

    assert_equal(ransac_estimator.predict(X), np.zeros(100)) 
Example #24
Source File: test_doublyrobust.py    From causallib with Apache License 2.0
def ensure_many_models(self):
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
        from sklearn.neural_network import MLPClassifier
        from sklearn.neighbors import KNeighborsClassifier

        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        data = self.create_uninformative_ox_dataset()
        for propensity_learner in [GradientBoostingClassifier(n_estimators=10),
                                   RandomForestClassifier(n_estimators=100),
                                   MLPClassifier(hidden_layer_sizes=(5,)),
                                   KNeighborsClassifier(n_neighbors=20)]:
            weight_model = IPW(propensity_learner)
            propensity_learner_name = str(propensity_learner).split("(", maxsplit=1)[0]
            for outcome_learner in [GradientBoostingRegressor(n_estimators=10), RandomForestRegressor(n_estimators=10),
                                    MLPRegressor(hidden_layer_sizes=(5,)),
                                    ElasticNet(), RANSACRegressor(), HuberRegressor(), PassiveAggressiveRegressor(),
                                    KNeighborsRegressor(), SVR(), LinearSVR()]:
                outcome_learner_name = str(outcome_learner).split("(", maxsplit=1)[0]
                outcome_model = Standardization(outcome_learner)

                with self.subTest("Test fit & predict using {} & {}".format(propensity_learner_name,
                                                                            outcome_learner_name)):
                    model = self.estimator.__class__(outcome_model, weight_model)
                    model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)
                    model.estimate_individual_outcome(data["X"], data["a"])
                    self.assertTrue(True)  # Fit did not crash 
Example #25
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_no_valid_data():
    def is_data_valid(X, y):
        return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_trials=5)

    msg = ("RANSAC could not find a valid consensus set")
    assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
    assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
    assert_equal(ransac_estimator.n_skips_invalid_data_, 5)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 0) 
Example #26
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_no_valid_model():
    def is_model_valid(estimator, X, y):
        return False

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_model_valid=is_model_valid,
                                       max_trials=5)

    msg = ("RANSAC could not find a valid consensus set")
    assert_raises_regexp(ValueError, msg, ransac_estimator.fit, X, y)
    assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
    assert_equal(ransac_estimator.n_skips_invalid_data_, 0)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 5) 
Example #27
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_sparse_coo():
    X_sparse = sparse.coo_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) 
Example #28
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_sparse_csr():
    X_sparse = sparse.csr_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) 
Example #29
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_sparse_csc():
    X_sparse = sparse.csc_matrix(X)

    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_estimator.fit(X_sparse, y)

    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False

    assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) 
Example #30
Source File: test_ransac.py    From twitter-stock-recommendation with MIT License
def test_ransac_none_estimator():

    base_estimator = LinearRegression()

    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=5, random_state=0)
    ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)

    ransac_estimator.fit(X, y)
    ransac_none_estimator.fit(X, y)

    assert_array_almost_equal(ransac_estimator.predict(X),
                              ransac_none_estimator.predict(X))