Python numpy.testing.assert_approx_equal() Examples

The following are 3 code examples of numpy.testing.assert_approx_equal(), drawn from open-source projects. The source file, project, and license are listed above each example. You may also want to check out the other functions and classes available in the numpy.testing module.
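Before the project examples, here is a minimal standalone sketch of the basic call pattern; the numeric values are illustrative only and are not taken from the examples below. assert_approx_equal checks that two scalars agree to a given number of significant digits and raises AssertionError otherwise.

import numpy.testing as nptest

# Passes: 1.0000001 and 1.0000002 agree to 6 significant digits
nptest.assert_approx_equal(1.0000001, 1.0000002, significant=6)

# Raises AssertionError: these values already differ in the 5th significant digit
try:
    nptest.assert_approx_equal(1.0001, 1.0002, significant=6)
except AssertionError as e:
    print(e)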
Example #1
Source File: non_convex_test.py    From osqp-python with Apache License 2.0
def test_non_convex_big_sigma(self):
    # Setup workspace with a new (large) sigma
    opts = {'verbose': False, 'sigma': 5}
    self.model.setup(P=self.P, q=self.q, A=self.A, l=self.l, u=self.u, **opts)

    # Solve problem
    res = self.model.solve()

    # The problem should be flagged as non-convex and the objective reported as NaN
    self.assertEqual(res.info.status_val, constant('OSQP_NON_CVX'))
    nptest.assert_approx_equal(res.info.obj_val, np.nan)
Example #2
Source File: non_convex_test.py    From osqp-python with Apache License 2.0
def test_nan(self):
    # The OSQP_NAN constant should be reported as NaN
    nptest.assert_approx_equal(constant('OSQP_NAN'), np.nan)
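Both examples above compare a result against np.nan. As far as I can tell from numpy's behavior, assert_approx_equal treats two NaN inputs as equal, while a NaN compared against a finite number fails. A quick standalone sketch (values are illustrative only):

import numpy as np
import numpy.testing as nptest

# Passes: both values are NaN, which assert_approx_equal treats as equal
nptest.assert_approx_equal(np.nan, np.nan)

# Raises AssertionError: a NaN is never approximately equal to a finite value
try:
    nptest.assert_approx_equal(1.0, np.nan)
except AssertionError as e:
    print(e)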
Example #3
Source File: test_ml_toolkit.py    From OpenOA with BSD 3-Clause "New" or "Revised" License
def test_algorithms(self):
    # Test hyperparameter optimization and fitting results for several algorithms.
    # Hyperparameter optimization is based on a randomized grid search, so the pass criteria are not stringent.
    np.random.seed(42)

    # Expected correlation and maximum RMSE for each algorithm's fit
    required_metrics = {'etr': (0.999852, 130.0),
                        'gbm': (0.999999, 30.0),
                        'gam': (0.983174, 1330.0)}

    # Loop through algorithms
    for a in required_metrics.keys():
        ml = MachineLearningSetup(a)  # Set up the ML object

        # Perform the randomized grid search only once for efficiency
        ml.hyper_optimize(self.X, self.y, n_iter_search=1, report=False, cv=KFold(n_splits=2))

        # Predict power based on the fitted model
        y_pred = ml.random_search.predict(self.X)

        # Compute the performance metrics to test
        corr = np.corrcoef(self.y, y_pred)[0, 1]  # Correlation between predicted and actual power
        rmse = np.sqrt(mean_squared_error(self.y, y_pred))  # RMSE between predicted and actual power

        # Total power (in GW) should agree to 3 significant figures
        nptest.assert_approx_equal(self.y.sum() / 1e6, y_pred.sum() / 1e6, significant=3,
                                   err_msg="Sum of predicted and actual power for {} not close enough".format(a))

        # Test correlation of the model fit
        nptest.assert_approx_equal(corr, required_metrics[a][0], significant=4,
                                   err_msg="Correlation between {} features and response is wrong".format(a))

        # Test RMSE of the model fit
        self.assertLess(rmse, required_metrics[a][1], "RMSE of {} fit is too high".format(a))
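Example #3 combines the significant and err_msg arguments. Below is a minimal sketch of the same pattern outside the test class, with made-up power totals (actual_total_gw and predicted_total_gw are hypothetical names, not part of the OpenOA test suite):

import numpy.testing as nptest

actual_total_gw = 1.234     # hypothetical actual total power in GW
predicted_total_gw = 1.236  # hypothetical predicted total power in GW

# Passes: the totals agree to 3 significant figures
nptest.assert_approx_equal(actual_total_gw, predicted_total_gw, significant=3,
                           err_msg="Sum of predicted and actual power not close enough")

# With a stricter significant=4, this particular pair would raise AssertionError,
# and err_msg would be appended to the failure message.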