# Python scipy.stats.chi2.ppf() Examples

The following code examples show how to use scipy.stats.chi2.ppf(), the percent-point function (inverse CDF) of the chi-square distribution. They are taken from open source Python projects.
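Before the project examples, here is a minimal sketch of what `chi2.ppf` computes: it inverts `chi2.cdf`, returning the value below which a given probability mass falls.

```python
from scipy.stats import chi2

# ppf is the inverse of cdf: the x such that P(X <= x) = q
critical = chi2.ppf(0.95, df=3)   # 95% critical value, 3 degrees of freedom
print(critical)                   # ~7.815
print(chi2.cdf(critical, df=3))   # ~0.95 (round trip back through the CDF)
```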

Example 1
```python
def _ci_limits_var(self, var):
    """
    Used to determine the confidence intervals for the variance.
    It calls test_var and when called by an optimizer,
    finds the value of sig2_0 that is chi2.ppf(significance-level)

    Parameters
    ----------
    var : float
        Hypothesized value of the variance

    Returns
    -------
    diff : float
        The difference between the log likelihood ratio at var and a
        pre-specified value.
    """
    return self.test_var(var)[0] - self.r0
```
Example 2
 Project: lsfm   Author: menpo   File: visualize.py    BSD 3-Clause "New" or "Revised" License 6 votes
```python
def visualize_pruning(w_norm, n_retained,
                      title='Initial model weights vs theoretical for pruning'):
    fig, ax1 = plt.subplots()
    ax1.set_title(title)
    ax1.hist(w_norm, density=True, bins=200, alpha=0.6, histtype='stepfilled',
             range=[0, n_retained * 5])
    ax1.axvline(x=n_retained, linewidth=1, color='r')
    ax1.set_ylabel('PDF', color='b')

    ax2 = ax1.twinx()
    ax2.set_ylabel('Survival Function', color='r')

    ax1.set_xlabel('w_norm')

    x = np.linspace(chi2.ppf(0.001, n_retained),
                    chi2.ppf(0.999, n_retained), 100)
    ax2.plot(x, chi2.sf(x, n_retained),
             'g-', lw=1, alpha=0.6, label='chi2 sf')
    ax1.plot(x, chi2.pdf(x, n_retained),
             'r-', lw=1, alpha=0.6, label='chi2 pdf')
```
Example 3
```python
def poisson_interval(k, sigma=1):
    """
    Use chi-squared info to get the poisson interval.

    Given a number of observed events, which range of observed events would
    have been just as likely given a particular interval?

    Based off https://stackoverflow.com/questions/14813530/poisson-confidence-interval-with-numpy
    """
    gauss = norm(0, 1).pdf
    a = 1 - quad(gauss, -sigma, sigma, limit=1000)[0]
    low, high = (chi2.ppf(a / 2, 2 * k) / 2, chi2.ppf(1 - a / 2, 2 * k + 2) / 2)
    if k == 0:
        low = 0.0

    return low, high
```
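As a quick illustration, the helper above could be called like this once `norm`, `quad` and `chi2` are in scope (a minimal sketch; the observed count of 7 is made up):

```python
from scipy.stats import chi2, norm
from scipy.integrate import quad

# 1-sigma (~68.3%) chi-square-based interval for an observed count of 7 events
low, high = poisson_interval(7, sigma=1)
print(low, high)
```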
Example 4
```python
def _h_getMahalanobisRobust(dat, critical_alpha=0.01, good_rows=np.zeros(0)):
    '''Calculate the Mahalanobis distance from the sample vector.'''
    if good_rows.size == 0:
        good_rows = np.any(~np.isnan(dat), axis=1)

    try:
        dat2fit = dat[good_rows]
        assert not np.any(np.isnan(dat2fit))

        robust_cov = MinCovDet().fit(dat2fit)
        mahalanobis_dist = np.sqrt(robust_cov.mahalanobis(dat))
    except ValueError:
        # This step will fail if the covariance matrix is singular. This happens
        # if the data is not a unimodal symmetric distribution, for example when
        # there are too many small noisy particles. In that case take the safe
        # option and return zeros for the Mahalanobis distance.
        mahalanobis_dist = np.zeros(dat.shape[0])

    # Critical value of the Mahalanobis distance using the chi-square distribution
    # https://en.wikiversity.org/wiki/Mahalanobis%27_distance
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chi2.html
    maha_lim = chi2.ppf(1 - critical_alpha, dat.shape[1])
    outliers = mahalanobis_dist > maha_lim

    return mahalanobis_dist, outliers, maha_lim
```
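The same chi-square cutoff idea in a self-contained sketch: under multivariate normality the squared Mahalanobis distance follows a chi-square distribution with p degrees of freedom, so a quantile from `chi2.ppf` serves as an outlier threshold. The toy data below is made up.

```python
import numpy as np
from scipy.stats import chi2

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 3))                    # toy data, 3 features
mu = X.mean(axis=0)
cov = np.cov(X, rowvar=False)

# Row-wise squared Mahalanobis distance (x - mu)^T Sigma^-1 (x - mu)
diff = X - mu
d2 = np.einsum('ij,jk,ik->i', diff, np.linalg.inv(cov), diff)

# Compare the squared distance against the chi-square quantile
cutoff = chi2.ppf(0.99, df=X.shape[1])
outliers = d2 > cutoff
print(outliers.sum(), "points flagged as outliers")
```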
Example 5
 Project: Splunking-Crime   Author: nccgroup   File: descriptive.py    GNU Affero General Public License v3.0 6 votes
```python
def _ci_limits_var(self, var):
    """
    Used to determine the confidence intervals for the variance.
    It calls test_var and when called by an optimizer,
    finds the value of sig2_0 that is chi2.ppf(significance-level)

    Parameters
    ----------
    var : float
        Hypothesized value of the variance

    Returns
    -------
    diff : float
        The difference between the log likelihood ratio at var and a
        pre-specified value.
    """
    return self.test_var(var)[0] - self.r0
```
Example 6
```python
def ci_corr(self, sig=.05, upper_bound=None, lower_bound=None):
    """
    Returns the confidence intervals for the correlation coefficient

    Parameters
    ----------
    sig : float
        The significance level.  Default is .05

    upper_bound : float
        Maximum value the upper confidence limit can be.
        Default is 99% confidence limit assuming normality.

    lower_bound : float
        Minimum value the lower confidence limit can be.
        Default is 99% confidence limit assuming normality.

    Returns
    -------
    interval : tuple
        Confidence interval for the correlation

    """
    endog = self.endog
    nobs = self.nobs
    self.r0 = chi2.ppf(1 - sig, 1)
    point_est = np.corrcoef(endog[:, 0], endog[:, 1])[0, 1]
    if upper_bound is None:
        upper_bound = min(.999, point_est +
                          2.5 * ((1. - point_est ** 2.) /
                                 (nobs - 2.)) ** .5)

    if lower_bound is None:
        lower_bound = max(-.999, point_est -
                          2.5 * np.sqrt((1. - point_est ** 2.) /
                                        (nobs - 2.)))

    llim = optimize.brenth(self._ci_limits_corr, lower_bound, point_est)
    ulim = optimize.brenth(self._ci_limits_corr, point_est, upper_bound)
    return llim, ulim
```
Example 7
 Project: statistics_introduction   Author: dhwang99   File: chi_square_test.py    GNU General Public License v3.0 5 votes
```python
def chi_for_war():
    alpha = 0.05
    years = np.linspace(0, 4, 5)
    wars = np.array([223, 142, 48, 15, 4])
    wars_sum = wars.sum()

    # Poisson parameter estimate: lambda_hat = 1/n * sum(Xi)
    lambda_hat = 1. / wars_sum * np.dot(wars, years)

    # Observed frequencies (merge the last bin into the previous one)
    fis = np.array(wars)
    fis[-2] += fis[-1]

    p_of_years = poisson.pmf(years, lambda_hat)
    npi = p_of_years * wars_sum
    npi[-2] += npi[-1]

    stats = np.power(fis - npi, 2) / npi
    chisq = stats[:-1].sum()

    # Chi-square computation: lambda_hat is estimated from the data, so df loses 1;
    # the last bin (n*pi < 5) was merged into the previous one, so df loses one more.
    df = 5 - 1 - 1 - 1
    alpha_ppf = chi2.ppf(1 - alpha, df=df)
    p = 1 - chi2.cdf(chisq, df=df)
    print("Detail  : chisq: %.4f; p: %.4f; alpha: %.4f; alpha_ppf: %.4f" % (chisq, p, alpha, alpha_ppf))

    # The same test computed directly with scipy.stats.chisquare
    f_obs = fis[:-1]
    f_exp = npi[:-1]
    # lambda_hat is estimated, so reduce the degrees of freedom by 1
    chisq, p = chisquare(f_obs, f_exp, ddof=1)
    print("Directed: chisq: %.4f; p: %.4f; alpha: %.4f; alpha_ppf: %.4f" % (chisq, p, alpha, alpha_ppf))
```
Example 8
```python
def cov_ellipse(cov, q=None, nsig=2):
    """
    Parameters
    ----------
    cov : (2, 2) array
        Covariance matrix.
    q : float, optional
        Confidence level, should be in (0, 1)
    nsig : int, optional
        Confidence level in units of standard deviations.
        E.g. 1 stands for 68.3% and 2 stands for 95.4%.

    Returns
    -------
    width, height, rotation :
        The lengths of the two axes and the rotation angle in degrees
        for the ellipse.
    """

    if q is not None:
        q = np.asarray(q)
    elif nsig is not None:
        q = 2 * norm.cdf(nsig) - 1
    else:
        raise ValueError('One of `q` and `nsig` should be specified.')
    r2 = chi2.ppf(q, 2)

    val, vec = np.linalg.eigh(cov)
    width, height = 2 * np.sqrt(val[:, None] * r2)
    rotation = np.degrees(np.arctan2(*vec[::-1, 0]))

    return width, height, rotation
```
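A hedged usage sketch for the function above, drawing the resulting 95% ellipse with matplotlib's Ellipse patch; it assumes `cov_ellipse` and its numpy/scipy imports are in scope, and the covariance matrix is a made-up example.

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse

cov = np.array([[2.0, 0.6], [0.6, 1.0]])            # made-up 2x2 covariance
width, height, rotation = cov_ellipse(cov, q=0.95)

fig, ax = plt.subplots()
ax.add_patch(Ellipse(xy=(0, 0), width=float(width), height=float(height),
                     angle=rotation, fill=False, color='r'))
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
plt.show()
```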
Example 9
 Project: webviz-subsurface   Author: equinor   File: _history_match.py    GNU General Public License v3.0 5 votes
```python
def _get_unsorted_edges():
    """P10 - P90 unsorted edge coordinates"""

    retval = {"low": chi2.ppf(0.1, 1), "high": chi2.ppf(0.9, 1)}

    return retval
```
Example 10
```python
def generate_emissions(theta, data):
    def _eigsorted(cov):
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]

    xmin = np.inf; xmax = -np.inf
    ymin = np.inf; ymax = -np.inf
    fig, ax = plt.subplots()
    for i in range(len(data)):
        ax.plot(data[i][:, 0], data[i][:, 1], 'k.')
        xmin = np.min([xmin, np.min(data[i][:, 0])])
        xmax = np.max([xmax, np.max(data[i][:, 0])])
        ymin = np.min([ymin, np.min(data[i][:, 1])])
        ymax = np.max([ymax, np.max(data[i][:, 1])])

    vol = [0.25, 0.5, 0.75, 0.95, 0.99]

    ell = []
    for i in range(len(theta)):
        pos = theta[i].mean
        vals, vecs = _eigsorted(theta[i].var)
        th = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
        for v in vol:
            width, height = 2.0 * np.sqrt(chi2.ppf(v, 2)) * np.sqrt(vals)
            ell.append(Ellipse(xy=pos, width=width, height=height, angle=th))
    for i, e in enumerate(ell):
        ax.add_artist(e)
        e.set_facecolor(my_color_map(i))
        e.set_alpha(0.5)

    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    plt.show()
```
Example 11
 Project: MLSS   Author: NICTA   File: tututils.py    GNU General Public License v2.0 5 votes
```python
def plot_2d_GMMs(X, labels, means, covs, percentcontour=0.66, npoints=30):
    """
    Given an observation array, a label vector (integer values), and GMM mean
    and covariance parameters, plot the clusters and parameters.
    """

    clabels = set(labels)
    K = len(clabels)

    if len(means) != len(covs) != K:
        raise ValueError("Expecting the number of unique labels, means and "
                         "covariances to be the same!")

    phi = np.linspace(-np.pi, np.pi, npoints)

    circle = np.array([np.sin(phi), np.cos(phi)]).T

    figure(figsize=(10, 10))
    gca()

    colors = cm.hsv(np.arange(K) / float(K))
    for k, col in zip(clabels, colors):

        # points
        my_members = labels == k
        scatter(X[my_members, 0], X[my_members, 1], c=col, marker='o', s=20)

        # means
        cluster_center = means[k, :]
        scatter(cluster_center[0], cluster_center[1], c=col, marker='o', s=200)

        # covariance
        L = la.cholesky(np.array(covs[k]) * chi2.ppf(percentcontour, [3])
                        + 1e-5 * np.eye(covs[k].shape[0]))
        covpoints = circle.dot(L) + means[k, :]
        plot(covpoints[:, 0], covpoints[:, 1], color=col, linewidth=3)

    axis('tight')
    axis('equal')
    title('Clusters')
```
Example 12
 Project: pyFTS   Author: PYFTS   File: ResidualAnalysis.py    GNU General Public License v3.0 5 votes
```python
def ljung_box_test(residuals, lags=[1, 2, 3], alpha=0.5):
    from statsmodels.stats.diagnostic import acorr_ljungbox
    from scipy.stats import chi2

    stat, pval = acorr_ljungbox(residuals, lags=lags)

    rows = []

    for ct, Q in enumerate(stat):
        lag = ct + 1
        p_value = 1 - chi2.cdf(Q, df=lag)
        critical_value = chi2.ppf(1 - alpha, df=lag)
        rows.append([lag, Q, p_value, critical_value,
                     'H0 rejected' if Q > critical_value else 'H0 accepted'])

    return pd.DataFrame(rows, columns=['Lag', 'Statistic', 'p-Value', 'Critical Value', 'Result'])
```
Example 13
```python
def initialProminceZ0(self):
    """ Generate initial item prominence based on the topic weights and topic prominence.

    """

    self.topicsProminence = [np.round(i, decimals=2) for i in self.topicsProminence / np.sum(self.topicsProminence)]
    counts = dict(zip(self.topics, [len(np.where(self.ItemsClass == i)[0]) for i, c in enumerate(self.topics)]))
    items = len(self.ItemsClass)
    population = self.topics

    # Chi-square distribution with two degrees of freedom. Other power-law distributions can be used.
    df = 2
    mean, var, skew, kurt = chi2.stats(df, moments='mvsk')
    x = np.linspace(chi2.ppf(0.01, df), chi2.ppf(0.99, df), items)
    rv = chi2(df)

    Z = {}
    for c in self.topics:
        Z.update({c: []})

    # Assign topic to z prominence without replacement
    for i in rv.pdf(x):
        c = selectClassFromDistribution(population, self.topicsProminence)
        while counts[c] <= 0:
            c = selectClassFromDistribution(population, self.topicsProminence)
        counts[c] -= 1
        Z[c].append(i / 0.5)

    self.ItemsInitialProminence = np.zeros(self.totalNumberOfItems)
    for c, category in enumerate(self.topics):
        indeces = np.where(self.ItemsClass == c)[0]
        self.ItemsInitialProminence[indeces] = Z[category]
```
Example 14
 Project: Splunking-Crime   Author: nccgroup   File: descriptive.py    GNU Affero General Public License v3.0 5 votes
```python
def ci_corr(self, sig=.05, upper_bound=None, lower_bound=None):
    """
    Returns the confidence intervals for the correlation coefficient

    Parameters
    ----------
    sig : float
        The significance level.  Default is .05

    upper_bound : float
        Maximum value the upper confidence limit can be.
        Default is 99% confidence limit assuming normality.

    lower_bound : float
        Minimum value the lower confidence limit can be.
        Default is 99% confidence limit assuming normality.

    Returns
    -------
    interval : tuple
        Confidence interval for the correlation

    """
    endog = self.endog
    nobs = self.nobs
    self.r0 = chi2.ppf(1 - sig, 1)
    point_est = np.corrcoef(endog[:, 0], endog[:, 1])[0, 1]
    if upper_bound is None:
        upper_bound = min(.999, point_est +
                          2.5 * ((1. - point_est ** 2.) /
                                 (nobs - 2.)) ** .5)

    if lower_bound is None:
        lower_bound = max(-.999, point_est -
                          2.5 * np.sqrt((1. - point_est ** 2.) /
                                        (nobs - 2.)))

    llim = optimize.brenth(self._ci_limits_corr, lower_bound, point_est)
    ulim = optimize.brenth(self._ci_limits_corr, point_est, upper_bound)
    return llim, ulim
```
Example 15
```python
def ci_beta(self, param_num, beta_high, beta_low, sig=.05):
    """
    Returns the confidence interval for a regression
    parameter in the AFT model.

    Parameters
    ----------
    param_num : int
        Parameter number of interest

    beta_high : float
        Upper bound for the confidence interval

    beta_low : float
        Lower bound for the confidence interval

    sig : float, optional
        Significance level.  Default is .05

    Notes
    -----
    If the function returns the error 'f(a) and f(b) must have different
    signs', consider widening the search area by adjusting beta_low and
    beta_high.

    Also note that this process is computationally intensive.  There
    are 4 levels of optimization/solving.  From outer to inner:

    1) Solving so that llr - critical value = 0
    2) Maximizing over nuisance parameters
    3) Using EM at each value of nuisance parameters
    4) Using the _modified_Newton optimizer at each iteration
       of the EM algorithm.

    Also, for very unlikely nuisance parameters, it is possible for
    the EM algorithm to not converge.  This is not an indicator
    that the solver did not find the correct solution.  It just means
    that for a specific iteration of the nuisance parameters, the optimizer
    was unable to converge.

    If the user desires to verify the success of the optimization,
    it is recommended to test the limits using test_beta.
    """
    params = self.params()
    self.r0 = chi2.ppf(1 - sig, 1)
    ll = optimize.brentq(self._ci_limits_beta, beta_low,
                         params[param_num], (param_num))
    ul = optimize.brentq(self._ci_limits_beta,
                         params[param_num], beta_high, (param_num))
    return ll, ul
```
Example 16
```python
def conf_int_el(self, param_num, upper_bound=None,
                lower_bound=None, sig=.05, method='nm',
                stochastic_exog=1):
    """
    Returns the confidence interval for a regression parameter when the
    regression is forced through the origin

    Parameters
    ----------
    param_num : int
        The parameter number to be tested.  Note this uses python
        indexing but the '0' parameter refers to the intercept term

    upper_bound : float
        The maximum value the upper confidence limit can be.  The
        closer this is to the confidence limit, the quicker the
        computation.  Default is .00001 confidence limit under normality

    lower_bound : float
        The minimum value the lower confidence limit can be.
        Default is .00001 confidence limit under normality

    sig : float, optional
        The significance level.  Default is .05

    method : str, optional
        Algorithm to optimize over the nuisance params.  Can be 'nm' or
        'powell'.  Default is 'nm'.

    Returns
    -------
    ci : tuple
        The confidence interval for the parameter 'param_num'
    """
    r0 = chi2.ppf(1 - sig, 1)
    param_num = np.array([param_num])
    if upper_bound is None:
        upper_bound = (np.squeeze(self.model.fit().
                                  conf_int(.0001)[param_num])[1])
    if lower_bound is None:
        lower_bound = (np.squeeze(self.model.fit().conf_int(.00001)
                                  [param_num])[0])
    f = lambda b0: self.el_test(np.array([b0]), param_num,
                                method=method,
                                stochastic_exog=stochastic_exog)[0] - r0
    lowerl = optimize.brentq(f, lower_bound, self.params[param_num])
    upperl = optimize.brentq(f, self.params[param_num], upper_bound)
    return (lowerl, upperl)
```
Example 17
```python
def ci_var(self, lower_bound=None, upper_bound=None, sig=.05):
    """
    Returns the confidence interval for the variance.

    Parameters
    ----------
    lower_bound : float
        The minimum value the lower confidence interval can
        take. The p-value from test_var(lower_bound) must be lower
        than 1 - significance level. Default is .99 confidence
        limit assuming normality

    upper_bound : float
        The maximum value the upper confidence interval
        can take. The p-value from test_var(upper_bound) must be lower
        than 1 - significance level.  Default is .99 confidence
        limit assuming normality

    sig : float
        The significance level. Default is .05

    Returns
    -------
    interval : tuple
        Confidence interval for the variance

    Examples
    --------
    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> random_numbers = np.random.standard_normal(100)
    >>> el_analysis = sm.emplike.DescStat(random_numbers)
    >>> el_analysis.ci_var()
    (0.7539322567470305, 1.229998852496268)
    >>> el_analysis.ci_var(.5, 2)
    (0.7539322567469926, 1.2299988524962664)

    Notes
    -----
    If the function returns the error 'f(a) and f(b) must have
    different signs', consider lowering lower_bound and raising
    upper_bound.
    """
    endog = self.endog
    if upper_bound is None:
        upper_bound = ((self.nobs - 1) * endog.var()) / \
            (chi2.ppf(.0001, self.nobs - 1))
    if lower_bound is None:
        lower_bound = ((self.nobs - 1) * endog.var()) / \
            (chi2.ppf(.9999, self.nobs - 1))
    self.r0 = chi2.ppf(1 - sig, 1)
    llim = optimize.brentq(self._ci_limits_var, lower_bound, endog.var())
    ulim = optimize.brentq(self._ci_limits_var, endog.var(), upper_bound)
    return llim, ulim
```
Example 18
```python
def ci_skew(self, sig=.05, upper_bound=None, lower_bound=None):
    """
    Returns the confidence interval for skewness.

    Parameters
    ----------
    sig : float
        The significance level.  Default is .05

    upper_bound : float
        Maximum value of skewness the upper limit can be.
        Default is .99 confidence limit assuming normality.

    lower_bound : float
        Minimum value of skewness the lower limit can be.
        Default is .99 confidence level assuming normality.

    Returns
    -------
    interval : tuple
        Confidence interval for the skewness

    Notes
    -----
    If the function returns the error 'f(a) and f(b) must have different
    signs', consider expanding the lower and upper bounds.
    """
    nobs = self.nobs
    endog = self.endog
    if upper_bound is None:
        upper_bound = skew(endog) + \
            2.5 * ((6. * nobs * (nobs - 1.)) /
                   ((nobs - 2.) * (nobs + 1.) *
                    (nobs + 3.))) ** .5
    if lower_bound is None:
        lower_bound = skew(endog) - \
            2.5 * ((6. * nobs * (nobs - 1.)) /
                   ((nobs - 2.) * (nobs + 1.) *
                    (nobs + 3.))) ** .5
    self.r0 = chi2.ppf(1 - sig, 1)
    llim = optimize.brentq(self._ci_limits_skew, lower_bound, skew(endog))
    ulim = optimize.brentq(self._ci_limits_skew, skew(endog), upper_bound)
    return llim, ulim
```
Example 19
```python
def generate_error_ellipse_points(
        mean: np.ndarray,
        cov: np.ndarray,
        cov_cholesky: np.ndarray = None,
        acceptance: float = 0.99,
        num_points: int = 30,
        format: str = 'matplotlib') -> np.ndarray:
    '''
    Generate points on a level set of a bivariate Gaussian PDF, usu. for
    plotting error ellipses.

    Args:
        mean: the distribution's mean.
        cov: 2x2 array, the distribution's covariance.
        cov_cholesky: optionally precomputed cholesky factorization, as output
            from `np.linalg.cholesky(cov)`.
        acceptance: probability mass that ellipse should contain around mean.
        num_points: number of points to sample on ellipse. This is a measure of
            plotting resolution.
        format: use 'matplotlib' for points output as a `float64` numpy array
            with rows running over x and y physical dimensions, columns over
            points. Use 'opencv' for points output as a `uint32` numpy array
            with rows running over points and columns running over x and y
            pixel dimensions.

    Returns:
        Shape (2, num_points) array of points for plotting.
    '''
    assert mean.shape == (2,), 'Incorrect mean shape!'
    assert cov.shape == (2, 2), 'Incorrect cov shape!'
    assert acceptance >= 0.0 and acceptance < 1.0, \
        'acceptance rate must be in [0.0, 1.0)!'

    # Sample points on unit circle.
    dtheta = 2.0 * np.pi / num_points
    thetas = np.linspace(0, 2.0 * np.pi - dtheta, num_points)
    if cov_cholesky is None:
        cov_cholesky = np.linalg.cholesky(cov)
    acceptance_factor = np.sqrt(chi2.ppf(acceptance, df=2))
    cov_cholesky = acceptance_factor * cov_cholesky
    points = np.zeros((2, num_points))
    points[0, :] = np.cos(thetas)
    points[1, :] = np.sin(thetas)

    # Warp circle points into ellipse.
    for i in range(num_points):
        points[:, i] = cov_cholesky.dot(points[:, i]) + mean

    if format == 'matplotlib':
        return points
    elif format == 'opencv':
        return points.T.astype(np.int32)
    else:
        assert False, 'Format not recognized!'
```
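A short usage sketch for the function above (the mean and covariance values are made up, and `generate_error_ellipse_points` is assumed to be importable from the surrounding module):

```python
import numpy as np
import matplotlib.pyplot as plt

mean = np.array([1.0, 2.0])
cov = np.array([[1.0, 0.3], [0.3, 0.5]])

# 100 points on the 95% error ellipse, in matplotlib (2, num_points) layout
pts = generate_error_ellipse_points(mean, cov, acceptance=0.95, num_points=100)

plt.plot(pts[0, :], pts[1, :], 'b-')
plt.axis('equal')
plt.show()
```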
Example 20
 Project: Splunking-Crime   Author: nccgroup   File: aft_el.py    GNU Affero General Public License v3.0 4 votes
```python
def ci_beta(self, param_num, beta_high, beta_low, sig=.05):
    """
    Returns the confidence interval for a regression
    parameter in the AFT model.

    Parameters
    ----------
    param_num : int
        Parameter number of interest

    beta_high : float
        Upper bound for the confidence interval

    beta_low : float
        Lower bound for the confidence interval

    sig : float, optional
        Significance level.  Default is .05

    Notes
    -----
    If the function returns the error 'f(a) and f(b) must have different
    signs', consider widening the search area by adjusting beta_low and
    beta_high.

    Also note that this process is computationally intensive.  There
    are 4 levels of optimization/solving.  From outer to inner:

    1) Solving so that llr - critical value = 0
    2) Maximizing over nuisance parameters
    3) Using EM at each value of nuisance parameters
    4) Using the _modified_Newton optimizer at each iteration
       of the EM algorithm.

    Also, for very unlikely nuisance parameters, it is possible for
    the EM algorithm to not converge.  This is not an indicator
    that the solver did not find the correct solution.  It just means
    that for a specific iteration of the nuisance parameters, the optimizer
    was unable to converge.

    If the user desires to verify the success of the optimization,
    it is recommended to test the limits using test_beta.
    """
    params = self.params()
    self.r0 = chi2.ppf(1 - sig, 1)
    ll = optimize.brentq(self._ci_limits_beta, beta_low,
                         params[param_num], (param_num))
    ul = optimize.brentq(self._ci_limits_beta,
                         params[param_num], beta_high, (param_num))
    return ll, ul
```
Example 21
 Project: Splunking-Crime   Author: nccgroup   File: originregress.py    GNU Affero General Public License v3.0 4 votes
```python
def conf_int_el(self, param_num, upper_bound=None,
                lower_bound=None, sig=.05, method='nm',
                stochastic_exog=1):
    """
    Returns the confidence interval for a regression parameter when the
    regression is forced through the origin

    Parameters
    ----------
    param_num : int
        The parameter number to be tested.  Note this uses python
        indexing but the '0' parameter refers to the intercept term

    upper_bound : float
        The maximum value the upper confidence limit can be.  The
        closer this is to the confidence limit, the quicker the
        computation.  Default is .00001 confidence limit under normality

    lower_bound : float
        The minimum value the lower confidence limit can be.
        Default is .00001 confidence limit under normality

    sig : float, optional
        The significance level.  Default is .05

    method : str, optional
        Algorithm to optimize over the nuisance params.  Can be 'nm' or
        'powell'.  Default is 'nm'.

    Returns
    -------
    ci : tuple
        The confidence interval for the parameter 'param_num'
    """
    r0 = chi2.ppf(1 - sig, 1)
    param_num = np.array([param_num])
    if upper_bound is None:
        upper_bound = (np.squeeze(self.model.fit().
                                  conf_int(.0001)[param_num])[1])
    if lower_bound is None:
        lower_bound = (np.squeeze(self.model.fit().conf_int(.00001)
                                  [param_num])[0])
    f = lambda b0: self.el_test(np.array([b0]), param_num,
                                method=method,
                                stochastic_exog=stochastic_exog)[0] - r0
    lowerl = optimize.brentq(f, lower_bound, self.params[param_num])
    upperl = optimize.brentq(f, self.params[param_num], upper_bound)
    return (lowerl, upperl)
```
Example 22
 Project: Splunking-Crime   Author: nccgroup   File: descriptive.py    GNU Affero General Public License v3.0 4 votes
```python
def ci_var(self, lower_bound=None, upper_bound=None, sig=.05):
    """
    Returns the confidence interval for the variance.

    Parameters
    ----------
    lower_bound : float
        The minimum value the lower confidence interval can
        take. The p-value from test_var(lower_bound) must be lower
        than 1 - significance level. Default is .99 confidence
        limit assuming normality

    upper_bound : float
        The maximum value the upper confidence interval
        can take. The p-value from test_var(upper_bound) must be lower
        than 1 - significance level.  Default is .99 confidence
        limit assuming normality

    sig : float
        The significance level. Default is .05

    Returns
    -------
    interval : tuple
        Confidence interval for the variance

    Examples
    --------
    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> random_numbers = np.random.standard_normal(100)
    >>> el_analysis = sm.emplike.DescStat(random_numbers)
    >>> el_analysis.ci_var()
    (0.7539322567470305, 1.229998852496268)
    >>> el_analysis.ci_var(.5, 2)
    (0.7539322567469926, 1.2299988524962664)

    Notes
    -----
    If the function returns the error 'f(a) and f(b) must have
    different signs', consider lowering lower_bound and raising
    upper_bound.
    """
    endog = self.endog
    if upper_bound is None:
        upper_bound = ((self.nobs - 1) * endog.var()) / \
            (chi2.ppf(.0001, self.nobs - 1))
    if lower_bound is None:
        lower_bound = ((self.nobs - 1) * endog.var()) / \
            (chi2.ppf(.9999, self.nobs - 1))
    self.r0 = chi2.ppf(1 - sig, 1)
    llim = optimize.brentq(self._ci_limits_var, lower_bound, endog.var())
    ulim = optimize.brentq(self._ci_limits_var, endog.var(), upper_bound)
    return llim, ulim
```
Example 23
 Project: Splunking-Crime   Author: nccgroup   File: descriptive.py    GNU Affero General Public License v3.0 4 votes
```python
def ci_skew(self, sig=.05, upper_bound=None, lower_bound=None):
    """
    Returns the confidence interval for skewness.

    Parameters
    ----------
    sig : float
        The significance level.  Default is .05

    upper_bound : float
        Maximum value of skewness the upper limit can be.
        Default is .99 confidence limit assuming normality.

    lower_bound : float
        Minimum value of skewness the lower limit can be.
        Default is .99 confidence level assuming normality.

    Returns
    -------
    interval : tuple
        Confidence interval for the skewness

    Notes
    -----
    If the function returns the error 'f(a) and f(b) must have different
    signs', consider expanding the lower and upper bounds.
    """
    nobs = self.nobs
    endog = self.endog
    if upper_bound is None:
        upper_bound = skew(endog) + \
            2.5 * ((6. * nobs * (nobs - 1.)) /
                   ((nobs - 2.) * (nobs + 1.) *
                    (nobs + 3.))) ** .5
    if lower_bound is None:
        lower_bound = skew(endog) - \
            2.5 * ((6. * nobs * (nobs - 1.)) /
                   ((nobs - 2.) * (nobs + 1.) *
                    (nobs + 3.))) ** .5
    self.r0 = chi2.ppf(1 - sig, 1)
    llim = optimize.brentq(self._ci_limits_skew, lower_bound, skew(endog))
    ulim = optimize.brentq(self._ci_limits_skew, skew(endog), upper_bound)
    return llim, ulim
```