Python scipy.stats.norm.cdf() Examples
The following are 30 code examples of scipy.stats.norm.cdf(), collected from open-source projects. The originating project, source file, and license are listed above each example. You may also want to check out the other available functions and classes of the scipy.stats.norm module.
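Before the project examples, here is a minimal standalone sketch of the function itself. scipy.stats.norm.cdf(x, loc, scale) evaluates the cumulative distribution function of a normal distribution with mean loc and standard deviation scale (a standard normal by default) and broadcasts over array inputs; the inputs below are illustrative values, not taken from any of the projects.

import numpy as np
from scipy.stats import norm

# Standard normal CDF: P(Z <= 0) is exactly 0.5.
print(norm.cdf(0.0))                              # 0.5

# Broadcasting over an array of quantiles.
print(norm.cdf(np.array([-1.96, 0.0, 1.96])))     # approx [0.025, 0.5, 0.975]

# Non-standard normal via loc (mean) and scale (standard deviation).
print(norm.cdf(120.0, loc=100.0, scale=15.0))     # approx 0.909

# norm.ppf is the inverse of norm.cdf.
print(norm.ppf(norm.cdf(1.5)))                    # 1.5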
Example #1
Source File: test_likelihoods.py From revrand with Apache License 2.0

def test_binom():
    # Test we can at match a Binomial distribution from scipy
    p = 0.5
    n = 5
    dist = lk.Binomial()

    x = np.random.randint(low=0, high=n, size=(10,))

    p1 = binom.logpmf(x, p=p, n=n)
    p2 = dist.loglike(x, p, n)
    np.allclose(p1, p2)

    p1 = binom.cdf(x, p=p, n=n)
    p2 = dist.cdf(x, p, n)
    np.allclose(p1, p2)
Example #2
Source File: likelihoods.py From revrand with Apache License 2.0

def cdf(self, y, f):
    r"""
    Cumulative density function of the likelihood.

    Parameters
    ----------
    y: ndarray
        query quantiles, i.e.\ :math:`P(Y \leq y)`.
    f: ndarray
        latent function from the GLM prior
        (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`)

    Returns
    -------
    cdf: ndarray
        Cumulative density function evaluated at y.
    """
    mu = np.exp(f) if self.tranfcn == 'exp' else softplus(f)
    return poisson.cdf(y, mu=mu)
Example #3
Source File: util.py From nni with MIT License

def _poi(x, gp, y_max, xi):
    """
    Possibility Of Improvement (POI) utility function

    Parameters
    ----------
    x : numpy array
        parameters
    gp : GaussianProcessRegressor
    y_max : float
        maximum target value observed so far
    xi : float

    Returns
    -------
    float
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        mean, std = gp.predict(x, return_std=True)

    z = (mean - y_max - xi)/std
    return norm.cdf(z)
Example #4
Source File: likelihoods.py From revrand with Apache License 2.0

def cdf(self, y, f):
    r"""
    Cumulative density function of the likelihood.

    Parameters
    ----------
    y: ndarray
        query quantiles, i.e.\ :math:`P(Y \leq y)`.
    f: ndarray
        latent function from the GLM prior
        (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`)

    Returns
    -------
    cdf: ndarray
        Cumulative density function evaluated at y.
    """
    return bernoulli.cdf(y, expit(f))
Example #5
Source File: likelihoods.py From revrand with Apache License 2.0

def cdf(self, y, f, n):
    r"""
    Cumulative density function of the likelihood.

    Parameters
    ----------
    y: ndarray
        query quantiles, i.e.\ :math:`P(Y \leq y)`.
    f: ndarray
        latent function from the GLM prior
        (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`)
    n: ndarray
        the total number of observations

    Returns
    -------
    cdf: ndarray
        Cumulative density function evaluated at y.
    """
    return binom.cdf(y, n=n, p=expit(f))
Example #6
Source File: likelihoods.py From revrand with Apache License 2.0

def cdf(self, y, f, var):
    r"""
    Cumulative density function of the likelihood.

    Parameters
    ----------
    y: ndarray
        query quantiles, i.e.\ :math:`P(Y \leq y)`.
    f: ndarray
        latent function from the GLM prior
        (:math:`\mathbf{f} = \boldsymbol\Phi \mathbf{w}`)
    var: float, ndarray, optional
        The variance of the distribution, if not input, the initial value
        of variance is used.

    Returns
    -------
    cdf: ndarray
        Cumulative density function evaluated at y.
    """
    var = self._check_param(var)
    return norm.cdf(y, loc=f, scale=np.sqrt(var))
Example #7
Source File: lib_acquisition_function.py From nni with MIT License

def _expected_improvement(x, fun_prediction, fun_prediction_args,
                          x_bounds, x_types, samples_y_aggregation,
                          minimize_constraints_fun):
    # This is only for step-wise optimization
    x = lib_data.match_val_type(x, x_bounds, x_types)

    expected_improvement = sys.maxsize
    if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True):
        mu, sigma = fun_prediction(x, *fun_prediction_args)

        loss_optimum = min(samples_y_aggregation)
        scaling_factor = -1

        # In case sigma equals zero
        with numpy.errstate(divide="ignore"):
            Z = scaling_factor * (mu - loss_optimum) / sigma
            expected_improvement = scaling_factor * (mu - loss_optimum) * \
                norm.cdf(Z) + sigma * norm.pdf(Z)
            expected_improvement = 0.0 if sigma == 0.0 else expected_improvement

        # We want expected_improvement to be as large as possible
        # (i.e., as small as possible for minimize(...))
        expected_improvement = -1 * expected_improvement
    return expected_improvement
Example #8
Source File: util.py From nni with MIT License

def _ei(x, gp, y_max, xi):
    """
    Expected Improvement (EI) utility function

    Parameters
    ----------
    x : numpy array
        parameters
    gp : GaussianProcessRegressor
    y_max : float
        maximum target value observed so far
    xi : float

    Returns
    -------
    float
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        mean, std = gp.predict(x, return_std=True)

    z = (mean - y_max - xi)/std
    return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)
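As a rough illustration of what the two NNI utilities above compute (this sketch is not part of the NNI source), the closed form evaluated by _ei is (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z) with z = (mean - y_max - xi) / std, which equals the expectation of max(Y - y_max - xi, 0) for a normally distributed prediction Y. The mu, sigma, y_max, and xi values below are made-up assumptions used only to check the formula against a Monte Carlo estimate.

import numpy as np
from scipy.stats import norm

# Illustrative values (assumptions, not taken from the NNI examples above).
mu, sigma, y_max, xi = 1.2, 0.5, 1.0, 0.01

# Closed-form expected improvement, as computed by _ei above.
z = (mu - y_max - xi) / sigma
ei_closed = (mu - y_max - xi) * norm.cdf(z) + sigma * norm.pdf(z)

# Monte Carlo check: E[max(Y - y_max - xi, 0)] for Y ~ N(mu, sigma^2).
rng = np.random.default_rng(0)
samples = rng.normal(mu, sigma, size=1_000_000)
ei_mc = np.maximum(samples - y_max - xi, 0.0).mean()

print(ei_closed, ei_mc)  # the two estimates should agree to roughly 3 decimals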
Example #9
Source File: ScaledFScore.py From scattertext with Apache License 2.0

def _get_scaler_function(scaler_algo):
    scaler = None
    if scaler_algo == 'normcdf':
        scaler = lambda x: norm.cdf(x, x.mean(), x.std())
    elif scaler_algo == 'lognormcdf':
        scaler = lambda x: norm.cdf(np.log(x), np.log(x).mean(), np.log(x).std())
    elif scaler_algo == 'percentile':
        scaler = lambda x: rankdata(x).astype(np.float64) / len(x)
    elif scaler_algo == 'percentiledense':
        scaler = lambda x: rankdata(x, method='dense').astype(np.float64) / len(x)
    elif scaler_algo == 'ecdf':
        from statsmodels.distributions import ECDF
        scaler = lambda x: ECDF(x)
    elif scaler_algo == 'none':
        scaler = lambda x: x
    else:
        raise InvalidScalerException("Invalid scaler alogrithm. Must be either percentile or normcdf.")
    return scaler
Example #10
Source File: utils.py From finance_ml with MIT License

def get_gaussian_betsize(prob, num_classes=2):
    """Translate probability to bettingsize

    Params
    ------
    prob: array-like
    num_classes: int, default 2

    Returns
    -------
    array-like
    """
    if isinstance(prob, numbers.Number):
        if prob != 0 and prob != 1:
            signal = (prob - 1. / num_classes) / (prob * (1 - prob))
        else:
            signal = 2 * prob - 1
    else:
        signal = prob.copy()
        signal[prob == 1] = 1
        signal[prob == 0] = -1
        cond = (prob < 1) & (prob > 0)
        signal[cond] = (prob[cond] - 1. / num_classes) / (prob[cond] * (1 - prob[cond]))
    return 2 * norm.cdf(signal) - 1
Example #11
Source File: pc_subjective_model.py From sureal with Apache License 2.0

def neg_log_likelihood_function(v, alpha):
    # nllf(.) = - sum_i,j log(n_ij / alpha_ij) + alpha_ij * log phi (v_i - v_j) + alpha_ji * log phi (v_j - v_i)
    # note that if p = [1, 2, 3, 4] and M = 4, then
    # np.tile(p, (M, 1)).T creates patterns like
    # [
    #     [1, 1, 1, 1],
    #     [2, 2, 2, 2],
    #     [3, 3, 3, 3],
    #     [4, 4, 4, 4]
    # ]
    M = alpha.shape[0]
    epsilon = 1e-8 / M
    mtx = alpha * np.log(
        norm.cdf(np.tile(v, (M, 1)).T - np.tile(v, (M, 1))) + epsilon
    ) + alpha.T * np.log(
        norm.cdf(np.tile(v, (M, 1)) - np.tile(v, (M, 1)).T) + epsilon
    )
    return - np.sum(mtx)
Example #12
Source File: numpy_backend.py From pyhf with Apache License 2.0

def normal_cdf(self, x, mu=0, sigma=1):
    """
    The cumulative distribution function for the Normal distribution

    Example:

        >>> import pyhf
        >>> pyhf.set_backend("numpy")
        >>> pyhf.tensorlib.normal_cdf(0.8)
        0.7881446014166034
        >>> values = pyhf.tensorlib.astensor([0.8, 2.0])
        >>> pyhf.tensorlib.normal_cdf(values)
        array([0.7881446 , 0.97724987])

    Args:
        x (`tensor` or `float`): The observed value of the random variable to evaluate the CDF for
        mu (`tensor` or `float`): The mean of the Normal distribution
        sigma (`tensor` or `float`): The standard deviation of the Normal distribution

    Returns:
        NumPy float: The CDF
    """
    return norm.cdf(x, loc=mu, scale=sigma)
Example #13
Source File: ScaledFScoreSignificance.py From scattertext with Apache License 2.0

def get_p_vals(self, X):
    '''
    Imputes p-values from the Z-scores of `ScaledFScore` scores.  Assuming
    incorrectly that the scaled f-scores are normally distributed.

    Parameters
    ----------
    X : np.array
        Array of word counts, shape (N, 2) where N is the vocab size.
        X[:,0] is the positive class, while X[:,1] is the negative class.

    Returns
    -------
    np.array of p-values
    '''
    z_scores = ScaledFZScore(self.scaler_algo, self.beta).get_scores(X[:,0], X[:,1])
    return norm.cdf(z_scores)
Example #14
Source File: test_likelihoods.py From revrand with Apache License 2.0

def test_shapes():
    N = 100
    y = np.ones(N)
    f = np.ones(N) * 2

    assert_shape = lambda x: x.shape == (N,)
    assert_args = lambda out, args: \
        all([o.shape == a.shape if not np.isscalar(a) else np.isscalar(o)
             for o, a in zip(out, args)])

    for like, args in zip(likelihoods, likelihood_args):
        lobj = like()
        assert_shape(lobj.loglike(y, f, *args))
        assert_shape(lobj.Ey(f, *args))
        assert_shape(lobj.df(y, f, *args))
        assert_shape(lobj.cdf(y, f, *args))
        assert_args(lobj.dp(y, f, *args), args)
Example #15
Source File: test_likelihoods.py From revrand with Apache License 2.0

def test_bernoulli():
    # Test we can at match a Bernoulli distribution from scipy
    p = 0.5
    dist = lk.Bernoulli()

    x = np.array([0, 1])

    p1 = bernoulli.logpmf(x, p)
    p2 = dist.loglike(x, p)
    np.allclose(p1, p2)

    p1 = bernoulli.cdf(x, p)
    p2 = dist.cdf(x, p)
    np.allclose(p1, p2)
Example #16
Source File: pyglmnet.py From pyglmnet with MIT License

def _mu(distr, z, eta, fit_intercept):
    """The non-linearity (inverse link)."""
    if distr in ['softplus', 'gamma']:
        mu = np.log1p(np.exp(z))
    elif distr == 'poisson':
        mu = z.copy()
        beta0 = (1 - eta) * np.exp(eta) if fit_intercept else 0.
        mu[z > eta] = z[z > eta] * np.exp(eta) + beta0
        mu[z <= eta] = np.exp(z[z <= eta])
    elif distr == 'gaussian':
        mu = z
    elif distr == 'binomial':
        mu = expit(z)
    elif distr == 'probit':
        mu = norm.cdf(z)
    return mu
Example #17
Source File: test_likelihoods.py From revrand with Apache License 2.0

def test_poisson():
    # Test we can at match a Binomial distribution from scipy
    mu = 2
    dist = lk.Poisson()

    x = np.random.randint(low=0, high=5, size=(10,))

    p1 = poisson.logpmf(x, mu)
    p2 = dist.loglike(x, mu)
    np.allclose(p1, p2)

    p1 = poisson.cdf(x, mu)
    p2 = dist.cdf(x, mu)
    np.allclose(p1, p2)
Example #18
Source File: acquisition.py From pyGPGO with MIT License

def tExpectedImprovement(self, tau, mean, std, nu=3.0):
    """
    Expected Improvement acquisition function. Only to be used with `tStudentProcess` surrogate.

    Parameters
    ----------
    tau: float
        Best observed function evaluation.
    mean: float
        Point mean of the posterior process.
    std: float
        Point std of the posterior process.

    Returns
    -------
    float
        Expected improvement.
    """
    gamma = (mean - tau - self.eps) / (std + self.eps)
    return gamma * std * t.cdf(gamma, df=nu) + std * (1 + (gamma ** 2 - 1)/(nu - 1)) * t.pdf(gamma, df=nu)
Example #19
Source File: acquisition.py From pyGPGO with MIT License

def ExpectedImprovement(self, tau, mean, std):
    """
    Expected Improvement acquisition function.

    Parameters
    ----------
    tau: float
        Best observed function evaluation.
    mean: float
        Point mean of the posterior process.
    std: float
        Point std of the posterior process.

    Returns
    -------
    float
        Expected improvement.
    """
    z = (mean - tau - self.eps) / (std + self.eps)
    return (mean - tau) * norm.cdf(z) + std * norm.pdf(z)[0]
Example #20
Source File: nonparametric.py From hypothetical with MIT License

def _p_val(self):
    r"""
    Returns the p-value.

    Returns
    -------
    p : float
        The computed p value.

    Notes
    -----
    When sample sizes are large enough (:math:`n > 20`), the distribution of
    :math:`U` is normally distributed.
    """
    p = 1 - norm.cdf(self.z_value)

    return p * 2
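A side note on the pattern used above, not part of the hypothetical source: 1 - norm.cdf(z) is the upper-tail probability of a standard normal, and scipy also exposes it directly as norm.sf(z) (the survival function), which avoids cancellation for large z; two-sided p-values are commonly written with abs(z) so that a negative statistic does not yield a value above 1. The z value below is an illustrative assumption.

from scipy.stats import norm

z = 2.17  # illustrative z-statistic, not taken from the example above

p_two_sided = 2 * (1 - norm.cdf(abs(z)))
p_two_sided_sf = 2 * norm.sf(abs(z))   # norm.sf(x) == 1 - norm.cdf(x)

print(p_two_sided, p_two_sided_sf)     # both approx 0.030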
Example #21
Source File: acquisition.py From pyGPGO with MIT License

def ProbabilityImprovement(self, tau, mean, std):
    """
    Probability of Improvement acquisition function.

    Parameters
    ----------
    tau: float
        Best observed function evaluation.
    mean: float
        Point mean of the posterior process.
    std: float
        Point std of the posterior process.

    Returns
    -------
    float
        Probability of improvement.
    """
    z = (mean - tau - self.eps) / (std + self.eps)
    return norm.cdf(z)
Example #22
Source File: scale.py From vnpy_crypto with MIT License

def __call__(self, df_resid, nobs, resid):
    h = (df_resid)/nobs*(self.d**2 + (1-self.d**2)*\
            Gaussian.cdf(self.d)-.5 - self.d/(np.sqrt(2*np.pi))*\
            np.exp(-.5*self.d**2))
    s = mad(resid)
    subset = lambda x: np.less(np.fabs(resid/x),self.d)
    chi = lambda s: subset(s)*(resid/s)**2/2+(1-subset(s))*(self.d**2/2)
    scalehist = [np.inf,s]
    niter = 1
    while (np.abs(scalehist[niter-1] - scalehist[niter])>self.tol \
            and niter < self.maxiter):
        nscale = np.sqrt(1/(nobs*h)*np.sum(chi(scalehist[-1]))*\
                scalehist[-1]**2)
        scalehist.append(nscale)
        niter += 1
        #if niter == self.maxiter:
        #    raise ValueError("Huber's scale failed to converge")
    return scalehist[-1]
Example #23
Source File: testStatelessTechnicalAnalysers.py From Finance-Python with MIT License

def testSecurityNormInvValueHolder(self):
    mm1 = SecurityNormInvValueHolder('open')
    mm2 = SecurityNormInvValueHolder('open', fullAcc=True)

    for i in range(len(self.aapl['close'])):
        data = dict(aapl=dict(open=norm.cdf(self.aapl['open'][i])),
                    ibm=dict(open=norm.cdf(self.ibm['open'][i])))
        mm1.push(data)
        mm2.push(data)

        value1 = mm1.value
        value2 = mm2.value
        for name in value1.index():
            expected = norm.ppf(data[name]['open'])
            calculated = value1[name]
            self.assertAlmostEqual(expected, calculated, 6, 'at index {0}\n'
                                                            'expected: {1:.12f}\n'
                                                            'calculat: {2:.12f}'
                                   .format(i, expected, calculated))

            calculated = value2[name]
            self.assertAlmostEqual(expected, calculated, 12, 'at index {0}\n'
                                                             'expected: {1:.12f}\n'
                                                             'calculat: {2:.12f}'
                                   .format(i, expected, calculated))
Example #24
Source File: testStatelessTechnicalAnalysers.py From Finance-Python with MIT License

def testSecurityCeilValueHolder(self):
    mm1 = SecurityCeilValueHolder('open')

    for i in range(len(self.aapl['close'])):
        data = dict(aapl=dict(open=norm.cdf(self.aapl['open'][i])),
                    ibm=dict(open=norm.cdf(self.ibm['open'][i])))
        mm1.push(data)

        value1 = mm1.value
        for name in value1.index():
            expected = math.ceil(data[name]['open'])
            calculated = value1[name]
            self.assertAlmostEqual(expected, calculated, 6, 'at index {0}\n'
                                                            'expected: {1:.12f}\n'
                                                            'calculat: {2:.12f}'
                                   .format(i, expected, calculated))
Example #25
Source File: testStatelessTechnicalAnalysers.py From Finance-Python with MIT License

def testSecurityFloorValueHolder(self):
    mm1 = SecurityFloorValueHolder('open')

    for i in range(len(self.aapl['close'])):
        data = dict(aapl=dict(open=norm.cdf(self.aapl['open'][i])),
                    ibm=dict(open=norm.cdf(self.ibm['open'][i])))
        mm1.push(data)

        value1 = mm1.value
        for name in value1.index():
            expected = math.floor(data[name]['open'])
            calculated = value1[name]
            self.assertAlmostEqual(expected, calculated, 6, 'at index {0}\n'
                                                            'expected: {1:.12f}\n'
                                                            'calculat: {2:.12f}'
                                   .format(i, expected, calculated))
Example #26
Source File: testStatelessTechnicalAnalysers.py From Finance-Python with MIT License

def testSecurityRoundValueHolder(self):
    mm1 = SecurityRoundValueHolder('open')

    for i in range(len(self.aapl['close'])):
        data = dict(aapl=dict(open=norm.cdf(self.aapl['open'][i])),
                    ibm=dict(open=norm.cdf(self.ibm['open'][i])))
        mm1.push(data)

        value1 = mm1.value
        for name in value1.index():
            expected = round(data[name]['open'])
            calculated = value1[name]
            self.assertAlmostEqual(expected, calculated, 6, 'at index {0}\n'
                                                            'expected: {1:.12f}\n'
                                                            'calculat: {2:.12f}'
                                   .format(i, expected, calculated))
Example #27
Source File: acquisition_functions.py From CatLearn with GNU General Public License v3.0

def PI(y_best, predictions, uncertainty, objective):
    """Probability of improvement acq. function.

    Parameters
    ----------
    y_best : float
        Condition
    predictions : list
        Predicted means.
    uncertainty : list
        Uncertainties associated with the predictions.
    """
    if objective == 'max':
        z = (predictions - y_best) / (uncertainty)
        return norm.cdf(z)

    if objective == 'min':
        z = -((predictions - y_best) / (uncertainty))
        return norm.cdf(z)
Example #28
Source File: acquisition_functions.py From CatLearn with GNU General Public License v3.0

def EI(y_best, predictions, uncertainty, objective='max'):
    """Return expected improvement acq. function.

    Parameters
    ----------
    y_best : float
        Condition
    predictions : list
        Predicted means.
    uncertainty : list
        Uncertainties associated with the predictions.
    """
    if objective == 'max':
        z = (predictions - y_best) / (uncertainty)
        return (predictions - y_best) * norm.cdf(z) + \
            uncertainty * norm.pdf(z)

    if objective == 'min':
        z = (-predictions + y_best) / (uncertainty)
        return -((predictions - y_best) * norm.cdf(z) -
                 uncertainty * norm.pdf(z))
Example #29
Source File: test_multivariate.py From GraphicDesignPatternByPython with MIT License

def test_broadcasting(self):
    np.random.seed(1234)
    n = 4

    # Construct a random covariance matrix.
    data = np.random.randn(n, n)
    cov = np.dot(data, data.T)
    mean = np.random.randn(n)

    # Construct an ndarray which can be interpreted as
    # a 2x3 array whose elements are random data vectors.
    X = np.random.randn(2, 3, n)

    # Check that multiple data points can be evaluated at once.
    desired_pdf = multivariate_normal.pdf(X, mean, cov)
    desired_cdf = multivariate_normal.cdf(X, mean, cov)
    for i in range(2):
        for j in range(3):
            actual = multivariate_normal.pdf(X[i, j], mean, cov)
            assert_allclose(actual, desired_pdf[i,j])
            # Repeat for cdf
            actual = multivariate_normal.cdf(X[i, j], mean, cov)
            assert_allclose(actual, desired_cdf[i,j], rtol=1e-3)
Example #30
Source File: test_multivariate.py From GraphicDesignPatternByPython with MIT License

def test_haar(self):
    # Test that the eigenvalues, which lie on the unit circle in
    # the complex plane, are uncorrelated.

    # Generate samples
    dim = 5
    samples = 1000  # Not too many, or the test takes too long
    np.random.seed(514)  # Note that the test is sensitive to seed too
    xs = unitary_group.rvs(dim, size=samples)

    # The angles "x" of the eigenvalues should be uniformly distributed
    # Overall this seems to be a necessary but weak test of the distribution.
    eigs = np.vstack(scipy.linalg.eigvals(x) for x in xs)
    x = np.arctan2(eigs.imag, eigs.real)
    res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf)
    assert_(res.pvalue > 0.05)