Python scipy.stats.norm.ppf() Examples

The following are 29 code examples of scipy.stats.norm.ppf(), collected from open-source projects. Each example notes its source file, project, and license. You may also want to check out all available functions/classes of the module scipy.stats.norm.
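
As a quick orientation before the examples: norm.ppf is the percent-point function (inverse CDF) of the normal distribution, so it maps probabilities to quantiles and undoes norm.cdf. A minimal sketch, independent of the projects below:

from scipy.stats import norm

norm.ppf(0.975)                   # ~1.96, the two-sided 95% critical value
norm.ppf(0.5)                     # 0.0, the median of the standard normal
norm.ppf(0.05, loc=10, scale=2)   # ~6.71, the 5% quantile of N(10, 2**2)
norm.cdf(norm.ppf(0.3))           # ~0.3, ppf inverts cdf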
Example #1
Source File: nonparametric.py    From hypothetical with MIT License
def _t_value(self):
        r"""
        Returns the critical t-statistic given the input alpha-level (defaults to 0.05).

        Returns
        -------
        tval : float
            The critical t-value for use in computing the Least Significant Difference.

        Notes
        -----
        Scipy's :code:`t.ppf` method is used to compute the critical t-value.

        """
        tval = t.ppf(1 - self.alpha / 2, self.n - self.k)

        return tval 
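
For reference, the same critical value outside the class is a direct t.ppf call; with, say, alpha = 0.05 and n - k = 20 degrees of freedom:

from scipy.stats import t

t.ppf(1 - 0.05 / 2, 20)   # ~2.086, the two-sided 5% critical t-value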
Example #2
Source File: utils.py    From pyprocessmacro with MIT License
def bias_corrected_ci(estimate, samples, conf=95):
    """
    Return the bias-corrected bootstrap confidence interval for an estimate
    :param estimate: Numerical estimate in the original sample
    :param samples: Nx1 array of bootstrapped estimates
    :param conf: Level of the desired confidence interval
    :return: Bias-corrected bootstrapped LLCI and ULCI for the estimate.
    """
    # noinspection PyUnresolvedReferences
    ptilde = ((samples < estimate) * 1).mean()
    Z = norm.ppf(ptilde)
    Zci = z_score(conf)
    Zlow, Zhigh = -Zci + 2 * Z, Zci + 2 * Z
    plow, phigh = norm.cdf(Zlow), norm.cdf(Zhigh)
    llci = np.percentile(samples, plow * 100, interpolation="lower")
    ulci = np.percentile(samples, phigh * 100, interpolation="higher")
    return llci, ulci 
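
A possible usage sketch with synthetic bootstrap draws (it assumes bias_corrected_ci and its z_score helper, shown later in Example #22, are both in scope):

import numpy as np

rng = np.random.RandomState(0)
boot = rng.normal(loc=2.0, scale=0.5, size=5000)   # stand-in bootstrap estimates
llci, ulci = bias_corrected_ci(estimate=2.1, samples=boot, conf=95)
print(llci, ulci)   # percentile interval shifted to correct for bootstrap bias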
Example #3
Source File: information_gain_mc.py    From RoBO with BSD 3-Clause "New" or "Revised" License
def update(self, model):
        self.model = model

        self.sn2 = self.model.get_noise()

        # Sample representer points
        self.sampling_acquisition.update(model)
        self.sample_representer_points()

        # Omega values needed for the innovations, obtained by evaluating
        # the normal quantile function on a uniform grid
        self.W = norm.ppf(np.linspace(1. / (self.Np + 1),
                                      1 - 1. / (self.Np + 1),
                                      self.Np))[np.newaxis, :]

        # Compute current posterior belief at the representer points
        self.Mb, self.Vb = self.model.predict(self.zb, full_cov=True)
        self.pmin = mc_part.joint_pmin(self.Mb, self.Vb, self.Nf)
        self.logP = np.log(self.pmin) 
Example #4
Source File: unittests_evaluations.py    From Conditional_Density_Estimation with MIT License
def test_conditional_value_at_risk_mc(self):
    for mu, sigma, alpha in [(1, 1, 0.05), (0.4, 0.1, 0.02), (0.1, 2, 0.01)]:
      # prepare estimator dummy
      mu1 = np.array([mu])
      sigma1 = np.identity(n=1) * sigma
      est = GaussianDummy(mean=mu1, cov=sigma1**2, ndim_x=1, ndim_y=1, has_pdf=True)
      est.fit(None, None)

      CVaR_true = mu - sigma/alpha * norm.pdf(norm.ppf(alpha))
      CVaR_est = est.conditional_value_at_risk(x_cond=np.array([[0],[1]]), alpha=alpha)

      print("CVaR True (%.2f, %.2f):"%(mu, sigma), CVaR_true)
      print("CVaR_est (%.2f, %.2f):"%(mu, sigma), CVaR_est)
      print("VaR (%.2f, %.2f):"%(mu, sigma), est.value_at_risk(x_cond=np.array([[0],[1]]), alpha=alpha))

      self.assertAlmostEqual(CVaR_est[0], CVaR_true, places=2)
      self.assertAlmostEqual(CVaR_est[1], CVaR_true, places=2) 
Example #5
Source File: EconDensity.py    From Conditional_Density_Estimation with MIT License
def value_at_risk(self, x_cond, alpha=0.01, **kwargs):
    """ Computes the Value-at-Risk (VaR) of the fitted distribution. Only if ndim_y = 1

    Args:
      x_cond: different x values to condition on - numpy array of shape (n_values, ndim_x)
      alpha: quantile percentage of the distribution

    Returns:
       VaR values for each x to condition on - numpy array of shape (n_values)
    """
    assert self.ndim_y == 1, "Value at Risk can only be computed when ndim_y = 1"
    assert x_cond.ndim == 2

    VaR = norm.ppf(alpha, loc=x_cond, scale=self._std(x_cond))[:,0]
    assert VaR.shape == (x_cond.shape[0],)
    return VaR 
Example #6
Source File: LinearGaussian.py    From Conditional_Density_Estimation with MIT License
def value_at_risk(self, x_cond, alpha=0.01, **kwargs):
    """ Computes the Value-at-Risk (VaR) of the fitted distribution. Only if ndim_y = 1

    Args:
      x_cond: different x values to condition on - numpy array of shape (n_values, ndim_x)
      alpha: quantile percentage of the distribution

    Returns:
       VaR values for each x to condition on - numpy array of shape (n_values)
    """
    assert self.ndim_y == 1, "Value at Risk can only be computed when ndim_y = 1"
    assert x_cond.ndim == 2

    VaR = norm.ppf(alpha, loc=self._mean(x_cond), scale=self._std(x_cond))[:,0]
    assert VaR.shape == (x_cond.shape[0],)
    return VaR 
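
As a sanity check on the formula used in these two VaR examples: the alpha-quantile of N(mu, sigma**2) is mu + sigma * Phi^{-1}(alpha), which is exactly what the loc/scale arguments implement:

from scipy.stats import norm

mu, sigma, alpha = 0.0, 1.0, 0.01
norm.ppf(alpha, loc=mu, scale=sigma)   # ~-2.326, the 1% quantile
mu + sigma * norm.ppf(alpha)           # identical by the location-scale rule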
Example #7
Source File: test_bootstrap_calcs.py    From pylogit with BSD 3-Clause "New" or "Revised" License
def test_calc_bias_correction_bca(self):
        # There are 100 bootstrap replicates, already in ascending order for
        # each column. If we take row 51 to be the mle, then 50% of the
        # replicates are less than the mle, and we should have bias = 0.
        expected_result = np.zeros(self.mle_params.size)

        # Alias the function to be tested.
        func = bc.calc_bias_correction_bca

        # Perform the desired test
        func_result = func(self.bootstrap_replicates, self.mle_params)
        self.assertIsInstance(func_result, np.ndarray)
        self.assertEqual(func_result.shape, expected_result.shape)
        npt.assert_allclose(func_result, expected_result)

        # Create a fake mle that should be higher than 95% of the results
        fake_mle = self.bootstrap_replicates[95]
        expected_result_2 = norm.ppf(0.95) * np.ones(self.mle_params.size)
        func_result_2 = func(self.bootstrap_replicates, fake_mle)

        self.assertIsInstance(func_result_2, np.ndarray)
        self.assertEqual(func_result_2.shape, expected_result_2.shape)
        npt.assert_allclose(func_result_2, expected_result_2)
        return None 
Example #8
Source File: evaluation.py    From lang2program with Apache License 2.0
def _confidence_interval_by_alpha(cls, p_hat, n, alpha, method='wald'):
        """Compute confidence interval for estimate of Bernoulli parameter p.

        Args:
            p_hat: maximum likelihood estimate of p
            n: samples observed
            alpha: the probability that the true p falls outside the CI
            method: 'wald' or 'wilson', selecting the interval construction

        Returns:
            left, right
        """
        prob = 1 - 0.5 * alpha
        z = norm.ppf(prob)

        compute_ci = cls._confidence_interval_by_z_wald if method == 'wald' else cls._confidence_interval_by_z_wilson

        return compute_ci(p_hat, n, z) 
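
The Wald and Wilson helpers themselves are not shown in this excerpt; a plausible Wald version, with the name and signature assumed from the call above, might look like:

import numpy as np

def _confidence_interval_by_z_wald(p_hat, n, z):
    # hypothetical helper, not the project's actual code:
    # Wald interval p_hat +/- z * sqrt(p_hat * (1 - p_hat) / n), clipped to [0, 1]
    half_width = z * np.sqrt(p_hat * (1 - p_hat) / n)
    return max(0.0, p_hat - half_width), min(1.0, p_hat + half_width)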
Example #9
Source File: nevergrad_optimizer.py    From bayesmark with Apache License 2.0
def prewarp(self, xx):
        """Extra work needed to get variables into the Gaussian space
        representation."""
        xxw = {}
        for arg_name, vv in xx.items():
            assert np.isscalar(vv)
            space = self.space[arg_name]

            if space is not None:
                # Warp so we think it is a priori uniform in [a, b]
                vv = space.warp(vv)
                assert vv.size == 1

                # Now make uniform on [0, 1], also unpack warped to scalar
                (lb, ub), = space.get_bounds()
                vv = linear_rescale(vv.item(), lb, ub, 0, 1)

                # Now make std Gaussian a priori
                vv = norm.ppf(vv)
            assert np.isscalar(vv)
            xxw[arg_name] = vv
        return xxw 
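
The final norm.ppf step is the inverse-CDF (probability integral) transform: applied to values uniform on [0, 1], it yields standard Gaussian values. A quick standalone check:

import numpy as np
from scipy.stats import norm

rng = np.random.RandomState(0)
u = rng.uniform(size=100000)
g = norm.ppf(u)              # uniforms mapped through the inverse CDF
print(g.mean(), g.std())     # ~0.0 and ~1.0, i.e. standard normal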
Example #10
Source File: multiclass_soft_confidence_weighted_2_diag.py    From python-online-machine-learning-library with BSD 3-Clause "New" or "Revised" License
def _init_model(self, C, eta):
        """
        Initialize model.
        """
        logger.info("init model starts")
        self.model["mu"] = defaultdict()  # model parameter mean
        self.model["S"] = defaultdict()     # model parameter covariance
        self.model["C"] = C                        # PA parameter
        self.model["eta"] = eta                  # confidence parameter
        self.model["phi"] = norm.ppf(norm.cdf(eta))      # inverse of cdf(eta)
        self.model["phi_2"] = np.power(self.model["phi"], 2)
        self.model["psi"] = 1 + self.model["phi_2"] / 2
        self.model["zeta"] = 1 + self.model["phi_2"]
        logger.info("init model finished")

        pass 
Example #11
Source File: LinearGaussian.py    From Conditional_Density_Estimation with MIT License
def conditional_value_at_risk(self, x_cond, alpha=0.01, **kwargs):
    """ Computes the Conditional Value-at-Risk (CVaR) / Expected Shortfall of the fitted distribution. Only if ndim_y = 1

       Args:
         x_cond: different x values to condition on - numpy array of shape (n_values, ndim_x)
         alpha: quantile percentage of the distribution
         n_samples: number of samples for the Monte Carlo estimate

       Returns:
         CVaR values for each x to condition on - numpy array of shape (n_values)
       """
    assert self.ndim_y == 1, "CVaR can only be computed when ndim_y = 1"
    x_cond = self._handle_input_dimensionality(x_cond)
    assert x_cond.ndim == 2

    mean = self._mean(x_cond)
    sigma = self._std(x_cond)
    CVaR = (mean - sigma * (1/alpha) * norm.pdf(norm.ppf(alpha)))[:,0]
    assert CVaR.shape == (x_cond.shape[0],)
    return CVaR 
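
The closed form used here, CVaR_alpha = mu - sigma * pdf(ppf(alpha)) / alpha for a Gaussian, can be checked against a direct Monte Carlo tail average (a standalone sketch, not from the project):

import numpy as np
from scipy.stats import norm

mu, sigma, alpha = 0.0, 1.0, 0.05
closed_form = mu - sigma / alpha * norm.pdf(norm.ppf(alpha))

rng = np.random.RandomState(0)
samples = rng.normal(mu, sigma, size=10**6)
var = np.quantile(samples, alpha)              # the alpha-quantile (VaR)
monte_carlo = samples[samples <= var].mean()   # mean of the worst alpha-tail
print(closed_form, monte_carlo)                # both ~-2.06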
Example #12
Source File: robust.py    From tick with BSD 3-Clause "New" or "Revised" License
def std_mad(x):
    """Robust estimation of the standard deviation, based on the Corrected Median
    Absolute Deviation (MAD) of x.
    This computes the MAD of x, and applies the Gaussian distribution
    correction, making it a consistent estimator of the standard-deviation
    (when the sample looks Gaussian with outliers).

    Parameters
    ----------
    x : `np.ndarray`
        Input vector

    Returns
    -------
    output : `float`
        A robust estimation of the standard deviation
    """
    from scipy.stats import norm
    correction = 1 / norm.ppf(3 / 4)
    return correction * np.median(np.abs(x - np.median(x))) 
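
For example, on Gaussian data with a few gross outliers the MAD-based estimate stays near the true standard deviation while np.std is badly inflated (assumes std_mad above is in scope):

import numpy as np

rng = np.random.RandomState(42)
x = rng.normal(0.0, 1.0, size=10000)
x[:20] = 1000.0        # inject 0.2% gross outliers
print(np.std(x))       # ~44.7, dominated by the outliers
print(std_mad(x))      # ~1.0, essentially unaffected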
Example #13
Source File: ae.py    From qiskit-aqua with Apache License 2.0
def _fisher_confint(self, alpha: float, observed: bool = False) -> List[float]:
        """Compute the Fisher information confidence interval for the MLE of the previous run.

        Args:
            alpha: Specifies the (1 - alpha) confidence level (0 < alpha < 1).
            observed: If True, the observed Fisher information is used to construct the
                confidence interval, otherwise the expected Fisher information.

        Returns:
            The Fisher information confidence interval.
        """
        shots = self._ret['shots']
        mle = self._ret['ml_value']

        # construct the confidence interval: the standard error of the MLE is
        # 1 / sqrt(shots * Fisher information), so the normal quantile is
        # divided by std = sqrt(shots * Fisher information) below
        std = np.sqrt(shots * self._compute_fisher_information(observed))
        ci = mle + norm.ppf(1 - alpha / 2) / std * np.array([-1, 1])

        # transform the confidence interval from [0, 1] to the target interval
        return [self.a_factory.value_to_estimation(bound) for bound in ci] 
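
The same normal-approximation interval in isolation: given an estimate and its standard error se, the (1 - alpha) interval is estimate +/- norm.ppf(1 - alpha/2) * se. A standalone numeric illustration:

import numpy as np
from scipy.stats import norm

estimate, se, alpha = 0.42, 0.01, 0.05
print(estimate + norm.ppf(1 - alpha / 2) * se * np.array([-1, 1]))
# [0.4004 0.4396]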
Example #14
Source File: gaussian.py    From perceptron-benchmark with Apache License 2.0
def predictions(self, image, forward_batch_size=32):
        from scipy.stats import norm
        image, _ = self._process_input(image)
        image_batch = np.vstack([[image]] * self._iterations)
        noise = np.random.normal(scale=self._std, size=image_batch.shape).astype(np.float32)
        image_batch += noise
        predictions = self._model.batch_predictions(image_batch)
        logits = np.argmax(predictions, axis=1)
        one_hot = np.zeros([self._iterations, self._num_classes])
        logits_one_hot = np.eye(self._num_classes)[logits]
        one_hot += logits_one_hot
        one_hot = np.sum(one_hot, axis=0)
        ranks = sorted(one_hot / np.sum(one_hot))[::-1]
        qi = ranks[0] - 1e-9
        qj = ranks[1] + 1e-9
        bound = self._std / 2. * (norm.ppf(qi) - norm.ppf(qj))
        return np.argmax(one_hot), bound 
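
The returned bound has the form of a randomized-smoothing certified radius, sigma / 2 * (ppf(q_i) - ppf(q_j)), computed from the top-two class frequencies under noise. A quick numeric illustration with made-up frequencies:

from scipy.stats import norm

sigma, qi, qj = 0.25, 0.80, 0.15
print(sigma / 2.0 * (norm.ppf(qi) - norm.ppf(qj)))   # ~0.235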
Example #15
Source File: npc.py    From permute with BSD 2-Clause "Simplified" License
def liptak(pvalues):
    r"""
    Apply Liptak's combining function

    .. math:: \sum_i \Phi^{-1}(1-p_i)

    where $\Phi^{-1}$ is the inverse CDF of the standard normal distribution.

    Parameters
    ----------
    pvalues : array_like
        Array of p-values to combine

    Returns
    -------
    float
        Liptak's combined test statistic
    """
    return np.sum(norm.ppf(1 - pvalues)) 
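
For instance, three small p-values combine into a clearly positive statistic (note that p-values of exactly 0 or 1 map to +/-inf under norm.ppf):

import numpy as np
from scipy.stats import norm

pvalues = np.array([0.01, 0.04, 0.10])
print(np.sum(norm.ppf(1 - pvalues)))   # ~5.36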
Example #16
Source File: testStatelessTechnicalAnalysers.py    From Finance-Python with MIT License
def testSecurityNormInvValueHolder(self):
        mm1 = SecurityNormInvValueHolder('open')
        mm2 = SecurityNormInvValueHolder('open', fullAcc=True)

        for i in range(len(self.aapl['close'])):
            data = dict(aapl=dict(open=norm.cdf(self.aapl['open'][i])),
                        ibm=dict(open=norm.cdf(self.ibm['open'][i])))
            mm1.push(data)
            mm2.push(data)

            value1 = mm1.value
            value2 = mm2.value
            for name in value1.index():
                expected = norm.ppf(data[name]['open'])
                calculated = value1[name]
                self.assertAlmostEqual(expected, calculated, 6, 'at index {0}\n'
                                                                'expected:   {1:.12f}\n'
                                                                'calculated: {2:.12f}'
                                       .format(i, expected, calculated))

                calculated = value2[name]
                self.assertAlmostEqual(expected, calculated, 12, 'at index {0}\n'
                                                                 'expected:   {1:.12f}\n'
                                                                 'calculated: {2:.12f}'
                                       .format(i, expected, calculated))
Example #17
Source File: copula.py    From pycopula with Apache License 2.0
def pdf(self, x):
		self._check_dimension(x)
		u_i = norm.ppf(x)
		return self._R_det**(-0.5) * np.exp(-0.5 * np.dot(u_i, np.dot(self._R_inv - np.identity(self.dim), u_i))) 
Example #18
Source File: copula.py    From pycopula with Apache License 2.0
def pdf_param(self, x, R):
		self._check_dimension(x)
		if self.dim == 2 and not(hasattr(R, '__len__')):
			R = [R]
		if len(np.asarray(R).shape) == 2 and len(R) != self.dim:
			raise ValueError("Expected covariance matrix of dimension {0}.".format(self.dim))
		u = norm.ppf(x)
		
		cov = np.ones([ self.dim, self.dim ])
		idx = 0
		if len(np.asarray(R).shape) <= 1:
			if len(R) == self.dim * (self.dim - 1) / 2:
				for j in range(self.dim):
					for i in range(j + 1, self.dim):
						cov[j][i] = R[idx]
						cov[i][j] = R[idx]
						idx += 1
			else:
				raise ValueError("Expected covariance matrix, get an array.")
		
		if self.dim == 2:
			RDet = cov[0][0] * cov[1][1] - cov[0][1]**2
			RInv = 1. / RDet * np.asarray([[ cov[1][1], -cov[0][1]], [ -cov[0][1], cov[0][0] ]])
		else:
			RDet = np.linalg.det(cov)
			RInv = np.linalg.inv(cov)
		return [ RDet**(-0.5) * np.exp(-0.5 * np.dot(u_i, np.dot(RInv - np.identity(self.dim), u_i))) for u_i in u ] 
Example #19
Source File: copula.py    From pycopula with Apache License 2.0
def quantile(self, x):
		# NOTE: scipy.stats.multivariate_normal does not provide a ppf method,
		# so this call fails as written; a multivariate quantile is not a single
		# point and would require a custom construction.
		return multivariate_normal.ppf([ norm.ppf(u) for u in x ], cov=self.R)
Example #20
Source File: copula.py    From pycopula with Apache License 2.0
def cdf(self, x):
		self._check_dimension(x)
		tv = np.asarray([ scipy.stats.t.ppf(u, df=self.df) for u in x ])

		def fun(a, b):
			return multivariate_t_distribution(np.asarray([a, b]), np.asarray([0, 0]), self.R, self.df, self.dim)

		# integrate the bivariate t density up to the marginal t quantiles;
		# -10 stands in for -infinity as the lower integration limit
		lim_0 = lambda x: -10
		lim_1 = lambda x: tv[1]
		return scipy.integrate.dblquad(fun, -10, tv[0], lim_0, lim_1)[0]
Example #21
Source File: mlae.py    From qiskit-aqua with Apache License 2.0
def _fisher_confint(self, alpha: float = 0.05, observed: bool = False) -> List[float]:
        """Compute the `alpha` confidence interval based on the Fisher information.

        Args:
            alpha: The level of the confidence interval (must be <= 0.5), defaults to 0.05.
            observed: If True, use observed Fisher information.

        Returns:
            float: The alpha confidence interval based on the Fisher information
        Raises:
            AssertionError: Call run() first!
        """
        # Get the (observed) Fisher information
        fisher_information = None
        try:
            fisher_information = self._ret['fisher_information']
        except KeyError:
            raise AssertionError("Call run() first!")

        if observed:
            fisher_information = self._compute_fisher_information(observed=True)

        normal_quantile = norm.ppf(1 - alpha / 2)
        confint = np.real(self._ret['value']) + \
            normal_quantile / np.sqrt(fisher_information) * np.array([-1, 1])
        mapped_confint = [self.a_factory.value_to_estimation(bound) for bound in confint]
        return mapped_confint 
Example #22
Source File: utils.py    From pyprocessmacro with MIT License
def z_score(conf):
    """
    :param conf: Desired level of confidence
    :return: The Z-score corresponding to the level of confidence desired.
    """
    return norm.ppf((100 - (100 - conf) / 2) / 100) 
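
So, for the usual confidence levels (assuming z_score above is in scope):

z_score(95)   # ~1.960, the two-sided 95% critical value
z_score(99)   # ~2.576, the two-sided 99% critical value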
Example #23
Source File: unittests_evaluations.py    From Conditional_Density_Estimation with MIT License
def test_conditional_value_at_risk_sample(self):
    # prepare estimator dummy
    for mu, sigma in [(-6, 0.25), (0.4, 0.1), (22, 3)]:
      mu1 = np.array([mu])
      sigma1 = np.identity(n=1) * sigma
      est = GaussianDummy(mean=mu1, cov=sigma1**2, ndim_x=1, ndim_y=1, has_pdf=False)
      est.fit(None, None)

      alpha = 0.02

      CVaR_true = mu - sigma / alpha * norm.pdf(norm.ppf(alpha))
      CVaR_est = est.conditional_value_at_risk(x_cond=np.array([[0], [1]]), alpha=alpha, n_samples=2*10**6)

      self.assertAlmostEqual(CVaR_est[0], CVaR_true, places=2)
      self.assertAlmostEqual(CVaR_est[1], CVaR_true, places=2) 
Example #24
Source File: interpolate.py    From plat with MIT License
def lerp_gaussian(val, low, high):
    """Linear interpolation with gaussian CDF"""
    low_gau = norm.cdf(low)
    high_gau = norm.cdf(high)
    lerped_gau = lerp(val, low_gau, high_gau)
    return norm.ppf(lerped_gau) 
Example #25
Source File: interpolate.py    From plat with MIT License
def slerp_gaussian(val, low, high):
    """Spherical interpolation with gaussian CDF (generally not useful)"""
    offset = norm.cdf(np.zeros_like(low))  # offset is just [0.5, 0.5, ...]
    low_gau_shifted = norm.cdf(low) - offset
    high_gau_shifted = norm.cdf(high) - offset
    circle_lerped_gau = slerp(val, low_gau_shifted, high_gau_shifted)
    epsilon = 0.001
    clipped_sum = np.clip(circle_lerped_gau + offset, epsilon, 1.0 - epsilon)
    result = norm.ppf(clipped_sum)
    return result 
Example #26
Source File: calculating_var.py    From Mastering-Python-for-Finance-source-codes with MIT License
def calculate_daily_VaR(P, prob, mean, sigma, 
                        days_per_year=252.):
    min_ret = norm.ppf(1-prob, 
                       mean/days_per_year, 
                       sigma/np.sqrt(days_per_year))
    return P - P*(min_ret+1) 
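
A usage sketch with illustrative numbers: a 100,000 position, 95% confidence, 5% annual drift and 25% annual volatility (assumes calculate_daily_VaR above is in scope):

P = 100000.0
VaR = calculate_daily_VaR(P, prob=0.95, mean=0.05, sigma=0.25)
print(VaR)   # ~2570, the one-day loss not exceeded with 95% confidence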
Example #27
Source File: utils.py    From nispat with GNU General Public License v3.0
def warp_predictions(self, mu, s2, param, percentiles=[0.025, 0.975]):
        """ Compute the warped predictions from a gaussian predictive
            distribution, specifed by a mean (mu) and variance (s2)
            
            :param mu: Gassian predictive mean 
            :param s2: Predictive variance
            :param param: warping parameters
            :param percentiles: Desired percentiles of the warped likelihood

            :returns: * median - median of the predictive distribution
                      * pred_interval - predictive interval(s)
        """

        # Compute percentiles of a standard Gaussian
        N = norm
        Z = N.ppf(percentiles)
        
        # find the median (using mu = median)
        median = self.invf(mu, param)

        # compute the predictive intervals (non-stationary)
        pred_interval = np.zeros((len(mu), len(Z)))
        for i, z in enumerate(Z):
            pred_interval[:,i] = self.invf(mu + np.sqrt(s2)*z, param)

        return median, pred_interval 
Example #28
Source File: pymannkendall.py    From pyMannKendall with MIT License
def __p_value(z, alpha):
    # two tail test
    p = 2*(1-norm.cdf(abs(z)))  
    h = abs(z) > norm.ppf(1-alpha/2)

    if (z < 0) and h:
        trend = 'decreasing'
    elif (z > 0) and h:
        trend = 'increasing'
    else:
        trend = 'no trend'
    
    return p, h, trend 
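
Called directly (the double leading underscore only marks the function as module-private; it assumes the function is in scope), a z-score of 2.5 at alpha = 0.05 gives:

p, h, trend = __p_value(2.5, 0.05)
print(p, h, trend)   # ~0.0124, True, 'increasing'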
Example #29
Source File: utils.py    From zEpid with MIT License
def normal_ppf(z):
    return norm.ppf(z, loc=0, scale=1)