Python scipy.stats.t.ppf() Examples

The following are 30 code examples of scipy.stats.t.ppf(), the quantile function (inverse CDF) of the Student's t distribution. Each example lists the project and source file it was taken from. You may also want to check out the other functions and classes of the scipy.stats.t module.
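As a quick orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) of what scipy.stats.t.ppf() computes; the degrees of freedom value is hypothetical.

from scipy.stats import t

df = 10                  # degrees of freedom (hypothetical value)
q = t.ppf(0.975, df)     # 97.5th percentile, i.e. the two-sided 5% critical value
print(q)                 # ~2.228
print(t.cdf(q, df))      # ~0.975, confirming ppf is the inverse of cdf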
Example #1
Source File: nonparametric.py    From hypothetical with MIT License
def _t_value(self):
        r"""
        Returns the critical t-statistic given the input alpha-level (defaults to 0.05).

        Returns
        -------
        tval : float
            The critical t-value used in computing the Least Significant Difference.

        Notes
        -----
        Scipy's :code:`t.ppf` method is used to compute the critical t-value.

        """
        tval = t.ppf(1 - self.alpha / 2, self.n - self.k)

        return tval 
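A hedged usage sketch of the same calculation, with hypothetical values for alpha, n, and k (these are not taken from the hypothetical package):

from scipy.stats import t

alpha, n, k = 0.05, 30, 3
tval = t.ppf(1 - alpha / 2, n - k)   # two-sided critical value with n - k degrees of freedom
print(tval)                          # ~2.052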
Example #2
Source File: copulapdf.py    From copula-py with GNU General Public License v3.0
def _t(u, rho, nu):
    d = u.shape[1]
    nu = float(nu)
    
    try:
        R = cholesky(rho)
    except LinAlgError:
        raise ValueError('Provided Rho matrix is not Positive Definite!')
    
    ticdf = t.ppf(u, nu)
    
    z = solve(R, ticdf.T).T
    logSqrtDetRho = np.sum(np.log(np.diag(R)))
    const = gammaln((nu+d)/2.0) + (d-1)*gammaln(nu/2.0) - d*gammaln((nu+1)/2.0) - logSqrtDetRho
    summer = np.sum(np.power(z, 2), axis=1)
    numer = -((nu+d)/2.0) * np.log(1.0 + summer/nu)
    denom = np.sum(-((nu+1)/2) * np.log(1 + np.power(ticdf, 2)/nu), axis=1)
    y = np.exp(const + numer - denom)
    
    return y 
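The core of _t is the transform from uniform marginals to Student's t scores via t.ppf before decorrelating them with the Cholesky factor of rho. A standalone sketch of that step with a made-up correlation matrix and points (not taken from copula-py):

import numpy as np
from numpy.linalg import cholesky, solve
from scipy.stats import t

nu = 4.0
rho = np.array([[1.0, 0.5],
                [0.5, 1.0]])        # must be positive definite
u = np.array([[0.2, 0.7],
              [0.9, 0.4]])          # points on the unit square

R = cholesky(rho)                   # lower-triangular factor
ticdf = t.ppf(u, nu)                # elementwise t quantiles of the uniforms
z = solve(R, ticdf.T).T             # decorrelated scores, as in _t above
print(z)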
Example #3
Source File: varianceReduction.py    From credit-risk-modelling with GNU General Public License v3.0
def isThresholdSimple(N,M,p,c,l,myRho):
    mu = getOptimalMeanShift(c,p,l,myRho)
    theta = np.zeros(M)
    cgf = np.zeros(M)
    qZ = np.zeros([M,N])
    e = np.random.normal(0,1,[M,N])
    G = np.transpose(np.tile(np.random.normal(mu,1,M),(N,1)))
    num = (norm.ppf(p)*np.ones((M,1)))-np.sqrt(myRho)*G
    pZ = norm.cdf(np.divide(num,np.sqrt(1-myRho)))
    for n in range(0,M):
        theta[n] = vc.getSaddlePoint(pZ[n,:],c,l,0.0)
        qZ[n,:] = getQ(theta[n],c,pZ[n,:])
        cgf[n] = vc.computeCGF(theta[n],pZ[n,:],c)
    I = np.transpose(1*np.less(e,norm.ppf(qZ)))
    L = np.dot(c,I)
    rn = np.exp(-mu*G[:,0]+0.5*(mu**2))*computeRND(theta,L,cgf)
    tailProb = np.mean(np.multiply(L>l,rn)) 
    eShortfall =  np.mean(np.multiply(L*(L>l),rn))/tailProb        
    return tailProb,eShortfall 
Example #4
Source File: varContributions.py    From credit-risk-modelling with GNU General Public License v3.0
def mcThresholdTDecomposition(N,M,S,p,c,rho,nu,isT,myAlpha):
    contributions = np.zeros([N,S,2])
    var = np.zeros(S)
    es = np.zeros(S)
    K = myT.ppf(p,nu)*np.ones((M,1))        
    for s in range(0,S):
        print("Iteration: %d" % (s+1))
        Y = th.getY(N,M,p,rho,nu,isT)
        myD = 1*np.less(Y,K)     
        myLoss = np.sort(np.dot(myD,c),axis=None)
        el,ul,var[s],es[s]=util.computeRiskMeasures(M,myLoss,np.array([myAlpha]))
        varVector = c*myD[np.dot(myD,c)==var[s],:]
        esVector = c*myD[np.dot(myD,c)>=var[s],:]
        contributions[:,s,0] = np.sum(varVector,0)/varVector.shape[0]
        contributions[:,s,1] = np.sum(esVector,0)/esVector.shape[0]
    return contributions,var,es 
Example #5
Source File: varContributions.py    From credit-risk-modelling with GNU General Public License v3.0
def mcThresholdGDecomposition(N,M,S,p,c,rho,nu,isT,myAlpha):
    contributions = np.zeros([N,S,2])
    var = np.zeros(S)
    es = np.zeros(S)
    K = norm.ppf(p)*np.ones((M,1))        
    for s in range(0,S):
        print("Iteration: %d" % (s+1))
        Y = th.getY(N,M,p,rho,nu,isT)
        myD = 1*np.less(Y,K)     
        myLoss = np.sort(np.dot(myD,c),axis=None)
        el,ul,var[s],es[s]=util.computeRiskMeasures(M,myLoss,np.array([myAlpha]))
        varVector = c*myD[np.dot(myD,c)==var[s],:]
        esVector = c*myD[np.dot(myD,c)>=var[s],:]
        contributions[:,s,0] = np.sum(varVector,0)/varVector.shape[0]
        contributions[:,s,1] = np.sum(esVector,0)/esVector.shape[0]
    return contributions,var,es 
Example #6
Source File: varContributions.py    From credit-risk-modelling with GNU General Public License v3.0
def getPy(p,y,p1,p2,whichModel,v=0):
    if whichModel==0: # Gaussian threshold
        return th.computeP(p,p1,y)
    elif whichModel==1: # beta
        return y*np.ones(len(p))
    elif whichModel==2: # CreditRisk+
        v = p*(1-p1+p1*y)
        return np.maximum(np.minimum(1-np.exp(-v),0.999),0.0001)
    elif whichModel==3: # logit
        return np.reciprocal(1+np.exp(-(p1+p2*y)))
    elif whichModel==4: # probit
        return norm.ppf(p1+p2*y)    
    elif whichModel==5: # Weibull
        return np.maximum(np.minimum(1-np.exp(-y),0.999),0.0001)*np.ones(len(p))
    elif whichModel==6: # t threshold
        return th.computeP_t(p,p1,y,v,p2) 
Example #7
Source File: thresholdModels.py    From credit-risk-modelling with GNU General Public License v3.0
def oneFactorTModel(N,M,p,c,rho,nu,alpha):
    Y = getTY(N,M,p,rho,nu)
    K = myT.ppf(p,nu)*np.ones((M,1))        
    lossIndicator = 1*np.less(Y,K)     
    lossDistribution = np.sort(np.dot(lossIndicator,c),axis=None)
    el,ul,var,es=util.computeRiskMeasures(M,lossDistribution,alpha)
    return el,ul,var,es 
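Hedged illustration of the threshold used in the t model above, with a hypothetical default probability p and degrees of freedom nu: an obligor defaults when its latent variable falls below K = t.ppf(p, nu), so the unconditional default probability is exactly p.

from scipy.stats import t as myT

p, nu = 0.01, 8
K = myT.ppf(p, nu)
print(K)                 # ~-2.90
print(myT.cdf(K, nu))    # ~0.01, recovering the default probability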
Example #8
Source File: varianceReduction.py    From credit-risk-modelling with GNU General Public License v3.0
def isThreshold(N,M,p,c,l,myRho,nu,shiftMean,isT,invVector=0):
    mu = 0.0
    gamma = 0.0
    if shiftMean==1:
        mu = getOptimalMeanShift(c,p,l,myRho)
    theta = np.zeros(M)
    cgf = np.zeros(M)
    qZ = np.zeros([M,N])
    G = np.transpose(np.tile(np.random.normal(mu,1,M),(N,1)))
    e = np.random.normal(0,1,[M,N])
    if isT==1:
        gamma = -2
        W = np.random.chisquare(nu,M)
        myV = W/(1-2*gamma)
        V = np.transpose(np.sqrt(np.tile(myV,(N,1))/nu))
        num = (1/V)*myT.ppf(p,nu)*np.ones((M,1))-np.multiply(np.sqrt(myRho),G)
        pZ = norm.cdf(np.divide(num,np.sqrt(1-myRho)))
    elif isT==2:
        V = np.transpose(np.sqrt(np.tile(np.random.gamma(nu,1/nu,M),(N,1))))
        num = (1/V)*invVector*np.ones((M,1))-np.multiply(np.sqrt(myRho),G)
        pZ = norm.cdf(np.divide(num,np.sqrt(1-myRho)))
    else:
        pZ = th.computeP(p,myRho,G)
    for n in range(0,M):
        theta[n] = vc.getSaddlePoint(pZ[n,:],c,l,0.0)
        qZ[n,:] = getQ(theta[n],c,pZ[n,:])
        cgf[n] = vc.computeCGF(theta[n],pZ[n,:],c)
    I = np.transpose(1*np.less(e,norm.ppf(qZ)))
    L = np.dot(c,I)
    if isT==1:
        rnChi = np.exp(-gamma*myV-(nu/2)*np.log(1-2*gamma))
    else:
        rnChi = np.ones(M)
    if shiftMean==1:
        rn = computeRND(theta,L,cgf)*np.exp(-mu*G[:,0]+0.5*(mu**2))*rnChi
    else:
        rn = computeRND(theta,L,cgf)*rnChi
    tailProb = np.mean(np.multiply(L>l,rn)) 
    eShortfall =  np.mean(np.multiply(L*(L>l),rn))/tailProb        
    return tailProb,eShortfall 
Example #9
Source File: varianceReduction.py    From credit-risk-modelling with GNU General Public License v3.0
def isThresholdT(N,M,p,c,l,myRho,nu,cm=0):
    myShift = (1-2*cm)
    mu = getOptimalMeanShift(c,p,l,myRho)
    W = np.random.chisquare(nu,M)
    myV = W/myShift
    theta = np.zeros(M)
    cgf = np.zeros(M)
    qZ = np.zeros([M,N])
    V = np.transpose(np.sqrt(np.tile(myV,(N,1))/nu))
    e = np.random.normal(0,1,[M,N])
    G = np.transpose(np.tile(np.random.normal(mu,1,M),(N,1)))
    num = V*(myT.ppf(p,nu)*np.ones((M,1)))-np.sqrt(myRho)*G
    pZ = norm.cdf(np.divide(num,np.sqrt(1-myRho)))
    for n in range(0,M):
        theta[n] = vc.getSaddlePoint(pZ[n,:],c,l,0.0)
        qZ[n,:] = getQ(theta[n],c,pZ[n,:])
        cgf[n] = vc.computeCGF(theta[n],pZ[n,:],c)
    I = np.transpose(1*np.less(e,norm.ppf(qZ)))
    L = np.dot(c,I)
    rnChi = np.exp(-cm*myV-(nu/2)*np.log(myShift))
    rnMu=np.exp(-mu*G[:,0]+0.5*(mu**2))
    rnTwist = computeRND(theta,L,cgf)
    rn = rnChi*rnMu*rnTwist
    tailProb = np.mean(np.multiply(L>l,rn)) 
    eShortfall =  np.mean(np.multiply(L*(L>l),rn))/tailProb        
    return tailProb,eShortfall 
Example #10
Source File: varianceReduction.py    From credit-risk-modelling with GNU General Public License v3.0
def isThresholdContr(N,M,p,c,l,myRho,nu,cm=0):
    myShift = (1-2*cm)
    xhat = scipy.optimize.minimize(meanShiftOF,0.01, 
                                args=(c,p,l,myRho), 
                                method='SLSQP', jac=None)                             
    mu = xhat.x    
    theta = np.zeros(M)
    cgf = np.zeros(M)
    qZ = np.zeros([M,N])
    G = np.transpose(np.tile(np.random.normal(mu,1,M),(N,1)))
    e = np.random.normal(0,1,[M,N])
    W = np.random.chisquare(nu,M)
    myV = W/myShift
    V = np.transpose(np.sqrt(np.tile(myV,(N,1))/nu))
    num = V*myT.ppf(p,nu)*np.ones((M,1))-np.multiply(np.sqrt(myRho),G)
    pZ = norm.cdf(np.divide(num,np.sqrt(1-myRho)))
    for n in range(0,M):
        theta[n] = vc.getSaddlePoint(pZ[n,:],c,l,0.0)
        qZ[n,:] = getQ(theta[n],c,pZ[n,:])
        cgf[n] = vc.computeCGF(theta[n],pZ[n,:],c)
    I = np.transpose(1*np.less(e,norm.ppf(qZ)))
    L = np.dot(c,I)         
    rnChi=np.exp(-cm*myV-(nu/2)*np.log(myShift))
    rnMu=np.exp(-mu*G[:,0]+0.5*mu**2)
    rnTwist = computeRND(theta,L,cgf)
    rn = rnChi*rnMu*rnTwist
    return I,theta,pZ,qZ,cgf,rn 
Example #11
Source File: assetCorrelation.py    From credit-risk-modelling with GNU General Public License v3.0
def transformCumulativeTransitionMatrix(K,M_c):    
    H = np.zeros([K,K])
    for n in range(0,K):
        for m in range(0,K):
            if M_c[n,m]>=0.9999999:  
                H[n,m]=5
            elif M_c[n,m]<=0.0000001:
                H[n,m] = -5
            else:
                H[n,m] = norm.ppf(M_c[n,m])
    return H 
Example #12
Source File: thresholdModels.py    From credit-risk-modelling with GNU General Public License v3.0
def oneFactorGaussianModel(N,M,p,c,rho,alpha):
    Y = getGaussianY(N,M,p,rho)
    K = norm.ppf(p)*np.ones((M,1))        
    lossIndicator = 1*np.less(Y,K)     
    lossDistribution = np.sort(np.dot(lossIndicator,c),axis=None)
    el,ul,var,es=util.computeRiskMeasures(M,lossDistribution,alpha)
    return el,ul,var,es 
Example #13
Source File: anoms.py    From anomaly-detection with GNU General Public License v3.0
def _esd(x, max_outlier, alpha, direction):
    """
    The ESD test using median and MAD in the calculation of the test statistic.
    """
    x = Series(x)
    n = len(x)
    outlier_index = []
    for i in range(1, max_outlier + 1):
        median = x.median()
        mad = np.median([abs(value - median) for value in x]) * _MAD_CONSTANT
        if mad == 0:
            break
        if direction == 'both':
            ares = x.map(lambda value: abs(value - median) / mad)
        elif direction == 'pos':
            ares = x.map(lambda value: (value - median) / mad)
        elif direction == 'neg':
            ares = x.map(lambda value: (median - value) / mad)
        r_idx = ares.idxmax()
        r = ares[r_idx]
        if direction == 'both':
            p = 1.0 - alpha / (2 * (n - i + 1))
        else:
            p = 1.0 - alpha / (n - i + 1)
        crit = t.ppf(p, n-i-1)
        lam = (n-i)*crit / np.sqrt((n-i-1+crit**2) * (n-i+1))
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("%s/%s outlier. median=%s, mad=%s, r_idx=%s, r=%s, crit=%s, lam=%s" %
                         (i, max_outlier, median, mad, r_idx, r, crit, lam))
        if r > lam:
            outlier_index.append(r_idx)
            x = x.drop(r_idx)
        else:
            # The r keeps decreasing while lam keeps increasing. Therefore, when r is less than lam for the first time,
            # we can stop.
            break
    return outlier_index 
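A hedged sketch of the critical-value step in _esd for a single iteration, with hypothetical n, i, and alpha:

import numpy as np
from scipy.stats import t

n, i, alpha = 100, 1, 0.05
p = 1.0 - alpha / (2 * (n - i + 1))    # two-sided case
crit = t.ppf(p, n - i - 1)
lam = (n - i) * crit / np.sqrt((n - i - 1 + crit**2) * (n - i + 1))
print(crit, lam)                       # the i-th test statistic is compared against lam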
Example #14
Source File: thresholdModels.py    From credit-risk-modelling with GNU General Public License v3.0
def oneFactorThresholdLossDistribution(N,M,p,c,rho,nu,alpha,isT):
    Y = getY(N,M,p,rho,nu,isT)
    if isT==1:
        K = myT.ppf(p,nu)*np.ones((M,1))        
    else:
        K = norm.ppf(p)*np.ones((M,1))        
    lossIndicator = 1*np.less(Y,K)     
    lossDistribution = np.sort(np.dot(lossIndicator,c),axis=None)
    return lossDistribution 
Example #15
Source File: thresholdModels.py    From credit-risk-modelling with GNU General Public License v3.0
def asrfModel(myP,rho,c,alpha):
    myX = np.linspace(0.0001,0.9999,100)
    num = np.sqrt(1-rho)*norm.ppf(myX)-norm.ppf(myP)
    cdf = norm.cdf(num/np.sqrt(rho))
    pdf = util.asrfDensity(myX,myP,rho)
    varAnalytic = np.sum(c)*np.interp(alpha,cdf,myX)
    esAnalytic = asrfExpectedShortfall(alpha,myX,cdf,pdf,c,rho,myP)
    return pdf,cdf,varAnalytic,esAnalytic 
Example #16
Source File: thresholdModels.py    From credit-risk-modelling with GNU General Public License v3.0
def computeP_t(p,rho,y,w,nu):
    num = np.sqrt(w/nu)*myT.ppf(p,nu)-np.multiply(np.sqrt(rho),y)
    pZ = norm.cdf(np.divide(num,np.sqrt(1-rho)))
    return pZ 
Example #17
Source File: thresholdModels.py    From credit-risk-modelling with GNU General Public License v3.0
def computeP(p,rho,g):
    num = norm.ppf(p)-np.multiply(np.sqrt(rho),g)
    pG = norm.cdf(np.divide(num,np.sqrt(1-rho)))
    return pG 
Example #18
Source File: thresholdModels.py    From credit-risk-modelling with GNU General Public License v3.0
def buildDefaultCorrelationMatrix(a,b,pMean,regionId,nu):
    J = len(regionId)
    R = buildAssetCorrelationMatrix(a,b,regionId)    
    D = np.zeros([J,J])
    for n in range(0,J):
        p_n = pMean[n]
        for m in range(0,J):
            p_m = pMean[m]
            p_nm = bivariateTCdf(norm.ppf(p_n),norm.ppf(p_m),R[n,m],nu)
            D[n,m] = (p_nm - p_n*p_m)/math.sqrt(p_n*(1-p_n)*p_m*(1-p_m))
    return D 
Example #19
Source File: thresholdModels.py    From credit-risk-modelling with GNU General Public License v3.0
def multiFactorThresholdModel(N,M,a,b,rId,p,c,nu,alpha,isT):
    Y = getMultiFactorY(N,M,p,a,b,rId,nu,isT)
    if isT==1:
        K = myT.ppf(p,nu)*np.ones((M,1)) 
    else:
        K = norm.ppf(p)*np.ones((M,1))        
    lossIndicator = 1*np.less(Y,K)     
    lossDistribution = np.sort(np.dot(lossIndicator,c),axis=None)
    el,ul,var,es=util.computeRiskMeasures(M,lossDistribution,alpha)
    return el,ul,var,es 
Example #20
Source File: markovChain.py    From credit-risk-modelling with GNU General Public License v3.0
def printSEConfidenceInterval(myP,se,T):
    K = myP.shape[0]
    coeff = myT.ppf(1-0.05/2,T-1)
    for i in range(0,K):
        for j in range(0,K):
            low = np.maximum(myP[i,j]-coeff*se[i,j],0)
            up = np.minimum(myP[i,j]+coeff*se[i,j],1)
            if j!=(K-1):
                print("[%0.2f, %0.2f]" % (low,up) + " & ", end=" ")
            else:
                print("[%0.2f, %0.2f]" % (low,up) + "\\\\", end="\n") 
Example #21
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def confidence_interval(mean, stdev, count, alpha=.01):
    """Returns the lower and upper bounds of the confidence interval of a random
    variable. Confidence is 1 - alpha (default confidence is 99%)."""
    stdval = tdistr.ppf(1 - alpha / 2, count - 1)
    lower, upper = mean + np.array([-1, 1]) * stdval * stdev / np.sqrt(count)
    return lower, upper 
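A hedged usage sketch with made-up benchmark measurements (the sample values are not from incubator-tvm):

import numpy as np
from scipy.stats import t as tdistr

samples = np.array([10.2, 9.8, 10.5, 10.1, 9.9])    # hypothetical measurements
mean, stdev, count = samples.mean(), samples.std(ddof=1), len(samples)
stdval = tdistr.ppf(1 - 0.01 / 2, count - 1)        # alpha = 0.01, i.e. 99% confidence
lower, upper = mean + np.array([-1, 1]) * stdval * stdev / np.sqrt(count)
print(lower, upper)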
Example #22
Source File: snr_source.py    From VIP with MIT License
def significance(snr, rad, fwhm, student_to_gauss=True):
    """ Converts a S/N ratio (measured as in Mawet et al. 2014) into the 
    equivalent gaussian significance, i.e. the n-sigma with the same confidence 
    level as the S/N at the given separation.
     
     
    Parameters
    ----------
    snr : float or numpy array
        SNR value(s)
    rad : float or numpy array
        Radial separation(s) from the star in pixels. If an array, it should be
        the same shape as snr and provide the radial separation corresponding
        to each snr measurement.
    fwhm : float
        Full Width Half Maximum of the PSF.
    student_to_gauss : bool, optional
        Whether the conversion is from Student SNR to Gaussian significance. If 
        False, will assume the opposite: Gaussian significance to Student SNR.
    
    Returns
    -------
    sigma : float
        Gaussian significance in terms of n-sigma
    
    """
    
    if student_to_gauss:
        sigma = norm.ppf(t.cdf(snr,(rad/fwhm)*2*np.pi-2))
    else:
        sigma = t.ppf(norm.cdf(snr), (rad/fwhm)*2*np.pi-2)
        
    return sigma 
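A hedged numerical check of the round trip, with hypothetical snr, rad, and fwhm values:

import numpy as np
from scipy.stats import norm, t

snr, rad, fwhm = 5.0, 2.0, 4.0
df = (rad / fwhm) * 2 * np.pi - 2        # degrees of freedom at this separation
sigma = norm.ppf(t.cdf(snr, df))         # Student S/N -> Gaussian significance
back = t.ppf(norm.cdf(sigma), df)        # Gaussian significance -> Student S/N
print(sigma, back)                       # back ~= snr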
Example #23
Source File: copula.py    From pycopula with Apache License 2.0
def pdf(self, x):
		self._check_dimension(x)
		u_i = norm.ppf(x)
		return self._R_det**(-0.5) * np.exp(-0.5 * np.dot(u_i, np.dot(self._R_inv - np.identity(self.dim), u_i))) 
Example #24
Source File: hypothesis.py    From hypothetical with MIT License
def _clopper_pearson_interval(self):
        r"""
        Computes the Clopper-Pearson 'exact' confidence interval.

        References
        ----------
        Wikipedia contributors. (2018, July 14). Binomial proportion confidence interval.
            In Wikipedia, The Free Encyclopedia. Retrieved 00:40, August 15, 2018,
            from https://en.wikipedia.org/w/index.php?title=Binomial_proportion_confidence_interval&oldid=850256725

        """
        p = self.x / self.n

        if self.alternative == 'less':
            lower_bound = 0.0
            upper_bound = beta.ppf(1 - self.alpha, self.x + 1, self.n - self.x)
        elif self.alternative == 'greater':
            upper_bound = 1.0
            lower_bound = beta.ppf(self.alpha, self.x, self.n - self.x + 1)
        else:
            lower_bound = beta.ppf(self.alpha / 2, self.x, self.n - self.x + 1)
            upper_bound = beta.ppf(1 - self.alpha / 2, self.x + 1, self.n - self.x)

        clopper_pearson_interval = {
            'probability of success': p,
            'conf level': 1 - self.alpha,
            'interval': (lower_bound, upper_bound)
        }

        return clopper_pearson_interval 
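A hedged standalone sketch of the two-sided branch with hypothetical counts (x successes out of n trials):

from scipy.stats import beta

x, n, alpha = 7, 20, 0.05
lower_bound = beta.ppf(alpha / 2, x, n - x + 1)
upper_bound = beta.ppf(1 - alpha / 2, x + 1, n - x)
print(lower_bound, upper_bound)    # roughly (0.15, 0.59)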
Example #25
Source File: nonparametric.py    From hypothetical with MIT License
def _normal_scores(self):
        r"""
        Calculates the normal scores used in the Van der Waerden test.

        Returns
        -------
        score_matrix : array-like
            Numpy ndarray representing the data matrix with ranked observations and computed normal test scores.

        Notes
        -----
        Let :math:`n_j`, be the number of samples for each of the :math:`k` groups where :math:`j` is the j-th group.
        :math:`N` is the number of total samples in all groups, while :math:`X_{ij}` is the i-th value of the j-th
        group. The normal scores used in the Van der Waerden test are calculated as:

        .. math::

            A_{ij} = \phi^{-1} \left( \frac{R \left( X_{ij} \right)}{N + 1} \right)

        References
        ----------
        Conover, W. J. (1999). Practical Nonparameteric Statistics (Third ed.). Wiley.

        Wikipedia contributors. "Van der Waerden test." Wikipedia, The Free Encyclopedia.
            Wikipedia, The Free Encyclopedia, 8 Feb. 2017. Web. 8 Mar. 2020.

        """
        aij = norm.ppf(list(self.ranked_matrix[:, 2] / (self.n + 1)))
        score_matrix = np.column_stack([self.ranked_matrix, aij])

        return score_matrix 
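A hedged standalone sketch of the normal scores A_ij = Phi^{-1}(R(X_ij)/(N + 1)) using a small made-up sample:

import numpy as np
from scipy.stats import norm, rankdata

x = np.array([2.3, 5.1, 1.8, 4.4, 3.0])   # hypothetical observations
ranks = rankdata(x)                        # R(X_ij)
aij = norm.ppf(ranks / (len(x) + 1))       # normal scores
print(aij)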
Example #26
Source File: regression.py    From pingouin with GNU General Public License v3.0
def _bca(ab_estimates, sample_point, n_boot, alpha=0.05):
    """Get (1 - alpha) * 100 bias-corrected confidence interval estimate

    Note that this is similar to the "cper" module implemented in
    :py:func:`pingouin.compute_bootci`.

    Parameters
    ----------
    ab_estimates : 1d array-like
        Array with bootstrap estimates for each sample.
    sample_point : float
        Indirect effect point estimate based on full sample.
    n_boot : int
        Number of bootstrap samples
    alpha : float
        Alpha for confidence interval

    Returns
    -------
    CI : 1d array-like
        Lower limit and upper limit bias-corrected confidence interval
        estimates.
    """
    # Bias of bootstrap estimates
    z0 = norm.ppf(np.sum(ab_estimates < sample_point) / n_boot)
    # Adjusted intervals
    adjusted_ll = norm.cdf(2 * z0 + norm.ppf(alpha / 2)) * 100
    adjusted_ul = norm.cdf(2 * z0 + norm.ppf(1 - alpha / 2)) * 100
    ll = np.percentile(ab_estimates, q=adjusted_ll)
    ul = np.percentile(ab_estimates, q=adjusted_ul)
    return np.array([ll, ul]) 
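A hedged standalone run of the same bias-correction steps with simulated bootstrap estimates (synthetic data, not from pingouin):

import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
ab_estimates = rng.normal(loc=0.5, scale=0.1, size=1000)     # fake bootstrap draws
sample_point, n_boot, alpha = 0.48, 1000, 0.05

z0 = norm.ppf(np.sum(ab_estimates < sample_point) / n_boot)  # bias term
adjusted_ll = norm.cdf(2 * z0 + norm.ppf(alpha / 2)) * 100
adjusted_ul = norm.cdf(2 * z0 + norm.ppf(1 - alpha / 2)) * 100
print(np.percentile(ab_estimates, q=adjusted_ll),
      np.percentile(ab_estimates, q=adjusted_ul))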
Example #27
Source File: risk.py    From cryptotrader with MIT License
def CVaR(mu, sig, alpha=0.01):
    return alpha ** -1 * norm.pdf(norm.ppf(alpha)) * sig - mu


# Student T CVaR 
Example #28
Source File: risk.py    From cryptotrader with MIT License
def TCVaR(mu, sig, nu, h=1, alpha=0.01):
    xanu = t.ppf(alpha, nu)
    return -1 / alpha * (1 - nu) ** (-1) * (nu - 2 + xanu ** 2) * t.pdf(xanu, nu) * sig - h * mu 
Example #29
Source File: copula.py    From pycopula with Apache License 2.0
def cdf(self, x):
		self._check_dimension(x)
		return multivariate_normal.cdf([ norm.ppf(u) for u in x ], cov=self.R) 
Example #30
Source File: pytorch_utils.py    From sanet_relocal_demo with GNU General Public License v3.0
def print_metrics(self):
        for name, samples in self.metrics.items():
            xbar = stats.mean(samples)
            sx = stats.stdev(samples, xbar)
            tstar = student_t.ppf(1.0 - 0.025, len(samples) - 1)
            margin_of_error = tstar * sx / sqrt(len(samples))
            print("{}: {} +/- {}".format(name, xbar, margin_of_error))