Python math.exp() Examples

The following are code examples showing how to use math.exp(). They are extracted from open source Python projects.
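
As a quick standalone reminder (not taken from any of the projects below), math.exp(x) returns e raised to the power x as a float:

import math

print(math.exp(0))     # 1.0
print(math.exp(1))     # e, approximately 2.718281828459045
print(math.exp(-2.5))  # a decay factor, approximately 0.0821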

Example 1
Project: facebook-message-analysis   Author: szheng17   File: conversation.py    (MIT License)
def exp_damped_minute_difference(self, dt1, dt2, alpha):
        """
        Computes exp(-alpha * t), where t is the difference between two
        datetimes in minutes.

        Args:
            dt1: A datetime such that dt1 >= dt2.
            dt2: A datetime such that dt1 >= dt2.
            alpha: A nonnegative float representing the damping factor.

        Returns:
            A float equal to exp(-alpha * t), where t is the difference between
                two datetimes in minutes.

        """
        if dt1 < dt2:
            raise ValueError('Must have dt1 >= dt2')
        if alpha < 0:
            raise ValueError('Must have alpha >= 0')
        t = self.minute_difference(dt1, dt2)
        return math.exp(-alpha * t) 
Example 2
Project: facebook-message-analysis   Author: szheng17   File: conversation.py    (MIT License)
def exp_damped_day_difference(self, dt1, dt2, alpha):
        """
        Computes exp(-alpha * t), where t is the difference between two
        datetimes in days.

        Args:
            dt1: A datetime such that dt1 >= dt2.
            dt2: A datetime such that dt1 >= dt2.
            alpha: A nonnegative float representing the damping factor.

        Returns:
            A float equal to exp(-alpha * t), where t is the difference between
                two datetimes in days.

        """
        if dt1 < dt2:
            raise ValueError('Must have dt1 >= dt2')
        if alpha < 0:
            raise ValueError('Must have alpha >= 0')
        minute_diff = self.minute_difference(dt1, dt2)
        day_diff = float(minute_diff) / (self.HOURS_PER_DAY * self.MINUTES_PER_HOUR)
        return math.exp(-alpha * day_diff) 
Example 3
Project: Lattice-Based-Signatures   Author: krishnacharya   File: Gaussian_sampling.py    (license)
def Bernoulli_exp(x):
	'''
	Description:
	Algorithm 8 in BLISS paper

	Sample according to exp(-x/f) for x ∈ [0, 2^l),
	where x is an integer whose binary representation has length l.
	f is a real.

	i/p:
	x: int
	f: float
	'''
	bin_rep = list(map(int, bin(x)[2:])) # list of 0's and 1's representing x; msb first as usual
	d = len(bin_rep) # length of the current integer in binary, d < l

	# starting from l-1, as then smallest probabilities are checked first and algorithm terminates faster 
	for i in range(0, d):
		if(bin_rep[i]):
			A = Bernoulli_rv(c[d-i-1])
			if not A:
				return 0
	return 1

# uses the same fixed real f 
Example 4
Project: Python4ScientificComputing_Fundamentals   Author: bnajafi   File: PlottingSpectralEmissivities.py    (MIT License)
def spectralBlackBody(Lambda=0.7, T=5800):
    """ here is the explanation of this function"""
    import math 

    c0 = 2.9979*10**8 #m/s speed of light in vacuum
    h_Plank=6.626069*10**-34 #J.s Planck's constant
    sigma_stefan_Boltzmann= 5.67*10**-8 #Stefan-Boltzmann Constant
    n=1 #the index of refraction of that medium
    c=c0/n# the speed of propagation of a wave in the medium 
    F=c/Lambda #the frequency of the wave
    e_wave=h_Plank*F
    E_blackBody = sigma_stefan_Boltzmann*T**4
    k_Boltzmann=1.38065*10**-23 #J/K Boltzmann Constant
    
    #Planck's Law: 
    C1=2*math.pi*h_Plank*c0**2*(10**24)#J*s*m^2/s^2--W*m2 --->W 
    C2=h_Plank*c0/k_Boltzmann*(10**6) #microm m/K

    EmissiveSpectral= C1/(Lambda**5*(math.exp(C2/(Lambda*T))-1))
    outPut = {"EmissiveSpectral":EmissiveSpectral,"E_blackBody":E_blackBody}
    return outPut 
Example 5
Project: ssbio   Author: SBRG   File: kinetic_folding_rate.py    (MIT License)
def get_folding_rate_for_seq(seq, secstruct, temp, refT=37.0):
    """Scale the predicted kinetic folding rate of a protein to temperature T, based on the relationship ln(k_f)?1/T

    Args:
        seq (str, Seq, SeqRecord): Amino acid sequence
        secstruct (str): Structural class: ``all-alpha``, ``all-beta``, ``mixed``, or ``unknown``
        temp (float): Temperature in degrees C
        refT (float): Reference temperature, default to 37 C

    Returns:
        float: Kinetic folding rate k_f at temperature T.

    """

    # Not much data available on this slope value, however its effect on growth rate in a model is very small
    slope = 22000

    # Get folding rate for the reference temperature
    ref_rate = get_foldrate(seq, secstruct)
    preFactor = float(ref_rate) + slope / (float(refT) + 273.15)

    # Calculate folding rate at desired temperature
    rate = math.exp(preFactor - slope / (float(temp) + 273.15))

    return rate 
Example 6
Project: pyrsss   Author: butala   File: rms_model.py    (MIT License)
def fit_power_law(x, y):
    """
    """
    ln_x = NP.log(x)
    ln_y = NP.log(y)
    # least squares solution
    A = NP.empty((len(x), 2))
    A[:, 0] = 1
    A[:, 1] = ln_x
    #b_ls = NP.linalg.lstsq(A, ln_y)[0]
    # total least-squares solution
    X = NP.empty((len(x), 3))
    X[:, :2] = A
    X[:, 2] = ln_y
    U, S, V = NP.linalg.svd(X, 1)
    b_tls = (V[-1, :] / -V[-1, -1])[:2]
    alpha = math.exp(b_tls[0])
    beta = b_tls[1]
    return alpha, beta 
Example 7
Project: AerialCrackDetection_Keras   Author: TTMRonald   File: roi.py    (license)
def apply_regr(x, y, w, h, tx, ty, tw, th):
	try:
		cx = x + w/2.
		cy = y + h/2.
		cx1 = tx * w + cx
		cy1 = ty * h + cy
		w1 = math.exp(tw) * w
		h1 = math.exp(th) * h
		x1 = cx1 - w1/2.
		y1 = cy1 - h1/2.
		x1 = int(round(x1))
		y1 = int(round(y1))
		w1 = int(round(w1))
		h1 = int(round(h1))

		return x1, y1, w1, h1

	except ValueError:
		return x, y, w, h
	except OverflowError:
		return x, y, w, h
	except Exception as e:
		print(e)
		return x, y, w, h 
Example 8
Project: NarshaTech   Author: KimJangHyeon   File: zoom.py    (license)
def pixel_to_lonlat(self, px, zoom):
        "Converts a pixel to a longitude, latitude pair at the given zoom level."
        if len(px) != 2:
            raise TypeError('Pixel should be a sequence of two elements.')

        # Getting the number of pixels for the given zoom level.
        npix = self._npix[zoom]

        # Calculating the longitude value, using the degrees per pixel.
        lon = (px[0] - npix) / self._degpp[zoom]

        # Calculating the latitude value.
        lat = RTOD * (2 * atan(exp((px[1] - npix) / (-1.0 * self._radpp[zoom]))) - 0.5 * pi)

        # Returning the longitude, latitude coordinate pair.
        return (lon, lat) 
Example 9
Project: FrankWolfe   Author: neu-spiral   File: modularDistFW.py    (license)
def  gen_comm_info(self,main_rdd):
        def cominfo(tpl):
            p=[]
            for ((tx,lam),index) in tpl:
                p.append(np.matrix(tx).T*lam)
            return p    
        def findDim(tpl):
            for ((tx,lam),index) in tpl:
                d = len(tx)
            return d
        d = main_rdd.mapValues(findDim).values().reduce(lambda x,y:x)
        c=main_rdd.flatMapValues(cominfo).map(lambda (key,value):value).reduce(lambda x,y:x+y)
        V=matrix(0.0,(d,1))
        for j in range(d):
            V[j]=math.exp(-self.C*self.r[j]*c[j,0])    
        return d,V 
Example 10
Project: p2pool-bch   Author: amarian12   File: math.py    (license)
def erf(x):
    # save the sign of x
    sign = 1
    if x < 0:
        sign = -1
    x = abs(x)
    
    # constants
    a1 =  0.254829592
    a2 = -0.284496736
    a3 =  1.421413741
    a4 = -1.453152027
    a5 =  1.061405429
    p  =  0.3275911
    
    # A&S formula 7.1.26
    t = 1.0/(1.0 + p*x)
    y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)
    return sign*y # erf(-x) = -erf(x) 
Example 11
Project: keras-frcnn   Author: yhenon   File: roi_helpers.py    (license)
def apply_regr(x, y, w, h, tx, ty, tw, th):
	try:
		cx = x + w/2.
		cy = y + h/2.
		cx1 = tx * w + cx
		cy1 = ty * h + cy
		w1 = math.exp(tw) * w
		h1 = math.exp(th) * h
		x1 = cx1 - w1/2.
		y1 = cy1 - h1/2.
		x1 = int(round(x1))
		y1 = int(round(y1))
		w1 = int(round(w1))
		h1 = int(round(h1))

		return x1, y1, w1, h1

	except ValueError:
		return x, y, w, h
	except OverflowError:
		return x, y, w, h
	except Exception as e:
		print(e)
		return x, y, w, h 
Example 12
Project: allennlp   Author: allenai   File: conditional_random_field_test.py    (license)
def test_forward_works_without_mask(self):
        log_likelihood = self.crf(self.logits, self.tags).data[0]

        # Now compute the log-likelihood manually
        manual_log_likelihood = 0.0

        # For each instance, manually compute the numerator
        # (which is just the score for the logits and actual tags)
        # and the denominator
        # (which is the log-sum-exp of the scores for the logits across all possible tags)
        for logits_i, tags_i in zip(self.logits, self.tags):
            numerator = self.score(logits_i.data, tags_i.data)
            all_scores = [self.score(logits_i.data, tags_j) for tags_j in itertools.product(range(5), repeat=3)]
            denominator = math.log(sum(math.exp(score) for score in all_scores))
            # And include them in the manual calculation.
            manual_log_likelihood += numerator - denominator

        # The manually computed log likelihood should equal the result of crf.forward.
        assert manual_log_likelihood == approx(log_likelihood) 
Example 13
Project: fem   Author: mlp6   File: GaussExc.py    (license)
def calc_gauss_amp(node_xyz, center=(0.0, 0.0, -2.0), sigma=(1.0, 1.0, 1.0),
                   amp=1.0, amp_cut=0.05, sym="qsym"):
    """calculated the Gaussian amplitude at the node

    :param node_xyz: list of x,y,z node coordinates
    :param center: list of x,y,z for Gaussian center
    :param sigma: list of x,y,z Gaussian widths
    :param amp: peak Gaussian source amplitude
    :param amp_cut: lower threshold (pct of max) for amplitude creating a
                    point load
    :param sym: mesh symmetry (qsym, hsym, none)
    :returns: nodeGaussAmp - point load amplitude at the specified node
    """
    from math import pow, exp
    exp1 = pow((node_xyz[1] - center[0]) / sigma[0], 2)
    exp2 = pow((node_xyz[2] - center[1]) / sigma[1], 2)
    exp3 = pow((node_xyz[3] - center[2]) / sigma[2], 2)
    nodeGaussAmp = amp * exp(-(exp1 + exp2 + exp3))

    if (nodeGaussAmp / amp) < amp_cut:
        nodeGaussAmp = None
    else:
        nodeGaussAmp = sym_scale_amp(node_xyz, nodeGaussAmp, sym)

    return nodeGaussAmp 
Example 14
Project: numba-examples   Author: numba   File: blackscholes_numba.py    (license)
def black_scholes_numba(stockPrice, optionStrike,
                        optionYears, Riskfree, Volatility):
    callResult = np.empty_like(stockPrice)
    putResult = np.empty_like(stockPrice)

    S = stockPrice
    X = optionStrike
    T = optionYears
    R = Riskfree
    V = Volatility
    for i in range(len(S)):
        sqrtT = math.sqrt(T[i])
        d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
        d2 = d1 - V * sqrtT
        cndd1 = cnd_numba(d1)
        cndd2 = cnd_numba(d2)

        expRT = math.exp((-1. * R) * T[i])
        callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
        putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1))

    return callResult, putResult 
Example 15
Project: numba-examples   Author: numba   File: blackscholes_cuda.py    (license)
def black_scholes_cuda_kernel(callResult, putResult, S, X,
                       T, R, V):
    #    S = stockPrice
    #    X = optionStrike
    #    T = optionYears
    #    R = Riskfree
    #    V = Volatility
    i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
    if i >= S.shape[0]:
        return
    sqrtT = math.sqrt(T[i])
    d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
    d2 = d1 - V * sqrtT
    cndd1 = cnd_cuda(d1)
    cndd2 = cnd_cuda(d2)

    expRT = math.exp((-1. * R) * T[i])
    callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
    putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1)) 
Example 16
Project: smt-for-gec   Author: cnap   File: gleu.py    (license)
def gleu(self, stats, smooth=False):
        """Compute GLEU from collected statistics obtained by call(s) to gleu_stats"""
        # smooth 0 counts for sentence-level scores
        if smooth:
            stats = [s if s != 0 else 1 for s in stats]
        if len(filter(lambda x: x == 0, stats)) > 0:
            return 0
        (c, r) = stats[:2]
        log_gleu_prec = sum([math.log(float(x) / y)
                             for x, y in zip(stats[2::2], stats[3::2])]) / 4
        for i, (x, y) in enumerate(zip(stats[2::2], stats[3::2])) :
            pass
            #print 'Precision', i+1, '=', x, '/', y, '=', 1.*x/y
#        log_gleu_prec = sum([math.log(float(x) / y)
#                             for x, y in zip(stats[2::2], stats[3::2])]) / 4

        return math.exp(min([0, 1 - float(r) / c]) + log_gleu_prec) 
Example 17
Project: MIT-Thesis   Author: alec-heif   File: rddsampler.py    (license)
def getPoissonSample(self, mean):
        # Using Knuth's algorithm described in
        # http://en.wikipedia.org/wiki/Poisson_distribution
        if mean < 20.0:
            # one exp and k+1 random calls
            l = math.exp(-mean)
            p = self._random.random()
            k = 0
            while p > l:
                k += 1
                p *= self._random.random()
        else:
            # switch to the log domain, k+1 expovariate (random + log) calls
            p = self._random.expovariate(mean)
            k = 0
            while p < 1.0:
                k += 1
                p += self._random.expovariate(mean)
        return k 
Example 18
Project: nojs   Author: chrisdickinson   File: perf_tests_results_helper.py    (license)
def GeomMeanAndStdDevFromHistogram(histogram_json):
  histogram = json.loads(histogram_json)
  # Handle empty histograms gracefully.
  if not 'buckets' in histogram:
    return 0.0, 0.0
  count = 0
  sum_of_logs = 0
  for bucket in histogram['buckets']:
    if 'high' in bucket:
      bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
    else:
      bucket['mean'] = bucket['low']
    if bucket['mean'] > 0:
      sum_of_logs += math.log(bucket['mean']) * bucket['count']
      count += bucket['count']

  if count == 0:
    return 0.0, 0.0

  sum_of_squares = 0
  geom_mean = math.exp(sum_of_logs / count)
  for bucket in histogram['buckets']:
    if bucket['mean'] > 0:
      sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
  return geom_mean, math.sqrt(sum_of_squares / count) 
Example 19
Project: pyqha   Author: mauropalumbo75   File: alphagruneisen.py    (license)
def c_qv2(T,omega):
    x = omega * kb1 / T 
    expx = math.exp(-x)   # exponential term
    x2 = math.pow(x,2)

    return x2*K_BOLTZMANN_RY*expx/math.pow(expx-1.0,2)


################################################################################
# 
# This function computes the thermal expansions alpha using the Gruneisein 
# parameters
# more comments to be added
# First with min0, freq and grun T-independent
#
# More ibrav types to be implemented 
Example 20
Project: pyqha   Author: mauropalumbo75   File: alphagruneisenp.py    (license)
def c_qv_python(T,omega):
    """
    This function calculates the mode contribution to the heat capacity at a given T
    and omega. A similar (faster) function should be available as C extension.
    """
    #print ("Python c_qv")
    if (T<1E-9 or omega<1E-9):
        return 0.0
    x = omega * KB1 / T 
    expx = math.exp(-x)   # exponential term
    x2 = math.pow(x,2)
    if expx>1E-3:           # compute normally
        return x2*K_BOLTZMANN_RY*expx/math.pow(expx-1.0,2)
    else:                   # Taylor series
        return K_BOLTZMANN_RY*expx* (x/math.pow(x-0.5*math.pow(x,2)+ 
        0.16666666666666667*math.pow(x,3)+0.04166666666666666667*math.pow(x,4),2))

################################################################################
# 
# If available use a c version of the function c_qv, else use the (slower)
# Python version
# 
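
The comment above describes a simple fallback: import the compiled implementation if it is available, otherwise use the pure-Python c_qv_python defined in Example 20. A minimal sketch of that pattern (the extension module name c_qv_ext is an assumption, not the project's actual name):

try:
    from c_qv_ext import c_qv    # hypothetical compiled C extension
except ImportError:
    c_qv = c_qv_python           # fall back to the slower pure-Python version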
Example 21
Project: microbiome-summer-school-2017   Author: aldro61   File: bench.py    (license)
def GS_kernel_precomp_P(str1, str2, psiDict, sigmaPos, sigmaAA, L, P):
    len_str1 = len(str1)
    len_str2 = len(str2)

    A = np.zeros((len_str1, len_str2))
    for i in xrange(len_str1):
        for j in xrange(len_str2):
            try:
                A[i, j] = psiDict[str1[i], str2[j]]
            except:
                if str1[i] != str2[j]:
                    A[i, j] = 4.0
    A /= -2.0 * (sigmaAA ** 2.0)
    A = np.exp(A)

    B = np.zeros((len_str1, len_str2))
    for i in xrange(len_str1):
        for j in xrange(len_str2):
            tmp = 1.0
            for l in xrange(L):
                if i + l < len_str1 and j + l < len_str2:
                    tmp *= A[i + l, j + l]
                    B[i, j] += tmp

    return np.sum(P * B) 
Example 22
Project: Computer-graphics   Author: Panda-Lewandowski   File: lab10.py    (license)
def __init__(self):
        QtWidgets.QWidget.__init__(self)
        uic.loadUi("window.ui", self)
        self.scene = QGraphicsScene(0, 0, 711, 601)
        self.scene.win = self
        self.view.setScene(self.scene)
        self.image = QImage(710, 600, QImage.Format_Alpha8)
        self.image.fill(black)
        self.pen = QPen(black)
        self.draw.clicked.connect(lambda: draw(self))
        self.dial_x.valueChanged.connect(lambda: draw(self))
        self.dial_y.valueChanged.connect(lambda: draw(self))
        self.dial_z.valueChanged.connect(lambda: draw(self))
        self.funcs.addItem("cos(x) * sin(z)")
        self.funcs.addItem("2 * cos(x * z)")
        self.funcs.addItem("exp(sin(sqrt(x^2 + z^2)))")
        self.funcs.addItem("x^2 / 20 + z^2 / 20")
        self.funcs.addItem("|sin(x) * sin(z)|") 
Example 23
Project: DHP   Author: YuhangSong   File: suppor_lib.py    (license)
def get_transfered_data(lon, lat, theta, data_frame, max_distance_on_position=1.0*math.pi, max_distance_on_degree=180.0, final_discount_to=10**(-4)):

    distance_on_position = haversine(lon1=lon,
                                     lat1=lat,
                                     lon2=data_frame.p[0],
                                     lat2=data_frame.p[1])

    distance_on_degree = abs(theta - data_frame.theta)
    if(distance_on_degree>180):
        distance_on_degree = distance_on_degree - 180

    thegma_2_on_position = -0.5*(max_distance_on_position**2)/math.log(final_discount_to)
    thegma_2_on_degree = -0.5*(max_distance_on_degree**2)/math.log(final_discount_to)

    '''Gaussian trustworthy transfer'''
    prob = 1.0 * math.exp(-1.0 / 2.0 * (distance_on_position**2) / (thegma_2_on_position)) * math.exp(-1.0 / 2.0 * (distance_on_degree**2) / (thegma_2_on_degree))

    return prob 
Example 24
Project: bandit-nmt   Author: khanhptnk   File: Bleu.py    (license)
def _compute_bleu(p, len_pred, len_gold, smooth):
    # Brevity penalty.
    log_brevity = 1 - max(1, (len_gold + smooth) / (len_pred + smooth))
    log_score = 0
    ngrams = len(p) - 1
    for n in range(1, ngrams + 1):
        if p[n][1] > 0:
            if p[n][0] == 0:
                p[n][0] = 1e-16
            log_precision = math.log((p[n][0] + smooth) / (p[n][1] + smooth))
            log_score += log_precision
    log_score /= ngrams
    return math.exp(log_score + log_brevity)


# Calculate BLEU of prefixes of pred. 
Example 25
Project: drl.pth   Author: seba-1511   File: policies.py    (license)
def __init__(self, model, action_size=1, init_value=0.0, *args, **kwargs):
        super(DiagonalGaussianPolicy, self).__init__(model, *args, **kwargs)
        self.init_value = init_value
        self.logstd = th.zeros((1, action_size)) + self.init_value
        self.logstd = P(self.logstd)
        self.halflog2pie = V(T([2 * pi * exp(1)])) * 0.5
        self.halflog2pi = V(T([2.0 * pi])) * 0.5
        self.pi = V(T([pi])) 
Example 26
Project: drl.pth   Author: seba-1511   File: policies.py    (license)
def _normal(self, x, mean, logstd):
        std = logstd.exp()
        std_sq = std.pow(2)
        a = (-(x - mean).pow(2) / (2 * std_sq)).exp()
        b = (2 * std_sq * self.pi.expand_as(std_sq)).sqrt()
        return a / b 
Example 27
Project: drl.pth   Author: seba-1511   File: policies.py    (license)
def forward(self, x, *args, **kwargs):
        action = super(DiagonalGaussianPolicy, self).forward(x, *args, **kwargs)
        size = action.raw.size()
        std = self.logstd.exp().expand_as(action.raw)
        value = action.raw + std * V(th.randn(size))
        value = value.detach()
        action.value = value
#        action.logstd = self.logstd.clone()
        action.logstd = self.logstd
        action.prob = lambda: self._normal(value, action.raw, action.logstd)
        action.entropy = action.logstd + self.halflog2pie
        var = std.pow(2)
        action.compute_log_prob = lambda a: (- ((a - action.raw).pow(2) / (2.0 * var)) - self.halflog2pi - action.logstd).mean(1)
        action.log_prob = action.compute_log_prob(value)
        return action 
Example 28
Project: facebook-message-analysis   Author: szheng17   File: conversation.py    (MIT License)
def get_user_to_damped_n_messages(self, dt_max, alpha):
        """
        Maps each user to the number of messages before a reference datetime, 
        where each message count is exponentially damped by a constant times
        the difference between the reference datetime and the datetime of the
        message.
        
        Args:
            dt_max: A datetime representing the max datetime of messages
                to consider.
            alpha: A nonnegative float representing the damping factor.

        Returns:
            user_to_damped_n_messages: A dict mapping each user in
                self.users_union to the damped number of messages by that user
                before dt_max. The contribution of a message is a float equal
                to exp(-alpha * t), where t is the difference in days between
                dt_max and the datetime of the message.
        """
        if alpha < 0:
            raise ValueError('Must have alpha >= 0')
        try:
            # Only keep messages with datetimes <= dt_max
            filtered = self.filter_by_datetime(end_dt=dt_max)
        except EmptyConversationError:
            # Map all users to 0 if dt_max occurs before all messages
            return self.get_user_to_message_statistic(lambda x: 0)
        damped_message_count = lambda x: self.exp_damped_day_difference(dt_max, x.timestamp, alpha)
        user_to_damped_n_messages = filtered.get_user_to_message_statistic(damped_message_count)
        return user_to_damped_n_messages 
Example 29
Project: facebook-message-analysis   Author: szheng17   File: conversation.py    (MIT License)
def damped_n_messages(self, dt_max, alpha):
        """
        Computes the sum of damped message counts before a reference datetime,
        where each damped message count is exponentially damped by a constant
        times the difference between the reference datetime and the datetime of
        the message.

        Args:
            dt_max: A datetime representing the max datetime of messages to
                consider.
            alpha: A nonnegative float representing the damping factor.

        Returns:
            damped_n_messages_total: A float equal to the sum of damped message
                counts before dt_max. The contribution of a message is
                exp(-alpha * t), where t is the difference in days between
                dt_max and the datetime of the message.
        """
        if alpha < 0:
            raise ValueError('Must have alpha >= 0')
        try:
            # Only keep messages with datetimes <= dt_max
            filtered = self.filter_by_datetime(end_dt=dt_max)
        except EmptyConversationError:
            # dt_max occurs before all messages
            return 0
        damped_message_count = lambda x: self.exp_damped_day_difference(dt_max, x.timestamp, alpha)
        damped_n_messages_total = filtered.sum_conversation_message_statistic(damped_message_count)
        return damped_n_messages_total 
Example 30
Project: Causality   Author: vcla   File: causal_grammar_summerdata.py    (MIT License)
def weibull(t1, t2, lam, k):
	return 1 - exp(pow(t1 / lam,k) - pow(t2 / lam, k)) 
Example 31
Project: python-   Author: secondtonone1   File: random.py    (license)
def lognormvariate(self, mu, sigma):
        """Log normal distribution.

        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.

        """
        return _exp(self.normalvariate(mu, sigma))

## -------------------- exponential distribution -------------------- 
Example 32
Project: zipline-chinese   Author: zhanghan1990   File: period.py    (Apache License 2.0)
def calculate_max_drawdown(self):
        compounded_returns = []
        cur_return = 0.0
        for r in self.algorithm_returns:
            try:
                cur_return += math.log(1.0 + r)
            # this is a guard for a single day returning -100%, if returns are
            # greater than -1.0 it will throw an error because you cannot take
            # the log of a negative number
            except ValueError:
                log.debug("{cur} return, zeroing the returns".format(
                    cur=cur_return))
                cur_return = 0.0
            compounded_returns.append(cur_return)

        cur_max = None
        max_drawdown = None
        for cur in compounded_returns:
            if cur_max is None or cur > cur_max:
                cur_max = cur

            drawdown = (cur - cur_max)
            if max_drawdown is None or drawdown < max_drawdown:
                max_drawdown = drawdown

        if max_drawdown is None:
            return 0.0

        return 1.0 - math.exp(max_drawdown) 
Example 33
Project: kinect-2-libras   Author: inessadl   File: random.py    (Apache License 2.0)
def lognormvariate(self, mu, sigma):
        """Log normal distribution.

        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.

        """
        return _exp(self.normalvariate(mu, sigma))

## -------------------- exponential distribution -------------------- 
Example 34
Project: Lattice-Based-Signatures   Author: krishnacharya   File: Gaussian_sampling.py    (license)
def Bernoulli_cosh(x):
	'''
	Sample according to 1/cosh(x/f)
	Extends corollary 6.4 from BLISS paper
	'''
	powx = abs(x)
	while(True):
		A = Bernoulli_exp(powx) # each iteration this changes as randomness comes from Bernoulli_exp exp(-|x|/f)
		if(A):
			return 1
		B = Bernoulli_rv(0.5) or Bernoulli_exp(powx)  # has to be a separate Bernoulli_exp(powx) call as we don't want dependence on A
		if not(B):			
			return 0 
Example 35
Project: Lattice-Based-Signatures   Author: krishnacharya   File: BLISS.py    (license)
def Sign(**kwargs):
	'''
	Algorithm 1, Pg 12 of BLISS paper
	o/p:
	z,c 
	'''
	msg, A, S, m, n, sd, q, M, kappa = kwargs['msg'], kwargs['A'], kwargs['S'], kwargs['m'], kwargs['n'], kwargs['sd'], kwargs['q'], kwargs['M'], kwargs['kappa']
	m_bar = m + n
	D = DiscreteGaussianDistributionLatticeSampler(ZZ**m_bar, sd)
	count = 0
	while(True):
		y = np.array(D()) # m' x 1 
 		reduced_Ay = util.vector_to_Zq(np.matmul(A, y), 2*q)
		c = hash_iterative(np.array_str(reduced_Ay) + msg, n, kappa) # still not the hash but this is test run		
		b = util.crypt_secure_randint(0, 1)
		Sc = np.matmul(S,c)
		z = y + ((-1)**b) * Sc
		try:			
			exp_term = exp(float(Sc.dot(Sc)) / (2*sd**2))
			cosh_term = np.cosh(float(z.dot(Sc)) / (sd**2))
			val = exp_term / (cosh_term * M)				
		except OverflowError:
			print "OF"			
			continue			
		if(random.random() < min(val, 1.0)):
			break
		if(count > 10): # beyond 4 rejection sampling iterations are not expected in general 
			raise ValueError("The number of rejection sampling iterations are more than expected")
		count += 1								
	return z, c 
Example 36
Project: Lattice-Based-Signatures   Author: krishnacharya   File: lyu12vK.py    (license)
def Sign(**kwargs):
	'''
		i/p:
		msg: string, which the sender wants to broadcast
		A  : numpy array, Verification Key dimension nxm
		S  : numpy array, Signing key dimension mxk

		o/p:
		(z,c) : signature		
	'''	
	msg, A, S, q, n, m, k, d, sd, M = kwargs['msg'],kwargs['A'],kwargs['S'],kwargs['q'],kwargs['n'],kwargs['m'],kwargs['k'],kwargs['d'],kwargs['sd'],kwargs['M']	
	D = DiscreteGaussianDistributionLatticeSampler(ZZ**m, sd)
	count = 0
	while(True):
		y = np.array(D()) # discrete point in Zq^m
		c = util.hash_to_baseb(util.vector_to_Zq(np.matmul(A,y), q), msg, 3, k)  # 3 because we want b/w 0,1,2 small coefficients in Zq
		Sc = np.matmul(S,c)
		z = Sc + y  # notice we didn't reduce (mod q)
		try:					
			pxe = float(-2*z.dot(Sc) + Sc.dot(Sc))
			val = exp(pxe / (2*sd**2)) / M							
		except OverflowError:
			print "OF"			
			continue			
		if(random.random() < min(val, 1.0)):
			break
		if(count > 4): # beyond 4 rejection sampling iterations are not expected in general 
			raise ValueError("The number of rejection sampling iterations are more than expected")
		count += 1								
	return z, c 
Example 37
Project: git-of-theseus   Author: erikbern   File: survival_plot.py    (Apache License 2.0)
def fit(k):
    loss = 0.0
    for total_n, deltas in all_deltas:
        total_k = total_n
        P = 1.0
        for t in sorted(deltas.keys()):
            delta_k, delta_n = deltas[t]
            pred = total_n * math.exp(-k * t / YEAR)
            loss += (total_n * P - pred)**2
            P *= 1 + delta_k / total_n
            total_k += delta_k
            total_n += delta_n
    print(k, loss)
    return loss 
Example 38
Project: simple_rl   Author: david-abel   File: LinearQLearnerAgentClass.py    (Apache License 2.0)
def _rbf(x):
    return math.exp(-(x)**2) 
Example 39
Project: pogom-linux   Author: PokeHunterProject   File: pgoapi.py    (MIT License)
def _login(self, auth_provider, position):
        self.log.info('Attempting login: {}'.format(auth_provider.username))
        consecutive_fails = 0

        while not auth_provider.user_login():
            sleep_t = min(math.exp(consecutive_fails / 1.7), 5 * 60)
            self.log.info('Login failed, retrying in {:.2f} seconds'.format(sleep_t))
            consecutive_fails += 1
            time.sleep(sleep_t)
            if consecutive_fails == 5:
                raise AuthException('Login failed five times.')

        self.log.info('Login successful: {}'.format(auth_provider.username)) 
Example 40
Project: robocup-soccer   Author: kengz   File: text.py    (MIT License)
def score(self, ciphertext, code):
        """Score is product of word scores, unigram scores, and bigram scores.
        This can get very small, so we use logs and exp."""
        text = decode(ciphertext, code)
        logP = (sum([log(self.Pwords[word]) for word in words(text)]) +
                sum([log(self.P1[c]) for c in text]) +
                sum([log(self.P2[b]) for b in bigrams(text)]))
        return exp(logP) 
Example 41
Project: robocup-soccer   Author: kengz   File: search.py    (MIT License)
def exp_schedule(k=20, lam=0.005, limit=100):
    "One possible schedule function for simulated annealing"
    return lambda t: if_(t < limit, k * math.exp(-lam * t), 0) 
Example 42
Project: robocup-soccer   Author: kengz   File: search.py    (MIT License)
def simulated_annealing(problem, schedule=exp_schedule()):
    "[Fig. 4.5]"
    current = Node(problem.initial)
    for t in xrange(sys.maxint):
        T = schedule(t)
        if T == 0:
            return current
        next = random.choice(current.expand(problem))
        delta_e = next.path_cost - current.path_cost
        if delta_e > 0 or probability(math.exp(delta_e/T)):
            current = next 
Example 43
Project: NumpyDL   Author: oujago   File: mlp_bp.py    (license)
def squash(self, total_net_input):
        return 1 / (1 + math.exp(-total_net_input))

    # Determine how much the neuron's total input has to change to move closer to the expected output
    #
    # Now that we have the partial derivative of the error with respect to the output (∂E/∂yⱼ) and
    # the derivative of the output with respect to the total net input (dyⱼ/dzⱼ) we can calculate
    # the partial derivative of the error with respect to the total net input.
    # This value is also known as the delta (δ) [1]
    # δ = ∂E/∂zⱼ = ∂E/∂yⱼ * dyⱼ/dzⱼ
    # 
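
For a sigmoid output y = squash(z) and squared error E = 0.5 * (target - y)**2, the two factors above are ∂E/∂y = -(target - y) and dy/dz = y * (1 - y), so the delta can be computed directly from the neuron's output. A minimal standalone sketch (not the project's actual method names):

def sigmoid_delta(output, target):
    pd_error_wrt_output = -(target - output)            # ∂E/∂y for squared error
    pd_output_wrt_net_input = output * (1 - output)     # dy/dz, derivative of the sigmoid
    return pd_error_wrt_output * pd_output_wrt_net_input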
Example 44
Project: machine-learning   Author: zzw0929   File: backup_hot.py    (license)
def  hill_climbing_first_choice_simulated_annealing(status):
    '''First-choice hill climbing with simulated annealing for the 8-queens board.

    Picks a random queen move; a worse move is accepted with probability
    exp(delta / temperature).
    '''
    global chess_status_count, temperature

    pos = [(x, y) for x in range(8) for y in range(8)]
    random.shuffle(pos)
    for col, row in pos:
        if status[col] == row:
            continue
        chess_status_count += 1
        status_copy = list(status)
        status_copy[col] = row
        delta = get_num_of_conglict(status) - get_num_of_conglict(status_copy)
        # cool the temperature
        if temperature > 0:
            temperature -= 0.0001
        if delta > 0:
            status[col] = row
            return status
        elif delta < 0 and temperature != 0:
            probability = math.exp(delta / temperature)
            random_num = random.random()
            if random_num < probability:
                status[col] = row
                return status
    return status 
Example 45
Project: DL2W   Author: gauravmm   File: bounding_box.py    (license)
def apply_regression(self, x_reg, y_reg, width_reg, height_reg):
        """Apply offsets to bounding box."""
        self.x_center += x_reg * self.width
        self.y_center += y_reg * self.height
        self.width *= math.exp(width_reg)
        self.height *= math.exp(height_reg)

        self.x_min = self.x_center - (self.width / 2.0)
        self.y_min = self.y_center - (self.height / 2.0)
        self.x_max = self.x_center + (self.width / 2.0)
        self.y_max = self.y_center + (self.height / 2.0)

        return self 
Example 46
Project: moloco   Author: jimmyzliu   File: moloco.py    (GNU General Public License v3.0)
def wakefield_bf(beta,se,p,w = 0.1):
  # calculate Wakefield bayes factor
  z = stats.norm.isf(p/2)
  r = w/(se**2 + w)
  bf = math.sqrt(1-r) * math.exp(z**2/2*r)
  return bf 
Example 47
Project: vad   Author: bond005   File: vad.py    (GNU General Public License v3.0)
def mel_to_hertz(frequency):
    return 700.0 * (math.exp(frequency / 1125.0) - 1.0) 
Example 48
Project: pyrsss   Author: butala   File: v6300.py    (MIT License)
def k1(Ti, exp=math.exp):
    """[cm^3 / s]"""
    return 3.23e-12 * exp(3.72/(Ti/300) - 1.87/(Ti/300)**2) 
Example 49
Project: pyrsss   Author: butala   File: v6300.py    (MIT License)
def k2(Ti, exp=math.exp):
    """[cm^3 / s]"""
    return 2.78e-13 * exp(2.07/(Ti/300) - 0.61/(Ti/300)**2) 
Example 50
Project: pyrsss   Author: butala   File: v6300.py    (MIT License)
def k3(Tn, exp=math.exp):
    """[cm^3 / s]"""
    return 2.0e-11 * exp(111.8/Tn)