Python math.fsum() Examples

The following are 28 code examples of math.fsum(). math.fsum() returns an accurate floating-point sum of the values in an iterable, tracking exact partial sums internally to avoid the precision loss that the built-in sum() can accumulate. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module math, or try the search function.
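A minimal demonstration of the difference:

import math

values = [0.1] * 10
print(sum(values))        # 0.9999999999999999 -- rounding error accumulates
print(math.fsum(values))  # 1.0 -- correctly rounded result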
Example #1
Source File: test_random.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0    6 votes
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
    # Reflection to right half of complex plane
    if z < 0.5:
        return pi / sin(pi*z) / gamma(1.0-z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z+1.0),
        771.3234287757674 / (z+2.0),
        -176.6150291498386 / (z+3.0),
        12.50734324009056 / (z+4.0),
        -0.1385710331296526 / (z+5.0),
        0.9934937113930748e-05 / (z+6.0),
        0.1659470187408462e-06 / (z+7.0),
    ]) 
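As a quick, informal check of the excerpt above (it relies on the test module's own from math import pi, sin, exp, fsum), the approximation reproduces gamma(5) == 4! and gamma(0.5) == sqrt(pi):

from math import pi, sin, exp, fsum  # imports assumed by the excerpt above

print(gamma(5.0))   # approximately 24.0, since gamma(5) == 4!
print(gamma(0.5))   # approximately 1.7724538509055159, i.e. sqrt(pi)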
Example #2
Source File: commits.py    From Jandroid with BSD 3-Clause "New" or "Revised" License    6 votes
def main():
  for repository in _REPOSITORIES:
    commit_times = CommitTimes(repository, _REVISION_COUNT)

    commit_durations = []
    for time1, time2 in Pairwise(commit_times):
      #if not (IsWeekday(time1) and IsWeekday(time2)):
      #  continue
      commit_durations.append((time1 - time2).total_seconds() / 60.)
    commit_durations.sort()

    print 'REPOSITORY:', repository
    print 'Start Date:', min(commit_times), 'PDT'
    print '  End Date:', max(commit_times), 'PDT'
    print '  Duration:', max(commit_times) - min(commit_times)
    print '         n:', len(commit_times)

    for p in (0.25, 0.50, 0.90):
      percentile = Percentile(commit_durations, p)
      print '%3d%% commit duration:' % (p * 100), '%6.1fm' % percentile
    mean = math.fsum(commit_durations) / len(commit_durations)
    print 'Mean commit duration:', '%6.1fm' % mean
    print 
Example #3
Source File: test_random.py    From gcblue with BSD 3-Clause "New" or "Revised" License    6 votes
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
    # Reflection to right half of complex plane
    if z < 0.5:
        return pi / sin(pi*z) / gamma(1.0-z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z+1.0),
        771.3234287757674 / (z+2.0),
        -176.6150291498386 / (z+3.0),
        12.50734324009056 / (z+4.0),
        -0.1385710331296526 / (z+5.0),
        0.9934937113930748e-05 / (z+6.0),
        0.1659470187408462e-06 / (z+7.0),
    ]) 
Example #4
Source File: test_random.py    From Fluid-Designer with GNU General Public License v3.0    6 votes
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
    # Reflection to right half of complex plane
    if z < 0.5:
        return pi / sin(pi*z) / gamma(1.0-z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z+1.0),
        771.3234287757674 / (z+2.0),
        -176.6150291498386 / (z+3.0),
        12.50734324009056 / (z+4.0),
        -0.1385710331296526 / (z+5.0),
        0.9934937113930748e-05 / (z+6.0),
        0.1659470187408462e-06 / (z+7.0),
    ]) 
Example #5
Source File: solution.py    From PyDDM with MIT License    6 votes
def __init__(self, pdf_corr, pdf_err, model, conditions, pdf_undec=None, pdf_evolution=None):
        """Create a Solution object from the results of a model
        simulation.

        Constructor takes four required arguments and two optional ones.

            - `pdf_corr` - a size N numpy ndarray describing the correct portion of the joint pdf
            - `pdf_err` - a size N numpy ndarray describing the error portion of the joint pdf
            - `model` - the Model object used to generate `pdf_corr` and `pdf_err`
            - `conditions` - a dictionary of condition names/values used to generate the solution
            - `pdf_undec` - a size M numpy ndarray describing the final state of the simulation.  None if unavailable.
            - `pdf_evolution` - a size M-by-N numpy ndarray describing the state of the simulation at each time step. None if unavailable.
        """
        self.model = copy.deepcopy(model) # TODO this could cause a memory leak if I forget it is there...
        self.corr = pdf_corr 
        self.err = pdf_err
        self.undec = pdf_undec
        self.evolution = pdf_evolution
        # Correct floating point errors to always get prob <= 1
        if fsum(self.corr.tolist() + self.err.tolist()) > 1:
            self.corr /= 1.00000000001
            self.err /= 1.00000000001
        self.conditions = conditions 
Example #6
Source File: test_random.py    From oss-ftp with MIT License    6 votes
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
    # Reflection to right half of complex plane
    if z < 0.5:
        return pi / sin(pi*z) / gamma(1.0-z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z+1.0),
        771.3234287757674 / (z+2.0),
        -176.6150291498386 / (z+3.0),
        12.50734324009056 / (z+4.0),
        -0.1385710331296526 / (z+5.0),
        0.9934937113930748e-05 / (z+6.0),
        0.1659470187408462e-06 / (z+7.0),
    ]) 
Example #7
Source File: test_random.py    From BinderFilter with MIT License    6 votes
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
    # Reflection to right half of complex plane
    if z < 0.5:
        return pi / sin(pi*z) / gamma(1.0-z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z+1.0),
        771.3234287757674 / (z+2.0),
        -176.6150291498386 / (z+3.0),
        12.50734324009056 / (z+4.0),
        -0.1385710331296526 / (z+5.0),
        0.9934937113930748e-05 / (z+6.0),
        0.1659470187408462e-06 / (z+7.0),
    ]) 
Example #8
Source File: test_random.py    From ironpython2 with Apache License 2.0    6 votes
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
    # Reflection to right half of complex plane
    if z < 0.5:
        return pi / sin(pi*z) / gamma(1.0-z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z+1.0),
        771.3234287757674 / (z+2.0),
        -176.6150291498386 / (z+3.0),
        12.50734324009056 / (z+4.0),
        -0.1385710331296526 / (z+5.0),
        0.9934937113930748e-05 / (z+6.0),
        0.1659470187408462e-06 / (z+7.0),
    ]) 
Example #9
Source File: quadratic_program.py    From qiskit-aqua with Apache License 2.0    6 votes
def _quadratic_constraints(self) -> bool:
        feasible = True
        for quad_cst in self._src.quadratic_constraints:
            const1, lin1 = self._linear_expression(quad_cst.linear)
            const2, lin2, quadratic = self._quadratic_expression(quad_cst.quadratic)
            rhs = -fsum([-quad_cst.rhs] + const1 + const2)
            linear = lin1.coefficients + lin2.coefficients

            if quadratic.coefficients.nnz > 0:
                self._dst.quadratic_constraint(name=quad_cst.name, linear=linear,
                                               quadratic=quadratic.coefficients,
                                               sense=quad_cst.sense, rhs=rhs)
            elif linear.nnz > 0:
                name = quad_cst.name
                lin_names = set(lin.name for lin in self._dst.linear_constraints)
                while name in lin_names:
                    name = '_' + name
                self._dst.linear_constraint(name=name, linear=linear, sense=quad_cst.sense, rhs=rhs)
            else:
                if not self._feasible(quad_cst.sense, rhs):
                    logger.warning('constraint %s is infeasible due to substitution', quad_cst.name)
                    feasible = False

        return feasible 
Example #10
Source File: ratio_hq.py    From eduActiv8 with GNU General Public License v3.0    6 votes
def draw_minicircles(self):
        ttl = int(fsum(self.numbers))
        angle_step = 2 * pi / ttl
        angle_start = -pi / 2
        r = self.size // 2.5
        r2 = self.size // 17
        # manually draw the arc - the 100% width of the arc does not impress

        for i in range(ttl):
            # angle for line
            angle = angle_start + angle_step * i

            # Calculate the x,y for the end point
            x = r * cos(angle) + self.center[0]
            y = r * sin(angle) + self.center[1]
            if i < self.numbers[0]:
                pygame.draw.circle(self.canvas, self.color1, [int(x), int(y)], r2, 0)
                pygame.draw.circle(self.canvas, self.border_color1, [int(x), int(y)], r2, 2)
            elif i < self.numbers[0] + self.numbers[1]:
                pygame.draw.circle(self.canvas, self.color2, [int(x), int(y)], r2, 0)
                pygame.draw.circle(self.canvas, self.border_color2, [int(x), int(y)], r2, 2)
            else:
                pygame.draw.circle(self.canvas, self.color3, [int(x), int(y)], r2, 0)
                pygame.draw.circle(self.canvas, self.border_color3, [int(x), int(y)], r2, 2)
            # Draw the line from the self.center to the calculated end point 
Example #11
Source File: test_random.py    From ironpython3 with Apache License 2.0    6 votes
def gamma(z, sqrt2pi=(2.0*pi)**0.5):
    # Reflection to right half of complex plane
    if z < 0.5:
        return pi / sin(pi*z) / gamma(1.0-z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z+1.0),
        771.3234287757674 / (z+2.0),
        -176.6150291498386 / (z+3.0),
        12.50734324009056 / (z+4.0),
        -0.1385710331296526 / (z+5.0),
        0.9934937113930748e-05 / (z+6.0),
        0.1659470187408462e-06 / (z+7.0),
    ]) 
Example #12
Source File: mesh_tetra_test.py    From meshplex with GNU General Public License v3.0    6 votes
def test_toy_geometric():
    mesh = meshplex.read(this_dir / "meshes" / "toy.vtk")

    mesh = meshplex.MeshTetra(mesh.node_coords, mesh.cells["nodes"])

    run(
        mesh,
        volume=9.3875504672601107,
        convol_norms=[0.20175742659663737, 0.0093164692200450819],
        ce_ratio_norms=[13.497977312281323, 0.42980191511570004],
        cellvol_norms=[0.091903119589148916, 0.0019959463063558944],
        tol=1.0e-6,
    )

    cc = mesh.cell_circumcenters
    cc_norm_2 = fsum(cc.flat)
    cc_norm_inf = max(cc.flat)
    assert abs(cc_norm_2 - 1103.7038287583791) < 1.0e-12
    assert abs(cc_norm_inf - 3.4234008596539662) < 1.0e-12 
Example #13
Source File: temporal_demo.py    From two-stream-pytorch with MIT License    5 votes
def softmax(x):
    y = [math.exp(k) for k in x]
    sum_y = math.fsum(y)
    z = [k/sum_y for k in y]

    return z 
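A small usage sketch for the softmax above, with illustrative inputs; because the denominator is computed with math.fsum, the outputs form a well-normalized distribution:

import math  # already imported by the module the excerpt comes from

probs = softmax([1.0, 2.0, 3.0])   # softmax as defined above
print(probs)                       # roughly [0.090, 0.245, 0.665]
print(math.fsum(probs))            # 1.0 up to rounding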
Example #14
Source File: build_committee_dataset.py    From captionGAN with MIT License    5 votes
def BLEUscore(candidate, references, weights):
  p_ns = [BLEU.modified_precision(candidate, references, i) for i, _ in enumerate(weights, start=1)]
  if all([x > 0 for x in p_ns]):
      s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns))
      bp = BLEU.brevity_penalty(candidate, references)
      return bp * math.exp(s)
  else: # this is bad
      return 0 
Example #15
Source File: spatial_demo.py    From two-stream-pytorch with MIT License    5 votes
def softmax(x):
    y = [math.exp(k) for k in x]
    sum_y = math.fsum(y)
    z = [k/sum_y for k in y]

    return z 
Example #16
Source File: kmeans.py    From modernpython with MIT License    5 votes
def mean(data: Iterable[float]) -> float:
    'Accurate arithmetic mean'
    data = list(data)
    return fsum(data) / len(data) 
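A brief illustration, with made-up cancellation-prone data, of why fsum matters for this mean: a naive sum loses the small term entirely, while fsum keeps it:

from math import fsum

data = [1e16, 1.0, -1e16]
print(sum(data) / len(data))    # 0.0 -- the 1.0 is lost to rounding
print(fsum(data) / len(data))   # 0.3333333333333333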
Example #17
Source File: nlp.py    From tatk with Apache License 2.0    5 votes
def sentence_bleu_4(hyp, refs, weights=[0.25, 0.25, 0.25, 0.25]):
    # input : single sentence, multiple references
    count = [0, 0, 0, 0]
    clip_count = [0, 0, 0, 0]
    r = 0
    c = 0

    for i in range(4):
        hypcnts = Counter(ngrams(hyp, i + 1))
        cnt = sum(hypcnts.values())
        count[i] += cnt

        # compute clipped counts
        max_counts = {}
        for ref in refs:
            refcnts = Counter(ngrams(ref, i + 1))
            for ng in hypcnts:
                max_counts[ng] = max(max_counts.get(ng, 0), refcnts[ng])
        clipcnt = dict((ng, min(count, max_counts[ng])) \
                       for ng, count in hypcnts.items())
        clip_count[i] += sum(clipcnt.values())

    bestmatch = [1000, 1000]
    for ref in refs:
        if bestmatch[0] == 0:
            break
        diff = abs(len(ref) - len(hyp))
        if diff < bestmatch[0]:
            bestmatch[0] = diff
            bestmatch[1] = len(ref)
    r = bestmatch[1]
    c = len(hyp)

    p0 = 1e-7
    bp = math.exp(-abs(1.0 - float(r) / float(c + p0)))

    p_ns = [float(clip_count[i]) / float(count[i] + p0) + p0 for i in range(4)]
    s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns) if p_n)
    bleu_hyp = bp * math.exp(s)

    return bleu_hyp 
Example #18
Source File: tdlm_model.py    From topically-driven-language-model with Apache License 2.0    5 votes
def sample(self, probs, temperature):
        if temperature == 0:
            return np.argmax(probs)

        probs = probs.astype(np.float64) #convert to float64 for higher precision
        probs = np.log(probs) / temperature
        probs = np.exp(probs) / math.fsum(np.exp(probs))
        return np.argmax(np.random.multinomial(1, probs, 1))

    #generate a sentence given conv_hidden 
Example #19
Source File: symbol_regression.py    From Artificial-Intelligence-with-Python with MIT License    5 votes
def eval_func(individual, points):
    # Transform the tree expression in a callable function
    func = toolbox.compile(expr=individual)

    # Evaluate the mean squared error
    mse = ((func(x) - (2 * x**3 - 3 * x**2 + 4 * x - 1))**2 for x in points)

    return math.fsum(mse) / len(points),

# Function to create the toolbox 
Example #20
Source File: text.py    From vortessence with GNU General Public License v2.0    5 votes
def partition_width(self, widths):
        """Determines if the widths are over the maximum available space, and if so shrinks them"""
        if math.fsum(widths) + (len(widths) - 1) > self.max_width:
            remainder = (int(math.fsum(widths)) + (len(widths) - 1)) - self.max_width
            # Take from the largest column first, eventually evening out
            for i in range(remainder):
                col_index = widths.index(max(widths))
                widths[col_index] -= 1
        return widths 
Example #21
Source File: plot.py    From quadpy with GNU General Public License v3.0    5 votes
def plot_disks(plt, pts, weights, total_area):
    """Plot a circles at quadrature points according to weights.
    """
    flt = numpy.vectorize(float)
    pts = flt(pts)
    weights = flt(weights)
    radii = numpy.sqrt(abs(weights) / math.fsum(weights) * total_area / math.pi)
    colors = [
        # use matplotlib 2.0's color scheme
        "tab:blue" if weight >= 0 else "tab:red"
        for weight in weights
    ]
    _plot_disks_helpers(plt, pts, radii, colors) 
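The radii above are chosen so that the disk areas are proportional to |weight| and sum to total_area; a quick numeric check of that invariant with made-up weights:

import math
import numpy

weights = numpy.array([0.3, 0.5, 0.2])   # hypothetical quadrature weights
total_area = 2.0
radii = numpy.sqrt(abs(weights) / math.fsum(weights) * total_area / math.pi)
print(math.fsum(math.pi * r**2 for r in radii))   # 2.0, i.e. total_area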
Example #22
Source File: plot.py    From quadpy with GNU General Public License v3.0    5 votes
def plot_disks_1d(plt, pts, weights, total_area):
    """Plot a circles at quadrature points according to weights. The diameters
    sum up to the total area.
    """
    radii = 0.5 * abs(weights) / math.fsum(weights) * total_area
    colors = ["tab:blue" if weight >= 0 else "tab:red" for weight in weights]
    _plot_disks_helpers(plt, pts, radii, colors) 
Example #23
Source File: rank_based.py    From prioritized-experience-replay with MIT License    5 votes
def build_distributions(self):
        """
        preprocess pow of rank
        (rank i) ^ (-alpha) / sum ((rank i) ^ (-alpha))
        :return: distributions, dict
        """
        res = {}
        n_partitions = self.partition_num
        partition_num = 1
        # each part size
        partition_size = int(math.floor(self.size / n_partitions))

        for n in range(partition_size, self.size + 1, partition_size):
            if self.learn_start <= n <= self.priority_size:
                distribution = {}
                # P(i) = (rank i) ^ (-alpha) / sum ((rank i) ^ (-alpha))
                pdf = list(
                    map(lambda x: math.pow(x, -self.alpha), range(1, n + 1))
                )
                pdf_sum = math.fsum(pdf)
                distribution['pdf'] = list(map(lambda x: x / pdf_sum, pdf))
                # split into k segments, then sample uniformly within each segment
                # with k = batch_size, each segment has total probability 1 / batch_size
                # strata_ends stores each segment's start and end positions
                cdf = np.cumsum(distribution['pdf'])
                strata_ends = {1: 0, self.batch_size + 1: n}
                step = 1 / float(self.batch_size)
                index = 1
                for s in range(2, self.batch_size + 1):
                    while cdf[index] < step:
                        index += 1
                    strata_ends[s] = index
                    step += 1 / float(self.batch_size)

                distribution['strata_ends'] = strata_ends

                res[partition_num] = distribution

            partition_num += 1

        return res 
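Stripped of the class machinery, the normalization step above amounts to the following sketch, with alpha and n chosen purely for illustration:

import math

alpha, n = 0.7, 5
pdf = [math.pow(rank, -alpha) for rank in range(1, n + 1)]
pdf_sum = math.fsum(pdf)
pdf = [p / pdf_sum for p in pdf]
print(pdf)             # descending probabilities for ranks 1..n
print(math.fsum(pdf))  # 1.0 up to rounding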
Example #24
Source File: tests.py    From Vose-Alias-Method with Apache License 2.0    5 votes
def test_output_alias_generation(self):
        """Test vose_sampler.ProbDistribution.alias_generation to ensure it
        generates words with same distribution as the original corpus. This
        performs a 2-sided hypothesis test at the 1% significance level, that:
        H_0: observed proportion a randomly selected word is equal to the
             proportion seen in the original corpus (i.e. p_original == p_observed)
        H_1: p_original != p_observed
        """
        print("WARNING: There is a random element to test_output_alias_generation\n\
        so it is likely to occasionally fail, nonetheless if the alias_generation\n\
        method is working correctly failures will be very rare (testing at alpha=0.01\n\
        implies we should expect a Type I error about 1% of the time).")

        # Construct a ProbDistribution
        words = vose_sampler.get_words(valid_folder + "small.txt")
        word_dist = vose_sampler.sample2dist(words)
        VA_words = vose_sampler.VoseAlias(word_dist)

        # Generate sample and calculate the number of observations for a randomly selected word
        word = random.choice(list(VA_words.dist))

        n = 1000

        t = 0
        for i in range(n):
            if VA_words.alias_generation() == word:
                t += 1

        # Compute the p-value
        p_original = VA_words.dist[word]

        p_low = math.fsum([self.dbinom(x, n, p_original) for x in range(t,n+1)])
        p_high = math.fsum([self.dbinom(x, n, p_original) for x in range(t+1)])

        p = 2*min(p_low, p_high)

        # Do not accept H_0 if p <= alpha
        alpha = 0.01
        self.assertGreater(p, alpha) 
Example #25
Source File: recipe-577676.py    From code with MIT License    5 votes
def describe(data):
    'Simple reducer for descriptive statistics'
    n = len(data)
    lo = min(data)
    hi = max(data)
    mean = fsum(data) / n
    std_dev = (fsum((x - mean) ** 2 for x in data) / n) ** 0.5
    return Summary(n, lo, mean, hi, std_dev) 
Example #26
Source File: test_statistics.py    From ironpython3 with Apache License 2.0    5 votes
def test_compare_with_math_fsum(self):
        # Compare with the math.fsum function.
        # Ideally we ought to get the exact same result, but sometimes
        # we differ by a very slight amount :-(
        data = [random.uniform(-100, 1000) for _ in range(1000)]
        self.assertApproxEqual(float(self.func(data)[1]), math.fsum(data), rel=2e-16) 
Example #27
Source File: road.py    From terminus with Apache License 2.0    5 votes
def sum_control_points_distances(self, initial=0, final=None):
        distances = self.control_points_distances()
        if final is None:
            return math.fsum(distances[initial:])
        return math.fsum(distances[initial:final])

    # Lane management 
Example #28
Source File: sratio.py    From backtrader with GNU General Public License v3.0    5 votes
def average(x):
    return math.fsum(x) / len(x)