Python statistics.stdev() Examples

The following are 30 code examples showing how to use statistics.stdev(). These examples are extracted from open source projects; each example's header names the project, author, source file, and license.

You may also want to check out all other available functions and classes of the statistics module.
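
Before the project code, here is a minimal, self-contained sketch of the function's basic behavior (the sample values are illustrative):

import statistics

data = [2.5, 3.25, 5.5, 5.75, 6.0]

# Sample standard deviation (divides by N - 1).
print(statistics.stdev(data))

# Population standard deviation (divides by N), for comparison.
print(statistics.pstdev(data))

# If the mean is already known, pass it as xbar to skip recomputing it.
mu = statistics.mean(data)
print(statistics.stdev(data, xbar=mu))

# stdev() raises StatisticsError for fewer than two data points, which is
# why many of the examples below guard with a length check.
try:
    statistics.stdev([1.0])
except statistics.StatisticsError as exc:
    print(exc)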

Example 1
Project: chainerrl   Author: chainer   File: evaluator.py    License: MIT License
def evaluate_and_update_max_score(self, t, episodes):
        eval_stats = eval_performance(
            self.env, self.agent, self.n_steps, self.n_episodes,
            max_episode_len=self.max_episode_len,
            logger=self.logger)
        elapsed = time.time() - self.start_time
        custom_values = tuple(tup[1] for tup in self.agent.get_statistics())
        mean = eval_stats['mean']
        values = (t,
                  episodes,
                  elapsed,
                  mean,
                  eval_stats['median'],
                  eval_stats['stdev'],
                  eval_stats['max'],
                  eval_stats['min']) + custom_values
        record_stats(self.outdir, values)
        if mean > self.max_score:
            self.logger.info('The best score is updated %s -> %s',
                             self.max_score, mean)
            self.max_score = mean
            if self.save_best_so_far_agent:
                save_agent(self.agent, "best", self.outdir, self.logger)
        return mean 
Example 2
Project: chainerrl   Author: chainer   File: evaluator.py    License: MIT License
def evaluate_and_update_max_score(self, t, episodes, env, agent):
        eval_stats = eval_performance(
            env, agent, self.n_steps, self.n_episodes,
            max_episode_len=self.max_episode_len,
            logger=self.logger)
        elapsed = time.time() - self.start_time
        custom_values = tuple(tup[1] for tup in agent.get_statistics())
        mean = eval_stats['mean']
        values = (t,
                  episodes,
                  elapsed,
                  mean,
                  eval_stats['median'],
                  eval_stats['stdev'],
                  eval_stats['max'],
                  eval_stats['min']) + custom_values
        record_stats(self.outdir, values)
        with self._max_score.get_lock():
            if mean > self._max_score.value:
                self.logger.info('The best score is updated %s -> %s',
                                 self._max_score.value, mean)
                self._max_score.value = mean
                if self.save_best_so_far_agent:
                    save_agent(agent, "best", self.outdir, self.logger)
        return mean 
Example 3
Project: open-synthesis   Author: twschiller   File: metrics.py    License: GNU General Public License v3.0
def calc_disagreement(evaluations):
    """Return the disagreement level for evaluations, or None if no evaluations.

    Calculated as the max disagreement of (1) N/A and non-N/A responses and (2) non-N/A evaluations
    :param evaluations: an iterable of Eval
    """
    if evaluations:
        na_it, rated_it = partition(lambda x: x is not Eval.not_applicable, evaluations)
        na_votes = list(na_it)
        rated_votes = list(rated_it)

        # Here we use the sample standard deviation because we consider the evaluations are a sample of all the
        # evaluations that could be given.
        # Not clear the best way to make the N/A disagreement comparable to the evaluation disagreement calculation
        na_disagreement = (
            statistics.stdev(([0] * len(na_votes)) + ([1] * len(rated_votes)))
            if len(na_votes) + len(rated_votes) > 1
            else 0.0)
        rated_disagreement = (
            statistics.stdev([v.value for v in rated_votes])
            if len(rated_votes) > 1
            else 0.0)
        return max(na_disagreement, rated_disagreement)
    else:
        return None 
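
As a side note, the N/A-versus-rated disagreement above reduces to the sample standard deviation of a 0/1 indicator vector; a tiny standalone illustration (the function name and vote counts are made up):

import statistics

def na_disagreement(n_na, n_rated):
    # Sample stdev of a 0/1 indicator vector: 0.0 when the group is
    # unanimous, largest when it splits evenly between N/A and rated.
    votes = [0] * n_na + [1] * n_rated
    return statistics.stdev(votes) if len(votes) > 1 else 0.0

print(na_disagreement(5, 0))  # 0.0 -- everyone voted N/A
print(na_disagreement(3, 3))  # ~0.55 -- an even split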
Example 4
Project: BiblioPixel   Author: ManiacalLabs   File: envelope_test.py    License: MIT License
def _random(self, env, min_mean, max_mean, min_stdev, max_stdev,
                test_count=3, sample_count=300):
        mmin, smin, mmax, smax = 100, 100, 0, 0
        for i in range(test_count):
            values = [env(0) for i in range(sample_count)]
            mean, stdev = statistics.mean(values), statistics.stdev(values)
            mmax = max(mmax, mean)
            mmin = min(mmin, mean)
            smax = max(smax, stdev)
            smin = min(smin, stdev)

        self.assertGreater(mmin, min_mean)
        self.assertLess(mmax, max_mean)
        self.assertGreater(smin, min_stdev)
        self.assertLess(smax, max_stdev)

        return mmin, mmax, smin, smax 
Example 5
Project: async-rl   Author: muupan   File: run_a3c.py    License: MIT License
def eval_performance(process_idx, make_env, model, phi, n_runs):
    assert n_runs > 1, 'Computing stdev requires at least two runs'
    scores = []
    for i in range(n_runs):
        model.reset_state()
        env = make_env(process_idx, test=True)
        obs = env.reset()
        done = False
        test_r = 0
        while not done:
            s = chainer.Variable(np.expand_dims(phi(obs), 0))
            pout, _ = model.pi_and_v(s)
            a = pout.action_indices[0]
            obs, r, done, info = env.step(a)
            test_r += r
        scores.append(test_r)
        print('test_{}:'.format(i), test_r)
    mean = statistics.mean(scores)
    median = statistics.median(scores)
    stdev = statistics.stdev(scores)
    return mean, median, stdev 
Example 6
Project: async-rl   Author: muupan   File: a3c_ale.py    License: MIT License
def eval_performance(rom, p_func, n_runs):
    assert n_runs > 1, 'Computing stdev requires at least two runs'
    scores = []
    for i in range(n_runs):
        env = ale.ALE(rom, treat_life_lost_as_terminal=False)
        test_r = 0
        while not env.is_terminal:
            s = chainer.Variable(np.expand_dims(dqn_phi(env.state), 0))
            pout = p_func(s)
            a = pout.action_indices[0]
            test_r += env.receive_action(a)
        scores.append(test_r)
        print('test_{}:'.format(i), test_r)
    mean = statistics.mean(scores)
    median = statistics.median(scores)
    stdev = statistics.stdev(scores)
    return mean, median, stdev 
Example 7
Project: workload-collocation-agent   Author: intel   File: runner_analyzer.py    License: Apache License 2.0
def to_dict(self):
        return {
            "LB_min": round(self.latency.min, 2),
            "LB_avg": round(self.latency.avg, 2),
            "LB_max": round(self.latency.max, 2),
            "L_stdev": round(self.latency.stdev, 2),
            "L_stdev[%]": round(self.latency.stdev / self.latency.avg * 100, 2),
            # ---
            "TB_min": round(self.throughput.min, 2),
            "TB_avg": round(self.throughput.avg, 2),
            "TB_max": round(self.throughput.max, 2),
            "T_stdev": round(self.throughput.stdev, 2),
            "T_stdev[%]": round(self.throughput.stdev / self.throughput.avg * 100, 2),
            # ---
            "B_count": self.count,
            "app": self.name
        } 
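
The two "[%]" entries above express the standard deviation as a percentage of the mean, i.e. the coefficient of variation; a quick illustration with made-up samples:

import statistics

samples = [98.0, 102.0, 100.0, 99.0, 101.0]
cv_percent = statistics.stdev(samples) / statistics.mean(samples) * 100
print(round(cv_percent, 2))  # ~1.58, i.e. the spread is about 1.6% of the mean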
Example 8
Project: cloudtools   Author: Nealelab   File: describe.py    License: MIT License
def get_partitions_info_str(j):
    partitions = j['components']['partition_counts']['counts']
    partitions_info = {
                          'Partitions': len(partitions),
                          'Rows': sum(partitions),
                          'Empty partitions': len([p for p in partitions if p == 0])
                      }
    if partitions_info['Partitions'] > 1:
        partitions_info.update({
            'Min(rows/partition)': min(partitions),
            'Max(rows/partition)': max(partitions),
            'Median(rows/partition)': median(partitions),
            'Mean(rows/partition)': int(mean(partitions)),
            'StdDev(rows/partition)': int(stdev(partitions))
        })

    return "\n{}".format(IDENT).join(['{}: {}'.format(k, v) for k, v in partitions_info.items()])
Example 9
Project: evmlab   Author: ethereum   File: fuzzer.py    License: GNU General Public License v3.0
def status(self):
        import collections, statistics
        from datetime import datetime
        return {
            "starttime": datetime.utcfromtimestamp(self.stats["start_time"]).strftime('%Y-%m-%d %H:%M:%S'),
            "pass": self.numPass(),
            "fail": self.numFails(),
            "failures": self.failures,
            "speed": self.testsPerSecond(),
            "mean": statistics.mean(self.traceLengths) if self.traceLengths else "NA",
            "stdev": statistics.stdev(self.traceLengths) if len(self.traceLengths) > 2 else "NA",
            "numZero": self.traceLengths.count(0) if self.traceLengths else "NA",
            "max": max(self.traceLengths) if self.traceLengths else "NA",
            "maxDepth": max(self.traceDepths) if self.traceDepths else "NA",
            "numConst": statistics.mean(self.traceConstantinopleOps) if self.traceConstantinopleOps else "NA",
            "activeSockets": self.stats["num_active_sockets"],
            "activeTests": self.stats["num_active_tests"],
        } 
Example 10
Project: marLo   Author: crowdAI   File: evaluator.py    License: MIT License
def evaluate_and_update_max_score(self, t, episodes):
        eval_stats = eval_performance(
            self.env, self.agent, self.n_runs,
            max_episode_len=self.max_episode_len, explorer=self.explorer,
            logger=self.logger)
        elapsed = time.time() - self.start_time
        custom_values = tuple(tup[1] for tup in self.agent.get_statistics())
        mean = eval_stats['mean']
        values = (t,
                  episodes,
                  elapsed,
                  mean,
                  eval_stats['median'],
                  eval_stats['stdev'],
                  eval_stats['max'],
                  eval_stats['min']) + custom_values
        record_stats(self.outdir, values)
        if mean > self.max_score:
            self.logger.info('The best score is updated %s -> %s',
                             self.max_score, mean)
            self.max_score = mean
            if self.save_best_so_far_agent:
                save_agent(self.agent, t, self.outdir, self.logger)
        return mean 
Example 11
Project: fossor   Author: linkedin   File: anomaly_detection.py    License: BSD 2-Clause "Simplified" License
def within_stdev_percent(values, x_stdev, percent_threshold, min_values=100):
    '''Return True if percent_threshold of values are within x_stdev of the mean.'''
    if len(values) < min_values:
        return True

    mean = statistics.mean(values)
    stdev = statistics.stdev(values)
    found = []
    for v in values:
        diff = abs(mean - v)
        if diff <= (stdev * x_stdev):
            found.append(v)
    percent_found = len(found) / len(values)
    result = percent_found > percent_threshold
    log.debug(f"Within {x_stdev} sigma check was {result}. {percent_found:.2f}%/{percent_threshold:.2f}% within stdev*{x_stdev}. "
              f"Mean: {mean:.2f}. Stdev: {stdev:.2f}. Acceptable range was: {mean - stdev * x_stdev:.2f} - {mean + stdev * x_stdev:.2f}")
    return result 
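
For context on thresholds like this: for roughly normal data, about 68% of values fall within one standard deviation of the mean and about 95% within two (the empirical rule). A quick sanity check of that rule with synthetic data:

import random
import statistics

random.seed(0)
values = [random.gauss(0, 1) for _ in range(10000)]
mean = statistics.mean(values)
stdev = statistics.stdev(values)
within_two = sum(abs(v - mean) <= 2 * stdev for v in values) / len(values)
print(within_two)  # ~0.95 for normally distributed data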
Example 12
Project: cherry   Author: learnables   File: plot_tests.py    License: Apache License 2.0
def test_ci95(self):
        for length in [2, 3, 5, 10, 100, 1000]:
            numbers = [random.random() for _ in range(length)]
            ci = plot.ci95(numbers)
            mu = mean(numbers)
            std = stdev(numbers, xbar=mu)
            lower = mu - 2.0 * std / math.sqrt(length)
            upper = mu + 2.0 * std / math.sqrt(length)
            self.assertTrue(ci[0] - lower <= 1e-6)
            self.assertTrue(ci[1] - upper <= 1e-6)

            # Test the documentation example
            smoothed = []
            for replay in range(10):
                rewards = [random.random() for _ in range(100)]
                y_smoothed = plot.smooth(rewards)
                smoothed.append(y_smoothed)
            means = [mean(r) for r in zip(*smoothed)]
            confidences = [plot.ci95(r) for r in zip(*smoothed)]
            lower_bounds = [conf[0] for conf in confidences]
            upper_bounds = [conf[1] for conf in confidences]
            for lb, ub, m in zip(lower_bounds, upper_bounds, means):
                self.assertTrue(lb <= m)
                self.assertTrue(ub >= m) 
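
The bounds this test checks follow the normal-approximation 95% confidence interval for a mean, mean ± 2 * stdev / sqrt(n); a minimal standalone version of just that formula (plot.ci95 itself may be implemented differently):

import math
import statistics

def ci95_approx(values):
    # Normal-approximation 95% CI for the mean, with z taken as 2.0
    # (the same constant the test above uses).
    mu = statistics.mean(values)
    s = statistics.stdev(values, xbar=mu)
    half_width = 2.0 * s / math.sqrt(len(values))
    return mu - half_width, mu + half_width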
Example 13
Project: tea-lang   Author: emjun   File: evaluateHelperMethods.py    License: Apache License 2.0
def cohens(dataset, predictions, combined_data: CombinedData):
    xs = combined_data.get_explanatory_variables()
    ys = combined_data.get_explained_variables()
    x = xs[0]
    y = ys[0]
    cat = [k for k,v in x.metadata[categories].items()]
    data = []

    pred = None
    if predictions:
        pred = predictions[0][0]

    lhs = None
    rhs = None
    for c in cat:
        cat_data = dataset.select(y.metadata[name], where=[f"{x.metadata[name]} == '{c}'"])
        if c == pred.lhs.value:
            lhs = cat_data
        if c == pred.rhs.value:
            rhs = cat_data
        data.append(cat_data)

    cohens_d = (mean(lhs) - mean(rhs)) / (sqrt((stdev(lhs) ** 2 + stdev(rhs) ** 2) / 2))
    return cohens_d 
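
The final line above is Cohen's d, with the two groups' standard deviations pooled as a quadratic mean (which assumes roughly equal group sizes); the formula in isolation, with made-up data:

from math import sqrt
from statistics import mean, stdev

def cohens_d(group_a, group_b):
    # Effect size: difference of means over the pooled standard deviation.
    pooled_sd = sqrt((stdev(group_a) ** 2 + stdev(group_b) ** 2) / 2)
    return (mean(group_a) - mean(group_b)) / pooled_sd

print(cohens_d([2.0, 3.0, 4.0], [4.0, 5.0, 6.0]))  # -2.0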
Example 14
Project: agents-aea   Author: fetchai   File: report_printer.py    License: Apache License 2.0
def _count_resource(self, attr_name, aggr_function=None) -> Tuple[float, float]:
        """
        Calculate resources from exec reports.

        :param attr_name: name of the attribute of execreport to count resource.
        :param aggr_function:  function to process value of execreport.

        :return: (mean_value, standart_deviation)
        """
        if not aggr_function:
            aggr_function = lambda x: x  # noqa: E731

        values = [aggr_function(getattr(i, attr_name)) for i in self.exec_reports]
        mean_value = mean(values)
        std_dev = stdev(values) if len(values) > 1 else 0

        return (mean_value, std_dev) 
Example 15
Project: Graph-Transformer   Author: daiquocnguyen   File: train_pytorch_U2GNN_UnSup.py    License: Apache License 2.0
def evaluate():
    model.eval() # Turn on the evaluation mode
    with torch.no_grad():
        # evaluating
        node_embeddings = model.ss.weight
        graph_embeddings = torch.spmm(graph_pool, node_embeddings).data.cpu().numpy()
        acc_10folds = []
        for fold_idx in range(10):
            train_idx, test_idx = separate_data_idx(graphs, fold_idx)
            train_graph_embeddings = graph_embeddings[train_idx]
            test_graph_embeddings = graph_embeddings[test_idx]
            train_labels = graph_labels[train_idx]
            test_labels = graph_labels[test_idx]

            cls = LogisticRegression(solver="liblinear", tol=0.001)
            cls.fit(train_graph_embeddings, train_labels)
            ACC = cls.score(test_graph_embeddings, test_labels)
            acc_10folds.append(ACC)
            print('epoch ', epoch, ' fold ', fold_idx, ' acc ', ACC)

        mean_10folds = statistics.mean(acc_10folds)
        std_10folds = statistics.stdev(acc_10folds)
        # print('epoch ', epoch, ' mean: ', str(mean_10folds), ' std: ', str(std_10folds))

    return mean_10folds, std_10folds 
Example 16
Project: chainerrl   Author: chainer   File: evaluator.py    License: MIT License
def eval_performance(env, agent, n_steps, n_episodes, max_episode_len=None,
                     logger=None):
    """Run multiple evaluation episodes and return statistics.

    Args:
        env (Environment): Environment used for evaluation
        agent (Agent): Agent to evaluate.
        n_steps (int): Number of timesteps to evaluate for.
        n_episodes (int): Number of evaluation episodes.
        max_episode_len (int or None): If specified, episodes longer than this
            value will be truncated.
        logger (Logger or None): If specified, the given Logger object will be
            used for logging results. If not specified, the default logger of
            this module will be used.
    Returns:
        Dict of statistics.
    """

    assert (n_steps is None) != (n_episodes is None)

    if isinstance(env, chainerrl.env.VectorEnv):
        scores = batch_run_evaluation_episodes(
            env, agent, n_steps, n_episodes,
            max_episode_len=max_episode_len,
            logger=logger)
    else:
        scores = run_evaluation_episodes(
            env, agent, n_steps, n_episodes,
            max_episode_len=max_episode_len,
            logger=logger)
    stats = dict(
        episodes=len(scores),
        mean=statistics.mean(scores),
        median=statistics.median(scores),
        stdev=statistics.stdev(scores) if len(scores) >= 2 else 0.0,
        max=np.max(scores),
        min=np.min(scores))
    return stats 
Example 17
Project: BiblioPixel   Author: ManiacalLabs   File: envelope_test.py    License: MIT License
def test_gaussian(self):
        self._random(envelope.Gaussian(), 0.4, 0.6, 0.15, 0.35)
        self._random(envelope.Gaussian(stdev=0.1), 0.45, 0.55, 0.08, 0.12)
        self._random(envelope.Gaussian(mean=1.0), 0.9, 1.1, 0.20, 0.30) 
Example 18
Project: python-devtools   Author: samuelcolvin   File: timer.py    License: MIT License
def summary(self, verbose=False):
        times = set()
        for r in self.results:
            if not r.finish:
                r.capture()
            if verbose:
                print('    {}'.format(r.str(self.dp)), file=self.file)
            times.add(r.elapsed())

        if times:
            from statistics import mean, stdev

            print(
                _SUMMARY_TEMPLATE.format(
                    count=len(times),
                    mean=mean(times),
                    stddev=stdev(times) if len(times) > 1 else 0,
                    min=min(times),
                    max=max(times),
                    dp=self.dp,
                ),
                file=self.file,
                flush=True,
            )
        else:
            raise RuntimeError('timer not started')
        return times 
Example 19
Project: workload-collocation-agent   Author: intel   File: runner_analyzer.py    License: Apache License 2.0
def calculate_per_workload_wstats_per_stage(self, workloads: Iterable[str],
                                                stage_index: int, filter_nodes: List[str]) -> Dict[
                                                str, WStat]:
        """
        Calculate WStat for all workloads in list for stage (stage_index).
        Takes data from all nodes.
        """
        workloads_wstats: Dict[str, WStat] = {}
        for workload in workloads:
            # filter tasks of a given workload
            tasks = [task for task in self.stages[stage_index].tasks.values() if
                     task.workload_name == workload]
            # filter out tasks which were run on >>filter_nodes<<
            tasks = [task for task in tasks if task.node not in filter_nodes]

            # avg but from 12 sec for a single task
            throughputs_list = [task.get_throughput('avg') for task in tasks if
                                task.get_throughput('avg') is not None]
            latencies_list = [task.get_latency('avg') for task in tasks if
                              task.get_latency('avg') is not None]

            if len(throughputs_list) == 0:
                exception_value = float('inf')
                t_max, t_min, t_avg, t_stdev = [exception_value] * 4
                l_max, l_min, l_avg, l_stdev = [exception_value] * 4
            elif len(throughputs_list) == 1:
                t_max, t_min, t_avg, t_stdev = [throughputs_list[0], throughputs_list[0],
                                                throughputs_list[0], 0]
                l_max, l_min, l_avg, l_stdev = [latencies_list[0], latencies_list[0],
                                                latencies_list[0], 0]
            else:
                t_max, t_min, t_avg, t_stdev = (max(throughputs_list), min(throughputs_list),
                                                statistics.mean(throughputs_list),
                                                statistics.stdev(throughputs_list))
                l_max, l_min, l_avg, l_stdev = (max(latencies_list), min(latencies_list),
                                                statistics.mean(latencies_list),
                                                statistics.stdev(latencies_list))

            workloads_wstats[workload] = WStat(latency=Stat(l_avg, l_min, l_max, l_stdev),
                                               throughput=Stat(t_avg, t_min, t_max, t_stdev),
                                               count=len(tasks), name=workload)
        return workloads_wstats 
Example 20
Project: workload-collocation-agent   Author: intel   File: hierbar.py    License: Apache License 2.0
def _shape_diff(first: Shape, second: Shape) -> float:
    """Return resources variance between first and second."""
    first_resources = dict(first)
    second_resources = dict(second)
    assert len(first_resources.keys()) > 1, 'variance requires at least 2 data points'
    assert len(second_resources.keys()) > 1, 'variance requires at least 2 data points'
    resdiff = subtract_resources(first_resources, second_resources)
    diff_variance = statistics.stdev(resdiff.values())
    log.log(TRACE, '[Filter2][shape_diff] first=%s second=%s shape_diff=%s',
            first, second, diff_variance)
    return diff_variance 
Example 21
Project: workload-collocation-agent   Author: intel   File: hierbar.py    License: Apache License 2.0
def calculate_class_variances(app_name: str,
                              node_capacities: NodeCapacities,
                              requested: Resources,
                              shapes_to_nodes: ShapeToNodes
                              ) -> Tuple[Dict[Shape, float], List[Metric]]:
    """Calculate all classes bar (fitness) score"""
    metrics = []
    class_variances: Dict[Shape, float] = {}  # dict: class_shape->fit
    for class_shape, node_names_of_this_shape in shapes_to_nodes.items():
        node_capacities_of_this_shape = [node_capacities[node_name] for node_name in
                                         node_names_of_this_shape]
        averaged_resources_of_class = _calc_average_resources(
            node_capacities_of_this_shape)
        requested_empty_fraction = divide_resources(
            requested,
            averaged_resources_of_class,
            calculate_read_write_ratio(averaged_resources_of_class)
        )

        variance = statistics.stdev(requested_empty_fraction.values())
        log.log(TRACE, '[Filter2] class_shape=%s average_resources_of_class=%s '
                       'requested=%s requested_fraction=%s variance=%s', class_shape,
                averaged_resources_of_class, requested, requested_empty_fraction, variance)
        class_variances[class_shape] = variance

        class_shape_str = shape_to_str(class_shape)
        metrics.extend([
            Metric(
                name='wca_scheduler_hierbar_node_shape_app_variance',
                labels=dict(app=app_name,
                            app_requested=resource_to_str(requested),
                            shape=class_shape_str),
                value=variance
            ),
            Metric(
                name='wca_scheduler_hierbar_node_shape_numbers',
                labels=dict(shape=class_shape_str),
                value=len(node_capacities_of_this_shape),
            )
        ])
    return class_variances, metrics 
Example 22
Project: pyquarkchain   Author: QuarkChain   File: test_diff.py    License: MIT License
def main():
    target_interval_sec = 5
    diff_calc = diff.MADifficultyCalculator(
        ma_samples=32, target_interval_sec=target_interval_sec
    )
    hash_power = 100

    cTime = 0.0
    chain = [Block(0, 0.002)]
    usedTimeList = []
    p = proof_of_work.PoW(hash_power)

    for i in range(1000):
        required_diff = diff_calc.calculate_diff(chain)
        cTime = cTime + p.mine(required_diff)
        block = Block(cTime, required_diff)
        used_time = block.n_time - chain[-1].n_time
        chain.append(block)
        usedTimeList.append(used_time)
        print(
            "Time %.2f, block %d, requiredWork %.2f, used_time %.2f"
            % (block.n_time, i + 1, 1 / block.required_diff, used_time)
        )

    print(
        "Max: %.2f, min: %.2f, avg: %.2f, std: %.2f"
        % (
            max(usedTimeList),
            min(usedTimeList),
            stat.mean(usedTimeList),
            stat.stdev(usedTimeList),
        )
    ) 
Example 23
Project: TCAG-WGS-CNV-workflow   Author: bjtrost   File: CNVworkflowlib.py    License: MIT License
def stdev(list):
    try:
        stdev = statistics.stdev(list)
    except statistics.StatisticsError:
        stdev = 0
    return(stdev) 
Example 24
Project: indy-plenum   Author: hyperledger   File: profiler.py    License: Apache License 2.0
def calibrate():
    pr = profile.Profile()
    samples = []
    for i in range(20):
        samples.append(pr.calibrate(100000))
        print("calculated {:02d}: {}".format(i + 1, samples[i]))
    print("------------------------------------")
    print("         mean: {}".format(mean(samples)))
    print("std deviation: {}".format(stdev(samples))) 
Example 25
Project: indy-plenum   Author: hyperledger   File: test_value_accumulator.py    License: Apache License 2.0
def test_value_accumulator_can_add_several_values():
    values = [4.2, -1.3, 10.8]
    acc = ValueAccumulator()
    for value in values:
        acc.add(value)

    assert acc.count == len(values)
    assert acc.sum == sum(values)
    assert acc.avg == statistics.mean(values)
    assert acc.stddev == statistics.stdev(values)
    assert acc.min == min(values)
    assert acc.max == max(values)
    assert acc.min < acc.lo < acc.avg
    assert acc.avg < acc.hi < acc.max
    assert math.isclose(acc.hi - acc.lo, acc.stddev) 
Example 26
Project: zatt   Author: simonacca   File: loadLocal.py    License: GNU Affero General Public License v3.0
def client_pool(func, entries_count, workers, additional_args=[]):
    pool = Pool(workers)
    start_time = timer()
    worker_args = [[entries_count // workers] + additional_args]
    finish_times = pool.starmap(func, worker_args * workers)
    return (statistics.stdev(finish_times),
            statistics.mean(finish_times) - start_time) 
Example 27
Project: airflow   Author: apache   File: sql_queries.py    License: Apache License 2.0
def main() -> None:
    """
    Run the tests and write stats to a CSV file.
    """
    reset_db()
    rows = []
    times = []

    for i in range(4):
        sleep(5)
        queries, exec_time = run_test()
        if i == 0:
            continue
        times.append(exec_time)
        for qry in queries:
            info = qry.to_dict()
            info["test_no"] = i  # type: ignore
            rows.append(info)

    rows_to_csv(rows, name="/files/sql_after_remote.csv")
    print(times)
    msg = "Time for %d dag runs: %.4fs"

    if len(times) > 1:
        print((msg + " (±%.3fs)") % (len(times), statistics.mean(times), statistics.stdev(times)))
    else:
        print(msg % (len(times), times[0])) 
Example 28
Project: QUANTAXIS   Author: QUANTAXIS   File: QAAnalysis_dataframe.py    License: MIT License
def stdev(self):
        # Sample standard deviation.
        return statistics.stdev(self.price)
Example 29
Project: QUANTAXIS   Author: QUANTAXIS   File: base_datastruct.py    License: MIT License
def stdev(self):
        'Return the sample standard deviation of DataStruct.price'
        res = self.price.groupby(level=1).apply(lambda x: statistics.stdev(x))
        res.name = 'stdev'
        return res

    # Population standard deviation
Example 30
Project: coveragepy-bbmirror   Author: nedbat   File: perf_measure.py    License: Apache License 2.0
def stress_test(self):
        # For checking the overhead for each component:
        def time_thing(thing):
            per_thing = []
            pct_thing = []
            for _ in range(self.runs):
                for n in range(self.numlo, self.numhi+1, self.step):
                    kwargs = {
                        "file_count": self.fixed,
                        "call_count": self.fixed,
                        "line_count": self.fixed,
                    }
                    kwargs[thing+"_count"] = n
                    res = self._compute_overhead(**kwargs)
                    per_thing.append(res.overhead / getattr(res, "{}s".format(thing)))
                    pct_thing.append(res.covered / res.baseline * 100)

            out = "Per {}: ".format(thing)
            out += "mean = {:9.3f}us, stddev = {:8.3f}us, ".format(
                statistics.mean(per_thing)*1e6, statistics.stdev(per_thing)*1e6
            )
            out += "min = {:9.3f}us, ".format(min(per_thing)*1e6)
            out += "pct = {:6.1f}%, stddev = {:6.1f}%".format(
                statistics.mean(pct_thing), statistics.stdev(pct_thing)
            )
            print(out)

        time_thing("file")
        time_thing("call")
        time_thing("line")