Python statistics.stdev() Examples
The following are 30 code examples of statistics.stdev(), collected from open-source projects. Each example notes the project and source file it was taken from.
You may also want to check out all available functions and classes of the statistics module.
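Before the examples, a minimal sketch of the call they all build on; the input list is just illustrative. statistics.stdev() returns the sample standard deviation of its data, raises statistics.StatisticsError for fewer than two values, and accepts an optional precomputed mean via xbar:

import statistics

data = [1.5, 2.5, 2.5, 2.75, 3.25, 4.75]
print(statistics.stdev(data))  # 1.0810874155219827

# Passing a precomputed mean via xbar avoids recomputing it internally.
xbar = statistics.mean(data)
print(statistics.stdev(data, xbar=xbar))  # same result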
Example #1
Source File: runner_analyzer.py From workload-collocation-agent with Apache License 2.0
def to_dict(self):
    return {
        "LB_min": round(self.latency.min, 2),
        "LB_avg": round(self.latency.avg, 2),
        "LB_max": round(self.latency.max, 2),
        "L_stdev": round(self.latency.stdev, 2),
        "L_stdev[%]": round(self.latency.stdev / self.latency.avg * 100, 2),
        # ---
        "TB_min": round(self.throughput.min, 2),
        "TB_avg": round(self.throughput.avg, 2),
        "TB_max": round(self.throughput.max, 2),
        "T_stdev": round(self.throughput.stdev, 2),
        "T_stdev[%]": round(self.throughput.stdev / self.throughput.avg * 100, 2),
        # ---
        "B_count": self.count,
        "app": self.name
    }
Example #2
Source File: fuzzer.py From evmlab with GNU General Public License v3.0
def status(self):
    import collections, statistics
    from datetime import datetime
    return {
        "starttime": datetime.utcfromtimestamp(self.stats["start_time"]).strftime('%Y-%m-%d %H:%M:%S'),
        "pass": self.numPass(),
        "fail": self.numFails(),
        "failures": self.failures,
        "speed": self.testsPerSecond(),
        "mean": statistics.mean(self.traceLengths) if self.traceLengths else "NA",
        "stdev": statistics.stdev(self.traceLengths) if len(self.traceLengths) > 2 else "NA",
        "numZero": self.traceLengths.count(0) if self.traceLengths else "NA",
        "max": max(self.traceLengths) if self.traceLengths else "NA",
        "maxDepth": max(self.traceDepths) if self.traceDepths else "NA",
        "numConst": statistics.mean(self.traceConstantinopleOps) if self.traceConstantinopleOps else "NA",
        "activeSockets": self.stats["num_active_sockets"],
        "activeTests": self.stats["num_active_tests"],
    }
Example #3
Source File: evaluateHelperMethods.py From tea-lang with Apache License 2.0
def cohens(dataset, predictions, combined_data: CombinedData):
    xs = combined_data.get_explanatory_variables()
    ys = combined_data.get_explained_variables()
    x = xs[0]
    y = ys[0]
    cat = [k for k, v in x.metadata[categories].items()]
    data = []
    pred = None
    if predictions:
        pred = predictions[0][0]
    lhs = None
    rhs = None
    for c in cat:
        cat_data = dataset.select(y.metadata[name], where=[f"{x.metadata[name]} == '{c}'"])
        if c == pred.lhs.value:
            lhs = cat_data
        if c == pred.rhs.value:
            rhs = cat_data
        data.append(cat_data)
    cohens_d = (mean(lhs) - mean(rhs)) / (sqrt((stdev(lhs) ** 2 + stdev(rhs) ** 2) / 2))
    return cohens_d
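The Cohen's d computation above is wrapped in tea-lang's data-selection plumbing; here is a stripped-down sketch of just the effect-size formula, on hypothetical group values, with the same pooled-standard-deviation denominator:

from math import sqrt
from statistics import mean, stdev

lhs = [2.1, 2.5, 2.3, 2.8, 2.4]  # hypothetical group A
rhs = [1.6, 1.9, 1.7, 2.0, 1.8]  # hypothetical group B

# Pooled standard deviation of the two groups (equal-size form).
pooled_sd = sqrt((stdev(lhs) ** 2 + stdev(rhs) ** 2) / 2)
cohens_d = (mean(lhs) - mean(rhs)) / pooled_sd
print(round(cohens_d, 3))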
Example #4
Source File: report_printer.py From agents-aea with Apache License 2.0
def _count_resource(self, attr_name, aggr_function=None) -> Tuple[float, float]:
    """
    Calculate resources from exec reports.

    :param attr_name: name of the attribute of execreport to count resource.
    :param aggr_function: function to process value of execreport.

    :return: (mean_value, standard_deviation)
    """
    if not aggr_function:
        aggr_function = lambda x: x  # noqa: E731
    values = [aggr_function(getattr(i, attr_name)) for i in self.exec_reports]
    mean_value = mean(values)
    std_dev = stdev(values) if len(values) > 1 else 0
    return (mean_value, std_dev)
Example #5
Source File: describe.py From cloudtools with MIT License
def get_partitions_info_str(j):
    partitions = j['components']['partition_counts']['counts']
    partitions_info = {
        'Partitions': len(partitions),
        'Rows': sum(partitions),
        'Empty partitions': len([p for p in partitions if p == 0])
    }
    if partitions_info['Partitions'] > 1:
        partitions_info.update({
            'Min(rows/partition)': min(partitions),
            'Max(rows/partition)': max(partitions),
            'Median(rows/partition)': median(partitions),
            'Mean(rows/partition)': int(mean(partitions)),
            'StdDev(rows/partition)': int(stdev(partitions))
        })
    return "\n{}".format(IDENT).join(['{}: {}'.format(k, v) for k, v in partitions_info.items()])
Example #6
Source File: train_pytorch_U2GNN_UnSup.py From Graph-Transformer with Apache License 2.0
def evaluate():
    model.eval()  # Turn on the evaluation mode
    with torch.no_grad():  # evaluating
        node_embeddings = model.ss.weight
        graph_embeddings = torch.spmm(graph_pool, node_embeddings).data.cpu().numpy()
        acc_10folds = []
        for fold_idx in range(10):
            train_idx, test_idx = separate_data_idx(graphs, fold_idx)
            train_graph_embeddings = graph_embeddings[train_idx]
            test_graph_embeddings = graph_embeddings[test_idx]
            train_labels = graph_labels[train_idx]
            test_labels = graph_labels[test_idx]

            cls = LogisticRegression(solver="liblinear", tol=0.001)
            cls.fit(train_graph_embeddings, train_labels)
            ACC = cls.score(test_graph_embeddings, test_labels)
            acc_10folds.append(ACC)
            print('epoch ', epoch, ' fold ', fold_idx, ' acc ', ACC)

        mean_10folds = statistics.mean(acc_10folds)
        std_10folds = statistics.stdev(acc_10folds)
        # print('epoch ', epoch, ' mean: ', str(mean_10folds), ' std: ', str(std_10folds))

    return mean_10folds, std_10folds
Example #7
Source File: plot_tests.py From cherry with Apache License 2.0
def test_ci95(self):
    for length in [2, 3, 5, 10, 100, 1000]:
        numbers = [random.random() for _ in range(length)]
        ci = plot.ci95(numbers)
        mu = mean(numbers)
        std = stdev(numbers, xbar=mu)
        lower = mu - 2.0 * std / math.sqrt(length)
        upper = mu + 2.0 * std / math.sqrt(length)
        self.assertTrue(ci[0] - lower <= 1e-6)
        self.assertTrue(ci[1] - upper <= 1e-6)

    # Test the documentation example
    smoothed = []
    for replay in range(10):
        rewards = [random.random() for _ in range(100)]
        y_smoothed = plot.smooth(rewards)
        smoothed.append(y_smoothed)
    means = [mean(r) for r in zip(*smoothed)]
    confidences = [plot.ci95(r) for r in zip(*smoothed)]
    lower_bounds = [conf[0] for conf in confidences]
    upper_bounds = [conf[1] for conf in confidences]
    for lb, ub, m in zip(lower_bounds, upper_bounds, means):
        self.assertTrue(lb <= m)
        self.assertTrue(ub >= m)
Example #8
Source File: a3c_ale.py From async-rl with MIT License
def eval_performance(rom, p_func, n_runs):
    assert n_runs > 1, 'Computing stdev requires at least two runs'
    scores = []
    for i in range(n_runs):
        env = ale.ALE(rom, treat_life_lost_as_terminal=False)
        test_r = 0
        while not env.is_terminal:
            s = chainer.Variable(np.expand_dims(dqn_phi(env.state), 0))
            pout = p_func(s)
            a = pout.action_indices[0]
            test_r += env.receive_action(a)
        scores.append(test_r)
        print('test_{}:'.format(i), test_r)
    mean = statistics.mean(scores)
    median = statistics.median(scores)
    stdev = statistics.stdev(scores)
    return mean, median, stdev
Example #9
Source File: anomaly_detection.py From fossor with BSD 2-Clause "Simplified" License
def within_stdev_percent(values, x_stdev, percent_threshold, min_values=100):
    '''Return True if percent_threshold of values are within x_stdev of the mean.'''
    if len(values) < min_values:
        return True
    mean = statistics.mean(values)
    stdev = statistics.stdev(values)
    found = []
    for v in values:
        diff = abs(mean - v)
        if diff <= (stdev * x_stdev):
            found.append(v)
    percent_found = len(found) / len(values)
    result = percent_found > percent_threshold
    log.debug(f"Within {x_stdev} sigma check was {result}. {percent_found:.2f}%/{percent_threshold:.2f}% within stdev*{x_stdev}. "
              f"Mean: {mean:.2f}. Stdev: {stdev:.2f}. Acceptable range was: {mean - stdev * x_stdev:.2f} - {mean + stdev * x_stdev:.2f}")
    return result
Example #10
Source File: run_a3c.py From async-rl with MIT License
def eval_performance(process_idx, make_env, model, phi, n_runs):
    assert n_runs > 1, 'Computing stdev requires at least two runs'
    scores = []
    for i in range(n_runs):
        model.reset_state()
        env = make_env(process_idx, test=True)
        obs = env.reset()
        done = False
        test_r = 0
        while not done:
            s = chainer.Variable(np.expand_dims(phi(obs), 0))
            pout, _ = model.pi_and_v(s)
            a = pout.action_indices[0]
            obs, r, done, info = env.step(a)
            test_r += r
        scores.append(test_r)
        print('test_{}:'.format(i), test_r)
    mean = statistics.mean(scores)
    median = statistics.median(scores)
    stdev = statistics.stdev(scores)
    return mean, median, stdev
Example #11
Source File: envelope_test.py From BiblioPixel with MIT License
def _random(self, env, min_mean, max_mean, min_stdev, max_stdev,
            test_count=3, sample_count=300):
    mmin, smin, mmax, smax = 100, 100, 0, 0
    for i in range(test_count):
        values = [env(0) for i in range(sample_count)]
        mean, stdev = statistics.mean(values), statistics.stdev(values)
        mmax = max(mmax, mean)
        mmin = min(mmin, mean)
        smax = max(smax, stdev)
        smin = min(smin, stdev)

    self.assertGreater(mmin, min_mean)
    self.assertLess(mmax, max_mean)
    self.assertGreater(smin, min_stdev)
    self.assertLess(smax, max_stdev)
    return mmin, mmax, smin, smax
Example #12
Source File: metrics.py From open-synthesis with GNU General Public License v3.0
def calc_disagreement(evaluations):
    """Return the disagreement level for evaluations, or None if no evaluations.

    Calculated as the max disagreement of (1) N/A and non-N/A responses and (2) non-N/A evaluations

    :param evaluations: an iterable of Eval
    """
    if evaluations:
        na_it, rated_it = partition(lambda x: x is not Eval.not_applicable, evaluations)
        na_votes = list(na_it)
        rated_votes = list(rated_it)
        # Here we use the sample standard deviation because we consider the evaluations are a sample of all the
        # evaluations that could be given.
        # Not clear the best way to make the N/A disagreement comparable to the evaluation disagreement calculation
        na_disagreement = (
            statistics.stdev(([0] * len(na_votes)) + ([1] * len(rated_votes)))
            if len(na_votes) + len(rated_votes) > 1
            else 0.0)
        rated_disagreement = (
            statistics.stdev([v.value for v in rated_votes])
            if len(rated_votes) > 1
            else 0.0)
        return max(na_disagreement, rated_disagreement)
    else:
        return None
Example #13
Source File: evaluator.py From chainerrl with MIT License
def evaluate_and_update_max_score(self, t, episodes, env, agent):
    eval_stats = eval_performance(
        env, agent, self.n_steps, self.n_episodes,
        max_episode_len=self.max_episode_len, logger=self.logger)
    elapsed = time.time() - self.start_time
    custom_values = tuple(tup[1] for tup in agent.get_statistics())
    mean = eval_stats['mean']
    values = (t, episodes, elapsed, mean,
              eval_stats['median'], eval_stats['stdev'],
              eval_stats['max'], eval_stats['min']) + custom_values
    record_stats(self.outdir, values)
    with self._max_score.get_lock():
        if mean > self._max_score.value:
            self.logger.info('The best score is updated %s -> %s',
                             self._max_score.value, mean)
            self._max_score.value = mean
            if self.save_best_so_far_agent:
                save_agent(agent, "best", self.outdir, self.logger)
    return mean
Example #14
Source File: evaluator.py From chainerrl with MIT License
def evaluate_and_update_max_score(self, t, episodes):
    eval_stats = eval_performance(
        self.env, self.agent, self.n_steps, self.n_episodes,
        max_episode_len=self.max_episode_len, logger=self.logger)
    elapsed = time.time() - self.start_time
    custom_values = tuple(tup[1] for tup in self.agent.get_statistics())
    mean = eval_stats['mean']
    values = (t, episodes, elapsed, mean,
              eval_stats['median'], eval_stats['stdev'],
              eval_stats['max'], eval_stats['min']) + custom_values
    record_stats(self.outdir, values)
    if mean > self.max_score:
        self.logger.info('The best score is updated %s -> %s',
                         self.max_score, mean)
        self.max_score = mean
        if self.save_best_so_far_agent:
            save_agent(self.agent, "best", self.outdir, self.logger)
    return mean
Example #15
Source File: evaluator.py From marLo with MIT License
def evaluate_and_update_max_score(self, t, episodes):
    eval_stats = eval_performance(
        self.env, self.agent, self.n_runs,
        max_episode_len=self.max_episode_len, explorer=self.explorer,
        logger=self.logger)
    elapsed = time.time() - self.start_time
    custom_values = tuple(tup[1] for tup in self.agent.get_statistics())
    mean = eval_stats['mean']
    values = (t, episodes, elapsed, mean,
              eval_stats['median'], eval_stats['stdev'],
              eval_stats['max'], eval_stats['min']) + custom_values
    record_stats(self.outdir, values)
    if mean > self.max_score:
        self.logger.info('The best score is updated %s -> %s',
                         self.max_score, mean)
        self.max_score = mean
        if self.save_best_so_far_agent:
            save_agent(self.agent, t, self.outdir, self.logger)
    return mean
Example #16
Source File: stats.py From Turing with MIT License
def stand_dev_sample(lst):
    return statistics.stdev(lst)
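As the name stand_dev_sample suggests, statistics.stdev() computes the sample standard deviation (n - 1 in the denominator); its population counterpart is statistics.pstdev(). A quick sketch of the difference:

import statistics

data = [2, 4, 4, 4, 5, 5, 7, 9]
print(statistics.stdev(data))   # sample stdev, ~2.138 (divides by n - 1)
print(statistics.pstdev(data))  # population stdev, 2.0 (divides by n)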
Example #17
Source File: translatewiki.py From editquality with MIT License
def process_unicode_stats(words):
    code_points = [ord(c) for w in words for c in w]
    while len(code_points) < 2:
        code_points.append(ord("-"))
    return (statistics.mean(code_points),
            statistics.median(code_points),
            statistics.stdev(code_points))
Example #18
Source File: log_analyzer.py From randovania with GNU General Public License v3.0
def calculate_stddev(pickup_count: Dict[str, int], item_counts: Dict[str, float]) -> float:
    balanced_freq = {
        item: count / pickup_count[item]
        for item, count in item_counts.items()
    }
    return stdev(balanced_freq.values())
Example #19
Source File: test_clock_AsyncTempoClock.py From supriya with MIT License
def calculate_skew(store):
    skews = [
        abs(current_moment.seconds - desired_moment.seconds)
        for current_moment, desired_moment, event in store
    ]
    return {
        "min": min(skews),
        "mean": statistics.mean(skews),
        "median": statistics.median(skews),
        "stdev": statistics.stdev(skews),
        "max": max(skews),
    }
Example #20
Source File: test_statistics.py From mpyc with MIT License
def test_statistics_error(self):
    self.assertRaises(statistics.StatisticsError, mean, [])
    self.assertRaises(statistics.StatisticsError, variance, [0])
    self.assertRaises(statistics.StatisticsError, stdev, [0])
    self.assertRaises(statistics.StatisticsError, pvariance, [])
    self.assertRaises(statistics.StatisticsError, pstdev, [])
    self.assertRaises(statistics.StatisticsError, mode, [])
    self.assertRaises(statistics.StatisticsError, median, [])
Example #21
Source File: measurement_statistics.py From Offline-MapMatching with GNU General Public License v3.0
def getStandardDeviation(self):
    return statistics.stdev(self.measurments)
Example #22
Source File: gene_compare.py From collaboration with GNU General Public License v3.0
def add_gausian_lines(self, column_pos, max_value, min_value, values):
    for gene in values:
        for category in values[gene]:
            gene_values = values[gene][category]
            if len(gene_values) > 1:
                stddev = statistics.stdev(gene_values)
                median = statistics.median(gene_values)
                colour = self.colour_helper.get_category_colour(category)
                scale_x = 1
                scale_y = float(self.plottable_y) / (max_value - min_value)
                gausian_curve_path = self.calculate_gausian_curve(
                    pos=column_pos[gene], height=30, stddev=stddev,
                    scale_x=scale_x, scale_y=scale_y, horizontal=False,
                    median=median, max_value=max_value, min_value=min_value)
                self.plot.add(Path(
                    stroke=colour, stroke_width=2, stroke_linecap='round',
                    stroke_opacity=0.5, fill=colour, fill_opacity=0.1,
                    d=gausian_curve_path))
Example #23
Source File: perf_measure.py From coveragepy with Apache License 2.0
def stress_test(self):
    # For checking the overhead for each component:
    def time_thing(thing):
        per_thing = []
        pct_thing = []
        for _ in range(self.runs):
            for n in range(self.numlo, self.numhi+1, self.step):
                kwargs = {
                    "file_count": self.fixed,
                    "call_count": self.fixed,
                    "line_count": self.fixed,
                }
                kwargs[thing+"_count"] = n
                res = self._compute_overhead(**kwargs)
                per_thing.append(res.overhead / getattr(res, "{}s".format(thing)))
                pct_thing.append(res.covered / res.baseline * 100)

        out = "Per {}: ".format(thing)
        out += "mean = {:9.3f}us, stddev = {:8.3f}us, ".format(
            statistics.mean(per_thing)*1e6, statistics.stdev(per_thing)*1e6
        )
        out += "min = {:9.3f}us, ".format(min(per_thing)*1e6)
        out += "pct = {:6.1f}%, stddev = {:6.1f}%".format(
            statistics.mean(pct_thing), statistics.stdev(pct_thing)
        )
        print(out)

    time_thing("file")
    time_thing("call")
    time_thing("line")
Example #24
Source File: profiler.py From deoplete-jedi with MIT License
def profile(func):
    name = func.__name__
    samples = queue.deque(maxlen=5)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.is_debug_enabled:
            return func(self, *args, **kwargs)

        start = clock()
        ret = func(self, *args, **kwargs)
        n = tfloat(clock() - start)

        if len(samples) < 2:
            m = 0
            d = 0
            n.color = 36
        else:
            m = mean(samples)
            # Truthiness guard: stdev is presumably falsy here when its
            # import failed at module load time.
            if stdev:
                d = tfloat(stdev(samples))
            else:
                d = 0

            if n <= m + d:
                n.color = 32
            elif n > m + d * 2:
                n.color = 31
            else:
                n.color = 33

        samples.append(n)
        self.info('\x1b[34m%s\x1b[m t = %s, \u00b5 = %s, \u03c3 = %s)',
                  name, n, m, d)
        return ret

    return wrapper
Example #25
Source File: pytorch_utils.py From DensePoint with MIT License
def print_metrics(self):
    for name, samples in self.metrics.items():
        xbar = stats.mean(samples)
        sx = stats.stdev(samples, xbar)
        tstar = student_t.ppf(1.0 - 0.025, len(samples) - 1)
        margin_of_error = tstar * sx / sqrt(len(samples))
        print("{}: {} +/- {}".format(name, xbar, margin_of_error))
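For reference, a self-contained sketch of the same margin-of-error computation on hypothetical samples. It assumes student_t above refers to SciPy's Student's t distribution (scipy.stats.t), the usual source of the two-sided 95% critical value:

from math import sqrt
import statistics as stats
from scipy.stats import t as student_t  # assumption: the `student_t` used above

samples = [12.1, 11.8, 12.5, 12.0, 11.9]  # hypothetical measurements
xbar = stats.mean(samples)
sx = stats.stdev(samples, xbar)  # sample stdev, reusing the precomputed mean
tstar = student_t.ppf(1.0 - 0.025, len(samples) - 1)  # 95% two-sided critical value
margin_of_error = tstar * sx / sqrt(len(samples))
print("{:.3f} +/- {:.3f}".format(xbar, margin_of_error))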
Example #26
Source File: joystick.py From FlowState with GNU General Public License v3.0
def isSettled():
    if not own['settled']:
        logic.setTimeScale(0.001)
        if(flowState.getGameMode()!=flowState.GAME_MODE_MULTIPLAYER):
            logic.isSettled = False
            fps = logic.getAverageFrameRate()
            avgFPSList = own['settleFrameRates']
            avgFPSList.append(fps)
            deviation = 100
            if(len(avgFPSList)>1):
                deviation = statistics.stdev(avgFPSList)
            if len(avgFPSList)>100:
                if deviation < 300:
                    settle()
                else:
                    own.setLinearVelocity([0,0,0],True)
                    own.position = own['launchPosition']
            if len(avgFPSList)>1000:
                del avgFPSList[0]
                settle()
                flowState.log("WARNING!!!: FPS did not become stable after 2000 frames. Expect physics instability...")
                flowState.log("standard deviation: "+str(deviation))
        else:
            # we are in multiplayer and should wait a fixed time
            if ((time.perf_counter()-own['settleStartTime'])>3):
                settle()
                flowState.log("settling due to time expiration in multiplayer")
    #else:
    #    #if(logic.finishedLastLap):
    #    #    logic.setTimeScale(0.001)
    #
    #    #own.setLinearVelocity([0,0,0],True)
Example #27
Source File: plot.py From cherry with Apache License 2.0
def ci95(values):
    """
    [[Source]](https://github.com/seba-1511/cherry/blob/master/cherry/plot.py)

    **Description**

    Computes the 95% confidence interval around the given values.

    **Arguments**

    * **values** (list) - List of values for which to compute the
      95% confidence interval.

    **Returns**

    * **(float, float)** The lower and upper bounds of the confidence interval.

    **Example**

    ~~~python
    from statistics import mean
    smoothed = []
    for replay in replays:
        rewards = replay.rewards.view(-1).tolist()
        y_smoothed = ch.plot.smooth(rewards)
        smoothed.append(y_smoothed)
    means = [mean(r) for r in zip(*smoothed)]
    confidences = [ch.plot.ci95(r) for r in zip(*smoothed)]
    lower_bound = [conf[0] for conf in confidences]
    upper_bound = [conf[1] for conf in confidences]
    ~~~
    """
    mu = mean(values)
    sigma = stdev(values, xbar=mu)
    N = len(values)
    bound = 2.0 * sigma / math.sqrt(N)
    lower = mu - bound
    upper = mu + bound
    return lower, upper
Example #28
Source File: benchmark_base.py From submission-criteria with Apache License 2.0
def format_stats(times: list, unit: str) -> str:
    return 'median: %.2f%s, mean: %.2f%s, stdev: %.2f, max: %.2f%s, min: %.2f%s' % (
        statistics.median(times), unit,
        statistics.mean(times), unit,
        statistics.stdev(times),
        max(times), unit,
        min(times), unit)
Example #29
Source File: runme.py From icontract with MIT License
def main() -> None:
    """Execute the main routine."""
    modules = [
        "functions_100_with_no_contract",
        "functions_100_with_1_contract",
        "functions_100_with_5_contracts",
        "functions_100_with_10_contracts",
        "functions_100_with_1_disabled_contract",
        "functions_100_with_5_disabled_contracts",
        "functions_100_with_10_disabled_contracts",
        "classes_100_with_no_invariant",
        "classes_100_with_1_invariant",
        "classes_100_with_5_invariants",
        "classes_100_with_10_invariants",
        "classes_100_with_1_disabled_invariant",
        "classes_100_with_5_disabled_invariants",
        "classes_100_with_10_disabled_invariants",
    ]

    for a_module in modules:
        durations = []  # type: List[float]
        for i in range(0, 10):
            duration = float(
                subprocess.check_output(
                    ["./measure.py", "--module", a_module],
                    cwd=os.path.dirname(__file__)).strip())
            durations.append(duration)

        print("Duration to import the module {} (in milliseconds): {:.2f} ± {:.2f}".format(
            a_module,
            statistics.mean(durations) * 10e3,
            statistics.stdev(durations) * 10e3))
Example #30
Source File: profiler.py From deoplete-go with MIT License
def profile(func):
    name = func.__name__
    samples = queue.deque(maxlen=5)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.debug_enabled:
            return func(self, *args, **kwargs)

        start = clock()
        ret = func(self, *args, **kwargs)
        n = tfloat(clock() - start)

        if len(samples) < 2:
            m = 0
            d = 0
            n.color = 36
        else:
            m = mean(samples)
            # Truthiness guard: stdev is presumably falsy here when its
            # import failed at module load time.
            if stdev:
                d = tfloat(stdev(samples))
            else:
                d = 0

            if n <= m + d:
                n.color = 32
            elif n > m + d * 2:
                n.color = 31
            else:
                n.color = 33

        samples.append(n)
        self.info("\x1b[34m%s\x1b[m t = %s, \u00b5 = %s, \u03c3 = %s)",
                  name, n, m, d)
        return ret

    return wrapper