Python statistics.median() Examples

The following are 30 code examples of statistics.median(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the statistics module.
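Before looking at the project examples, here is a minimal, self-contained sketch of the function's basic behavior: with an odd number of values it returns the middle element of the sorted data, and with an even number it returns the average of the two middle elements (the input does not need to be pre-sorted).

import statistics

print(statistics.median([7, 1, 3]))     # 3 -> middle value of the sorted data
print(statistics.median([7, 1, 3, 5]))  # 4.0 -> mean of the two middle values
# statistics.median([]) raises statistics.StatisticsError ("no median for empty data")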
Example #1
Source File: network_info.py    From lndmanage with MIT License
def node_info_basic(self, node_pub_key):
        node_info = self.node.get_node_info(node_pub_key)
        # calculate mean and median channel fees and capacities

        base_fees = []
        fee_rates_milli_msat = []
        capacities = []
        for c in node_info['channels']:
            # Determine which policy to look at.
            if node_pub_key == c.node1_pub:
                policy = c.node1_policy
            else:
                policy = c.node2_policy
            base_fees.append(policy.fee_base_msat)
            fee_rates_milli_msat.append(policy.fee_rate_milli_msat)
            capacities.append(c.capacity)

        node_info['mean_base_fee'] = int(mean(base_fees))
        node_info['median_base_fee'] = int(median(base_fees))
        node_info['mean_fee_rate'] = round(mean(fee_rates_milli_msat) / 1E6, 6)
        node_info['median_fee_rate'] = round(median(fee_rates_milli_msat) / 1E6, 6)
        node_info['mean_capacity'] = int(mean(capacities))
        node_info['median_capacity'] = int(median(capacities))

        return node_info 
Example #2
Source File: getmetrics_sar.py    From InsightAgent with Apache License 2.0
def transpose_metrics():
    """ flatten data up to the timestamp"""
    for timestamp in track['current_dict'].keys():
        track['line_count'] += 1
        new_row = dict()
        new_row['timestamp'] = timestamp
        for key in track['current_dict'][timestamp]:
            value = track['current_dict'][timestamp][key]
            if '|' in value:
                value = statistics.median(map(lambda v: float(v), value.split('|')))
            new_row[key] = str(value)
        track['current_row'].append(new_row)


################################
# Functions to send data to IF #
################################ 
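This InsightAgent helper (repeated with small variations in Examples #3, #7 and #9 below) stores multiple samples for one timestamp as a pipe-joined string and uses statistics.median() to collapse them into a single value. A standalone sketch of that pattern, with hypothetical data:

import statistics

value = "12.5|13.0|14.5"  # hypothetical pipe-joined samples for one timestamp
if '|' in value:
    value = statistics.median(map(float, value.split('|')))
print(value)  # 13.0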
Example #3
Source File: getmetrics_zipkin.py    From InsightAgent with Apache License 2.0
def transpose_metrics():
    """ builds a flatten data up to the timestamp"""
    for timestamp in track['current_dict'].keys():
        new_row = dict()
        new_row['timestamp'] = timestamp
        for key in track['current_dict'][timestamp]:
            value = track['current_dict'][timestamp][key]
            if '|' in value:
                value = median(map(lambda v: int(v), value.split('|')))
            new_row[key] = str(value)
        track['current_row'].append(new_row)


################################
# Functions to send data to IF #
################################ 
Example #4
Source File: evaluator.py    From chainerrl with MIT License
def evaluate_and_update_max_score(self, t, episodes):
        eval_stats = eval_performance(
            self.env, self.agent, self.n_steps, self.n_episodes,
            max_episode_len=self.max_episode_len,
            logger=self.logger)
        elapsed = time.time() - self.start_time
        custom_values = tuple(tup[1] for tup in self.agent.get_statistics())
        mean = eval_stats['mean']
        values = (t,
                  episodes,
                  elapsed,
                  mean,
                  eval_stats['median'],
                  eval_stats['stdev'],
                  eval_stats['max'],
                  eval_stats['min']) + custom_values
        record_stats(self.outdir, values)
        if mean > self.max_score:
            self.logger.info('The best score is updated %s -> %s',
                             self.max_score, mean)
            self.max_score = mean
            if self.save_best_so_far_agent:
                save_agent(self.agent, "best", self.outdir, self.logger)
        return mean 
Example #5
Source File: evaluator.py    From chainerrl with MIT License
def evaluate_and_update_max_score(self, t, episodes, env, agent):
        eval_stats = eval_performance(
            env, agent, self.n_steps, self.n_episodes,
            max_episode_len=self.max_episode_len,
            logger=self.logger)
        elapsed = time.time() - self.start_time
        custom_values = tuple(tup[1] for tup in agent.get_statistics())
        mean = eval_stats['mean']
        values = (t,
                  episodes,
                  elapsed,
                  mean,
                  eval_stats['median'],
                  eval_stats['stdev'],
                  eval_stats['max'],
                  eval_stats['min']) + custom_values
        record_stats(self.outdir, values)
        with self._max_score.get_lock():
            if mean > self._max_score.value:
                self.logger.info('The best score is updated %s -> %s',
                                 self._max_score.value, mean)
                self._max_score.value = mean
                if self.save_best_so_far_agent:
                    save_agent(agent, "best", self.outdir, self.logger)
        return mean 
Example #6
Source File: metrics.py    From rally with Apache License 2.0
def summary_stats(self, metric_name, task_name):
        mean = self.store.get_mean(metric_name, task=task_name, sample_type=SampleType.Normal)
        median = self.store.get_median(metric_name, task=task_name, sample_type=SampleType.Normal)
        unit = self.store.get_unit(metric_name, task=task_name)
        stats = self.store.get_stats(metric_name, task=task_name, sample_type=SampleType.Normal)
        if median and stats:
            return {
                "min": stats["min"],
                "mean": mean,
                "median": median,
                "max": stats["max"],
                "unit": unit
            }
        else:
            return {
                "min": None,
                "median": None,
                "max": None,
                "unit": unit
            } 
Example #7
Source File: getmessages_prometheus.py    From InsightAgent with Apache License 2.0
def transpose_metrics():
    """ flatten data up to the timestamp"""
    for timestamp in track['current_dict'].keys():
        logger.debug(timestamp)
        track['line_count'] += 1
        new_row = dict()
        new_row['timestamp'] = timestamp
        for key in track['current_dict'][timestamp]:
            value = track['current_dict'][timestamp][key]
            if '|' in value:
                value = statistics.median(map(lambda v: float(v), value.split('|')))
            new_row[key] = str(value)
        track['current_row'].append(new_row)


################################
# Functions to send data to IF #
################################ 
Example #8
Source File: run_a3c.py    From async-rl with MIT License
def eval_performance(process_idx, make_env, model, phi, n_runs):
    assert n_runs > 1, 'Computing stdev requires at least two runs'
    scores = []
    for i in range(n_runs):
        model.reset_state()
        env = make_env(process_idx, test=True)
        obs = env.reset()
        done = False
        test_r = 0
        while not done:
            s = chainer.Variable(np.expand_dims(phi(obs), 0))
            pout, _ = model.pi_and_v(s)
            a = pout.action_indices[0]
            obs, r, done, info = env.step(a)
            test_r += r
        scores.append(test_r)
        print('test_{}:'.format(i), test_r)
    mean = statistics.mean(scores)
    median = statistics.median(scores)
    stdev = statistics.stdev(scores)
    return mean, median, stdev 
Example #9
Source File: getlogs_k8s.py    From InsightAgent with Apache License 2.0
def transpose_metrics():
    """ flatten data up to the timestamp"""
    for timestamp in track['current_dict'].keys():
        track['line_count'] += 1
        new_row = dict()
        new_row['timestamp'] = timestamp
        for key in track['current_dict'][timestamp]:
            value = track['current_dict'][timestamp][key]
            if '|' in value:
                value = statistics.median(map(lambda v: float(v), value.split('|')))
            new_row[key] = str(value)
        track['current_row'].append(new_row)


################################
# Functions to send data to IF #
################################ 
Example #10
Source File: a3c_ale.py    From async-rl with MIT License
def eval_performance(rom, p_func, n_runs):
    assert n_runs > 1, 'Computing stdev requires at least two runs'
    scores = []
    for i in range(n_runs):
        env = ale.ALE(rom, treat_life_lost_as_terminal=False)
        test_r = 0
        while not env.is_terminal:
            s = chainer.Variable(np.expand_dims(dqn_phi(env.state), 0))
            pout = p_func(s)
            a = pout.action_indices[0]
            test_r += env.receive_action(a)
        scores.append(test_r)
        print('test_{}:'.format(i), test_r)
    mean = statistics.mean(scores)
    median = statistics.median(scores)
    stdev = statistics.stdev(scores)
    return mean, median, stdev 
Example #11
Source File: describe.py    From cloudtools with MIT License
def get_partitions_info_str(j):
    partitions = j['components']['partition_counts']['counts']
    partitions_info = {
                          'Partitions': len(partitions),
                          'Rows': sum(partitions),
                          'Empty partitions': len([p for p in partitions if p == 0])
                      }
    if partitions_info['Partitions'] > 1:
        partitions_info.update({
            'Min(rows/partition)': min(partitions),
            'Max(rows/partition)': max(partitions),
            'Median(rows/partition)': median(partitions),
            'Mean(rows/partition)': int(mean(partitions)),
            'StdDev(rows/partition)': int(stdev(partitions))
        })


    return "\n{}".format(IDENT).join(['{}: {}'.format(k, v) for k, v in partitions_info.items()]) 
Example #12
Source File: test_random_sampling.py    From pdsa with MIT License
def test_median_and_rank():
    error = 0.01
    rs = RandomSampling.create_from_error(error)

    print(rs)

    random.seed(42)
    num_of_elements = 100000

    dataset = []
    for i in range(num_of_elements):
        element = random.randrange(0, 16)
        dataset.append(element)
        rs.add(element)

    exact_median = int(median(dataset))

    approx_rank = rs.inverse_quantile_query(exact_median)
    approx_median = rs.quantile_query(0.5)

    rank_lower_boundary = (0.5 - error) * num_of_elements
    rank_upper_boundary = (0.5 + error) * num_of_elements

    assert rank_lower_boundary <= approx_rank <= rank_upper_boundary
    assert approx_median == exact_median 
Example #13
Source File: evaluator.py    From marLo with MIT License
def evaluate_and_update_max_score(self, t, episodes):
        eval_stats = eval_performance(
            self.env, self.agent, self.n_runs,
            max_episode_len=self.max_episode_len, explorer=self.explorer,
            logger=self.logger)
        elapsed = time.time() - self.start_time
        custom_values = tuple(tup[1] for tup in self.agent.get_statistics())
        mean = eval_stats['mean']
        values = (t,
                  episodes,
                  elapsed,
                  mean,
                  eval_stats['median'],
                  eval_stats['stdev'],
                  eval_stats['max'],
                  eval_stats['min']) + custom_values
        record_stats(self.outdir, values)
        if mean > self.max_score:
            self.logger.info('The best score is updated %s -> %s',
                             self.max_score, mean)
            self.max_score = mean
            if self.save_best_so_far_agent:
                save_agent(self.agent, t, self.outdir, self.logger)
        return mean 
Example #14
Source File: foliummap.py    From msticpy with MIT License
def get_center_ip_entities(
    ip_entities: Iterable[IpAddress], mode: str = "median"
) -> Tuple[float, float]:
    """
    Return the geographical center of the IP address locations.

    Parameters
    ----------
    ip_entities : Iterable[IpAddress]
        IpAddress entities with location information
    mode : str, optional
        The averaging method to use, by default "median".
        "median" and "mean" are the supported values.

    Returns
    -------
    Tuple[Union[int, float], Union[int, float]]
        Tuple of latitude, longitude

    """
    ip_locs_longs = _extract_locs_ip_entities(ip_entities)
    return get_center_geo_locs(ip_locs_longs, mode=mode) 
Example #15
Source File: foliummap.py    From msticpy with MIT License
def get_center_geo_locs(
    loc_entities: Iterable[GeoLocation], mode: str = "median"
) -> Tuple[float, float]:
    """
    Return the geographical center of the geo locations.

    Parameters
    ----------
    loc_entities : Iterable[GeoLocation]
        GeoLocation entities with location information
    mode : str, optional
        The averaging method to use, by default "median".
        "median" and "mean" are the supported values.

    Returns
    -------
    Tuple[Union[int, float], Union[int, float]]
        Tuple of latitude, longitude

    """
    lat_longs = _extract_coords_loc_entities(loc_entities)
    return _get_center_coords(lat_longs, mode=mode) 
Example #16
Source File: foliummap.py    From msticpy with MIT License
def _get_center_coords(
    locations: Iterable[Tuple[float, float]], mode: str = "median"
) -> Tuple[float, float]:
    """Return the center (median) of the coordinates."""
    if not locations:
        return 0, 0
    locs = list(locations)
    if mode == "median":
        try:
            return (
                stats.median([loc[0] for loc in locs if not math.isnan(loc[0])]),
                stats.median([loc[1] for loc in locs if not math.isnan(loc[1])]),
            )
        except stats.StatisticsError:
            pass
    return (
        stats.mean([loc[0] for loc in locs if not math.isnan(loc[0])]),
        stats.mean([loc[1] for loc in locs if not math.isnan(loc[1])]),
    ) 
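A note on the try/except above: statistics.median() (stats is presumably the standard-library statistics module) raises StatisticsError when it receives no data, which can happen here if every coordinate is filtered out as NaN; the function then falls back to the mean. A minimal illustration of the error:

import statistics as stats

try:
    stats.median([])
except stats.StatisticsError as err:
    print(err)  # no median for empty data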
Example #17
Source File: http_api_stress_test.py    From dp-agent with Apache License 2.0
def run_users(url, payload, mnu, mxu):
    payload_len = len(payload)
    async with aiohttp.ClientSession() as session:
        for i in range(mnu, mxu + 1):
            tasks = []
            for _ in range(0, i):
                user_id = uuid.uuid4().hex
                tasks.append(asyncio.ensure_future(perform_test_dialogue(session, url, user_id, payload)))
            test_start_time = time()
            responses = await asyncio.gather(*tasks)
            test_time = time() - test_start_time
            times = []
            for resp in responses:
                times.extend(resp)

            print(f'test No {i} finished: {max(times)} {min(times)} {mean(times)} {median(times)} '
                  f'total_time {test_time} msgs {i*payload_len} mean_rps {(i*payload_len)/test_time}') 
Example #18
Source File: utils.py    From ee-outliers with GNU General Public License v3.0
def get_mad_decision_frontier(values_array, trigger_sensitivity, trigger_on):
    """
    Compute median decision frontier

    :param values_array: list of values used to make the computation
    :param trigger_sensitivity: sensitivity
    :param trigger_on: high or low
    :return: the decision frontier
    """
    mad = np.nanmedian(np.absolute(values_array - np.nanmedian(values_array, 0)), 0)  # median absolute deviation

    if trigger_on == "high":
        decision_frontier = np.nanmedian(values_array) + trigger_sensitivity * mad

    elif trigger_on == "low":
        decision_frontier = np.nanmedian(values_array) - trigger_sensitivity * mad
    else:
        raise ValueError("Unexpected trigger condition " + trigger_on + ", could not calculate decision frontier")

    return decision_frontier 
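For context, the median absolute deviation (MAD) used above is itself a median: the median of the absolute distances from the data's own median. A small usage sketch, assuming numpy is imported as np and the function above is in scope (the values are hypothetical):

import numpy as np

values = np.array([9.0, 10.0, 10.0, 11.0, 30.0])  # clustered samples with one outlier
frontier = get_mad_decision_frontier(values, trigger_sensitivity=3, trigger_on="high")
print(frontier)  # 13.0 -> median 10.0 plus 3 times the MAD of 1.0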
Example #19
Source File: test_statistics.py    From ironpython3 with Apache License 2.0
def test_even_number_repeated(self):
        # Test median.grouped with repeated median values.
        data = [5, 10, 10, 15, 20, 20, 20, 25, 25, 30]
        assert len(data)%2 == 0
        self.assertApproxEqual(self.func(data, 5), 19.16666667, tol=1e-8)
        #---
        data = [2, 3, 4, 4, 4, 5]
        assert len(data)%2 == 0
        self.assertApproxEqual(self.func(data), 3.83333333, tol=1e-8)
        #---
        data = [2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 6, 6]
        assert len(data)%2 == 0
        self.assertEqual(self.func(data), 4.5)
        #---
        data = [3, 4, 4, 4, 5, 5, 5, 5, 6, 6]
        assert len(data)%2 == 0
        self.assertEqual(self.func(data), 4.75) 
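These grouped-median tests (and the Fluid-Designer copies in Examples #21 and #22 below) appear to exercise statistics.median_grouped(), with self.func bound to it. median_grouped() treats each data point as the midpoint of a class interval and interpolates within the interval that contains the median. A short standalone check against two of the expected values above:

import statistics

print(statistics.median_grouped([2, 3, 4, 4, 4, 5]))                          # 3.8333... (interval defaults to 1)
print(statistics.median_grouped([5, 10, 10, 15, 20, 20, 20, 25, 25, 30], 5))  # 19.1666...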
Example #20
Source File: test_statistics.py    From ironpython3 with Apache License 2.0
def test_odd_number_repeated(self):
        # Test median.grouped with repeated median values.
        data = [12, 13, 14, 14, 14, 15, 15]
        assert len(data)%2 == 1
        self.assertEqual(self.func(data), 14)
        #---
        data = [12, 13, 14, 14, 14, 14, 15]
        assert len(data)%2 == 1
        self.assertEqual(self.func(data), 13.875)
        #---
        data = [5, 10, 10, 15, 20, 20, 20, 20, 25, 25, 30]
        assert len(data)%2 == 1
        self.assertEqual(self.func(data, 5), 19.375)
        #---
        data = [16, 18, 18, 18, 18, 20, 20, 20, 22, 22, 22, 24, 24, 26, 28]
        assert len(data)%2 == 1
        self.assertApproxEqual(self.func(data, 2), 20.66666667, tol=1e-8) 
Example #21
Source File: test_statistics.py    From Fluid-Designer with GNU General Public License v3.0
def test_even_number_repeated(self):
        # Test median.grouped with repeated median values.
        data = [5, 10, 10, 15, 20, 20, 20, 25, 25, 30]
        assert len(data)%2 == 0
        self.assertApproxEqual(self.func(data, 5), 19.16666667, tol=1e-8)
        #---
        data = [2, 3, 4, 4, 4, 5]
        assert len(data)%2 == 0
        self.assertApproxEqual(self.func(data), 3.83333333, tol=1e-8)
        #---
        data = [2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 6, 6]
        assert len(data)%2 == 0
        self.assertEqual(self.func(data), 4.5)
        #---
        data = [3, 4, 4, 4, 5, 5, 5, 5, 6, 6]
        assert len(data)%2 == 0
        self.assertEqual(self.func(data), 4.75) 
Example #22
Source File: test_statistics.py    From Fluid-Designer with GNU General Public License v3.0
def test_odd_number_repeated(self):
        # Test median.grouped with repeated median values.
        data = [12, 13, 14, 14, 14, 15, 15]
        assert len(data)%2 == 1
        self.assertEqual(self.func(data), 14)
        #---
        data = [12, 13, 14, 14, 14, 14, 15]
        assert len(data)%2 == 1
        self.assertEqual(self.func(data), 13.875)
        #---
        data = [5, 10, 10, 15, 20, 20, 20, 20, 25, 25, 30]
        assert len(data)%2 == 1
        self.assertEqual(self.func(data, 5), 19.375)
        #---
        data = [16, 18, 18, 18, 18, 20, 20, 20, 22, 22, 22, 24, 24, 26, 28]
        assert len(data)%2 == 1
        self.assertApproxEqual(self.func(data, 2), 20.66666667, tol=1e-8) 
Example #23
Source File: test_statistics.py    From Fluid-Designer with GNU General Public License v3.0
def test_odd_decimals(self):
        # Test median works with an odd number of Decimals.
        D = Decimal
        data = [D('2.5'), D('3.1'), D('4.2'), D('5.7'), D('5.8')]
        assert len(data)%2 == 1
        random.shuffle(data)
        self.assertEqual(self.func(data), D('4.2')) 
Example #24
Source File: test_statistics.py    From Fluid-Designer with GNU General Public License v3.0
def test_even_fractions(self):
        # Test median works with an even number of Fractions.
        F = Fraction
        data = [F(1, 7), F(2, 7), F(3, 7), F(4, 7), F(5, 7), F(6, 7)]
        assert len(data)%2 == 0
        random.shuffle(data)
        self.assertEqual(self.func(data), F(1, 2)) 
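As the two tests above show, statistics.median() also works with exact numeric types such as Decimal and Fraction; with an even count it returns the mean of the two middle values, preserving the exact type. A quick standalone check:

from statistics import median
from fractions import Fraction as F
from decimal import Decimal as D

print(median([F(1, 7), F(2, 7), F(3, 7), F(4, 7), F(5, 7), F(6, 7)]))  # 1/2
print(median([D('2.5'), D('3.1'), D('4.2'), D('5.7'), D('5.8')]))      # 4.2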
Example #25
Source File: gene_compare.py    From collaboration with GNU General Public License v3.0
def calculate_gausian_curve(self, pos, height, stddev, scale_x, scale_y, max_value, min_value,
                                horizontal=True, median=0, sigmas=3,
                                shift=8):
        """ path points will be at (-3stddev,0), (0,height), (3stddev,0)
            Control points at (-1stddev,0), (-1stddev,height), (1stddev,height), (1stddev,0)
         """

        curve_dist = [-sigmas * stddev, -1 * stddev, -1 * stddev, 0, stddev, stddev, sigmas * stddev]
        curve_heights = [0, 0, height, height, height, 0, 0]

        if horizontal is True:
            x_axis_values = [round((x - self.margin_left + pos) * scale_x, 2) + self.margin_left for x in curve_dist]
            # Scale Y and inverse the coordinates
            y_temp = [round(y1 * scale_y, 2) for y1 in curve_heights]
            y_axis_values = [(self.plottable_y - self.margin_top - y2) for y2 in y_temp]

        else:
            x_axis_values = [round(((pos - self.margin_left) * scale_x) + y
                                   + self.margin_left + shift, 2) for y in curve_heights]

            if self.log_graph:
                y_temp = [self.scale_y_log(cd + median, max_value, min_value) for cd in curve_dist]
            else:
                y_temp = [(cd + median) * scale_y for cd in curve_dist]
            y_axis_values = [round((self.margin_top + self.plottable_y - y2), 2) for y2 in y_temp]

        d_string = "M " + str(x_axis_values[0]) + "," + str(y_axis_values[0]) + " C"  # point 1
        for i in range(1, 7):
            d_string += " " + str(x_axis_values[i]) + "," + str(y_axis_values[i])
        return d_string 
Example #26
Source File: test_statistics.py    From ironpython3 with Apache License 2.0
def test_even_ints(self):
        # Test median with an even number of int data points.
        data = [1, 2, 3, 4, 5, 6]
        assert len(data)%2 == 0
        self.assertEqual(self.func(data), 3.5) 
Example #27
Source File: test_statistics.py    From ironpython3 with Apache License 2.0
def test_odd_ints(self):
        # Test median with an odd number of int data points.
        data = [1, 2, 3, 4, 5, 6, 9]
        assert len(data)%2 == 1
        self.assertEqual(self.func(data), 4) 
Example #28
Source File: test_statistics.py    From ironpython3 with Apache License 2.0
def test_odd_fractions(self):
        # Test median works with an odd number of Fractions.
        F = Fraction
        data = [F(1, 7), F(2, 7), F(3, 7), F(4, 7), F(5, 7)]
        assert len(data)%2 == 1
        random.shuffle(data)
        self.assertEqual(self.func(data), F(3, 7)) 
Example #29
Source File: test_statistics.py    From ironpython3 with Apache License 2.0
def test_even_fractions(self):
        # Test median works with an even number of Fractions.
        F = Fraction
        data = [F(1, 7), F(2, 7), F(3, 7), F(4, 7), F(5, 7), F(6, 7)]
        assert len(data)%2 == 0
        random.shuffle(data)
        self.assertEqual(self.func(data), F(1, 2)) 
Example #30
Source File: aritmeticlogic.py    From chepy with GNU General Public License v3.0
def median(self):
        """Calculate the median of the state
        
        Returns:
            Chepy: The Chepy object. 
        """
        assert isinstance(self.state, list), StateNotList()
        numbers = list(self.__hex_to_int(x) for x in self.state)
        self.state = statistics.median(numbers)
        return self
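A standalone sketch of the same idea without the Chepy wrapper, assuming the state holds hex-encoded values (hypothetical data; __hex_to_int is Chepy-internal):

import statistics

state = ["0a", "14", "1e"]  # hex strings for 10, 20, 30
print(statistics.median(int(x, 16) for x in state))  # 20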